From a007452f3e573ee61a3015598e607466f4f5aef4 Mon Sep 17 00:00:00 2001
From: Cassandra Heart
Date: Wed, 14 Aug 2024 04:05:02 -0500
Subject: [PATCH] fix: remove unnecessary panics, recovers, defers, also fix
 some weird things that have been tragically broken in libp2p because they
 never knew what was wrong

---
 go-libp2p-blossomsub/go.mod | 15 +-
 go-libp2p-blossomsub/go.sum | 166 +-
 go-libp2p-kad-dht/CODEOWNERS | 15 +
 go-libp2p-kad-dht/LICENSE | 21 +
 go-libp2p-kad-dht/README.md | 51 +
 go-libp2p-kad-dht/crawler/crawler.go | 266 ++
 go-libp2p-kad-dht/crawler/options.go | 72 +
 go-libp2p-kad-dht/dht.go | 942 +++++++
 go-libp2p-kad-dht/dht_bootstrap.go | 84 +
 go-libp2p-kad-dht/dht_bootstrap_test.go | 201 ++
 go-libp2p-kad-dht/dht_filters.go | 243 ++
 go-libp2p-kad-dht/dht_filters_test.go | 79 +
 go-libp2p-kad-dht/dht_net.go | 166 ++
 go-libp2p-kad-dht/dht_options.go | 358 +++
 go-libp2p-kad-dht/dht_test.go | 2478 +++++++++++++++++
 go-libp2p-kad-dht/doc.go | 3 +
 go-libp2p-kad-dht/dual/dual.go | 394 +++
 go-libp2p-kad-dht/dual/dual_test.go | 399 +++
 go-libp2p-kad-dht/events.go | 247 ++
 go-libp2p-kad-dht/ext_test.go | 48 +
 go-libp2p-kad-dht/fullrt/dht.go | 1555 +++++++++++
 go-libp2p-kad-dht/fullrt/dht_test.go | 86 +
 go-libp2p-kad-dht/fullrt/options.go | 98 +
 go-libp2p-kad-dht/go.mod | 144 +
 go-libp2p-kad-dht/go.sum | 820 ++++++
 go-libp2p-kad-dht/handlers.go | 378 +++
 go-libp2p-kad-dht/handlers_test.go | 141 +
 go-libp2p-kad-dht/internal/config/config.go | 172 ++
 go-libp2p-kad-dht/internal/config/quorum.go | 16 +
 go-libp2p-kad-dht/internal/ctx_mutex.go | 28 +
 go-libp2p-kad-dht/internal/errors.go | 5 +
 go-libp2p-kad-dht/internal/logging.go | 92 +
 go-libp2p-kad-dht/internal/logging_test.go | 76 +
 .../internal/net/message_manager.go | 387 +++
 .../internal/net/message_manager_test.go | 39 +
 go-libp2p-kad-dht/internal/testing/helper.go | 31 +
 go-libp2p-kad-dht/internal/tracing.go | 32 +
 go-libp2p-kad-dht/log_test.go | 7 +
 go-libp2p-kad-dht/lookup.go | 85 +
 go-libp2p-kad-dht/lookup_optim.go | 313 +++
 go-libp2p-kad-dht/lookup_optim_test.go | 106 +
 go-libp2p-kad-dht/metrics/metrics.go | 117 +
 go-libp2p-kad-dht/netsize/netsize.go | 284 ++
 go-libp2p-kad-dht/netsize/netsize_test.go | 44 +
 go-libp2p-kad-dht/nofile_test.go | 23 +
 go-libp2p-kad-dht/optimizations.md | 7 +
 go-libp2p-kad-dht/opts/options.go | 68 +
 go-libp2p-kad-dht/pb/Makefile | 11 +
 go-libp2p-kad-dht/pb/bytestring.go | 42 +
 go-libp2p-kad-dht/pb/dht.pb.go | 959 +++++++
 go-libp2p-kad-dht/pb/dht.proto | 72 +
 go-libp2p-kad-dht/pb/message.go | 171 ++
 go-libp2p-kad-dht/pb/message_test.go | 15 +
 go-libp2p-kad-dht/pb/protocol_messenger.go | 261 ++
 go-libp2p-kad-dht/protocol.go | 12 +
 go-libp2p-kad-dht/providers/provider_set.go | 34 +
 .../providers/providers_manager.go | 412 +++
 .../providers/providers_manager_test.go | 366 +++
 go-libp2p-kad-dht/qpeerset/qpeerset.go | 159 ++
 go-libp2p-kad-dht/qpeerset/qpeerset_test.go | 86 +
 go-libp2p-kad-dht/query.go | 556 ++++
 go-libp2p-kad-dht/query_test.go | 118 +
 go-libp2p-kad-dht/records.go | 138 +
 go-libp2p-kad-dht/records_test.go | 384 +++
 go-libp2p-kad-dht/routing.go | 694 +++++
 go-libp2p-kad-dht/routing_options.go | 21 +
 go-libp2p-kad-dht/rt_diversity_filter.go | 103 +
 go-libp2p-kad-dht/rt_diversity_filter_test.go | 155 ++
 .../rtrefresh/rt_refresh_manager.go | 372 +++
 .../rtrefresh/rt_refresh_manager_test.go | 102 +
 go-libp2p-kad-dht/subscriber_notifee.go | 141 +
 go-libp2p-kad-dht/version.json | 3 +
 .../core/canonicallog/canonicallog_test.go | 5 +-
 go-libp2p/core/peer/addrinfo.go | 22 +-
 go-libp2p/core/peer/addrinfo_test.go | 34 +-
 go-libp2p/core/peer/record.go | 2 +-
 go-libp2p/go.mod | 5 +
 go-libp2p/go.sum | 41 +-
 go-libp2p/libp2p_test.go | 26 +-
 .../p2p/discovery/backoff/backoffcache.go | 16 +-
 .../p2p/discovery/backoff/backoffconnector.go | 3 +-
 go-libp2p/p2p/discovery/mdns/mdns.go | 9 +-
 go-libp2p/p2p/discovery/routing/routing.go | 3 +-
 go-libp2p/p2p/host/autonat/autonat.go | 17 +-
 go-libp2p/p2p/host/autonat/client.go | 12 +-
 go-libp2p/p2p/host/autonat/dialpolicy.go | 14 +-
 go-libp2p/p2p/host/autonat/metrics.go | 6 +-
 go-libp2p/p2p/host/autonat/notify.go | 3 +-
 go-libp2p/p2p/host/autonat/svc.go | 39 +-
 go-libp2p/p2p/host/autorelay/addrsplosion.go | 16 +-
 .../p2p/host/autorelay/addrsplosion_test.go | 3 +-
 go-libp2p/p2p/host/autorelay/autorelay.go | 12 +-
 .../p2p/host/autorelay/autorelay_test.go | 8 +-
 go-libp2p/p2p/host/autorelay/metrics.go | 14 +-
 go-libp2p/p2p/host/autorelay/options.go | 2 +-
 go-libp2p/p2p/host/autorelay/relay_finder.go | 46 +-
 go-libp2p/p2p/host/basic/basic_host.go | 44 +-
 go-libp2p/p2p/host/basic/basic_host_test.go | 59 +-
 go-libp2p/p2p/host/basic/natmgr.go | 61 +-
 go-libp2p/p2p/host/basic/natmgr_test.go | 28 +-
 go-libp2p/p2p/host/eventbus/basic.go | 4 +-
 go-libp2p/p2p/host/eventbus/basic_metrics.go | 12 +-
 go-libp2p/p2p/host/peerstore/metrics.go | 5 +-
 .../p2p/host/peerstore/pstoreds/addr_book.go | 6 +-
 .../host/peerstore/pstoreds/addr_book_gc.go | 12 +-
 .../p2p/host/peerstore/pstoremem/addr_book.go | 56 +-
 .../p2p/host/peerstore/pstoremem/keybook.go | 5 +-
 .../p2p/host/peerstore/pstoremem/metadata.go | 8 +-
 .../p2p/host/peerstore/pstoremem/protobook.go | 16 +-
 .../host/peerstore/pstoremem/sorting_test.go | 8 +-
 .../p2p/host/resource-manager/allowlist.go | 21 +-
 .../host/resource-manager/allowlist_test.go | 23 +-
 .../p2p/host/resource-manager/conn_limiter.go | 13 +-
 go-libp2p/p2p/host/resource-manager/rcmgr.go | 92 +-
 .../p2p/host/resource-manager/rcmgr_test.go | 31 +-
 go-libp2p/p2p/host/resource-manager/scope.go | 5 +-
 go-libp2p/p2p/host/routed/routed_test.go | 7 +-
 go-libp2p/p2p/http/example_test.go | 23 +-
 go-libp2p/p2p/http/libp2phttp.go | 27 +-
 go-libp2p/p2p/http/libp2phttp_test.go | 18 +-
 go-libp2p/p2p/metricshelper/conn.go | 11 +-
 go-libp2p/p2p/net/conngater/conngater_test.go | 73 +-
 .../p2p/net/swarm/black_hole_detector.go | 6 +-
 .../p2p/net/swarm/black_hole_detector_test.go | 38 +-
 go-libp2p/p2p/net/swarm/dial_error_test.go | 6 +-
 go-libp2p/p2p/net/swarm/dial_ranker.go | 7 +-
 go-libp2p/p2p/net/swarm/dial_ranker_test.go | 60 +-
 go-libp2p/p2p/net/swarm/dial_test.go | 8 +-
 go-libp2p/p2p/net/swarm/dial_worker.go | 2 +-
 go-libp2p/p2p/net/swarm/dial_worker_test.go | 53 +-
 go-libp2p/p2p/net/swarm/limiter_test.go | 11 +-
 go-libp2p/p2p/net/swarm/swarm_addr_test.go | 19 +-
 go-libp2p/p2p/net/swarm/swarm_dial.go | 18 +-
 go-libp2p/p2p/net/swarm/swarm_dial_test.go | 59 +-
 go-libp2p/p2p/net/swarm/swarm_event_test.go | 7 +-
 go-libp2p/p2p/net/swarm/swarm_metrics.go | 12 +-
 go-libp2p/p2p/net/swarm/swarm_test.go | 8 +-
 go-libp2p/p2p/net/swarm/testing/testing.go | 6 +-
 .../p2p/protocol/circuitv2/client/dial.go | 12 +-
 .../protocol/circuitv2/client/transport.go | 2 +-
 .../p2p/protocol/circuitv2/relay/relay.go | 7 +-
 .../protocol/circuitv2/relay/relay_test.go | 3 +-
 .../p2p/protocol/holepunch/holepunch_test.go | 19 +-
 .../p2p/protocol/holepunch/holepuncher.go | 2 +-
 go-libp2p/p2p/protocol/holepunch/metrics.go | 6 +-
 .../p2p/protocol/holepunch/metrics_test.go | 15 +-
 go-libp2p/p2p/protocol/holepunch/util.go | 2 +-
 go-libp2p/p2p/protocol/identify/id.go | 11 +-
 .../p2p/protocol/identify/id_glass_test.go | 15 +-
 go-libp2p/p2p/protocol/identify/id_test.go | 19 +-
 go-libp2p/p2p/protocol/identify/obsaddr.go | 5 +-
 .../protocol/identify/obsaddr_glass_test.go | 30 +-
 .../p2p/protocol/identify/obsaddr_test.go | 178 +-
 .../p2p/protocol/identify/snapshot_test.go | 4 +-
 .../p2p/test/basichost/basic_host_test.go | 13 +-
 .../test/notifications/notification_test.go | 7 +-
 go-libp2p/p2p/test/swarm/swarm_test.go | 9 +-
 go-libp2p/p2p/test/transport/gating_test.go | 2 +-
 .../test/webtransport/webtransport_test.go | 14 +-
 .../p2p/transport/quic/cmd/lib/lib_test.go | 2 +-
 go-libp2p/p2p/transport/quic/conn_test.go | 15 +-
 go-libp2p/p2p/transport/quic/listener_test.go | 10 +-
 .../p2p/transport/quicreuse/connmgr_test.go | 21 +-
 .../p2p/transport/quicreuse/quic_multiaddr.go | 12 +-
 go-libp2p/p2p/transport/tcp/tcp_test.go | 11 +-
 go-libp2p/p2p/transport/webrtc/listener.go | 11 +-
 go-libp2p/p2p/transport/webrtc/transport.go | 17 +-
 .../p2p/transport/webrtc/transport_test.go | 47 +-
 go-libp2p/p2p/transport/websocket/addrs.go | 5 +-
 .../p2p/transport/websocket/addrs_test.go | 11 +-
 go-libp2p/p2p/transport/websocket/listener.go | 10 +-
 .../p2p/transport/websocket/websocket.go | 14 +-
 .../p2p/transport/websocket/websocket_test.go | 38 +-
 .../webtransport/cert_manager_test.go | 2 +-
 .../p2p/transport/webtransport/multiaddr.go | 17 +-
 .../transport/webtransport/multiaddr_test.go | 13 +-
 .../p2p/transport/webtransport/transport.go | 22 +-
 .../transport/webtransport/transport_test.go | 75 +-
 go-multiaddr-dns/.gitignore | 1 +
 go-multiaddr-dns/LICENSE | 21 +
 go-multiaddr-dns/README.md | 57 +
 go-multiaddr-dns/go.mod | 28 +
 go-multiaddr-dns/go.sum | 64 +
 go-multiaddr-dns/madns/main.go | 44 +
 go-multiaddr-dns/mock.go | 31 +
 go-multiaddr-dns/resolve.go | 296 ++
 go-multiaddr-dns/resolve_test.go | 335 +++
 go-multiaddr-dns/util.go | 67 +
 go-multiaddr-dns/version.json | 3 +
 go-multiaddr/.gitignore | 3 +
 go-multiaddr/LICENSE | 21 +
 go-multiaddr/README.md | 117 +
 go-multiaddr/codec.go | 181 ++
 go-multiaddr/codecov.yml | 2 +
 go-multiaddr/component.go | 184 ++
 go-multiaddr/doc.go | 35 +
 go-multiaddr/filter.go | 153 +
 go-multiaddr/filter_test.go | 202 ++
 go-multiaddr/go.mod | 27 +
 go-multiaddr/go.sum | 56 +
 go-multiaddr/interface.go | 61 +
 go-multiaddr/multiaddr.go | 258 ++
 go-multiaddr/multiaddr/main.go | 100 +
 go-multiaddr/multiaddr_test.go | 1064 +++++++
 go-multiaddr/net/convert.go | 375 +++
 go-multiaddr/net/convert_test.go | 266 ++
 go-multiaddr/net/doc.go | 5 +
 go-multiaddr/net/ip.go | 132 +
 go-multiaddr/net/ip_test.go | 60 +
 go-multiaddr/net/net.go | 430 +++
 go-multiaddr/net/net_test.go | 693 +++++
 go-multiaddr/net/private.go | 218 ++
 go-multiaddr/net/private_test.go | 95 +
 go-multiaddr/net/registry.go | 101 +
 go-multiaddr/net/registry_test.go | 42 +
 go-multiaddr/net/resolve.go | 94 +
 go-multiaddr/net/resolve_test.go | 60 +
 go-multiaddr/package.json | 23 +
 go-multiaddr/protocol.go | 102 +
 go-multiaddr/protocols.go | 371 +++
 .../FuzzNewMultiaddrBytes/0487b63847656fd4 | 2 +
 .../FuzzNewMultiaddrBytes/04a87ae2740f7195 | 2 +
 .../FuzzNewMultiaddrBytes/239d3594e0ee93bb | 2 +
 .../FuzzNewMultiaddrBytes/2ef0d600700564d4 | 2 +
 .../FuzzNewMultiaddrBytes/385d14fbb016b8c3 | 2 +
 .../FuzzNewMultiaddrBytes/511b72740453a863 | 2 +
 .../FuzzNewMultiaddrBytes/69ba454c4217999e | 2 +
 .../FuzzNewMultiaddrBytes/9f0d778549d2b28e | 2 +
 .../FuzzNewMultiaddrBytes/af9576bc28339a8d | 2 +
 .../FuzzNewMultiaddrBytes/e9317f1a3c43de50 | 2 +
 .../FuzzNewMultiaddrBytes/f1ebd17c93085805 | 2 +
 .../FuzzNewMultiaddrString/382a5bb1eff47833 | 2 +
 .../FuzzNewMultiaddrString/53eb3b6be337b1d7 | 2 +
 .../FuzzNewMultiaddrString/63891c9534054d61 | 2 +
 .../FuzzNewMultiaddrString/95a479f85dc92117 | 2 +
 .../FuzzNewMultiaddrString/9dba3b166a74fc47 | 2 +
 .../FuzzNewMultiaddrString/a2b937de623ded67 | 2 +
 .../FuzzNewMultiaddrString/bc05ef53a41e422a | 2 +
 .../FuzzNewMultiaddrString/d857c283ff1b2f2a | 2 +
 .../FuzzNewMultiaddrString/dffb2baac63c66ae | 2 +
 go-multiaddr/transcoders.go | 491 ++++
 go-multiaddr/util.go | 211 ++
 go-multiaddr/util_test.go | 151 +
 go-multiaddr/varint.go | 28 +
 go-multiaddr/version.json | 3 +
 node/go.mod | 21 +-
 node/go.sum | 88 +-
 247 files changed, 25394 insertions(+), 980 deletions(-)
 create mode 100644 go-libp2p-kad-dht/CODEOWNERS
 create mode 100644 go-libp2p-kad-dht/LICENSE
 create mode 100644 go-libp2p-kad-dht/README.md
 create mode 100644 go-libp2p-kad-dht/crawler/crawler.go
 create mode 100644 go-libp2p-kad-dht/crawler/options.go
 create mode 100644 go-libp2p-kad-dht/dht.go
 create mode 100644 go-libp2p-kad-dht/dht_bootstrap.go
 create mode 100644 go-libp2p-kad-dht/dht_bootstrap_test.go
 create mode 100644 go-libp2p-kad-dht/dht_filters.go
 create mode 100644 go-libp2p-kad-dht/dht_filters_test.go
 create mode 100644 go-libp2p-kad-dht/dht_net.go
 create mode 100644 go-libp2p-kad-dht/dht_options.go
 create mode 100644 go-libp2p-kad-dht/dht_test.go
 create mode 100644 go-libp2p-kad-dht/doc.go
 create mode 100644 go-libp2p-kad-dht/dual/dual.go
 create mode 100644 go-libp2p-kad-dht/dual/dual_test.go
 create mode 100644 go-libp2p-kad-dht/events.go
 create mode 100644 go-libp2p-kad-dht/ext_test.go
 create mode 100644 go-libp2p-kad-dht/fullrt/dht.go
 create mode 100644 go-libp2p-kad-dht/fullrt/dht_test.go
 create mode 100644 go-libp2p-kad-dht/fullrt/options.go
 create mode 100644 go-libp2p-kad-dht/go.mod
 create mode 100644 go-libp2p-kad-dht/go.sum
 create mode 100644 go-libp2p-kad-dht/handlers.go
 create mode 100644 go-libp2p-kad-dht/handlers_test.go
 create mode 100644 go-libp2p-kad-dht/internal/config/config.go
 create mode 100644 go-libp2p-kad-dht/internal/config/quorum.go
 create mode 100644 go-libp2p-kad-dht/internal/ctx_mutex.go
 create mode 100644 go-libp2p-kad-dht/internal/errors.go
 create mode 100644 go-libp2p-kad-dht/internal/logging.go
 create mode 100644 go-libp2p-kad-dht/internal/logging_test.go
 create mode 100644 go-libp2p-kad-dht/internal/net/message_manager.go
 create mode 100644 go-libp2p-kad-dht/internal/net/message_manager_test.go
 create mode 100644 go-libp2p-kad-dht/internal/testing/helper.go
 create mode 100644 go-libp2p-kad-dht/internal/tracing.go
 create mode 100644 go-libp2p-kad-dht/log_test.go
 create mode 100644 go-libp2p-kad-dht/lookup.go
 create mode 100644 go-libp2p-kad-dht/lookup_optim.go
 create mode 100644 go-libp2p-kad-dht/lookup_optim_test.go
 create mode 100644 go-libp2p-kad-dht/metrics/metrics.go
 create mode 100644 go-libp2p-kad-dht/netsize/netsize.go
 create mode 100644 go-libp2p-kad-dht/netsize/netsize_test.go
 create mode 100644 go-libp2p-kad-dht/nofile_test.go
 create mode 100644 go-libp2p-kad-dht/optimizations.md
 create mode 100644 go-libp2p-kad-dht/opts/options.go
 create mode 100644 go-libp2p-kad-dht/pb/Makefile
 create mode 100644 go-libp2p-kad-dht/pb/bytestring.go
 create mode 100644 go-libp2p-kad-dht/pb/dht.pb.go
 create mode 100644 go-libp2p-kad-dht/pb/dht.proto
 create mode 100644 go-libp2p-kad-dht/pb/message.go
 create mode 100644 go-libp2p-kad-dht/pb/message_test.go
 create mode 100644 go-libp2p-kad-dht/pb/protocol_messenger.go
 create mode 100644 go-libp2p-kad-dht/protocol.go
 create mode 100644 go-libp2p-kad-dht/providers/provider_set.go
 create mode 100644 go-libp2p-kad-dht/providers/providers_manager.go
 create mode 100644 go-libp2p-kad-dht/providers/providers_manager_test.go
 create mode 100644 go-libp2p-kad-dht/qpeerset/qpeerset.go
 create mode 100644 go-libp2p-kad-dht/qpeerset/qpeerset_test.go
 create mode 100644 go-libp2p-kad-dht/query.go
 create mode 100644 go-libp2p-kad-dht/query_test.go
 create mode 100644 go-libp2p-kad-dht/records.go
 create mode 100644 go-libp2p-kad-dht/records_test.go
 create mode 100644 go-libp2p-kad-dht/routing.go
 create mode 100644 go-libp2p-kad-dht/routing_options.go
 create mode 100644 go-libp2p-kad-dht/rt_diversity_filter.go
 create mode 100644 go-libp2p-kad-dht/rt_diversity_filter_test.go
 create mode 100644 go-libp2p-kad-dht/rtrefresh/rt_refresh_manager.go
 create mode 100644 go-libp2p-kad-dht/rtrefresh/rt_refresh_manager_test.go
 create mode 100644 go-libp2p-kad-dht/subscriber_notifee.go
 create mode 100644 go-libp2p-kad-dht/version.json
 create mode 100644 go-multiaddr-dns/.gitignore
 create mode 100644 go-multiaddr-dns/LICENSE
 create mode 100644 go-multiaddr-dns/README.md
 create mode 100644 go-multiaddr-dns/go.mod
 create mode 100644 go-multiaddr-dns/go.sum
 create mode 100644 go-multiaddr-dns/madns/main.go
 create mode 100644 go-multiaddr-dns/mock.go
 create mode 100644 go-multiaddr-dns/resolve.go
 create mode 100644 go-multiaddr-dns/resolve_test.go
 create mode 100644 go-multiaddr-dns/util.go
 create mode 100644 go-multiaddr-dns/version.json
 create mode 100644 go-multiaddr/.gitignore
 create mode 100644 go-multiaddr/LICENSE
 create mode 100644 go-multiaddr/README.md
 create mode 100644 go-multiaddr/codec.go
 create mode 100644 go-multiaddr/codecov.yml
 create mode 100644 go-multiaddr/component.go
 create mode 100644 go-multiaddr/doc.go
 create mode 100644 go-multiaddr/filter.go
 create mode 100644 go-multiaddr/filter_test.go
 create mode 100644 go-multiaddr/go.mod
 create mode 100644 go-multiaddr/go.sum
 create mode 100644 go-multiaddr/interface.go
 create mode 100644 go-multiaddr/multiaddr.go
 create mode 100644 go-multiaddr/multiaddr/main.go
 create mode 100644 go-multiaddr/multiaddr_test.go
 create mode 100644 go-multiaddr/net/convert.go
 create mode 100644 go-multiaddr/net/convert_test.go
 create mode 100644 go-multiaddr/net/doc.go
 create mode 100644 go-multiaddr/net/ip.go
 create mode 100644 go-multiaddr/net/ip_test.go
 create mode 100644 go-multiaddr/net/net.go
 create mode 100644 go-multiaddr/net/net_test.go
 create mode 100644 go-multiaddr/net/private.go
 create mode 100644 go-multiaddr/net/private_test.go
 create mode 100644 go-multiaddr/net/registry.go
 create mode 100644 go-multiaddr/net/registry_test.go
 create mode 100644 go-multiaddr/net/resolve.go
 create mode 100644 go-multiaddr/net/resolve_test.go
 create mode 100644 go-multiaddr/package.json
 create mode 100644 go-multiaddr/protocol.go
 create mode 100644 go-multiaddr/protocols.go
 create mode 100644 go-multiaddr/testdata/fuzz/FuzzNewMultiaddrBytes/0487b63847656fd4
 create mode 100644 go-multiaddr/testdata/fuzz/FuzzNewMultiaddrBytes/04a87ae2740f7195
 create mode 100644 go-multiaddr/testdata/fuzz/FuzzNewMultiaddrBytes/239d3594e0ee93bb
 create mode 100644 go-multiaddr/testdata/fuzz/FuzzNewMultiaddrBytes/2ef0d600700564d4
 create mode 100644 go-multiaddr/testdata/fuzz/FuzzNewMultiaddrBytes/385d14fbb016b8c3
 create mode 100644 go-multiaddr/testdata/fuzz/FuzzNewMultiaddrBytes/511b72740453a863
 create mode 100644 go-multiaddr/testdata/fuzz/FuzzNewMultiaddrBytes/69ba454c4217999e
 create mode 100644 go-multiaddr/testdata/fuzz/FuzzNewMultiaddrBytes/9f0d778549d2b28e
 create mode 100644 go-multiaddr/testdata/fuzz/FuzzNewMultiaddrBytes/af9576bc28339a8d
 create mode 100644 go-multiaddr/testdata/fuzz/FuzzNewMultiaddrBytes/e9317f1a3c43de50
 create mode 100644 go-multiaddr/testdata/fuzz/FuzzNewMultiaddrBytes/f1ebd17c93085805
 create mode 100644 go-multiaddr/testdata/fuzz/FuzzNewMultiaddrString/382a5bb1eff47833
 create mode 100644 go-multiaddr/testdata/fuzz/FuzzNewMultiaddrString/53eb3b6be337b1d7
 create mode 100644 go-multiaddr/testdata/fuzz/FuzzNewMultiaddrString/63891c9534054d61
 create mode 100644 go-multiaddr/testdata/fuzz/FuzzNewMultiaddrString/95a479f85dc92117
 create mode 100644 go-multiaddr/testdata/fuzz/FuzzNewMultiaddrString/9dba3b166a74fc47
 create mode 100644 go-multiaddr/testdata/fuzz/FuzzNewMultiaddrString/a2b937de623ded67
 create mode 100644 go-multiaddr/testdata/fuzz/FuzzNewMultiaddrString/bc05ef53a41e422a
 create mode 100644 go-multiaddr/testdata/fuzz/FuzzNewMultiaddrString/d857c283ff1b2f2a
 create mode 100644 go-multiaddr/testdata/fuzz/FuzzNewMultiaddrString/dffb2baac63c66ae
 create mode 100644 go-multiaddr/transcoders.go
 create mode 100644 go-multiaddr/util.go
 create mode 100644 go-multiaddr/util_test.go
 create mode 100644 go-multiaddr/varint.go
 create mode 100644 go-multiaddr/version.json

diff --git a/go-libp2p-blossomsub/go.mod b/go-libp2p-blossomsub/go.mod
index 1f5963b..4e1e655 100644
--- a/go-libp2p-blossomsub/go.mod
+++ b/go-libp2p-blossomsub/go.mod
@@ -4,17 +4,19 @@ go 1.21
 
 toolchain go1.22.4
 
+replace github.com/multiformats/go-multiaddr => ../go-multiaddr
+
+replace github.com/multiformats/go-multiaddr-dns => ../go-multiaddr-dns
+
 replace github.com/libp2p/go-libp2p => ../go-libp2p
 
 replace github.com/libp2p/go-libp2p-gostream => ../go-libp2p-gostream
 
 require (
 	github.com/benbjohnson/clock v1.3.5
-	github.com/gogo/protobuf v1.3.2
 	github.com/ipfs/go-log/v2 v2.5.1
 	github.com/libp2p/go-buffer-pool v0.1.0
 	github.com/libp2p/go-libp2p v0.35.4
-	github.com/libp2p/go-libp2p-gostream v0.6.0
 	github.com/libp2p/go-libp2p-testing v0.12.0
 	github.com/libp2p/go-msgio v0.3.0
 	github.com/multiformats/go-multiaddr v0.12.4
@@ -25,7 +27,7 @@ require (
 	github.com/beorn7/perks v1.0.1 // indirect
 	github.com/cespare/xxhash/v2 v2.2.0 // indirect
-	github.com/cloudflare/circl v1.3.3 // indirect
+	github.com/cloudflare/circl v1.3.9 // indirect
 	github.com/containerd/cgroups v1.1.0 // indirect
 	github.com/coreos/go-systemd/v22 v22.5.0 // indirect
 	github.com/davecgh/go-spew v1.1.1 // indirect
@@ -37,22 +39,19 @@ require (
 	github.com/francoispqt/gojay v1.2.13 // indirect
 	github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect
 	github.com/godbus/dbus/v5 v5.1.0 // indirect
-	github.com/golang/mock v1.6.0 // indirect
-	github.com/golang/protobuf v1.5.3 // indirect
+	github.com/gogo/protobuf v1.3.2 // indirect
 	github.com/google/gopacket v1.1.19 // indirect
 	github.com/google/pprof v0.0.0-20240207164012-fb44976bdcd5 // indirect
 	github.com/google/uuid v1.4.0 // indirect
 	github.com/gorilla/websocket v1.5.3 // indirect
 	github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
 	github.com/huin/goupnp v1.3.0 // indirect
-	github.com/iden3/go-iden3-crypto v0.0.16 // indirect
 	github.com/ipfs/go-cid v0.4.1 // indirect
 	github.com/jackpal/go-nat-pmp v1.0.2 // indirect
 	github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect
 	github.com/klauspost/compress v1.17.8 // indirect
 	github.com/klauspost/cpuid/v2 v2.2.7 // indirect
 	github.com/koron/go-ssdp v0.0.4 // indirect
-	github.com/kr/pretty v0.3.1 // indirect
 	github.com/libp2p/go-flow-metrics v0.1.0 // indirect
 	github.com/libp2p/go-libp2p-asn-util v0.4.1 // indirect
 	github.com/libp2p/go-nat v0.2.0 // indirect
@@ -61,7 +60,6 @@ require (
 	github.com/libp2p/go-yamux/v4 v4.0.1 // indirect
 	github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect
 	github.com/mattn/go-isatty v0.0.20 // indirect
-	github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
 	github.com/miekg/dns v1.1.58 // indirect
 	github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect
 	github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect
@@ -106,7 +104,6 @@ require (
 	github.com/raulk/go-watchdog v1.3.0 // indirect
 	github.com/spaolacci/murmur3 v1.1.0 // indirect
 	github.com/stretchr/testify v1.9.0 // indirect
-	go.uber.org/atomic v1.10.0 // indirect
 	go.uber.org/dig v1.17.1 // indirect
 	go.uber.org/fx v1.22.1 // indirect
 	go.uber.org/mock v0.4.0 // indirect
diff --git a/go-libp2p-blossomsub/go.sum b/go-libp2p-blossomsub/go.sum
index d9e1c38..8996280 100644
--- a/go-libp2p-blossomsub/go.sum
+++ b/go-libp2p-blossomsub/go.sum
@@ -10,7 +10,6 @@ git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGy
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
 github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
 github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
-github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A=
 github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
 github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o=
 github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
@@ -23,11 +22,9 @@ github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj
 github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs=
 github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
-github.com/cloudflare/circl v1.3.3 h1:fE/Qz0QdIGqeWfnwq0RE0R7MI51s0M2E4Ga9kq5AEMs=
-github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA=
+github.com/cloudflare/circl v1.3.9 h1:QFrlgFYf2Qpi8bSpVPK1HBvWpx16v/1TZivyo7pGuBE=
+github.com/cloudflare/circl v1.3.9/go.mod h1:PDRU+oXvdD7KCtgKxW95M5Z8BpSCJXQORiZFnBQS5QU=
 github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE=
-github.com/containerd/cgroups v1.0.4 h1:jN/mbWBEaz+T1pi5OFtnkQ+8qnmEbAr1Oo1FRm5B0dA=
-github.com/containerd/cgroups v1.0.4/go.mod h1:nLNQtsF7Sl2HxNebu77i1R0oDlhiTG+kO4JTrUzo6IA=
 github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM=
 github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw=
 github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
@@ -36,15 +33,13 @@ github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8
 github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
 github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
 github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
-github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c h1:pFUpOrbxDR6AkioZ1ySsx5yxlDQZ8stG2b88gTPxgJU=
 github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c/go.mod h1:6UhI8N9EjYm1c2odKpFpAYeR8dsBeM7PtzQhRgxRr9U=
-github.com/decred/dcrd/crypto/blake256 v1.0.0 h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK09Y2A4Xv7EE0=
-github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 h1:HbphB4TFFXpv7MNrT52FGrrgVXF1owhMVTHFZIlnvd4=
-github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0/go.mod h1:DZGJHZMqrU4JJqFAWUS2UO1+lbSKsdiOoYi9Zzey7Fc=
+github.com/decred/dcrd/crypto/blake256 v1.0.1 h1:7PltbUIQB7u/FfZ39+DGa/ShuMyJ5ilcvdfma9wOH6Y=
+github.com/decred/dcrd/crypto/blake256 v1.0.1/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo=
 github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 h1:rpfIENRNNilwHwZeG5+P150SMrnNEcHYvcCuK6dPZSg=
 github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0=
 github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
@@ -55,7 +50,6 @@ github.com/elastic/gosigar v0.12.0/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0
 github.com/elastic/gosigar v0.14.2 h1:Dg80n8cr90OZ7x+bAax/QjoW/XqTI11RmA79ZwIm9/4=
 github.com/elastic/gosigar v0.14.2/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs=
 github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
-github.com/flynn/noise v1.0.0 h1:DlTHqmzmvcEiKj+4RYo/imoswx/4r6iBlCMfVtrMXpQ=
 github.com/flynn/noise v1.1.0 h1:KjPQoQCEFdZDiP03phOvGi11+SVVhBG2wOWAorLsstg=
 github.com/flynn/noise v1.1.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag=
 github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk=
@@ -64,9 +58,8 @@ github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMo
 github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
 github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
 github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
-github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0=
-github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I=
-github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
+github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY=
+github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
 github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
 github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
 github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
@@ -81,28 +74,22 @@ github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfU
 github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E=
 github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
 github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc=
-github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
 github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
-github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
-github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
-github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
 github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
 github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
 github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
 github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
+github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
+github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
 github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
 github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
 github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8=
 github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo=
 github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
 github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
-github.com/google/pprof v0.0.0-20221203041831-ce31453925ec h1:fR20TYVVwhK4O7r7y+McjRYyaTH6/vjwJOajE+XhlzM=
-github.com/google/pprof v0.0.0-20221203041831-ce31453925ec/go.mod h1:dDKJzRmX4S37WGHujM7tX//fmj1uioxKzKxz3lo4HJo=
 github.com/google/pprof v0.0.0-20240207164012-fb44976bdcd5 h1:E/LAvt58di64hlYjx7AsNS6C/ysHWYo+2qPCZKTQhRo=
 github.com/google/pprof v0.0.0-20240207164012-fb44976bdcd5/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik=
 github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
@@ -115,16 +102,11 @@ github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aN
 github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
 github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
 github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
-github.com/hashicorp/golang-lru/v2 v2.0.1 h1:5pv5N1lT1fjLg2VQ5KWc7kmucp2x/kvFOnxuVTqZ6x4=
-github.com/hashicorp/golang-lru/v2 v2.0.1/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
 github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
 github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
 github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc=
 github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8=
-github.com/iden3/go-iden3-crypto v0.0.16 h1:zN867xiz6HgErXVIV/6WyteGcOukE9gybYTorBMEdsk=
-github.com/iden3/go-iden3-crypto v0.0.16/go.mod h1:dLpM4vEPJ3nDHzhWFXDjzkn1qHoBeOT/3UEhXsEsP3E=
-github.com/ipfs/go-cid v0.3.2 h1:OGgOd+JCFM+y1DjWPmVH+2/4POtpDzwcr7VgnB7mZXc=
-github.com/ipfs/go-cid v0.3.2/go.mod h1:gQ8pKqT/sUxGY+tIwy1RPpAojYu7jAyCp5Tz1svoupw=
+github.com/ipfs/go-cid v0.0.7/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I=
 github.com/ipfs/go-cid v0.4.1 h1:A/T3qGvxi4kpKWWcPC/PgbvDA2bjVLO7n4UeVwnbs/s=
 github.com/ipfs/go-cid v0.4.1/go.mod h1:uQHwDeX4c6CtyrFwdqyhpNcxVewur1M7l7fNU7LKwZk=
 github.com/ipfs/go-log/v2 v2.5.1 h1:1XdUzF7048prq4aBjDQQ4SL5RxftpRGdXhNRwKSAlcY=
@@ -139,14 +121,12 @@ github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1
 github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
 github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.15.15 h1:EF27CXIuDsYJ6mmvtBRlEuB2UVOqHG1tAXgZ7yIO+lw=
-github.com/klauspost/compress v1.15.15/go.mod h1:ZcK2JAFqKOpnBlxcLsJzYfrS9X1akm9fHZNnD9+Vo/4=
 github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU=
 github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
 github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
 github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
-github.com/klauspost/cpuid/v2 v2.2.1 h1:U33DW0aiEj633gHYw3LoDNfkDiYnE5Q8M/TKJn2f2jI=
-github.com/klauspost/cpuid/v2 v2.2.1/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY=
+github.com/klauspost/cpuid/v2 v2.2.3/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY=
+github.com/klauspost/cpuid/v2 v2.2.6/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
 github.com/klauspost/cpuid/v2 v2.2.7 h1:ZWSB3igEs+d0qvnxR/ZBzXVmxkgt8DdzP6m9pfuVLDM=
 github.com/klauspost/cpuid/v2 v2.2.7/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
 github.com/koron/go-ssdp v0.0.4 h1:1IDwrghSKYM7yLf7XCzbByg2sJ/JcNOZRXS2jczTwz0=
@@ -164,10 +144,6 @@ github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6
 github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg=
 github.com/libp2p/go-flow-metrics v0.1.0 h1:0iPhMI8PskQwzh57jB9WxIuIOQ0r+15PChFGkx3Q3WM=
 github.com/libp2p/go-flow-metrics v0.1.0/go.mod h1:4Xi8MX8wj5aWNDAZttg6UPmc0ZrnFNsMtpsYUClFtro=
-github.com/libp2p/go-libp2p v0.25.0 h1:ND6Hc6ZYCzC8S++C4mOD7LdPnLXRkNbr12/8FXgUfIo=
-github.com/libp2p/go-libp2p v0.25.0/go.mod h1:vXHmFpcfl+xIGN4qW58Bw3a0/SKGAesr5/T4IuJHE3o=
-github.com/libp2p/go-libp2p v0.35.4 h1:FDiBUYLkueFwsuNJUZaxKRdpKvBOWU64qQPL768bSeg=
-github.com/libp2p/go-libp2p v0.35.4/go.mod h1:RKCDNt30IkFipGL0tl8wQW/3zVWEGFUZo8g2gAKxwjU=
 github.com/libp2p/go-libp2p-asn-util v0.4.1 h1:xqL7++IKD9TBFMgnLPZR6/6iYhawHKHl950SO9L6n94=
 github.com/libp2p/go-libp2p-asn-util v0.4.1/go.mod h1:d/NI6XZ9qxw67b4e+NgpQexCIiFYJjErASrYW4PFDN8=
 github.com/libp2p/go-libp2p-testing v0.12.0 h1:EPvBb4kKMWO29qP4mZGyhVzUyR25dvfUIK5WDu6iPUA=
@@ -178,12 +154,8 @@ github.com/libp2p/go-nat v0.2.0 h1:Tyz+bUFAYqGyJ/ppPPymMGbIgNRH+WqC5QrT5fKrrGk=
 github.com/libp2p/go-nat v0.2.0/go.mod h1:3MJr+GRpRkyT65EpVPBstXLvOlAPzUVlG6Pwg9ohLJk=
 github.com/libp2p/go-netroute v0.2.1 h1:V8kVrpD8GK0Riv15/7VN6RbUQ3URNZVosw7H2v9tksU=
 github.com/libp2p/go-netroute v0.2.1/go.mod h1:hraioZr0fhBjG0ZRXJJ6Zj2IVEVNx6tDTFQfSmcq7mQ=
-github.com/libp2p/go-reuseport v0.2.0 h1:18PRvIMlpY6ZK85nIAicSBuXXvrYoSw3dsBAR7zc560=
-github.com/libp2p/go-reuseport v0.2.0/go.mod h1:bvVho6eLMm6Bz5hmU0LYN3ixd3nPPvtIlaURZZgOY4k=
 github.com/libp2p/go-reuseport v0.4.0 h1:nR5KU7hD0WxXCJbmw7r2rhRYruNRl2koHw8fQscQm2s=
 github.com/libp2p/go-reuseport v0.4.0/go.mod h1:ZtI03j/wO5hZVDFo2jKywN6bYKWLOy8Se6DrI2E1cLU=
-github.com/libp2p/go-yamux/v4 v4.0.0 h1:+Y80dV2Yx/kv7Y7JKu0LECyVdMXm1VUoko+VQ9rBfZQ=
-github.com/libp2p/go-yamux/v4 v4.0.0/go.mod h1:NWjl8ZTLOGlozrXSOZ/HlfG++39iKNnM5wwmtQP1YB4=
 github.com/libp2p/go-yamux/v4 v4.0.1 h1:FfDR4S1wj6Bw2Pqbc8Uz7pCxeRBPbwsBbEdfwiCypkQ=
 github.com/libp2p/go-yamux/v4 v4.0.1/go.mod h1:NWjl8ZTLOGlozrXSOZ/HlfG++39iKNnM5wwmtQP1YB4=
 github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI=
@@ -191,17 +163,11 @@ github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN
 github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8u83wA0rVZ8ttrq5CpaPZdvrK0LP2lOk=
 github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU=
 github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
-github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ=
-github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
 github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
 github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
 github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
-github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
-github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
 github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4=
 github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI=
-github.com/miekg/dns v1.1.50 h1:DQUfb9uc6smULcREF09Uc+/Gd46YWqJd5DbpPE9xkcA=
-github.com/miekg/dns v1.1.50/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME=
 github.com/miekg/dns v1.1.58 h1:ca2Hdkz+cDg/7eNF6V56jjzuZ4aCAE+DbVkILdQWG/4=
 github.com/miekg/dns v1.1.58/go.mod h1:Ypv+3b/KadlvW9vJfXOTf300O4UqaHFzFCuHz+rPkBY=
 github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c h1:bzE/A84HN25pxAuk9Eej1Kz9OUelF97nAc82bDquQI8=
@@ -212,57 +178,45 @@ github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc h1:PTfri+PuQmWDqERdn
 github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc/go.mod h1:cGKTAVKx4SxOuR/czcZ/E2RSJ3sfHs8FpHhQ5CWMf9s=
 github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ=
 github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM=
-github.com/minio/sha256-simd v1.0.0 h1:v1ta+49hkWZyvaKwrQB8elexRqm6Y0aMLjCNsrYxo6g=
 github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM=
 github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM=
 github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8=
 github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
 github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
+github.com/mr-tron/base58 v1.1.0/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8=
+github.com/mr-tron/base58 v1.1.3/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
 github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o=
 github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
+github.com/multiformats/go-base32 v0.0.3/go.mod h1:pLiuGC8y0QR3Ue4Zug5UzK9LjgbkL8NSQj0zQ5Nz/AA=
 github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE=
 github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI=
+github.com/multiformats/go-base36 v0.1.0/go.mod h1:kFGE83c6s80PklsHO9sRn2NCoffoRdUUOENyW/Vv6sM=
 github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0=
 github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4=
-github.com/multiformats/go-multiaddr v0.1.1/go.mod h1:aMKBKNEYmzmDmxfX88/vz+J5IU55txyt0p4aiWVohjo=
-github.com/multiformats/go-multiaddr v0.2.0/go.mod h1:0nO36NvPpyV4QzvTLi/lafl2y95ncPj0vFwVF6k6wJ4=
-github.com/multiformats/go-multiaddr v0.8.0 h1:aqjksEcqK+iD/Foe1RRFsGZh8+XFiGo7FgUCZlpv3LU=
-github.com/multiformats/go-multiaddr v0.8.0/go.mod h1:Fs50eBDWvZu+l3/9S6xAE7ZYj6yhxlvaVZjakWN7xRs=
-github.com/multiformats/go-multiaddr v0.12.4 h1:rrKqpY9h+n80EwhhC/kkcunCZZ7URIF8yN1WEUt2Hvc=
-github.com/multiformats/go-multiaddr v0.12.4/go.mod h1:sBXrNzucqkFJhvKOiwwLyqamGa/P5EIXNPLovyhQCII=
 github.com/multiformats/go-multiaddr-dns v0.3.1 h1:QgQgR+LQVt3NPTjbrLLpsaT2ufAA2y0Mkk+QRVJbW3A=
 github.com/multiformats/go-multiaddr-dns v0.3.1/go.mod h1:G/245BRQ6FJGmryJCrOuTdB37AMA5AMOVuO6NY3JwTk=
 github.com/multiformats/go-multiaddr-fmt v0.1.0 h1:WLEFClPycPkp4fnIzoFoV9FVd49/eQsuaL3/CWe167E=
 github.com/multiformats/go-multiaddr-fmt v0.1.0/go.mod h1:hGtDIW4PU4BqJ50gW2quDuPVjyWNZxToGUh/HwTZYJo=
-github.com/multiformats/go-multibase v0.1.1 h1:3ASCDsuLX8+j4kx58qnJ4YFq/JWTJpCyDW27ztsVTOI=
-github.com/multiformats/go-multibase v0.1.1/go.mod h1:ZEjHE+IsUrgp5mhlEAYjMtZwK1k4haNkcaPg9aoe1a8=
+github.com/multiformats/go-multibase v0.0.3/go.mod h1:5+1R4eQrT3PkYZ24C3W2Ue2tPwIdYQD509ZjSb5y9Oc=
 github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g=
 github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk=
-github.com/multiformats/go-multicodec v0.7.0 h1:rTUjGOwjlhGHbEMbPoSUJowG1spZTVsITRANCjKTUAQ=
-github.com/multiformats/go-multicodec v0.7.0/go.mod h1:GUC8upxSBE4oG+q3kWZRw/+6yC1BqO550bjhWsJbZlw=
 github.com/multiformats/go-multicodec v0.9.0 h1:pb/dlPnzee/Sxv/j4PmkDRxCOi3hXTz3IbPKOXWJkmg=
 github.com/multiformats/go-multicodec v0.9.0/go.mod h1:L3QTQvMIaVBkXOXXtVmYE+LI16i14xuaojr/H7Ai54k=
-github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew=
-github.com/multiformats/go-multihash v0.2.1 h1:aem8ZT0VA2nCHHk7bPJ1BjUbHNciqZC/d16Vve9l108=
-github.com/multiformats/go-multihash v0.2.1/go.mod h1:WxoMcYG85AZVQUyRyo9s4wULvW5qrI9vb2Lt6evduFc=
+github.com/multiformats/go-multihash v0.0.13/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc=
 github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U=
 github.com/multiformats/go-multihash v0.2.3/go.mod h1:dXgKXCXjBzdscBLk9JkjINiEsCKRVch90MdaGiKsvSM=
-github.com/multiformats/go-multistream v0.4.0 h1:5i4JbawClkbuaX+mIVXiHQYVPxUW+zjv6w7jtSRukxc=
-github.com/multiformats/go-multistream v0.4.0/go.mod h1:BS6ZSYcA4NwYEaIMeCtpJydp2Dc+fNRA6uJMSu/m8+4=
 github.com/multiformats/go-multistream v0.5.0 h1:5htLSLl7lvJk3xx3qT/8Zm9J4K8vEOf/QGkvOGQAyiE=
 github.com/multiformats/go-multistream v0.5.0/go.mod h1:n6tMZiwiP2wUsR8DgfDWw1dydlEqV3l6N3/GBsX6ILA=
-github.com/multiformats/go-varint v0.0.1/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE=
+github.com/multiformats/go-varint v0.0.5/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE=
+github.com/multiformats/go-varint v0.0.6/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE=
 github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8=
 github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU=
 github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo=
 github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM=
-github.com/onsi/ginkgo/v2 v2.5.1 h1:auzK7OI497k6x4OvWq+TKAcpcSAlod0doAH72oIN0Jw=
-github.com/onsi/ginkgo/v2 v2.5.1/go.mod h1:63DOGlLAH8+REH8jUGdL3YpCpu7JODesutUjdENfUAc=
 github.com/onsi/ginkgo/v2 v2.15.0 h1:79HwNRBAZHOEwrczrgSOPy+eFTTlIGELKy5as+ClttY=
 github.com/onsi/ginkgo/v2 v2.15.0/go.mod h1:HlxMHtYF57y6Dpf+mc5529KKmSq9h2FpCF+/ZkwUxKM=
-github.com/onsi/gomega v1.24.0 h1:+0glovB9Jd6z3VR+ScSwQqXVTIfJcGA9UBM8yzQxhqg=
-github.com/opencontainers/runtime-spec v1.0.2 h1:UfAcuLBJB9Coz72x1hgl8O5RVzTdNiaglX6v2DM6FI0=
+github.com/onsi/gomega v1.30.0 h1:hvMK7xYz4D3HapigLTeGdId/NcfQx1VHMJc60ew99+8=
+github.com/onsi/gomega v1.30.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ=
 github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
 github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE7dzrbT927iTk=
 github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
@@ -306,57 +260,40 @@ github.com/pion/transport/v2 v2.2.4/go.mod h1:q2U/tf9FEfnSBGSW6w5Qp5PFWRLRj3NjLh
 github.com/pion/transport/v2 v2.2.5 h1:iyi25i/21gQck4hfRhomF6SktmUQjRsRW4WJdhfc3Kc=
 github.com/pion/transport/v2 v2.2.5/go.mod h1:q2U/tf9FEfnSBGSW6w5Qp5PFWRLRj3NjLhCCgpRK4p0=
 github.com/pion/transport/v3 v3.0.1/go.mod h1:UY7kiITrlMv7/IKgd5eTUcaahZx5oUN3l9SzK5f5xE0=
+github.com/pion/transport/v3 v3.0.2 h1:r+40RJR25S9w3jbA6/5uEPTzcdn7ncyU44RWCbHkLg4=
 github.com/pion/transport/v3 v3.0.2/go.mod h1:nIToODoOlb5If2jF9y2Igfx3PFYWfuXi37m0IlWa/D0=
 github.com/pion/turn/v2 v2.1.3/go.mod h1:huEpByKKHix2/b9kmTAM3YoX6MKP+/D//0ClgUYR2fY=
 github.com/pion/turn/v2 v2.1.6 h1:Xr2niVsiPTB0FPtt+yAWKFUkU1eotQbGgpTIld4x1Gc=
 github.com/pion/turn/v2 v2.1.6/go.mod h1:huEpByKKHix2/b9kmTAM3YoX6MKP+/D//0ClgUYR2fY=
 github.com/pion/webrtc/v3 v3.2.40 h1:Wtfi6AZMQg+624cvCXUuSmrKWepSB7zfgYDOYqsSOVU=
 github.com/pion/webrtc/v3 v3.2.40/go.mod h1:M1RAe3TNTD1tzyvqHrbVODfwdPGSXOUo/OgpoGGJqFY=
-github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
 github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
 github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
-github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw=
-github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y=
 github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE=
 github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho=
 github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
-github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4=
-github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
 github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
 github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
 github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
-github.com/prometheus/common v0.39.0 h1:oOyhkDq05hPZKItWVBkJ6g6AtGxi+fy7F4JvUV8uhsI=
-github.com/prometheus/common v0.39.0/go.mod h1:6XBZ7lYdLCbkAVhwRsWTZn+IN5AB9F/NXd5w0BbEX0Y=
 github.com/prometheus/common v0.48.0 h1:QO8U2CdOzSn1BBsmXJXduaaW+dY/5QLjfB8svtSzKKE=
 github.com/prometheus/common v0.48.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc=
 github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
-github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI=
-github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY=
 github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
 github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
 github.com/quic-go/qpack v0.4.0 h1:Cr9BXA1sQS2SmDUWjSofMPNKmvF6IiIfDRmgU0w1ZCo=
 github.com/quic-go/qpack v0.4.0/go.mod h1:UZVnYIfi5GRk+zI9UMaCPsmZ2xKJP7XBUvVyT1Knj9A=
-github.com/quic-go/qtls-go1-18 v0.2.0 h1:5ViXqBZ90wpUcZS0ge79rf029yx0dYB0McyPJwqqj7U=
-github.com/quic-go/qtls-go1-18 v0.2.0/go.mod h1:moGulGHK7o6O8lSPSZNoOwcLvJKJ85vVNc7oJFD65bc=
-github.com/quic-go/qtls-go1-19 v0.2.0 h1:Cvn2WdhyViFUHoOqK52i51k4nDX8EwIh5VJiVM4nttk=
-github.com/quic-go/qtls-go1-19 v0.2.0/go.mod h1:ySOI96ew8lnoKPtSqx2BlI5wCpUVPT05RMAlajtnyOI=
-github.com/quic-go/qtls-go1-20 v0.1.0 h1:d1PK3ErFy9t7zxKsG3NXBJXZjp/kMLoIb3y/kV54oAI=
-github.com/quic-go/qtls-go1-20 v0.1.0/go.mod h1:JKtK6mjbAVcUTN/9jZpvLbGxvdWIKS8uT7EiStoU1SM=
-github.com/quic-go/quic-go v0.32.0 h1:lY02md31s1JgPiiyfqJijpu/UX/Iun304FI3yUqX7tA=
-github.com/quic-go/quic-go v0.32.0/go.mod h1:/fCsKANhQIeD5l76c2JFU+07gVE3KaA0FP+0zMWwfwo=
 github.com/quic-go/quic-go v0.44.0 h1:So5wOr7jyO4vzL2sd8/pD9Kesciv91zSk8BoFngItQ0=
 github.com/quic-go/quic-go v0.44.0/go.mod h1:z4cx/9Ny9UtGITIPzmPTXh1ULfOyWh4qGQlpnPcWmek=
-github.com/quic-go/webtransport-go v0.5.1 h1:1eVb7WDWCRoaeTtFHpFBJ6WDN1bSrPrRoW6tZgSw0Ow=
 github.com/quic-go/webtransport-go v0.8.0 h1:HxSrwun11U+LlmwpgM1kEqIqH90IT4N8auv/cD7QFJg=
 github.com/quic-go/webtransport-go v0.8.0/go.mod h1:N99tjprW432Ut5ONql/aUhSLT0YVSlwHohQsuac9WaM=
 github.com/raulk/go-watchdog v1.3.0 h1:oUmdlHxdkXRJlwfG0O9omj8ukerm8MEQavSiDTEtBsk=
 github.com/raulk/go-watchdog v1.3.0/go.mod h1:fIvOnLbF0b0ZwkB9YU4mOW9Did//4vPZtDqv66NfsMU=
-github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
-github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
+github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
+github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
 github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
 github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
 github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
@@ -395,13 +332,10 @@ github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/
 github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
 github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
 github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
-github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
 github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
 github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
 github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
 github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
-github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
-github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
 github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
 github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
 github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
@@ -416,24 +350,19 @@ github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1
 github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
 go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA=
 go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
-go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ=
-go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
 go.uber.org/dig v1.17.1 h1:Tga8Lz8PcYNsWsyHMZ1Vm0OQOUaJNDyvPImgbAu9YSc=
 go.uber.org/dig v1.17.1/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE=
 go.uber.org/fx v1.22.1 h1:nvvln7mwyT5s1q201YE29V/BFrGor6vMiDNpU/78Mys=
 go.uber.org/fx v1.22.1/go.mod h1:HT2M7d7RHo+ebKGh9NRcrsrHHfpZ60nW3QRubMRfv48=
 go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
-go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA=
+go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
+go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
 go.uber.org/mock v0.4.0 h1:VcM4ZOtdbR4f6VXfiOpwpVJDL6lCReaZ6mw31wqh7KU=
 go.uber.org/mock v0.4.0/go.mod h1:a6FSlNadKUHUa9IP5Vyt1zh4fC7uAwxMutEAscFbkZc=
 go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
-go.uber.org/multierr v1.8.0 h1:dg6GjLku4EH+249NNmoIciG9N/jURbDG+pFlTkhzIC8=
-go.uber.org/multierr v1.8.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak=
 go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
 go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
 go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI=
-go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60=
-go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg=
 go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
 go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
 go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE=
@@ -447,8 +376,8 @@ golang.org/x/crypto v0.0.0-20200602180216-279210d13fed/go.mod h1:LzIPMQfyMNhhGPh
 golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
 golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
-golang.org/x/crypto v0.4.0 h1:UVQgzMY87xqpKNgb+kDsll2Igd33HszWHFLmpaRMq/8=
-golang.org/x/crypto v0.4.0/go.mod h1:3quD/ATkf6oY+rnes5c3ExXTbLc8mueNue5/DoinL80=
+golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
 golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE=
 golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio=
 golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw=
@@ -458,8 +387,7 @@ golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOM
 golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI=
 golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20230206171751-46f607a40771 h1:xP7rWLUr1e1n2xkK5YB4LI0hPEy3LJC6Wk+D4pGlOJg=
-golang.org/x/exp v0.0.0-20230206171751-46f607a40771/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
+golang.org/x/exp v0.0.0-20230725012225-302865e7556b/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc=
 golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 h1:vr/HnozRka3pE4EsMEg1lgkXJkTFJCVUX+S/ZT6wYzM=
 golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842/go.mod h1:XtvwrStGgqGPLc4cjQfWqZHG1YFdYs6swckp8vpsjnc=
 golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@@ -472,9 +400,9 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
+golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI=
 golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
-golang.org/x/mod v0.9.0 h1:KENHtAZL2y3NLMYZeHY9DW8HW8V+kQyJsY/V9JlKvCs=
-golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.11.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
 golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA=
 golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
 golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -493,11 +421,10 @@ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY
 golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
 golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
 golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
-golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
 golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
-golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ=
-golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
 golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
 golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
 golang.org/x/net v0.13.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA=
@@ -521,7 +448,6 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ
 golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
 golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
 golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
@@ -546,9 +472,8 @@ golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBc
 golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ=
 golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -562,6 +487,7 @@ golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y=
 golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
 golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
 golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY=
 golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
@@ -575,8 +501,8 @@ golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3
 golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
+golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
 golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
-golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68=
 golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
 golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
 golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
@@ -585,6 +511,8 @@ golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk=
 golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
 golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
+golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
 golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -596,13 +524,10 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn
 golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
 golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
 golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
 golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
-golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
 golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
+golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA=
 golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
-golang.org/x/tools v0.7.0 h1:W4OVu8VVOaIO0yzWMNdepAulS7YfoS3Zabrm8DOXXU4=
-golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s=
 golang.org/x/tools v0.21.0 h1:qc0xYgIbsSDt9EyWz05J5wfa7LOVW0YTLOXrqdLAWIw=
 golang.org/x/tools v0.21.0/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -625,10 +550,6 @@ google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmE
 google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio=
 google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
 google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
-google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
-google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w=
-google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
 google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg=
 google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -642,16 +563,15 @@ gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
 gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o=
 honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
 honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
 honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-lukechampine.com/blake3 v1.1.7 h1:GgRMhmdsuK8+ii6UZFDL8Nb+VyMwadAgcJyfYHxG6n0=
-lukechampine.com/blake3 v1.1.7/go.mod h1:tkKEOtDkNtklkXtLNEOGNq5tcV90tJiA1vAA12R78LA=
+lukechampine.com/blake3 v1.1.6/go.mod h1:tkKEOtDkNtklkXtLNEOGNq5tcV90tJiA1vAA12R78LA=
 lukechampine.com/blake3 v1.2.1 h1:YuqqRuaqsGV71BV/nm9xlI0MKUv4QC54jQnBChWbGnI=
 lukechampine.com/blake3 v1.2.1/go.mod h1:0OFRp7fBtAylGVCO40o87sbupkyIGgbpv1+M1k1LM6k=
-nhooyr.io/websocket v1.8.7 h1:usjR2uOr/zjjkVMy0lW+PPohFok7PCow5sDjLgX4P4g=
 sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck=
 sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0=
diff --git a/go-libp2p-kad-dht/CODEOWNERS b/go-libp2p-kad-dht/CODEOWNERS
new file mode 100644
index 0000000..43a81df
--- /dev/null
+++ b/go-libp2p-kad-dht/CODEOWNERS
@@ -0,0 +1,15 @@
+# CODEOWNERS
+
+# default owner is the libp2p team
+*.go @libp2p/go-libp2p-maintainers @guillaumemichel
+/pb/ @libp2p/go-libp2p-maintainers @guillaumemichel
+
+# dual is an application for IPFS
+/dual/ @libp2p/kubo-maintainers @guillaumemichel
+# fullrt is IPFS specific
+/fullrt/ @libp2p/kubo-maintainers @guillaumemichel
+# providers describe the IPFS specific providers
+/providers/ @libp2p/kubo-maintainers @guillaumemichel
+# records are IPFS specific
+/records.go @libp2p/kubo-maintainers @guillaumemichel
+/records_test.go
@libp2p/kubo-maintainers @guillaumemichel diff --git a/go-libp2p-kad-dht/LICENSE b/go-libp2p-kad-dht/LICENSE new file mode 100644 index 0000000..0e32302 --- /dev/null +++ b/go-libp2p-kad-dht/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Protocol Labs, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/go-libp2p-kad-dht/README.md b/go-libp2p-kad-dht/README.md new file mode 100644 index 0000000..b9b0a69 --- /dev/null +++ b/go-libp2p-kad-dht/README.md @@ -0,0 +1,51 @@ +# go-libp2p-kad-dht + +[![](https://img.shields.io/badge/made%20by-Protocol%20Labs-blue.svg?style=flat-square)](https://protocol.ai) +[![](https://img.shields.io/badge/project-libp2p-yellow.svg?style=flat-square)](https://libp2p.io) +[![GoDoc](https://godoc.org/github.com/libp2p/go-libp2p-kad-dht?status.svg)](https://godoc.org/github.com/libp2p/go-libp2p-kad-dht) +[![Discourse posts](https://img.shields.io/discourse/https/discuss.libp2p.io/posts.svg)](https://discuss.libp2p.io) + +> A Go implementation of [libp2p Kademlia DHT specification](https://github.com/libp2p/specs/tree/master/kad-dht) + +## Table of Contents + +- [Install](#install) +- [Usage](#usage) +- [Optimizations](#optimizations) +- [Contribute](#contribute) +- [Maintainers](#maintainers) +- [License](#license) + +## Install + +```sh +go get github.com/libp2p/go-libp2p-kad-dht +``` + +## Optimizations + +Client-side optimizations are described in [optimizations.md](./optimizations.md) + +## Usage + +Go to https://godoc.org/github.com/libp2p/go-libp2p-kad-dht. + +## Contribute + +Contributions welcome. Please check out [the issues](https://github.com/libp2p/go-libp2p-kad-dht/issues). + +Check out our [contributing document](https://github.com/libp2p/community/blob/master/CONTRIBUTE.md) for more information on how we work, and about contributing in general. Please be aware that all interactions related to libp2p are subject to the IPFS [Code of Conduct](https://github.com/ipfs/community/blob/master/code-of-conduct.md). + + + +## Maintainers + +- [@ipfs/kubo-maintainers](https://github.com/orgs/ipfs/teams/kubo-maintainers) +- [@libp2p/go-libp2p-maintainers](https://github.com/orgs/libp2p/teams/go-libp2p-maintainers) +- [@guillaumemichel](https://github.com/guillaumemichel) + +See [CODEOWNERS](./CODEOWNERS). + +## License + +[MIT](LICENSE) © Protocol Labs Inc. 
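For a quick, concrete illustration of the usage above, here is a minimal, hypothetical sketch (not part of the upstream README; it assumes only the `New`, `Mode`/`ModeClient`, and `Bootstrap` APIs added by this patch):

```go
package main

import (
	"context"

	"github.com/libp2p/go-libp2p"
	dht "github.com/libp2p/go-libp2p-kad-dht"
)

func main() {
	ctx := context.Background()

	// Start a libp2p host with default settings.
	h, err := libp2p.New()
	if err != nil {
		panic(err)
	}
	defer h.Close()

	// Attach a Kademlia DHT in client mode and kick off bootstrapping.
	d, err := dht.New(ctx, h, dht.Mode(dht.ModeClient))
	if err != nil {
		panic(err)
	}
	defer d.Close()

	if err := d.Bootstrap(ctx); err != nil {
		panic(err)
	}
}
```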
diff --git a/go-libp2p-kad-dht/crawler/crawler.go b/go-libp2p-kad-dht/crawler/crawler.go new file mode 100644 index 0000000..e055c54 --- /dev/null +++ b/go-libp2p-kad-dht/crawler/crawler.go @@ -0,0 +1,266 @@ +package crawler + +import ( + "context" + "sync" + "time" + + "github.com/libp2p/go-libp2p/core/host" + "github.com/libp2p/go-libp2p/core/network" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/protocol" + + logging "github.com/ipfs/go-log/v2" + //lint:ignore SA1019 TODO migrate away from gogo pb + "github.com/libp2p/go-msgio/protoio" + + pb "github.com/libp2p/go-libp2p-kad-dht/pb" + kbucket "github.com/libp2p/go-libp2p-kbucket" +) + +var ( + logger = logging.Logger("dht-crawler") + + _ Crawler = (*DefaultCrawler)(nil) +) + +type ( + // Crawler connects to hosts in the DHT to track routing tables of peers. + Crawler interface { + // Run crawls the DHT starting from the startingPeers, and calls either handleSuccess or handleFail depending on whether a peer was successfully contacted or not. + Run(ctx context.Context, startingPeers []*peer.AddrInfo, handleSuccess HandleQueryResult, handleFail HandleQueryFail) + } + // DefaultCrawler provides a default implementation of Crawler. + DefaultCrawler struct { + parallelism int + connectTimeout time.Duration + host host.Host + dhtRPC *pb.ProtocolMessenger + dialAddressExtendDur time.Duration + } +) + +// NewDefaultCrawler creates a new DefaultCrawler +func NewDefaultCrawler(host host.Host, opts ...Option) (*DefaultCrawler, error) { + o := new(options) + if err := defaults(o); err != nil { + return nil, err + } + for _, opt := range opts { + if err := opt(o); err != nil { + return nil, err + } + } + + pm, err := pb.NewProtocolMessenger(&messageSender{h: host, protocols: o.protocols, timeout: o.perMsgTimeout}) + if err != nil { + return nil, err + } + + return &DefaultCrawler{ + parallelism: o.parallelism, + connectTimeout: o.connectTimeout, + host: host, + dhtRPC: pm, + dialAddressExtendDur: o.dialAddressExtendDur, + }, nil +} + +// MessageSender handles sending wire protocol messages to a given peer +type messageSender struct { + h host.Host + protocols []protocol.ID + timeout time.Duration +} + +// SendRequest sends a peer a message and waits for its response +func (ms *messageSender) SendRequest(ctx context.Context, p peer.ID, pmes *pb.Message) (*pb.Message, error) { + s, err := ms.h.NewStream(ctx, p, ms.protocols...) + if err != nil { + return nil, err + } + + w := protoio.NewDelimitedWriter(s) + if err := w.WriteMsg(pmes); err != nil { + return nil, err + } + + r := protoio.NewDelimitedReader(s, network.MessageSizeMax) + tctx, cancel := context.WithTimeout(ctx, ms.timeout) + defer cancel() + defer func() { _ = s.Close() }() + + msg := new(pb.Message) + if err := ctxReadMsg(tctx, r, msg); err != nil { + _ = s.Reset() + return nil, err + } + + return msg, nil +} + +func ctxReadMsg(ctx context.Context, rc protoio.ReadCloser, mes *pb.Message) error { + errc := make(chan error, 1) + go func(r protoio.ReadCloser) { + defer close(errc) + err := r.ReadMsg(mes) + errc <- err + }(rc) + + select { + case err := <-errc: + return err + case <-ctx.Done(): + return ctx.Err() + } +} + +// SendMessage sends a peer a message without waiting on a response +func (ms *messageSender) SendMessage(ctx context.Context, p peer.ID, pmes *pb.Message) error { + s, err := ms.h.NewStream(ctx, p, ms.protocols...) 
+ if err != nil { + return err + } + defer func() { _ = s.Close() }() + + w := protoio.NewDelimitedWriter(s) + return w.WriteMsg(pmes) +} + +// HandleQueryResult is a callback on successful peer query +type HandleQueryResult func(p peer.ID, rtPeers []*peer.AddrInfo) + +// HandleQueryFail is a callback on failed peer query +type HandleQueryFail func(p peer.ID, err error) + +// Run crawls dht peers from an initial seed of `startingPeers` +func (c *DefaultCrawler) Run(ctx context.Context, startingPeers []*peer.AddrInfo, handleSuccess HandleQueryResult, handleFail HandleQueryFail) { + jobs := make(chan peer.ID, 1) + results := make(chan *queryResult, 1) + + // Start worker goroutines + var wg sync.WaitGroup + wg.Add(c.parallelism) + for i := 0; i < c.parallelism; i++ { + go func() { + defer wg.Done() + for p := range jobs { + res := c.queryPeer(ctx, p) + results <- res + } + }() + } + + defer wg.Wait() + defer close(jobs) + + var toDial []*peer.AddrInfo + peersSeen := make(map[peer.ID]struct{}) + + numSkipped := 0 + for _, ai := range startingPeers { + extendAddrs := c.host.Peerstore().Addrs(ai.ID) + if len(ai.Addrs) > 0 { + extendAddrs = append(extendAddrs, ai.Addrs...) + c.host.Peerstore().AddAddrs(ai.ID, extendAddrs, c.dialAddressExtendDur) + } + if len(extendAddrs) == 0 { + numSkipped++ + continue + } + + toDial = append(toDial, ai) + peersSeen[ai.ID] = struct{}{} + } + + if numSkipped > 0 { + logger.Infof("%d starting peers were skipped due to lack of addresses. Starting crawl with %d peers", numSkipped, len(toDial)) + } + + numQueried := 0 + outstanding := 0 + + for len(toDial) > 0 || outstanding > 0 { + var jobCh chan peer.ID + var nextPeerID peer.ID + if len(toDial) > 0 { + jobCh = jobs + nextPeerID = toDial[0].ID + } + + select { + case res := <-results: + if len(res.data) > 0 { + logger.Debugf("peer %v had %d peers", res.peer, len(res.data)) + rtPeers := make([]*peer.AddrInfo, 0, len(res.data)) + for p, ai := range res.data { + c.host.Peerstore().AddAddrs(p, ai.Addrs, c.dialAddressExtendDur) + if _, ok := peersSeen[p]; !ok { + peersSeen[p] = struct{}{} + toDial = append(toDial, ai) + } + rtPeers = append(rtPeers, ai) + } + if handleSuccess != nil { + handleSuccess(res.peer, rtPeers) + } + } else if handleFail != nil { + handleFail(res.peer, res.err) + } + outstanding-- + case jobCh <- nextPeerID: + outstanding++ + numQueried++ + toDial = toDial[1:] + logger.Debugf("starting %d out of %d", numQueried, len(peersSeen)) + } + } +} + +type queryResult struct { + peer peer.ID + data map[peer.ID]*peer.AddrInfo + err error +} + +func (c *DefaultCrawler) queryPeer(ctx context.Context, nextPeer peer.ID) *queryResult { + tmpRT, err := kbucket.NewRoutingTable(20, kbucket.ConvertPeerID(nextPeer), time.Hour, c.host.Peerstore(), time.Hour, nil) + if err != nil { + logger.Errorf("error creating rt for peer %v : %v", nextPeer, err) + return &queryResult{nextPeer, nil, err} + } + + connCtx, cancel := context.WithTimeout(ctx, c.connectTimeout) + defer cancel() + err = c.host.Connect(connCtx, peer.AddrInfo{ID: nextPeer}) + if err != nil { + logger.Debugf("could not connect to peer %v: %v", nextPeer, err) + return &queryResult{nextPeer, nil, err} + } + + localPeers := make(map[peer.ID]*peer.AddrInfo) + var retErr error + for cpl := 0; cpl <= 15; cpl++ { + generatePeer, err := tmpRT.GenRandPeerID(uint(cpl)) + if err != nil { + panic(err) + } + peers, err := c.dhtRPC.GetClosestPeers(ctx, nextPeer, generatePeer) + if err != nil { + logger.Debugf("error finding data on peer %v with cpl %d : %v", nextPeer, 
cpl, err) + retErr = err + break + } + for _, ai := range peers { + if _, ok := localPeers[ai.ID]; !ok { + localPeers[ai.ID] = ai + } + } + } + + if retErr != nil { + return &queryResult{nextPeer, nil, retErr} + } + + return &queryResult{nextPeer, localPeers, retErr} +} diff --git a/go-libp2p-kad-dht/crawler/options.go b/go-libp2p-kad-dht/crawler/options.go new file mode 100644 index 0000000..8c7602f --- /dev/null +++ b/go-libp2p-kad-dht/crawler/options.go @@ -0,0 +1,72 @@ +package crawler + +import ( + "time" + + "github.com/libp2p/go-libp2p/core/protocol" +) + +// Option is a DHT crawler option type. +type Option func(*options) error + +type options struct { + protocols []protocol.ID + parallelism int + connectTimeout time.Duration + perMsgTimeout time.Duration + dialAddressExtendDur time.Duration +} + +// defaults are the default crawler options. This option will be automatically +// prepended to any options you pass to the crawler constructor. +var defaults = func(o *options) error { + o.protocols = []protocol.ID{"/ipfs/kad/1.0.0"} + o.parallelism = 1000 + o.connectTimeout = time.Second * 5 + o.perMsgTimeout = time.Second * 5 + o.dialAddressExtendDur = time.Minute * 30 + + return nil +} + +// WithProtocols defines the ordered set of protocols the crawler will use to talk to other nodes +func WithProtocols(protocols []protocol.ID) Option { + return func(o *options) error { + o.protocols = append([]protocol.ID{}, protocols...) + return nil + } +} + +// WithParallelism defines the number of queries that can be issued in parallel +func WithParallelism(parallelism int) Option { + return func(o *options) error { + o.parallelism = parallelism + return nil + } +} + +// WithMsgTimeout defines the amount of time a single DHT message is allowed to take before it's deemed failed +func WithMsgTimeout(timeout time.Duration) Option { + return func(o *options) error { + o.perMsgTimeout = timeout + return nil + } +} + +// WithConnectTimeout defines how long to wait for a peer connection before timing out +func WithConnectTimeout(timeout time.Duration) Option { + return func(o *options) error { + o.connectTimeout = timeout + return nil + } +} + +// WithDialAddrExtendDuration sets the duration by which the TTL of dialed addresses in the peer store is +// extended. +// Defaults to 30 minutes if unset.
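+// +// A hypothetical usage sketch (h is an assumed, existing libp2p host.Host): +// +//	c, err := NewDefaultCrawler(h, WithParallelism(256), WithDialAddrExtendDuration(time.Hour))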
+func WithDialAddrExtendDuration(ext time.Duration) Option { + return func(o *options) error { + o.dialAddressExtendDur = ext + return nil + } +} diff --git a/go-libp2p-kad-dht/dht.go b/go-libp2p-kad-dht/dht.go new file mode 100644 index 0000000..f7ac66d --- /dev/null +++ b/go-libp2p-kad-dht/dht.go @@ -0,0 +1,942 @@ +package dht + +import ( + "context" + "fmt" + "math" + "math/rand" + "sync" + "time" + + "github.com/libp2p/go-libp2p-routing-helpers/tracing" + "github.com/libp2p/go-libp2p/core/host" + "github.com/libp2p/go-libp2p/core/network" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/peerstore" + "github.com/libp2p/go-libp2p/core/protocol" + "github.com/libp2p/go-libp2p/core/routing" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" + + "github.com/libp2p/go-libp2p-kad-dht/internal" + dhtcfg "github.com/libp2p/go-libp2p-kad-dht/internal/config" + "github.com/libp2p/go-libp2p-kad-dht/internal/net" + "github.com/libp2p/go-libp2p-kad-dht/metrics" + "github.com/libp2p/go-libp2p-kad-dht/netsize" + pb "github.com/libp2p/go-libp2p-kad-dht/pb" + "github.com/libp2p/go-libp2p-kad-dht/providers" + "github.com/libp2p/go-libp2p-kad-dht/rtrefresh" + kb "github.com/libp2p/go-libp2p-kbucket" + "github.com/libp2p/go-libp2p-kbucket/peerdiversity" + record "github.com/libp2p/go-libp2p-record" + recpb "github.com/libp2p/go-libp2p-record/pb" + + "github.com/gogo/protobuf/proto" + ds "github.com/ipfs/go-datastore" + logging "github.com/ipfs/go-log/v2" + "github.com/multiformats/go-base32" + ma "github.com/multiformats/go-multiaddr" + "go.opencensus.io/tag" + "go.uber.org/multierr" + "go.uber.org/zap" +) + +const tracer = tracing.Tracer("go-libp2p-kad-dht") +const dhtName = "IpfsDHT" + +var ( + logger = logging.Logger("dht") + baseLogger = logger.Desugar() + + rtFreezeTimeout = 1 * time.Minute +) + +const ( + // BaseConnMgrScore is the base of the score set on the connection + // manager "kbucket" tag. It is added with the common prefix length + // between two peer IDs. + baseConnMgrScore = 5 ) + +type mode int + +const ( + modeServer mode = iota + 1 + modeClient +) + +const ( + kad1 protocol.ID = "/kad/1.0.0" +) + +const ( + kbucketTag = "kbucket" + protectedBuckets = 2 +) + +// IpfsDHT is an implementation of Kademlia with S/Kademlia modifications. +// It is used to implement the base Routing module. +type IpfsDHT struct { + host host.Host // the network services we need + self peer.ID // Local peer (yourself) + selfKey kb.ID + peerstore peerstore.Peerstore // Peer Registry + + datastore ds.Datastore // Local data + + routingTable *kb.RoutingTable // The routing table, which buckets peers by their XOR distance from us + // providerStore stores & manages the provider records for this DHT peer. + providerStore providers.ProviderStore + + // manages Routing Table refresh + rtRefreshManager *rtrefresh.RtRefreshManager + + birth time.Time // When this peer started up + + Validator record.Validator + + ctx context.Context + cancel context.CancelFunc + wg sync.WaitGroup + + protoMessenger *pb.ProtocolMessenger + msgSender pb.MessageSenderWithDisconnect + + stripedPutLocks [256]sync.Mutex + + // DHT protocols we query with. We'll only add peers to our routing + // table if they speak these protocols. + protocols []protocol.ID + + // DHT protocols we can respond to.
+ serverProtocols []protocol.ID + + auto ModeOpt + mode mode + modeLk sync.Mutex + + bucketSize int + alpha int // The concurrency parameter per path + beta int // The number of peers closest to a target that must have responded for a query path to terminate + + queryPeerFilter QueryFilterFunc + routingTablePeerFilter RouteTableFilterFunc + rtPeerDiversityFilter peerdiversity.PeerIPGroupFilter + + autoRefresh bool + + // timeout for the lookupCheck operation + lookupCheckTimeout time.Duration + // number of concurrent lookupCheck operations + lookupCheckCapacity int + lookupChecksLk sync.Mutex + + // A function returning a set of bootstrap peers to fall back on if all other attempts to fix + // the routing table fail (or, e.g., this is the first time this node is + // connecting to the network). + bootstrapPeers func() []peer.AddrInfo + + maxRecordAge time.Duration + + // Allows disabling dht subsystems. These should _only_ be set on + // "forked" DHTs (e.g., DHTs with custom protocols and/or private + // networks). + enableProviders, enableValues bool + + disableFixLowPeers bool + fixLowPeersChan chan struct{} + + addPeerToRTChan chan peer.ID + refreshFinishedCh chan struct{} + + rtFreezeTimeout time.Duration + + // network size estimator + nsEstimator *netsize.Estimator + enableOptProv bool + + // a bounded channel to limit asynchronicity of in-flight ADD_PROVIDER RPCs + optProvJobsPool chan struct{} + + // configuration variables for tests + testAddressUpdateProcessing bool + + // addrFilter is used to filter the addresses we put into the peer store. + // Mostly used to filter out localhost and local addresses. + addrFilter func([]ma.Multiaddr) []ma.Multiaddr +} + +// Assert that IPFS assumptions about interfaces aren't broken. These aren't a +// guarantee, but we can use them to aid refactoring. +var ( + _ routing.ContentRouting = (*IpfsDHT)(nil) + _ routing.Routing = (*IpfsDHT)(nil) + _ routing.PeerRouting = (*IpfsDHT)(nil) + _ routing.PubKeyFetcher = (*IpfsDHT)(nil) + _ routing.ValueStore = (*IpfsDHT)(nil) +) + +// New creates a new DHT with the specified host and options. +// Please note that being connected to a DHT peer does not necessarily imply that it's also in the DHT Routing Table. +// If the Routing Table has more than "minRTRefreshThreshold" peers, we consider a peer as a Routing Table candidate ONLY when +// we successfully get a query response from it OR if it sends us a query.
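+// +// A minimal, hypothetical sketch (ctx, h, and dstore are assumed to already exist): +// +//	d, err := New(ctx, h, Mode(ModeAuto), Datastore(dstore)) +//	if err != nil { +//		// handle error +//	} +//	defer d.Close()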
+func New(ctx context.Context, h host.Host, options ...Option) (*IpfsDHT, error) { + var cfg dhtcfg.Config + if err := cfg.Apply(append([]Option{dhtcfg.Defaults}, options...)...); err != nil { + return nil, err + } + if err := cfg.ApplyFallbacks(h); err != nil { + return nil, err + } + + if err := cfg.Validate(); err != nil { + return nil, err + } + + dht, err := makeDHT(h, cfg) + if err != nil { + return nil, fmt.Errorf("failed to create DHT, err=%s", err) + } + + dht.autoRefresh = cfg.RoutingTable.AutoRefresh + + dht.maxRecordAge = cfg.MaxRecordAge + dht.enableProviders = cfg.EnableProviders + dht.enableValues = cfg.EnableValues + dht.disableFixLowPeers = cfg.DisableFixLowPeers + + dht.Validator = cfg.Validator + dht.msgSender = net.NewMessageSenderImpl(h, dht.protocols) + dht.protoMessenger, err = pb.NewProtocolMessenger(dht.msgSender) + if err != nil { + return nil, err + } + + dht.testAddressUpdateProcessing = cfg.TestAddressUpdateProcessing + + dht.auto = cfg.Mode + switch cfg.Mode { + case ModeAuto, ModeClient: + dht.mode = modeClient + case ModeAutoServer, ModeServer: + dht.mode = modeServer + default: + return nil, fmt.Errorf("invalid dht mode %d", cfg.Mode) + } + + if dht.mode == modeServer { + if err := dht.moveToServerMode(); err != nil { + return nil, err + } + } + + // register for event bus and network notifications + if err := dht.startNetworkSubscriber(); err != nil { + return nil, err + } + + // go-routine to make sure we ALWAYS have RT peer addresses in the peerstore + // since RT membership is decoupled from connectivity + go dht.persistRTPeersInPeerStore() + + dht.rtPeerLoop() + + // Fill routing table with currently connected peers that are DHT servers + for _, p := range dht.host.Network().Peers() { + dht.peerFound(p) + } + + dht.rtRefreshManager.Start() + + // listens to the fix low peers chan and tries to fix the Routing Table + if !dht.disableFixLowPeers { + dht.runFixLowPeersLoop() + } + + return dht, nil +} + +// NewDHT creates a new DHT object with the given peer as the 'local' host. +// IpfsDHTs initialized with this function will respond to DHT requests, +// whereas IpfsDHTs initialized with NewDHTClient will not. +func NewDHT(ctx context.Context, h host.Host, dstore ds.Batching) *IpfsDHT { + dht, err := New(ctx, h, Datastore(dstore)) + if err != nil { + panic(err) + } + return dht +} + +// NewDHTClient creates a new DHT object with the given peer as the 'local' +// host. IpfsDHT clients initialized with this function will not respond to DHT +// requests. If you need a peer to respond to DHT requests, use NewDHT instead.
+func NewDHTClient(ctx context.Context, h host.Host, dstore ds.Batching) *IpfsDHT { + dht, err := New(ctx, h, Datastore(dstore), Mode(ModeClient)) + if err != nil { + panic(err) + } + return dht +} + +func makeDHT(h host.Host, cfg dhtcfg.Config) (*IpfsDHT, error) { + var protocols, serverProtocols []protocol.ID + + v1proto := cfg.ProtocolPrefix + kad1 + + if cfg.V1ProtocolOverride != "" { + v1proto = cfg.V1ProtocolOverride + } + + protocols = []protocol.ID{v1proto} + serverProtocols = []protocol.ID{v1proto} + + dht := &IpfsDHT{ + datastore: cfg.Datastore, + self: h.ID(), + selfKey: kb.ConvertPeerID(h.ID()), + peerstore: h.Peerstore(), + host: h, + birth: time.Now(), + protocols: protocols, + serverProtocols: serverProtocols, + bucketSize: cfg.BucketSize, + alpha: cfg.Concurrency, + beta: cfg.Resiliency, + lookupCheckCapacity: cfg.LookupCheckConcurrency, + queryPeerFilter: cfg.QueryPeerFilter, + routingTablePeerFilter: cfg.RoutingTable.PeerFilter, + rtPeerDiversityFilter: cfg.RoutingTable.DiversityFilter, + addrFilter: cfg.AddressFilter, + + fixLowPeersChan: make(chan struct{}, 1), + + addPeerToRTChan: make(chan peer.ID), + refreshFinishedCh: make(chan struct{}), + + enableOptProv: cfg.EnableOptimisticProvide, + optProvJobsPool: nil, + } + + var maxLastSuccessfulOutboundThreshold time.Duration + + // The threshold is calculated based on the expected amount of time that should pass before we + // query a peer as part of our refresh cycle. + // To grok the Math Wizardry that produced these exact equations, please be patient as a document explaining it will + // be published soon. + // For illustration: with the default K=20 and alpha=10, this works out to Log(1/20)/Log(1/2) ≈ 4.32 refresh intervals. + if cfg.Concurrency < cfg.BucketSize { // (alpha < K) + l1 := math.Log(float64(1) / float64(cfg.BucketSize)) // (Log(1/K)) + l2 := math.Log(float64(1) - (float64(cfg.Concurrency) / float64(cfg.BucketSize))) // Log(1 - (alpha / K)) + maxLastSuccessfulOutboundThreshold = time.Duration(l1 / l2 * float64(cfg.RoutingTable.RefreshInterval)) + } else { + maxLastSuccessfulOutboundThreshold = cfg.RoutingTable.RefreshInterval + } + + // construct routing table + // use twice the theoretical usefulness threshold to keep older peers around longer + rt, err := makeRoutingTable(dht, cfg, 2*maxLastSuccessfulOutboundThreshold) + if err != nil { + return nil, fmt.Errorf("failed to construct routing table, err=%s", err) + } + dht.routingTable = rt + dht.bootstrapPeers = cfg.BootstrapPeers + + dht.lookupCheckTimeout = cfg.RoutingTable.RefreshQueryTimeout + + // init network size estimator + dht.nsEstimator = netsize.NewEstimator(h.ID(), rt, cfg.BucketSize) + + if dht.enableOptProv { + dht.optProvJobsPool = make(chan struct{}, cfg.OptimisticProvideJobsPoolSize) + } + + // rt refresh manager + dht.rtRefreshManager, err = makeRtRefreshManager(dht, cfg, maxLastSuccessfulOutboundThreshold) + if err != nil { + return nil, fmt.Errorf("failed to construct RT Refresh Manager, err=%s", err) + } + + // create a tagged context derived from the original context + // the DHT context should be done when the process is closed + dht.ctx, dht.cancel = context.WithCancel(dht.newContextWithLocalTags(context.Background())) + + if cfg.ProviderStore != nil { + dht.providerStore = cfg.ProviderStore + } else { + dht.providerStore, err = providers.NewProviderManager(h.ID(), dht.peerstore, cfg.Datastore) + if err != nil { + return nil, fmt.Errorf("initializing default provider manager (%v)", err) + } + } + + dht.rtFreezeTimeout = rtFreezeTimeout + + return dht, nil +} + +// lookupCheck performs a lookup request to a remote peer.ID, verifying that it is able to +// 
answer it correctly +func (dht *IpfsDHT) lookupCheck(ctx context.Context, p peer.ID) error { + // lookup request to p requesting its own peer.ID + peerids, err := dht.protoMessenger.GetClosestPeers(ctx, p, p) + // p is expected to return at least 1 peer id, unless our routing table has + // fewer than bucketSize peers, in which case we aren't picky about who we + // add to the routing table. + if err == nil && len(peerids) == 0 && dht.routingTable.Size() >= dht.bucketSize { + return fmt.Errorf("peer %s failed to return its closest peers, got %d", p, len(peerids)) + } + return err +} + +func makeRtRefreshManager(dht *IpfsDHT, cfg dhtcfg.Config, maxLastSuccessfulOutboundThreshold time.Duration) (*rtrefresh.RtRefreshManager, error) { + keyGenFnc := func(cpl uint) (string, error) { + p, err := dht.routingTable.GenRandPeerID(cpl) + return string(p), err + } + + queryFnc := func(ctx context.Context, key string) error { + _, err := dht.GetClosestPeers(ctx, key) + return err + } + + r, err := rtrefresh.NewRtRefreshManager( + dht.host, dht.routingTable, cfg.RoutingTable.AutoRefresh, + keyGenFnc, + queryFnc, + dht.lookupCheck, + cfg.RoutingTable.RefreshQueryTimeout, + cfg.RoutingTable.RefreshInterval, + maxLastSuccessfulOutboundThreshold, + dht.refreshFinishedCh) + + return r, err +} + +func makeRoutingTable(dht *IpfsDHT, cfg dhtcfg.Config, maxLastSuccessfulOutboundThreshold time.Duration) (*kb.RoutingTable, error) { + // make a Routing Table Diversity Filter + var filter *peerdiversity.Filter + if dht.rtPeerDiversityFilter != nil { + df, err := peerdiversity.NewFilter(dht.rtPeerDiversityFilter, "rt/diversity", func(p peer.ID) int { + return kb.CommonPrefixLen(dht.selfKey, kb.ConvertPeerID(p)) + }) + + if err != nil { + return nil, fmt.Errorf("failed to construct peer diversity filter: %w", err) + } + + filter = df + } + + rt, err := kb.NewRoutingTable(cfg.BucketSize, dht.selfKey, time.Minute, dht.host.Peerstore(), maxLastSuccessfulOutboundThreshold, filter) + if err != nil { + return nil, err + } + + cmgr := dht.host.ConnManager() + + rt.PeerAdded = func(p peer.ID) { + commonPrefixLen := kb.CommonPrefixLen(dht.selfKey, kb.ConvertPeerID(p)) + if commonPrefixLen < protectedBuckets { + cmgr.Protect(p, kbucketTag) + } else { + cmgr.TagPeer(p, kbucketTag, baseConnMgrScore) + } + } + rt.PeerRemoved = func(p peer.ID) { + cmgr.Unprotect(p, kbucketTag) + cmgr.UntagPeer(p, kbucketTag) + + // try to fix the RT + dht.fixRTIfNeeded() + } + + return rt, err +} + +// ProviderStore returns the provider storage object for storing and retrieving provider records. +func (dht *IpfsDHT) ProviderStore() providers.ProviderStore { + return dht.providerStore +} + +// GetRoutingTableDiversityStats returns the diversity stats for the Routing Table.
+func (dht *IpfsDHT) GetRoutingTableDiversityStats() []peerdiversity.CplDiversityStats { + return dht.routingTable.GetDiversityStats() +} + +// Mode allows introspection of the operation mode of the DHT +func (dht *IpfsDHT) Mode() ModeOpt { + return dht.auto +} + +// runFixLowPeersLoop manages simultaneous requests to fixLowPeers +func (dht *IpfsDHT) runFixLowPeersLoop() { + dht.wg.Add(1) + go func() { + defer dht.wg.Done() + + dht.fixLowPeers() + + ticker := time.NewTicker(periodicBootstrapInterval) + defer ticker.Stop() + + for { + select { + case <-dht.fixLowPeersChan: + case <-ticker.C: + case <-dht.ctx.Done(): + return + } + + dht.fixLowPeers() + } + }() +} + +// fixLowPeers tries to get more peers into the routing table if we're below the threshold +func (dht *IpfsDHT) fixLowPeers() { + if dht.routingTable.Size() > minRTRefreshThreshold { + return + } + + // we try to add all peers we are connected to to the Routing Table + // in case they aren't already there. + for _, p := range dht.host.Network().Peers() { + dht.peerFound(p) + } + + // TODO Active Bootstrapping + // We should first use non-bootstrap peers we knew of from previous + // snapshots of the Routing Table before we connect to the bootstrappers. + // See https://github.com/libp2p/go-libp2p-kad-dht/issues/387. + if dht.routingTable.Size() == 0 && dht.bootstrapPeers != nil { + bootstrapPeers := dht.bootstrapPeers() + if len(bootstrapPeers) == 0 { + // No point in continuing, we have no peers! + return + } + + found := 0 + for _, i := range rand.Perm(len(bootstrapPeers)) { + ai := bootstrapPeers[i] + err := dht.Host().Connect(dht.ctx, ai) + if err == nil { + found++ + } else { + logger.Warnw("failed to bootstrap", "peer", ai.ID, "error", err) + } + + // Wait for two bootstrap peers, or try them all. + // + // Why two? In theory, one should be enough + // normally. However, if the network were to + // restart and everyone connected to just one + // bootstrapper, we'll end up with a mostly + // partitioned network. + // + // So we always bootstrap with two random peers. + if found == maxNBoostrappers { + break + } + } + } + + // if we still don't have peers in our routing table (probably because Identify hasn't completed), + // there is no point in triggering a Refresh. + if dht.routingTable.Size() == 0 { + return + } + + if dht.autoRefresh { + dht.rtRefreshManager.RefreshNoWait() + } +} + +// TODO This is hacky, horrible and the programmer needs to have his mother called a hamster. +// SHOULD be removed once https://github.com/libp2p/go-libp2p/issues/800 goes in. +func (dht *IpfsDHT) persistRTPeersInPeerStore() { + tickr := time.NewTicker(peerstore.RecentlyConnectedAddrTTL / 3) + defer tickr.Stop() + + for { + select { + case <-tickr.C: + ps := dht.routingTable.ListPeers() + for _, p := range ps { + dht.peerstore.UpdateAddrs(p, peerstore.RecentlyConnectedAddrTTL, peerstore.RecentlyConnectedAddrTTL) + } + case <-dht.ctx.Done(): + return + } + } +} + +// getLocal attempts to retrieve the value from the datastore. +// +// returns nil, nil when either nothing is found or the value found doesn't properly validate.
+// returns nil, some_error when there's a *datastore* error (i.e., something goes very wrong) +func (dht *IpfsDHT) getLocal(ctx context.Context, key string) (*recpb.Record, error) { + logger.Debugw("finding value in datastore", "key", internal.LoggableRecordKeyString(key)) + + rec, err := dht.getRecordFromDatastore(ctx, mkDsKey(key)) + if err != nil { + logger.Warnw("get local failed", "key", internal.LoggableRecordKeyString(key), "error", err) + return nil, err + } + + // Double check the key. Can't hurt. + if rec != nil && string(rec.GetKey()) != key { + logger.Errorw("BUG: found a DHT record that didn't match its key", "expected", internal.LoggableRecordKeyString(key), "got", rec.GetKey()) + return nil, nil + + } + return rec, nil +} + +// putLocal stores the key-value pair in the datastore +func (dht *IpfsDHT) putLocal(ctx context.Context, key string, rec *recpb.Record) error { + data, err := proto.Marshal(rec) + if err != nil { + logger.Warnw("failed to marshal record for local put", "error", err, "key", internal.LoggableRecordKeyString(key)) + return err + } + + return dht.datastore.Put(ctx, mkDsKey(key), data) +} + +func (dht *IpfsDHT) rtPeerLoop() { + dht.wg.Add(1) + go func() { + defer dht.wg.Done() + + var bootstrapCount uint + var isBootsrapping bool + var timerCh <-chan time.Time + + for { + select { + case <-timerCh: + dht.routingTable.MarkAllPeersIrreplaceable() + case p := <-dht.addPeerToRTChan: + if dht.routingTable.Size() == 0 { + isBootsrapping = true + bootstrapCount = 0 + timerCh = nil + } + // queryPeer set to true as we only try to add queried peers to the RT + newlyAdded, err := dht.routingTable.TryAddPeer(p, true, isBootsrapping) + if err != nil { + // peer not added. + continue + } + if newlyAdded { + // peer was added to the RT, it can now be fixed if needed. + dht.fixRTIfNeeded() + } else { + // the peer is already in our RT, but we just successfully queried it and so let's give it a + // bump on the query time so we don't ping it too soon for a liveliness check. + dht.routingTable.UpdateLastSuccessfulOutboundQueryAt(p, time.Now()) + } + case <-dht.refreshFinishedCh: + bootstrapCount = bootstrapCount + 1 + if bootstrapCount == 2 { + timerCh = time.NewTimer(dht.rtFreezeTimeout).C + } + + old := isBootsrapping + isBootsrapping = false + if old { + dht.rtRefreshManager.RefreshNoWait() + } + + case <-dht.ctx.Done(): + return + } + } + }() +} + +// peerFound verifies whether the found peer advertises DHT protocols +// and probes it to make sure it answers DHT queries as expected. If +// it fails to answer, it isn't added to the routingTable.
+func (dht *IpfsDHT) peerFound(p peer.ID) { + // if the peer is already in the routing table or the appropriate bucket is + // already full, don't try to add the new peer.ID + if !dht.routingTable.UsefulNewPeer(p) { + return + } + + // verify whether the remote peer advertises the right dht protocol + b, err := dht.validRTPeer(p) + if err != nil { + logger.Errorw("failed to validate if peer is a DHT peer", "peer", p, "error", err) + } else if b { + + // check if the maximal number of concurrent lookup checks is reached + dht.lookupChecksLk.Lock() + if dht.lookupCheckCapacity == 0 { + dht.lookupChecksLk.Unlock() + // drop the new peer.ID if the maximal number of concurrent lookup + // checks is reached + return + } + dht.lookupCheckCapacity-- + dht.lookupChecksLk.Unlock() + + go func() { + livelinessCtx, cancel := context.WithTimeout(dht.ctx, dht.lookupCheckTimeout) + defer cancel() + + // performing a FIND_NODE query + err := dht.lookupCheck(livelinessCtx, p) + + dht.lookupChecksLk.Lock() + dht.lookupCheckCapacity++ + dht.lookupChecksLk.Unlock() + + if err != nil { + logger.Debugw("connected peer not answering DHT request as expected", "peer", p, "error", err) + return + } + + // if the FIND_NODE succeeded, the peer is considered as valid + dht.validPeerFound(p) + }() + } +} + +// validPeerFound signals the routingTable that we've found a peer that +// supports the DHT protocol, and has just answered a DHT FindPeers query correctly +func (dht *IpfsDHT) validPeerFound(p peer.ID) { + if c := baseLogger.Check(zap.DebugLevel, "peer found"); c != nil { + c.Write(zap.String("peer", p.String())) + } + + select { + case dht.addPeerToRTChan <- p: + case <-dht.ctx.Done(): + return + } +} + +// peerStoppedDHT signals the routing table that a peer is unable to respond to DHT queries anymore. +func (dht *IpfsDHT) peerStoppedDHT(p peer.ID) { + logger.Debugw("peer stopped dht", "peer", p) + // A peer that does not support the DHT protocol is dead for us. + // There's no point in talking to it anymore until it starts supporting the DHT protocol again. + dht.routingTable.RemovePeer(p) +} + +func (dht *IpfsDHT) fixRTIfNeeded() { + select { + case dht.fixLowPeersChan <- struct{}{}: + default: + } +} + +// FindLocal looks for a peer with a given ID connected to this dht and returns its peer info. +func (dht *IpfsDHT) FindLocal(ctx context.Context, id peer.ID) peer.AddrInfo { + _, span := internal.StartSpan(ctx, "IpfsDHT.FindLocal", trace.WithAttributes(attribute.Stringer("PeerID", id))) + defer span.End() + + switch dht.host.Network().Connectedness(id) { + case network.Connected, network.CanConnect: + return dht.peerstore.PeerInfo(id) + default: + return peer.AddrInfo{} + } +} + +// nearestPeersToQuery returns the routing table's closest peers. +func (dht *IpfsDHT) nearestPeersToQuery(pmes *pb.Message, count int) []peer.ID { + closer := dht.routingTable.NearestPeers(kb.ConvertKey(string(pmes.GetKey())), count) + return closer +} + +// betterPeersToQuery returns nearestPeersToQuery with some additional filtering +func (dht *IpfsDHT) betterPeersToQuery(pmes *pb.Message, from peer.ID, count int) []peer.ID { + closer := dht.nearestPeersToQuery(pmes, count) + + // no node? nil + if closer == nil { + logger.Infow("no closer peers to send", "from", from) + return nil + } + + filtered := make([]peer.ID, 0, len(closer)) + for _, clp := range closer { + + // == to self? that's bad + if clp == dht.self { + logger.Error("BUG betterPeersToQuery: attempted to return self! 
this shouldn't happen...") + return nil + } + // Don't send a peer back to themselves + if clp == from { + continue + } + + filtered = append(filtered, clp) + } + + // ok seems like closer nodes + return filtered +} + +func (dht *IpfsDHT) setMode(m mode) error { + dht.modeLk.Lock() + defer dht.modeLk.Unlock() + + if m == dht.mode { + return nil + } + + switch m { + case modeServer: + return dht.moveToServerMode() + case modeClient: + return dht.moveToClientMode() + default: + return fmt.Errorf("unrecognized dht mode: %d", m) + } +} + +// moveToServerMode advertises (via libp2p identify updates) that we are able to respond to DHT queries and sets the appropriate stream handlers. +// Note: We may support responding to queries with protocols aside from our primary ones in order to support +// interoperability with older versions of the DHT protocol. +func (dht *IpfsDHT) moveToServerMode() error { + dht.mode = modeServer + for _, p := range dht.serverProtocols { + dht.host.SetStreamHandler(p, dht.handleNewStream) + } + return nil +} + +// moveToClientMode stops advertising (and rescinds advertisements via libp2p identify updates) that we are able to +// respond to DHT queries and removes the appropriate stream handlers. We also kill all inbound streams that were +// utilizing the handled protocols. +// Note: We may support responding to queries with protocols aside from our primary ones in order to support +// interoperability with older versions of the DHT protocol. +func (dht *IpfsDHT) moveToClientMode() error { + dht.mode = modeClient + for _, p := range dht.serverProtocols { + dht.host.RemoveStreamHandler(p) + } + + pset := make(map[protocol.ID]bool) + for _, p := range dht.serverProtocols { + pset[p] = true + } + + for _, c := range dht.host.Network().Conns() { + for _, s := range c.GetStreams() { + if pset[s.Protocol()] { + if s.Stat().Direction == network.DirInbound { + _ = s.Reset() + } + } + } + } + return nil +} + +func (dht *IpfsDHT) getMode() mode { + dht.modeLk.Lock() + defer dht.modeLk.Unlock() + return dht.mode +} + +// Context returns the DHT's context. +func (dht *IpfsDHT) Context() context.Context { + return dht.ctx +} + +// RoutingTable returns the DHT's routingTable. +func (dht *IpfsDHT) RoutingTable() *kb.RoutingTable { + return dht.routingTable +} + +// Close shuts down the DHT and all its subsystems. +func (dht *IpfsDHT) Close() error { + dht.cancel() + dht.wg.Wait() + + var wg sync.WaitGroup + closes := [...]func() error{ + dht.rtRefreshManager.Close, + dht.providerStore.Close, + } + var errors [len(closes)]error + wg.Add(len(errors)) + for i, c := range closes { + go func(i int, c func() error) { + defer wg.Done() + errors[i] = c() + }(i, c) + } + wg.Wait() + + return multierr.Combine(errors[:]...) +} + +func mkDsKey(s string) ds.Key { + return ds.NewKey(base32.RawStdEncoding.EncodeToString([]byte(s))) +} + +// PeerID returns the DHT node's Peer ID. +func (dht *IpfsDHT) PeerID() peer.ID { + return dht.self +} + +// PeerKey returns a DHT key, converted from the DHT node's Peer ID. +func (dht *IpfsDHT) PeerKey() []byte { + return kb.ConvertPeerID(dht.self) +} + +// Host returns the libp2p host this DHT is operating with. +func (dht *IpfsDHT) Host() host.Host { + return dht.host +} + +// Ping sends a ping message to the passed peer and waits for a response.
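+// +// For example (a sketch; d is an *IpfsDHT and p a peer.ID): +// +//	if err := d.Ping(ctx, p); err != nil { +//		// the peer did not answer our DHT ping +//	}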
+func (dht *IpfsDHT) Ping(ctx context.Context, p peer.ID) error { + ctx, span := internal.StartSpan(ctx, "IpfsDHT.Ping", trace.WithAttributes(attribute.Stringer("PeerID", p))) + defer span.End() + return dht.protoMessenger.Ping(ctx, p) +} + +// NetworkSize returns the most recent estimation of the DHT network size. +// EXPERIMENTAL: We do not provide any guarantees that this method will +// continue to exist in the codebase. Use it at your own risk. +func (dht *IpfsDHT) NetworkSize() (int32, error) { + return dht.nsEstimator.NetworkSize() +} + +// newContextWithLocalTags returns a new context.Context with the InstanceID and +// PeerID keys populated. It will also take any extra tags that need adding to +// the context as tag.Mutators. +func (dht *IpfsDHT) newContextWithLocalTags(ctx context.Context, extraTags ...tag.Mutator) context.Context { + extraTags = append( + extraTags, + tag.Upsert(metrics.KeyPeerID, dht.self.String()), + tag.Upsert(metrics.KeyInstanceID, fmt.Sprintf("%p", dht)), + ) + ctx, _ = tag.New( + ctx, + extraTags..., + ) // ignoring error as it is unrelated to the actual function of this code. + return ctx +} + +func (dht *IpfsDHT) maybeAddAddrs(p peer.ID, addrs []ma.Multiaddr, ttl time.Duration) { + // Don't add addresses for self or our connected peers. We have better ones. + if p == dht.self || dht.host.Network().Connectedness(p) == network.Connected { + return + } + dht.peerstore.AddAddrs(p, dht.filterAddrs(addrs), ttl) +} + +func (dht *IpfsDHT) filterAddrs(addrs []ma.Multiaddr) []ma.Multiaddr { + if f := dht.addrFilter; f != nil { + return f(addrs) + } + return addrs +} diff --git a/go-libp2p-kad-dht/dht_bootstrap.go b/go-libp2p-kad-dht/dht_bootstrap.go new file mode 100644 index 0000000..03029ad --- /dev/null +++ b/go-libp2p-kad-dht/dht_bootstrap.go @@ -0,0 +1,84 @@ +package dht + +import ( + "context" + "time" + + "github.com/libp2p/go-libp2p/core/peer" + + "github.com/multiformats/go-multiaddr" +) + +// DefaultBootstrapPeers is a set of public DHT bootstrap peers provided by libp2p. +var DefaultBootstrapPeers []multiaddr.Multiaddr + +// Minimum number of peers in the routing table. If we drop below this and we +// see a new peer, we trigger a bootstrap round. +var minRTRefreshThreshold = 10 + +const ( + periodicBootstrapInterval = 2 * time.Minute + maxNBoostrappers = 2 +) + +func init() { + for _, s := range []string{ + "/dnsaddr/bootstrap.libp2p.io/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN", + "/dnsaddr/bootstrap.libp2p.io/p2p/QmQCU2EcMqAqQPR2i9bChDtGNJchTbq5TbXJJ16u19uLTa", + "/dnsaddr/bootstrap.libp2p.io/p2p/QmbLHAnMoJPWSCR5Zhtx6BHJX9KiKNN6tpvbUcqanj75Nb", + "/dnsaddr/bootstrap.libp2p.io/p2p/QmcZf59bWwK5XFi76CZX8cbJ4BhTzzA3gU1ZjYZcYW3dwt", + "/ip4/104.131.131.82/tcp/4001/p2p/QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ", // mars.i.ipfs.io + } { + ma, err := multiaddr.NewMultiaddr(s) + if err != nil { + panic(err) + } + DefaultBootstrapPeers = append(DefaultBootstrapPeers, ma) + } +} + +// GetDefaultBootstrapPeerAddrInfos returns the peer.AddrInfos for the default +// bootstrap peers so we can use these for initializing the DHT by passing these to the +// BootstrapPeers(...) option. 
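+// +// For example (a sketch; ctx and h are assumed to exist): +// +//	d, err := New(ctx, h, BootstrapPeers(GetDefaultBootstrapPeerAddrInfos()...))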
+func GetDefaultBootstrapPeerAddrInfos() []peer.AddrInfo { + ds := make([]peer.AddrInfo, 0, len(DefaultBootstrapPeers)) + + for i := range DefaultBootstrapPeers { + info, err := peer.AddrInfoFromP2pAddr(DefaultBootstrapPeers[i]) + if err != nil { + logger.Errorw("failed to convert bootstrapper address to peer addr info", "address", + DefaultBootstrapPeers[i].String(), "err", err) + continue + } + ds = append(ds, *info) + } + return ds +} + +// Bootstrap tells the DHT to get into a bootstrapped state satisfying the +// IpfsRouter interface. +func (dht *IpfsDHT) Bootstrap(ctx context.Context) (err error) { + _, end := tracer.Bootstrap(dhtName, ctx) + defer func() { end(err) }() + + dht.fixRTIfNeeded() + dht.rtRefreshManager.RefreshNoWait() + return nil +} + +// RefreshRoutingTable tells the DHT to refresh its routing tables. +// +// The returned channel will block until the refresh finishes, then yield the +// error and close. The channel is buffered and safe to ignore. +func (dht *IpfsDHT) RefreshRoutingTable() <-chan error { + return dht.rtRefreshManager.Refresh(false) +} + +// ForceRefresh acts like RefreshRoutingTable but forces the DHT to refresh all +// buckets in the Routing Table irrespective of when they were last refreshed. +// +// The returned channel will block until the refresh finishes, then yield the +// error and close. The channel is buffered and safe to ignore. +func (dht *IpfsDHT) ForceRefresh() <-chan error { + return dht.rtRefreshManager.Refresh(true) +} diff --git a/go-libp2p-kad-dht/dht_bootstrap_test.go b/go-libp2p-kad-dht/dht_bootstrap_test.go new file mode 100644 index 0000000..e2236f5 --- /dev/null +++ b/go-libp2p-kad-dht/dht_bootstrap_test.go @@ -0,0 +1,201 @@ +package dht + +import ( + "context" + "testing" + "time" + + kb "github.com/libp2p/go-libp2p-kbucket" + "github.com/libp2p/go-libp2p/core/event" + "github.com/libp2p/go-libp2p/core/peer" + + "github.com/stretchr/testify/require" +) + +func TestSelfWalkOnAddressChange(t *testing.T) { + ctx := context.Background() + // create three DHT instances with auto refresh disabled + d1 := setupDHT(ctx, t, false, DisableAutoRefresh(), forceAddressUpdateProcessing(t)) + d2 := setupDHT(ctx, t, false, DisableAutoRefresh()) + d3 := setupDHT(ctx, t, false, DisableAutoRefresh()) + + var connectedTo *IpfsDHT + // connect d1 to whoever is "further" + if kb.CommonPrefixLen(kb.ConvertPeerID(d1.self), kb.ConvertPeerID(d2.self)) <= + kb.CommonPrefixLen(kb.ConvertPeerID(d1.self), kb.ConvertPeerID(d3.self)) { + connect(t, ctx, d1, d3) + connectedTo = d3 + } else { + connect(t, ctx, d1, d2) + connectedTo = d2 + } + + // then connect d2 AND d3 + connect(t, ctx, d2, d3) + + // d1 should have ONLY 1 peer in its RT + waitForWellFormedTables(t, []*IpfsDHT{d1}, 1, 1, 2*time.Second) + require.Equal(t, connectedTo.self, d1.routingTable.ListPeers()[0]) + + // now emit the address change event + em, err := d1.host.EventBus().Emitter(&event.EvtLocalAddressesUpdated{}) + require.NoError(t, err) + require.NoError(t, em.Emit(event.EvtLocalAddressesUpdated{})) + waitForWellFormedTables(t, []*IpfsDHT{d1}, 2, 2, 2*time.Second) + // it should now have both peers in the RT + ps := d1.routingTable.ListPeers() + require.Contains(t, ps, d2.self) + require.Contains(t, ps, d3.self) +} + +func TestDefaultBootstrappers(t *testing.T) { + ds := GetDefaultBootstrapPeerAddrInfos() + require.NotEmpty(t, ds) + require.Len(t, ds, len(DefaultBootstrapPeers)) + + dfmap := make(map[peer.ID]peer.AddrInfo) + for _, p := range DefaultBootstrapPeers { + info, err := 
peer.AddrInfoFromP2pAddr(p) + require.NoError(t, err) + dfmap[info.ID] = *info + } + + for _, p := range ds { + inf, ok := dfmap[p.ID] + require.True(t, ok) + require.ElementsMatch(t, p.Addrs, inf.Addrs) + delete(dfmap, p.ID) + } + require.Empty(t, dfmap) +} + +func TestBootstrappersReplacable(t *testing.T) { + old := rtFreezeTimeout + rtFreezeTimeout = 100 * time.Millisecond + defer func() { + rtFreezeTimeout = old + }() + ctx := context.Background() + d := setupDHT(ctx, t, false, disableFixLowPeersRoutine(t), BucketSize(2)) + defer d.host.Close() + defer d.Close() + + var d1 *IpfsDHT + var d2 *IpfsDHT + + // d1 & d2 have a cpl of 0 + for { + d1 = setupDHT(ctx, t, false, disableFixLowPeersRoutine(t)) + if kb.CommonPrefixLen(d.selfKey, d1.selfKey) == 0 { + break + } + } + + for { + d2 = setupDHT(ctx, t, false, disableFixLowPeersRoutine(t)) + if kb.CommonPrefixLen(d.selfKey, d2.selfKey) == 0 { + break + } + } + defer d1.host.Close() + defer d1.Close() + + defer d2.host.Close() + defer d2.Close() + + connect(t, ctx, d, d1) + connect(t, ctx, d, d2) + require.Len(t, d.routingTable.ListPeers(), 2) + + // d3 & d4 with cpl=0 will go in as d1 & d2 are replaceable. + var d3 *IpfsDHT + var d4 *IpfsDHT + + for { + d3 = setupDHT(ctx, t, false, disableFixLowPeersRoutine(t)) + if kb.CommonPrefixLen(d.selfKey, d3.selfKey) == 0 { + break + } + } + + for { + d4 = setupDHT(ctx, t, false, disableFixLowPeersRoutine(t)) + if kb.CommonPrefixLen(d.selfKey, d4.selfKey) == 0 { + break + } + } + + defer d3.host.Close() + defer d3.Close() + defer d4.host.Close() + defer d4.Close() + + connect(t, ctx, d, d3) + connect(t, ctx, d, d4) + require.Len(t, d.routingTable.ListPeers(), 2) + require.Contains(t, d.routingTable.ListPeers(), d3.self) + require.Contains(t, d.routingTable.ListPeers(), d4.self) + + // do a couple of refreshes and wait for the Routing Table to be "frozen". + <-d.RefreshRoutingTable() + <-d.RefreshRoutingTable() + time.Sleep(1 * time.Second) + + // adding d5 fails because RT is frozen + var d5 *IpfsDHT + for { + d5 = setupDHT(ctx, t, false, disableFixLowPeersRoutine(t)) + if kb.CommonPrefixLen(d.selfKey, d5.selfKey) == 0 { + break + } + } + defer d5.host.Close() + defer d5.Close() + + connectNoSync(t, ctx, d, d5) + time.Sleep(500 * time.Millisecond) + require.Len(t, d.routingTable.ListPeers(), 2) + require.Contains(t, d.routingTable.ListPeers(), d3.self) + require.Contains(t, d.routingTable.ListPeers(), d4.self) + + // Let's empty the routing table + for _, p := range d.routingTable.ListPeers() { + d.routingTable.RemovePeer(p) + } + require.Len(t, d.routingTable.ListPeers(), 0) + + // adding d1 & d2 works now because there is space in the Routing Table + require.NoError(t, d.host.Network().ClosePeer(d1.self)) + require.NoError(t, d.host.Network().ClosePeer(d2.self)) + connect(t, ctx, d, d1) + connect(t, ctx, d, d2) + require.Len(t, d.routingTable.ListPeers(), 2) + require.Contains(t, d.routingTable.ListPeers(), d1.self) + require.Contains(t, d.routingTable.ListPeers(), d2.self) + + // adding d3 & d4 also works because the RT is not frozen.
+ require.NoError(t, d.host.Network().ClosePeer(d3.self)) + require.NoError(t, d.host.Network().ClosePeer(d4.self)) + connect(t, ctx, d, d3) + connect(t, ctx, d, d4) + require.Len(t, d.routingTable.ListPeers(), 2) + require.Contains(t, d.routingTable.ListPeers(), d3.self) + require.Contains(t, d.routingTable.ListPeers(), d4.self) + + // run refreshes and freeze the RT + <-d.RefreshRoutingTable() + <-d.RefreshRoutingTable() + time.Sleep(1 * time.Second) + // can't add d1 & d5 because RT is frozen. + require.NoError(t, d.host.Network().ClosePeer(d1.self)) + require.NoError(t, d.host.Network().ClosePeer(d5.self)) + connectNoSync(t, ctx, d, d1) + connectNoSync(t, ctx, d, d5) + d.peerFound(d5.self) + d.peerFound(d1.self) + time.Sleep(1 * time.Second) + + require.Len(t, d.routingTable.ListPeers(), 2) + require.Contains(t, d.routingTable.ListPeers(), d3.self) + require.Contains(t, d.routingTable.ListPeers(), d4.self) +} diff --git a/go-libp2p-kad-dht/dht_filters.go b/go-libp2p-kad-dht/dht_filters.go new file mode 100644 index 0000000..a4b0903 --- /dev/null +++ b/go-libp2p-kad-dht/dht_filters.go @@ -0,0 +1,243 @@ +package dht + +import ( + "bytes" + "net" + "sync" + "time" + + "github.com/libp2p/go-libp2p/core/host" + "github.com/libp2p/go-libp2p/core/network" + "github.com/libp2p/go-libp2p/core/peer" + + "github.com/google/gopacket/routing" + netroute "github.com/libp2p/go-netroute" + + ma "github.com/multiformats/go-multiaddr" + manet "github.com/multiformats/go-multiaddr/net" + + dhtcfg "github.com/libp2p/go-libp2p-kad-dht/internal/config" +) + +// QueryFilterFunc is a filter applied when considering peers to dial when querying +type QueryFilterFunc = dhtcfg.QueryFilterFunc + +// RouteTableFilterFunc is a filter applied when considering connections to keep in +// the local route table. +type RouteTableFilterFunc = dhtcfg.RouteTableFilterFunc + +var publicCIDR6 = "2000::/3" +var public6 *net.IPNet + +func init() { + _, public6, _ = net.ParseCIDR(publicCIDR6) +} + +// isPublicAddr follows the logic of manet.IsPublicAddr, except it uses +// a stricter definition of "public" for ipv6: namely "is it in 2000::/3"?
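+// +// For example: 2400::1 falls inside 2000::/3 and is treated as public here, +// while a unique local address such as fc00::1 is not.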
+func isPublicAddr(a ma.Multiaddr) bool {
+	ip, err := manet.ToIP(a)
+	if err != nil {
+		return false
+	}
+	if ip.To4() != nil {
+		return !inAddrRange(ip, manet.Private4) && !inAddrRange(ip, manet.Unroutable4)
+	}
+
+	return public6.Contains(ip)
+}
+
+// isPrivateAddr follows the logic of manet.IsPrivateAddr, except that
+// it uses a stricter definition of "public" for ipv6
+func isPrivateAddr(a ma.Multiaddr) bool {
+	ip, err := manet.ToIP(a)
+	if err != nil {
+		return false
+	}
+	if ip.To4() != nil {
+		return inAddrRange(ip, manet.Private4)
+	}
+
+	return !public6.Contains(ip) && !inAddrRange(ip, manet.Unroutable6)
+}
+
+// PublicQueryFilter returns true if the peer is suspected of being publicly accessible
+func PublicQueryFilter(_ interface{}, ai peer.AddrInfo) bool {
+	if len(ai.Addrs) == 0 {
+		return false
+	}
+
+	var hasPublicAddr bool
+	for _, a := range ai.Addrs {
+		if !isRelayAddr(a) && isPublicAddr(a) {
+			hasPublicAddr = true
+		}
+	}
+	return hasPublicAddr
+}
+
+type hasHost interface {
+	Host() host.Host
+}
+
+var _ QueryFilterFunc = PublicQueryFilter
+
+// PublicRoutingTableFilter allows a peer to be added to the routing table if the connections to that peer indicate
+// that it is on a public network
+func PublicRoutingTableFilter(dht interface{}, p peer.ID) bool {
+	d := dht.(hasHost)
+
+	conns := d.Host().Network().ConnsToPeer(p)
+	if len(conns) == 0 {
+		return false
+	}
+
+	// Do we have a public address for this peer?
+	id := conns[0].RemotePeer()
+	known := d.Host().Peerstore().PeerInfo(id)
+	for _, a := range known.Addrs {
+		if !isRelayAddr(a) && isPublicAddr(a) {
+			return true
+		}
+	}
+
+	return false
+}
+
+var _ RouteTableFilterFunc = PublicRoutingTableFilter
+
+// PrivateQueryFilter doesn't currently restrict which peers we are willing to query from the local DHT.
+func PrivateQueryFilter(_ interface{}, ai peer.AddrInfo) bool {
+	return len(ai.Addrs) > 0
+}
+
+var _ QueryFilterFunc = PrivateQueryFilter
+
+// We call this very frequently but routes can technically change at runtime.
+// Cache it for two minutes.
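+// The lookup below is a standard double-checked pattern: a read-locked fast
+// path, then a re-check under the write lock so that concurrent callers
+// don't all rebuild the router via netroute.New().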
+const routerCacheTime = 2 * time.Minute
+
+var routerCache struct {
+	sync.RWMutex
+	router  routing.Router
+	expires time.Time
+}
+
+func getCachedRouter() routing.Router {
+	routerCache.RLock()
+	router := routerCache.router
+	expires := routerCache.expires
+	routerCache.RUnlock()
+
+	if time.Now().Before(expires) {
+		return router
+	}
+
+	routerCache.Lock()
+	defer routerCache.Unlock()
+
+	now := time.Now()
+	if now.Before(routerCache.expires) {
+		return routerCache.router
+	}
+	routerCache.router, _ = netroute.New()
+	routerCache.expires = now.Add(routerCacheTime)
+	return routerCache.router
+}
+
+// PrivateRoutingTableFilter allows a peer to be added to the routing table if the connections to that peer indicate
+// that it is on a private network
+func PrivateRoutingTableFilter(dht interface{}, p peer.ID) bool {
+	d := dht.(hasHost)
+	conns := d.Host().Network().ConnsToPeer(p)
+	return privRTFilter(d, conns)
+}
+
+func privRTFilter(dht interface{}, conns []network.Conn) bool {
+	d := dht.(hasHost)
+	h := d.Host()
+
+	router := getCachedRouter()
+	myAdvertisedIPs := make([]net.IP, 0)
+	for _, a := range h.Addrs() {
+		if isPublicAddr(a) && !isRelayAddr(a) {
+			ip, err := manet.ToIP(a)
+			if err != nil {
+				continue
+			}
+			myAdvertisedIPs = append(myAdvertisedIPs, ip)
+		}
+	}
+
+	for _, c := range conns {
+		ra := c.RemoteMultiaddr()
+		if isPrivateAddr(ra) && !isRelayAddr(ra) {
+			return true
+		}
+
+		if isPublicAddr(ra) {
+			ip, err := manet.ToIP(ra)
+			if err != nil {
+				continue
+			}
+
+			// if the ip is the same as one of the local host's public advertised IPs - then consider it local
+			for _, i := range myAdvertisedIPs {
+				if i.Equal(ip) {
+					return true
+				}
+				if ip.To4() == nil {
+					if i.To4() == nil && isEUI(ip) && sameV6Net(i, ip) {
+						return true
+					}
+				}
+			}
+
+			// if there's no gateway - a direct host in the OS routing table - then consider it local
+			// This is relevant in particular to ipv6 networks where the addresses may all be public,
+			// but the nodes are aware of direct links between each other.
+			if router != nil {
+				_, gw, _, err := router.Route(ip)
+				if gw == nil && err == nil {
+					return true
+				}
+			}
+		}
+	}
+
+	return false
+}
+
+var _ RouteTableFilterFunc = PrivateRoutingTableFilter
+
+func isEUI(ip net.IP) bool {
+	// per rfc 2373
+	return len(ip) == net.IPv6len && ip[11] == 0xff && ip[12] == 0xfe
+}
+
+func sameV6Net(a, b net.IP) bool {
+	//lint:ignore SA1021 We're comparing only parts of the IP address here.
+ return len(a) == net.IPv6len && len(b) == net.IPv6len && bytes.Equal(a[0:8], b[0:8]) //nolint +} + +func isRelayAddr(a ma.Multiaddr) bool { + found := false + ma.ForEach(a, func(c ma.Component, e error) bool { + if e != nil { + return false + } + found = c.Protocol().Code == ma.P_CIRCUIT + return !found + }) + return found +} + +func inAddrRange(ip net.IP, ipnets []*net.IPNet) bool { + for _, ipnet := range ipnets { + if ipnet.Contains(ip) { + return true + } + } + + return false +} diff --git a/go-libp2p-kad-dht/dht_filters_test.go b/go-libp2p-kad-dht/dht_filters_test.go new file mode 100644 index 0000000..7714b8d --- /dev/null +++ b/go-libp2p-kad-dht/dht_filters_test.go @@ -0,0 +1,79 @@ +package dht + +import ( + "context" + "net" + "sync/atomic" + "testing" + + ic "github.com/libp2p/go-libp2p/core/crypto" + "github.com/libp2p/go-libp2p/core/network" + "github.com/libp2p/go-libp2p/core/peer" + ma "github.com/multiformats/go-multiaddr" + manet "github.com/multiformats/go-multiaddr/net" +) + +func TestIsRelay(t *testing.T) { + a, _ := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/5002/p2p/QmdPU7PfRyKehdrP5A3WqmjyD6bhVpU1mLGKppa2FjGDjZ/p2p-circuit/p2p/QmVT6GYwjeeAF5TR485Yc58S3xRF5EFsZ5YAF4VcP3URHt") + if !isRelayAddr(a) { + t.Fatalf("thought %s was not a relay", a) + } + a, _ = ma.NewMultiaddr("/p2p-circuit/p2p/QmVT6GYwjeeAF5TR485Yc58S3xRF5EFsZ5YAF4VcP3URHt") + if !isRelayAddr(a) { + t.Fatalf("thought %s was not a relay", a) + } + a, _ = ma.NewMultiaddr("/ip4/127.0.0.1/tcp/5002/p2p/QmdPU7PfRyKehdrP5A3WqmjyD6bhVpU1mLGKppa2FjGDjZ") + if isRelayAddr(a) { + t.Fatalf("thought %s was a relay", a) + } + +} + +type mockConn struct { + local peer.AddrInfo + remote peer.AddrInfo + + isClosed atomic.Bool +} + +var _ network.Conn = (*mockConn)(nil) + +func (m *mockConn) ID() string { return "0" } +func (m *mockConn) Close() error { + m.isClosed.Store(true) + return nil +} +func (m *mockConn) NewStream(context.Context) (network.Stream, error) { return nil, nil } +func (m *mockConn) GetStreams() []network.Stream { return []network.Stream{} } +func (m *mockConn) Stat() network.ConnStats { + return network.ConnStats{Stats: network.Stats{Direction: network.DirOutbound}} +} +func (m *mockConn) Scope() network.ConnScope { return &network.NullScope{} } +func (m *mockConn) LocalMultiaddr() ma.Multiaddr { return m.local.Addrs[0] } +func (m *mockConn) RemoteMultiaddr() ma.Multiaddr { return m.remote.Addrs[0] } +func (m *mockConn) LocalPeer() peer.ID { return m.local.ID } +func (m *mockConn) LocalPrivateKey() ic.PrivKey { return nil } +func (m *mockConn) RemotePeer() peer.ID { return m.remote.ID } +func (m *mockConn) RemotePublicKey() ic.PubKey { return nil } +func (m *mockConn) ConnState() network.ConnectionState { return network.ConnectionState{} } +func (m *mockConn) IsClosed() bool { return m.isClosed.Load() } + +func TestFilterCaching(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + d := setupDHT(ctx, t, true) + + remote, _ := manet.FromIP(net.IPv4(8, 8, 8, 8)) + if privRTFilter(d, []network.Conn{&mockConn{ + local: d.Host().Peerstore().PeerInfo(d.Host().ID()), + remote: peer.AddrInfo{ID: "", Addrs: []ma.Multiaddr{remote}}, + }}) { + t.Fatal("filter should prevent public remote peers.") + } + + r1 := getCachedRouter() + r2 := getCachedRouter() + if r1 != r2 { + t.Fatal("router should be returned multiple times.") + } +} diff --git a/go-libp2p-kad-dht/dht_net.go b/go-libp2p-kad-dht/dht_net.go new file mode 100644 index 0000000..3e135df --- /dev/null +++ 
b/go-libp2p-kad-dht/dht_net.go @@ -0,0 +1,166 @@ +package dht + +import ( + "io" + "time" + + "github.com/libp2p/go-libp2p/core/network" + + "github.com/libp2p/go-libp2p-kad-dht/internal/net" + "github.com/libp2p/go-libp2p-kad-dht/metrics" + pb "github.com/libp2p/go-libp2p-kad-dht/pb" + + "github.com/libp2p/go-msgio" + "go.opencensus.io/stats" + "go.opencensus.io/tag" + "go.uber.org/zap" +) + +var dhtStreamIdleTimeout = 1 * time.Minute + +// ErrReadTimeout is an error that occurs when no message is read within the timeout period. +var ErrReadTimeout = net.ErrReadTimeout + +// handleNewStream implements the network.StreamHandler +func (dht *IpfsDHT) handleNewStream(s network.Stream) { + if dht.handleNewMessage(s) { + // If we exited without error, close gracefully. + _ = s.Close() + } else { + // otherwise, send an error. + _ = s.Reset() + } +} + +// Returns true on orderly completion of writes (so we can Close the stream). +func (dht *IpfsDHT) handleNewMessage(s network.Stream) bool { + ctx := dht.ctx + r := msgio.NewVarintReaderSize(s, network.MessageSizeMax) + + mPeer := s.Conn().RemotePeer() + + timer := time.AfterFunc(dhtStreamIdleTimeout, func() { _ = s.Reset() }) + defer timer.Stop() + + for { + if dht.getMode() != modeServer { + logger.Debugf("ignoring incoming dht message while not in server mode") + return false + } + + var req pb.Message + msgbytes, err := r.ReadMsg() + msgLen := len(msgbytes) + if err != nil { + r.ReleaseMsg(msgbytes) + if err == io.EOF { + return true + } + // This string test is necessary because there isn't a single stream reset error + // instance in use. + if c := baseLogger.Check(zap.DebugLevel, "error reading message"); c != nil && err.Error() != "stream reset" { + c.Write(zap.String("from", mPeer.String()), + zap.Error(err)) + } + if msgLen > 0 { + _ = stats.RecordWithTags(ctx, + []tag.Mutator{tag.Upsert(metrics.KeyMessageType, "UNKNOWN")}, + metrics.ReceivedMessages.M(1), + metrics.ReceivedMessageErrors.M(1), + metrics.ReceivedBytes.M(int64(msgLen)), + ) + } + return false + } + err = req.Unmarshal(msgbytes) + r.ReleaseMsg(msgbytes) + if err != nil { + if c := baseLogger.Check(zap.DebugLevel, "error unmarshaling message"); c != nil { + c.Write(zap.String("from", mPeer.String()), + zap.Error(err)) + } + _ = stats.RecordWithTags(ctx, + []tag.Mutator{tag.Upsert(metrics.KeyMessageType, "UNKNOWN")}, + metrics.ReceivedMessages.M(1), + metrics.ReceivedMessageErrors.M(1), + metrics.ReceivedBytes.M(int64(msgLen)), + ) + return false + } + + timer.Reset(dhtStreamIdleTimeout) + + startTime := time.Now() + ctx, _ := tag.New(ctx, + tag.Upsert(metrics.KeyMessageType, req.GetType().String()), + ) + + stats.Record(ctx, + metrics.ReceivedMessages.M(1), + metrics.ReceivedBytes.M(int64(msgLen)), + ) + + handler := dht.handlerForMsgType(req.GetType()) + if handler == nil { + stats.Record(ctx, metrics.ReceivedMessageErrors.M(1)) + if c := baseLogger.Check(zap.DebugLevel, "can't handle received message"); c != nil { + c.Write(zap.String("from", mPeer.String()), + zap.Int32("type", int32(req.GetType()))) + } + return false + } + + if c := baseLogger.Check(zap.DebugLevel, "handling message"); c != nil { + c.Write(zap.String("from", mPeer.String()), + zap.Int32("type", int32(req.GetType())), + zap.Binary("key", req.GetKey())) + } + resp, err := handler(ctx, mPeer, &req) + if err != nil { + stats.Record(ctx, metrics.ReceivedMessageErrors.M(1)) + if c := baseLogger.Check(zap.DebugLevel, "error handling message"); c != nil { + c.Write(zap.String("from", mPeer.String()), + 
zap.Int32("type", int32(req.GetType())), + zap.Binary("key", req.GetKey()), + zap.Error(err)) + } + return false + } + + if c := baseLogger.Check(zap.DebugLevel, "handled message"); c != nil { + c.Write(zap.String("from", mPeer.String()), + zap.Int32("type", int32(req.GetType())), + zap.Binary("key", req.GetKey()), + zap.Duration("time", time.Since(startTime))) + } + + if resp == nil { + continue + } + + // send out response msg + err = net.WriteMsg(s, resp) + if err != nil { + stats.Record(ctx, metrics.ReceivedMessageErrors.M(1)) + if c := baseLogger.Check(zap.DebugLevel, "error writing response"); c != nil { + c.Write(zap.String("from", mPeer.String()), + zap.Int32("type", int32(req.GetType())), + zap.Binary("key", req.GetKey()), + zap.Error(err)) + } + return false + } + + elapsedTime := time.Since(startTime) + + if c := baseLogger.Check(zap.DebugLevel, "responded to message"); c != nil { + c.Write(zap.String("from", mPeer.String()), + zap.Int32("type", int32(req.GetType())), + zap.Binary("key", req.GetKey()), + zap.Duration("time", elapsedTime)) + } + + latencyMillis := float64(elapsedTime) / float64(time.Millisecond) + stats.Record(ctx, metrics.InboundRequestLatency.M(latencyMillis)) + } +} diff --git a/go-libp2p-kad-dht/dht_options.go b/go-libp2p-kad-dht/dht_options.go new file mode 100644 index 0000000..250e4ca --- /dev/null +++ b/go-libp2p-kad-dht/dht_options.go @@ -0,0 +1,358 @@ +package dht + +import ( + "fmt" + "testing" + "time" + + dhtcfg "github.com/libp2p/go-libp2p-kad-dht/internal/config" + "github.com/libp2p/go-libp2p-kad-dht/providers" + "github.com/libp2p/go-libp2p-kbucket/peerdiversity" + record "github.com/libp2p/go-libp2p-record" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/protocol" + + ds "github.com/ipfs/go-datastore" + ma "github.com/multiformats/go-multiaddr" +) + +// ModeOpt describes what mode the dht should operate in +type ModeOpt = dhtcfg.ModeOpt + +const ( + // ModeAuto utilizes EvtLocalReachabilityChanged events sent over the event bus to dynamically switch the DHT + // between Client and Server modes based on network conditions + ModeAuto ModeOpt = iota + // ModeClient operates the DHT as a client only, it cannot respond to incoming queries + ModeClient + // ModeServer operates the DHT as a server, it can both send and respond to queries + ModeServer + // ModeAutoServer operates in the same way as ModeAuto, but acts as a server when reachability is unknown + ModeAutoServer +) + +// DefaultPrefix is the application specific prefix attached to all DHT protocols by default. +const DefaultPrefix protocol.ID = "/ipfs" + +type Option = dhtcfg.Option + +// ProviderStore sets the provider storage manager. +func ProviderStore(ps providers.ProviderStore) Option { + return func(c *dhtcfg.Config) error { + c.ProviderStore = ps + return nil + } +} + +// RoutingTableLatencyTolerance sets the maximum acceptable latency for peers +// in the routing table's cluster. +func RoutingTableLatencyTolerance(latency time.Duration) Option { + return func(c *dhtcfg.Config) error { + c.RoutingTable.LatencyTolerance = latency + return nil + } +} + +// RoutingTableRefreshQueryTimeout sets the timeout for routing table refresh +// queries. +func RoutingTableRefreshQueryTimeout(timeout time.Duration) Option { + return func(c *dhtcfg.Config) error { + c.RoutingTable.RefreshQueryTimeout = timeout + return nil + } +} + +// RoutingTableRefreshPeriod sets the period for refreshing buckets in the +// routing table. 
The DHT will refresh buckets every period by: +// +// 1. First searching for nearby peers to figure out how many buckets we should try to fill. +// 1. Then searching for a random key in each bucket that hasn't been queried in +// the last refresh period. +func RoutingTableRefreshPeriod(period time.Duration) Option { + return func(c *dhtcfg.Config) error { + c.RoutingTable.RefreshInterval = period + return nil + } +} + +// Datastore configures the DHT to use the specified datastore. +// +// Defaults to an in-memory (temporary) map. +func Datastore(ds ds.Batching) Option { + return func(c *dhtcfg.Config) error { + c.Datastore = ds + return nil + } +} + +// Mode configures which mode the DHT operates in (Client, Server, Auto). +// +// Defaults to ModeAuto. +func Mode(m ModeOpt) Option { + return func(c *dhtcfg.Config) error { + c.Mode = m + return nil + } +} + +// Validator configures the DHT to use the specified validator. +// +// Defaults to a namespaced validator that can validate both public key (under the "pk" +// namespace) and IPNS records (under the "ipns" namespace). Setting the validator +// implies that the user wants to control the validators and therefore the default +// public key and IPNS validators will not be added. +func Validator(v record.Validator) Option { + return func(c *dhtcfg.Config) error { + c.Validator = v + c.ValidatorChanged = true + return nil + } +} + +// NamespacedValidator adds a validator namespaced under `ns`. This option fails +// if the DHT is not using a `record.NamespacedValidator` as its validator (it +// uses one by default but this can be overridden with the `Validator` option). +// Adding a namespaced validator without changing the `Validator` will result in +// adding a new validator in addition to the default public key and IPNS validators. +// The "pk" and "ipns" namespaces cannot be overridden here unless a new `Validator` +// has been set first. +// +// Example: Given a validator registered as `NamespacedValidator("ipns", +// myValidator)`, all records with keys starting with `/ipns/` will be validated +// with `myValidator`. +func NamespacedValidator(ns string, v record.Validator) Option { + return func(c *dhtcfg.Config) error { + nsval, ok := c.Validator.(record.NamespacedValidator) + if !ok { + return fmt.Errorf("can only add namespaced validators to a NamespacedValidator") + } + nsval[ns] = v + return nil + } +} + +// ProtocolPrefix sets an application specific prefix to be attached to all DHT protocols. For example, +// /myapp/kad/1.0.0 instead of /ipfs/kad/1.0.0. Prefix should be of the form /myapp. +// +// Defaults to dht.DefaultPrefix +func ProtocolPrefix(prefix protocol.ID) Option { + return func(c *dhtcfg.Config) error { + c.ProtocolPrefix = prefix + return nil + } +} + +// ProtocolExtension adds an application specific protocol to the DHT protocol. For example, +// /ipfs/lan/kad/1.0.0 instead of /ipfs/kad/1.0.0. extension should be of the form /lan. +func ProtocolExtension(ext protocol.ID) Option { + return func(c *dhtcfg.Config) error { + c.ProtocolPrefix += ext + return nil + } +} + +// V1ProtocolOverride overrides the protocolID used for /kad/1.0.0 with another. This is an +// advanced feature, and should only be used to handle legacy networks that have not been +// using protocolIDs of the form /app/kad/1.0.0. 
+//
+// This option will override and ignore the ProtocolPrefix and ProtocolExtension options
+func V1ProtocolOverride(proto protocol.ID) Option {
+	return func(c *dhtcfg.Config) error {
+		c.V1ProtocolOverride = proto
+		return nil
+	}
+}
+
+// BucketSize configures the bucket size (k in the Kademlia paper) of the routing table.
+//
+// The default value is 20.
+func BucketSize(bucketSize int) Option {
+	return func(c *dhtcfg.Config) error {
+		c.BucketSize = bucketSize
+		return nil
+	}
+}
+
+// Concurrency configures the number of concurrent requests (alpha in the Kademlia paper) for a given query path.
+//
+// The default value is 10.
+func Concurrency(alpha int) Option {
+	return func(c *dhtcfg.Config) error {
+		c.Concurrency = alpha
+		return nil
+	}
+}
+
+// Resiliency configures the number of peers closest to a target that must have responded in order for a given query
+// path to complete.
+//
+// The default value is 3.
+func Resiliency(beta int) Option {
+	return func(c *dhtcfg.Config) error {
+		c.Resiliency = beta
+		return nil
+	}
+}
+
+// LookupCheckConcurrency configures the maximum number of goroutines that can be used to
+// perform a lookup check operation before adding a new node to the routing table.
+func LookupCheckConcurrency(n int) Option {
+	return func(c *dhtcfg.Config) error {
+		c.LookupCheckConcurrency = n
+		return nil
+	}
+}
+
+// MaxRecordAge specifies the maximum time that any node will hold onto a record ("PutValue record")
+// from the time it's received. This does not apply to any other forms of validity that
+// the record may contain.
+// For example, a record may contain an ipns entry with an EOL saying it's valid
+// until the year 2020 (a great time in the future). For that record to stick around
+// it must be rebroadcast more frequently than once every 'MaxRecordAge'.
+func MaxRecordAge(maxAge time.Duration) Option {
+	return func(c *dhtcfg.Config) error {
+		c.MaxRecordAge = maxAge
+		return nil
+	}
+}
+
+// DisableAutoRefresh completely disables 'auto-refresh' on the DHT routing
+// table. This means that we will neither refresh the routing table periodically
+// nor when the routing table size goes below the minimum threshold.
+func DisableAutoRefresh() Option {
+	return func(c *dhtcfg.Config) error {
+		c.RoutingTable.AutoRefresh = false
+		return nil
+	}
+}
+
+// DisableProviders disables storing and retrieving provider records.
+//
+// Defaults to enabled.
+//
+// WARNING: do not change this unless you're using a forked DHT (i.e., a private
+// network and/or distinct DHT protocols with the `Protocols` option).
+func DisableProviders() Option {
+	return func(c *dhtcfg.Config) error {
+		c.EnableProviders = false
+		return nil
+	}
+}
+
+// DisableValues disables storing and retrieving value records (including
+// public keys).
+//
+// Defaults to enabled.
+//
+// WARNING: do not change this unless you're using a forked DHT (i.e., a private
+// network and/or distinct DHT protocols with the `Protocols` option).
+func DisableValues() Option {
+	return func(c *dhtcfg.Config) error {
+		c.EnableValues = false
+		return nil
+	}
+}
+
+// QueryFilter sets a function that approves which peers may be dialed in a query
+func QueryFilter(filter QueryFilterFunc) Option {
+	return func(c *dhtcfg.Config) error {
+		c.QueryPeerFilter = filter
+		return nil
+	}
+}
+
+// RoutingTableFilter sets a function that approves which peers may be added to the routing table. The host should
+// already have at least one connection to the peer under consideration.
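+//
+// PublicRoutingTableFilter and PrivateRoutingTableFilter (see dht_filters.go)
+// are ready-made implementations of this signature.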
+func RoutingTableFilter(filter RouteTableFilterFunc) Option { + return func(c *dhtcfg.Config) error { + c.RoutingTable.PeerFilter = filter + return nil + } +} + +// BootstrapPeers configures the bootstrapping nodes that we will connect to to seed +// and refresh our Routing Table if it becomes empty. +func BootstrapPeers(bootstrappers ...peer.AddrInfo) Option { + return func(c *dhtcfg.Config) error { + c.BootstrapPeers = func() []peer.AddrInfo { + return bootstrappers + } + return nil + } +} + +// BootstrapPeersFunc configures the function that returns the bootstrapping nodes that we will +// connect to to seed and refresh our Routing Table if it becomes empty. +func BootstrapPeersFunc(getBootstrapPeers func() []peer.AddrInfo) Option { + return func(c *dhtcfg.Config) error { + c.BootstrapPeers = getBootstrapPeers + return nil + } +} + +// RoutingTablePeerDiversityFilter configures the implementation of the `PeerIPGroupFilter` that will be used +// to construct the diversity filter for the Routing Table. +// Please see the docs for `peerdiversity.PeerIPGroupFilter` AND `peerdiversity.Filter` for more details. +func RoutingTablePeerDiversityFilter(pg peerdiversity.PeerIPGroupFilter) Option { + return func(c *dhtcfg.Config) error { + c.RoutingTable.DiversityFilter = pg + return nil + } +} + +// disableFixLowPeersRoutine disables the "fixLowPeers" routine in the DHT. +// This is ONLY for tests. +func disableFixLowPeersRoutine(t *testing.T) Option { + return func(c *dhtcfg.Config) error { + c.DisableFixLowPeers = true + return nil + } +} + +// forceAddressUpdateProcessing forces the DHT to handle changes to the hosts addresses. +// This occurs even when AutoRefresh has been disabled. +// This is ONLY for tests. +func forceAddressUpdateProcessing(t *testing.T) Option { + return func(c *dhtcfg.Config) error { + c.TestAddressUpdateProcessing = true + return nil + } +} + +// EnableOptimisticProvide enables an optimization that skips the last hops of the provide process. +// This works by using the network size estimator (which uses the keyspace density of queries) +// to optimistically send ADD_PROVIDER requests when we most likely have found the last hop. +// It will also run some ADD_PROVIDER requests asynchronously in the background after returning, +// this allows to optimistically return earlier if some threshold number of RPCs have succeeded. +// The number of background/in-flight queries can be configured with the OptimisticProvideJobsPoolSize +// option. +// +// EXPERIMENTAL: This is an experimental option and might be removed in the future. Use at your own risk. +func EnableOptimisticProvide() Option { + return func(c *dhtcfg.Config) error { + c.EnableOptimisticProvide = true + return nil + } +} + +// OptimisticProvideJobsPoolSize allows to configure the asynchronicity limit for in-flight ADD_PROVIDER RPCs. +// It makes sense to set it to a multiple of optProvReturnRatio * BucketSize. Check the description of +// EnableOptimisticProvide for more details. +// +// EXPERIMENTAL: This is an experimental option and might be removed in the future. Use at your own risk. +func OptimisticProvideJobsPoolSize(size int) Option { + return func(c *dhtcfg.Config) error { + c.OptimisticProvideJobsPoolSize = size + return nil + } +} + +// AddressFilter allows to configure the address filtering function. +// This function is run before addresses are added to the peerstore. +// It is most useful to avoid adding localhost / local addresses. 
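+//
+// A minimal sketch (hypothetical filter that drops loopback addresses):
+//
+//	dht.New(ctx, h, dht.AddressFilter(func(addrs []ma.Multiaddr) []ma.Multiaddr {
+//		kept := make([]ma.Multiaddr, 0, len(addrs))
+//		for _, a := range addrs {
+//			if !manet.IsIPLoopback(a) {
+//				kept = append(kept, a)
+//			}
+//		}
+//		return kept
+//	}))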
+func AddressFilter(f func([]ma.Multiaddr) []ma.Multiaddr) Option { + return func(c *dhtcfg.Config) error { + c.AddressFilter = f + return nil + } +} diff --git a/go-libp2p-kad-dht/dht_test.go b/go-libp2p-kad-dht/dht_test.go new file mode 100644 index 0000000..e6b4f4e --- /dev/null +++ b/go-libp2p-kad-dht/dht_test.go @@ -0,0 +1,2478 @@ +package dht + +import ( + "bytes" + "context" + "encoding/binary" + "errors" + "fmt" + "math/rand" + "runtime" + "sort" + "strings" + "sync" + "testing" + "time" + + "github.com/libp2p/go-libp2p-kad-dht/internal/net" + "github.com/libp2p/go-libp2p-kad-dht/providers" + "github.com/libp2p/go-libp2p/core/crypto" + "github.com/libp2p/go-libp2p/core/event" + "github.com/libp2p/go-libp2p/core/network" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/peerstore" + "github.com/libp2p/go-libp2p/core/protocol" + "github.com/libp2p/go-libp2p/core/routing" + "github.com/libp2p/go-msgio" + ma "github.com/multiformats/go-multiaddr" + manet "github.com/multiformats/go-multiaddr/net" + "github.com/multiformats/go-multihash" + "github.com/multiformats/go-multistream" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + test "github.com/libp2p/go-libp2p-kad-dht/internal/testing" + pb "github.com/libp2p/go-libp2p-kad-dht/pb" + + u "github.com/ipfs/boxo/util" + "github.com/ipfs/go-cid" + detectrace "github.com/ipfs/go-detect-race" + kb "github.com/libp2p/go-libp2p-kbucket" + record "github.com/libp2p/go-libp2p-record" + bhost "github.com/libp2p/go-libp2p/p2p/host/basic" + swarmt "github.com/libp2p/go-libp2p/p2p/net/swarm/testing" +) + +var testCaseCids []cid.Cid + +func init() { + for i := 0; i < 100; i++ { + v := fmt.Sprintf("%d -- value", i) + + var newCid cid.Cid + switch i % 3 { + case 0: + mhv := u.Hash([]byte(v)) + newCid = cid.NewCidV0(mhv) + case 1: + mhv := u.Hash([]byte(v)) + newCid = cid.NewCidV1(cid.DagCBOR, mhv) + case 2: + rawMh := make([]byte, 12) + binary.PutUvarint(rawMh, cid.Raw) + binary.PutUvarint(rawMh[1:], 10) + copy(rawMh[2:], []byte(v)[:10]) + _, mhv, err := multihash.MHFromBytes(rawMh) + if err != nil { + panic(err) + } + newCid = cid.NewCidV1(cid.Raw, mhv) + } + testCaseCids = append(testCaseCids, newCid) + } +} + +type blankValidator struct{} + +func (blankValidator) Validate(_ string, _ []byte) error { return nil } +func (blankValidator) Select(_ string, _ [][]byte) (int, error) { return 0, nil } + +type testAtomicPutValidator struct { + test.TestValidator +} + +// selects the entry with the 'highest' last byte +func (testAtomicPutValidator) Select(_ string, bs [][]byte) (int, error) { + index := -1 + max := uint8(0) + for i, b := range bs { + if bytes.Equal(b, []byte("valid")) { + if index == -1 { + index = i + } + continue + } + + str := string(b) + n := str[len(str)-1] + if n > max { + max = n + index = i + } + + } + if index == -1 { + return -1, errors.New("no rec found") + } + return index, nil +} + +var testPrefix = ProtocolPrefix("/test") + +func setupDHT(ctx context.Context, t *testing.T, client bool, options ...Option) *IpfsDHT { + baseOpts := []Option{ + testPrefix, + NamespacedValidator("v", blankValidator{}), + DisableAutoRefresh(), + } + + if client { + baseOpts = append(baseOpts, Mode(ModeClient)) + } else { + baseOpts = append(baseOpts, Mode(ModeServer)) + } + + host, err := bhost.NewHost(swarmt.GenSwarm(t, swarmt.OptDisableReuseport), new(bhost.HostOpts)) + require.NoError(t, err) + host.Start() + t.Cleanup(func() { host.Close() }) + + d, err := New(ctx, host, 
append(baseOpts, options...)...) + require.NoError(t, err) + t.Cleanup(func() { d.Close() }) + return d +} + +func setupDHTS(t *testing.T, ctx context.Context, n int, options ...Option) []*IpfsDHT { + addrs := make([]ma.Multiaddr, n) + dhts := make([]*IpfsDHT, n) + peers := make([]peer.ID, n) + + sanityAddrsMap := make(map[string]struct{}) + sanityPeersMap := make(map[string]struct{}) + + for i := 0; i < n; i++ { + dhts[i] = setupDHT(ctx, t, false, options...) + peers[i] = dhts[i].PeerID() + addrs[i] = dhts[i].host.Addrs()[0] + + if _, lol := sanityAddrsMap[addrs[i].String()]; lol { + t.Fatal("While setting up DHTs address got duplicated.") + } else { + sanityAddrsMap[addrs[i].String()] = struct{}{} + } + if _, lol := sanityPeersMap[peers[i].String()]; lol { + t.Fatal("While setting up DHTs peerid got duplicated.") + } else { + sanityPeersMap[peers[i].String()] = struct{}{} + } + } + + return dhts +} + +func connectNoSync(t *testing.T, ctx context.Context, a, b *IpfsDHT) { + t.Helper() + + idB := b.self + addrB := b.peerstore.Addrs(idB) + if len(addrB) == 0 { + t.Fatal("peers setup incorrectly: no local address") + } + + if err := a.host.Connect(ctx, peer.AddrInfo{ID: idB, Addrs: addrB}); err != nil { + t.Fatal(err) + } +} + +func wait(t *testing.T, ctx context.Context, a, b *IpfsDHT) { + t.Helper() + + // loop until connection notification has been received. + // under high load, this may not happen as immediately as we would like. + for a.routingTable.Find(b.self) == "" { + select { + case <-ctx.Done(): + t.Fatal(ctx.Err()) + case <-time.After(time.Millisecond * 5): + } + } +} + +func connect(t *testing.T, ctx context.Context, a, b *IpfsDHT) { + t.Helper() + connectNoSync(t, ctx, a, b) + wait(t, ctx, a, b) + wait(t, ctx, b, a) +} + +func bootstrap(t *testing.T, ctx context.Context, dhts []*IpfsDHT) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + logger.Debugf("refreshing DHTs routing tables...") + + // tried async. sequential fares much better. compare: + // 100 async https://gist.github.com/jbenet/56d12f0578d5f34810b2 + // 100 sync https://gist.github.com/jbenet/6c59e7c15426e48aaedd + // probably because results compound + + start := rand.Intn(len(dhts)) // randomize to decrease bias. + for i := range dhts { + dht := dhts[(start+i)%len(dhts)] + select { + case err := <-dht.RefreshRoutingTable(): + if err != nil { + t.Error(err) + } + case <-ctx.Done(): + return + } + } +} + +// Check to make sure we always signal the RefreshRoutingTable channel. +func TestRefreshMultiple(t *testing.T) { + // TODO: What's with this test? How long should it take and why does RefreshRoutingTable not take a context? 
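+	// RefreshRoutingTable returns a channel that signals once the refresh
+	// completes, so each receive below blocks until that round is done.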
+ ctx, cancel := context.WithTimeout(context.Background(), 50*time.Second) + defer cancel() + + dhts := setupDHTS(t, ctx, 5) + defer func() { + for _, dht := range dhts { + dht.Close() + defer dht.host.Close() + } + }() + + for _, dht := range dhts[1:] { + connect(t, ctx, dhts[0], dht) + } + + a := dhts[0].RefreshRoutingTable() + time.Sleep(time.Nanosecond) + b := dhts[0].RefreshRoutingTable() + time.Sleep(time.Nanosecond) + c := dhts[0].RefreshRoutingTable() + + // make sure that all of these eventually return + select { + case <-a: + case <-ctx.Done(): + t.Fatal("first channel didn't signal") + } + select { + case <-b: + case <-ctx.Done(): + t.Fatal("second channel didn't signal") + } + select { + case <-c: + case <-ctx.Done(): + t.Fatal("third channel didn't signal") + } +} + +func TestValueGetSet(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + var dhts [5]*IpfsDHT + + for i := range dhts { + dhts[i] = setupDHT(ctx, t, false) + defer dhts[i].Close() + defer dhts[i].host.Close() + } + + connect(t, ctx, dhts[0], dhts[1]) + + t.Log("adding value on: ", dhts[0].self) + ctxT, cancel := context.WithTimeout(ctx, time.Second) + defer cancel() + err := dhts[0].PutValue(ctxT, "/v/hello", []byte("world")) + if err != nil { + t.Fatal(err) + } + + t.Log("requesting value on dhts: ", dhts[1].self) + ctxT, cancel = context.WithTimeout(ctx, time.Second*2*60) + defer cancel() + + val, err := dhts[1].GetValue(ctxT, "/v/hello") + if err != nil { + t.Fatal(err) + } + + if string(val) != "world" { + t.Fatalf("Expected 'world' got '%s'", string(val)) + } + + connect(t, ctx, dhts[2], dhts[0]) + connect(t, ctx, dhts[2], dhts[1]) + + t.Log("requesting value (offline) on dhts: ", dhts[2].self) + vala, err := dhts[2].GetValue(ctxT, "/v/hello", Quorum(0)) + if err != nil { + t.Fatal(err) + } + + if string(vala) != "world" { + t.Fatalf("Expected 'world' got '%s'", string(vala)) + } + t.Log("requesting value (online) on dhts: ", dhts[2].self) + val, err = dhts[2].GetValue(ctxT, "/v/hello") + if err != nil { + t.Fatal(err) + } + + if string(val) != "world" { + t.Fatalf("Expected 'world' got '%s'", string(val)) + } + + for _, d := range dhts[:3] { + connect(t, ctx, dhts[3], d) + } + connect(t, ctx, dhts[4], dhts[3]) + + t.Log("requesting value (requires peer routing) on dhts: ", dhts[4].self) + val, err = dhts[4].GetValue(ctxT, "/v/hello") + if err != nil { + t.Fatal(err) + } + + if string(val) != "world" { + t.Fatalf("Expected 'world' got '%s'", string(val)) + } +} + +func TestValueSetInvalid(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + dhtA := setupDHT(ctx, t, false) + dhtB := setupDHT(ctx, t, false) + + defer dhtA.Close() + defer dhtB.Close() + defer dhtA.host.Close() + defer dhtB.host.Close() + + dhtA.Validator.(record.NamespacedValidator)["v"] = test.TestValidator{} + dhtB.Validator.(record.NamespacedValidator)["v"] = blankValidator{} + + connect(t, ctx, dhtA, dhtB) + + testSetGet := func(val string, failset bool, exp string, experr error) { + t.Helper() + + ctxT, cancel := context.WithTimeout(ctx, time.Second) + defer cancel() + err := dhtA.PutValue(ctxT, "/v/hello", []byte(val)) + if failset { + if err == nil { + t.Error("expected set to fail") + } + } else { + if err != nil { + t.Error(err) + } + } + + ctxT, cancel = context.WithTimeout(ctx, time.Second*2) + defer cancel() + valb, err := dhtB.GetValue(ctxT, "/v/hello") + if err != experr { + t.Errorf("Set/Get %v: Expected %v error but got %v", val, experr, err) + } 
else if err == nil && string(valb) != exp { + t.Errorf("Expected '%v' got '%s'", exp, string(valb)) + } + } + + // Expired records should not be set + testSetGet("expired", true, "", routing.ErrNotFound) + // Valid record should be returned + testSetGet("valid", false, "valid", nil) + // Newer record should supersede previous record + testSetGet("newer", false, "newer", nil) + // Attempt to set older record again should be ignored + testSetGet("valid", true, "newer", nil) +} + +func TestContextShutDown(t *testing.T) { + t.Skip("This test is flaky, see https://github.com/libp2p/go-libp2p-kad-dht/issues/724.") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + dht := setupDHT(ctx, t, false) + + // context is alive + select { + case <-dht.Context().Done(): + t.Fatal("context should not be done") + default: + } + + // shut down dht + require.NoError(t, dht.Close()) + + // now context should be done + select { + case <-dht.Context().Done(): + default: + t.Fatal("context should be done") + } +} + +func TestSearchValue(t *testing.T) { + t.Skip("This test is flaky, see https://github.com/libp2p/go-libp2p-kad-dht/issues/723.") + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + dhtA := setupDHT(ctx, t, false) + dhtB := setupDHT(ctx, t, false) + + defer dhtA.Close() + defer dhtB.Close() + defer dhtA.host.Close() + defer dhtB.host.Close() + + connect(t, ctx, dhtA, dhtB) + + dhtA.Validator.(record.NamespacedValidator)["v"] = test.TestValidator{} + dhtB.Validator.(record.NamespacedValidator)["v"] = test.TestValidator{} + + ctxT, cancel := context.WithTimeout(ctx, time.Second) + defer cancel() + + err := dhtA.PutValue(ctxT, "/v/hello", []byte("valid")) + if err != nil { + t.Error(err) + } + + ctxT, cancel = context.WithTimeout(ctx, time.Second*2) + defer cancel() + valCh, err := dhtA.SearchValue(ctxT, "/v/hello", Quorum(0)) + if err != nil { + t.Fatal(err) + } + + select { + case v := <-valCh: + if string(v) != "valid" { + t.Errorf("expected 'valid', got '%s'", string(v)) + } + case <-ctxT.Done(): + t.Fatal(ctxT.Err()) + } + + err = dhtB.PutValue(ctxT, "/v/hello", []byte("newer")) + if err != nil { + t.Error(err) + } + + select { + case v := <-valCh: + if string(v) != "newer" { + t.Errorf("expected 'newer', got '%s'", string(v)) + } + case <-ctxT.Done(): + t.Fatal(ctxT.Err()) + } +} + +func TestValueGetInvalid(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + dhtA := setupDHT(ctx, t, false) + dhtB := setupDHT(ctx, t, false) + + defer dhtA.Close() + defer dhtB.Close() + defer dhtA.host.Close() + defer dhtB.host.Close() + + dhtA.Validator.(record.NamespacedValidator)["v"] = blankValidator{} + dhtB.Validator.(record.NamespacedValidator)["v"] = test.TestValidator{} + + connect(t, ctx, dhtA, dhtB) + + testSetGet := func(val string, exp string, experr error) { + t.Helper() + + ctxT, cancel := context.WithTimeout(ctx, time.Second) + defer cancel() + err := dhtA.PutValue(ctxT, "/v/hello", []byte(val)) + if err != nil { + t.Error(err) + } + + ctxT, cancel = context.WithTimeout(ctx, time.Second*2) + defer cancel() + valb, err := dhtB.GetValue(ctxT, "/v/hello") + if err != experr { + t.Errorf("Set/Get %v: Expected '%v' error but got '%v'", val, experr, err) + } else if err == nil && string(valb) != exp { + t.Errorf("Expected '%v' got '%s'", exp, string(valb)) + } + } + + // Expired records should not be returned + testSetGet("expired", "", routing.ErrNotFound) + // Valid record should be returned + 
testSetGet("valid", "valid", nil) + // Newer record should supersede previous record + testSetGet("newer", "newer", nil) + // Attempt to set older record again should be ignored + testSetGet("valid", "newer", nil) +} + +func TestProvides(t *testing.T) { + // t.Skip("skipping test to debug another") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + dhts := setupDHTS(t, ctx, 4) + defer func() { + for i := 0; i < 4; i++ { + dhts[i].Close() + defer dhts[i].host.Close() + } + }() + + connect(t, ctx, dhts[0], dhts[1]) + connect(t, ctx, dhts[1], dhts[2]) + connect(t, ctx, dhts[1], dhts[3]) + + for _, k := range testCaseCids { + logger.Debugf("announcing provider for %s", k) + if err := dhts[3].Provide(ctx, k, true); err != nil { + t.Fatal(err) + } + } + + // what is this timeout for? was 60ms before. + time.Sleep(time.Millisecond * 6) + + n := 0 + for _, c := range testCaseCids { + n = (n + 1) % 3 + + logger.Debugf("getting providers for %s from %d", c, n) + ctxT, cancel := context.WithTimeout(ctx, time.Second) + defer cancel() + provchan := dhts[n].FindProvidersAsync(ctxT, c, 1) + + select { + case prov := <-provchan: + if prov.ID == "" { + t.Fatal("Got back nil provider") + } + if prov.ID != dhts[3].self { + t.Fatal("Got back wrong provider") + } + if len(prov.Addrs) == 0 { + t.Fatal("Got no addresses back") + } + case <-ctxT.Done(): + t.Fatal("Did not get a provider back.") + } + } +} + +type testMessageSender struct { + sendRequest func(ctx context.Context, p peer.ID, pmes *pb.Message) (*pb.Message, error) + sendMessage func(ctx context.Context, p peer.ID, pmes *pb.Message) error +} + +var _ pb.MessageSender = (*testMessageSender)(nil) + +func (t testMessageSender) SendRequest(ctx context.Context, p peer.ID, pmes *pb.Message) (*pb.Message, error) { + return t.sendRequest(ctx, p, pmes) +} + +func (t testMessageSender) SendMessage(ctx context.Context, p peer.ID, pmes *pb.Message) error { + return t.sendMessage(ctx, p, pmes) +} + +func TestProvideAddressFilter(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + dhts := setupDHTS(t, ctx, 2) + + connect(t, ctx, dhts[0], dhts[1]) + testMaddr, _ := ma.StringCast("/ip4/99.99.99.99/tcp/9999") + + done := make(chan struct{}) + impl := net.NewMessageSenderImpl(dhts[0].host, dhts[0].protocols) + tms := &testMessageSender{ + sendMessage: func(ctx context.Context, p peer.ID, pmes *pb.Message) error { + defer close(done) + assert.Equal(t, pmes.Type, pb.Message_ADD_PROVIDER) + assert.Len(t, pmes.ProviderPeers[0].Addrs, 1) + assert.True(t, pmes.ProviderPeers[0].Addresses()[0].Equal(testMaddr)) + return impl.SendMessage(ctx, p, pmes) + }, + sendRequest: func(ctx context.Context, p peer.ID, pmes *pb.Message) (*pb.Message, error) { + return impl.SendRequest(ctx, p, pmes) + }, + } + pm, err := pb.NewProtocolMessenger(tms) + require.NoError(t, err) + + dhts[0].protoMessenger = pm + dhts[0].addrFilter = func(multiaddrs []ma.Multiaddr) []ma.Multiaddr { + return []ma.Multiaddr{testMaddr} + } + + if err := dhts[0].Provide(ctx, testCaseCids[0], true); err != nil { + t.Fatal(err) + } + + select { + case <-done: + case <-time.After(5 * time.Second): + t.Fatal("timeout") + } +} + +type testProviderManager struct { + addProvider func(ctx context.Context, key []byte, prov peer.AddrInfo) error + getProviders func(ctx context.Context, key []byte) ([]peer.AddrInfo, error) + close func() error +} + +var _ providers.ProviderStore = (*testProviderManager)(nil) + +func (t *testProviderManager) AddProvider(ctx 
context.Context, key []byte, prov peer.AddrInfo) error { + return t.addProvider(ctx, key, prov) +} + +func (t *testProviderManager) GetProviders(ctx context.Context, key []byte) ([]peer.AddrInfo, error) { + return t.getProviders(ctx, key) +} + +func (t *testProviderManager) Close() error { + return t.close() +} + +func TestHandleAddProviderAddressFilter(t *testing.T) { + ctx := context.Background() + + d := setupDHT(ctx, t, false) + provider := setupDHT(ctx, t, false) + + testMaddr, _ := ma.StringCast("/ip4/99.99.99.99/tcp/9999") + + d.addrFilter = func(multiaddrs []ma.Multiaddr) []ma.Multiaddr { + return []ma.Multiaddr{testMaddr} + } + + done := make(chan struct{}) + d.providerStore = &testProviderManager{ + addProvider: func(ctx context.Context, key []byte, prov peer.AddrInfo) error { + defer close(done) + assert.True(t, prov.Addrs[0].Equal(testMaddr)) + return nil + }, + close: func() error { return nil }, + } + + m1, _ := ma.StringCast("/ip4/55.55.55.55/tcp/5555") + m2, _ := ma.StringCast("/ip4/66.66.66.66/tcp/6666") + pmes := &pb.Message{ + Type: pb.Message_ADD_PROVIDER, + Key: []byte("test-key"), + ProviderPeers: pb.RawPeerInfosToPBPeers([]peer.AddrInfo{{ + ID: provider.self, + Addrs: []ma.Multiaddr{ + m1, + m2, + }, + }}), + } + + _, err := d.handleAddProvider(ctx, provider.self, pmes) + require.NoError(t, err) + + select { + case <-done: + case <-time.After(5 * time.Second): + t.Fatal("timeout") + } +} + +func TestLocalProvides(t *testing.T) { + // t.Skip("skipping test to debug another") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + dhts := setupDHTS(t, ctx, 4) + defer func() { + for i := 0; i < 4; i++ { + dhts[i].Close() + defer dhts[i].host.Close() + } + }() + + connect(t, ctx, dhts[0], dhts[1]) + connect(t, ctx, dhts[1], dhts[2]) + connect(t, ctx, dhts[1], dhts[3]) + + for _, k := range testCaseCids { + logger.Debugf("announcing provider for %s", k) + if err := dhts[3].Provide(ctx, k, false); err != nil { + t.Fatal(err) + } + } + + time.Sleep(time.Millisecond * 10) + + for _, c := range testCaseCids { + for i := 0; i < 3; i++ { + provs, _ := dhts[i].ProviderStore().GetProviders(ctx, c.Hash()) + if len(provs) > 0 { + t.Fatal("shouldnt know this") + } + } + } +} + +func TestAddressFilterProvide(t *testing.T) { + // t.Skip("skipping test to debug another") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + testMaddr, _ := ma.StringCast("/ip4/99.99.99.99/tcp/9999") + + d := setupDHT(ctx, t, false) + provider := setupDHT(ctx, t, false) + + d.addrFilter = func(maddrs []ma.Multiaddr) []ma.Multiaddr { + return []ma.Multiaddr{ + testMaddr, + } + } + + _, err := d.handleAddProvider(ctx, provider.self, &pb.Message{ + Type: pb.Message_ADD_PROVIDER, + Key: []byte("random-key"), + ProviderPeers: pb.PeerInfosToPBPeers(provider.host.Network(), []peer.AddrInfo{{ + ID: provider.self, + Addrs: provider.host.Addrs(), + }}), + }) + require.NoError(t, err) + + // because of the identify protocol we add all + // addresses to the peerstore, although the addresses + // will be filtered in the above handleAddProvider call + d.peerstore.AddAddrs(provider.self, provider.host.Addrs(), time.Hour) + + resp, err := d.handleGetProviders(ctx, d.self, &pb.Message{ + Type: pb.Message_GET_PROVIDERS, + Key: []byte("random-key"), + }) + require.NoError(t, err) + + assert.True(t, resp.ProviderPeers[0].Addresses()[0].Equal(testMaddr)) + assert.Len(t, resp.ProviderPeers[0].Addresses(), 1) +} + +// if minPeers or avgPeers is 0, dont test for it. 
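+// It polls checkForWellFormedTablesOnce every 5ms until the thresholds are
+// met or the timeout elapses.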
+func waitForWellFormedTables(t *testing.T, dhts []*IpfsDHT, minPeers, avgPeers int, timeout time.Duration) { + // test "well-formed-ness" (>= minPeers peers in every routing table) + t.Helper() + + timeoutA := time.After(timeout) + for { + select { + case <-timeoutA: + t.Errorf("failed to reach well-formed routing tables after %s", timeout) + return + case <-time.After(5 * time.Millisecond): + if checkForWellFormedTablesOnce(t, dhts, minPeers, avgPeers) { + // succeeded + return + } + } + } +} + +func checkForWellFormedTablesOnce(t *testing.T, dhts []*IpfsDHT, minPeers, avgPeers int) bool { + t.Helper() + totalPeers := 0 + for _, dht := range dhts { + rtlen := dht.routingTable.Size() + totalPeers += rtlen + if minPeers > 0 && rtlen < minPeers { + // t.Logf("routing table for %s only has %d peers (should have >%d)", dht.self, rtlen, minPeers) + return false + } + } + actualAvgPeers := totalPeers / len(dhts) + t.Logf("avg rt size: %d", actualAvgPeers) + if avgPeers > 0 && actualAvgPeers < avgPeers { + t.Logf("avg rt size: %d < %d", actualAvgPeers, avgPeers) + return false + } + return true +} + +func printRoutingTables(dhts []*IpfsDHT) { + // the routing tables should be full now. let's inspect them. + fmt.Printf("checking routing table of %d\n", len(dhts)) + for _, dht := range dhts { + fmt.Printf("checking routing table of %s\n", dht.self) + dht.routingTable.Print() + fmt.Println("") + } +} + +func TestRefresh(t *testing.T) { + if testing.Short() { + t.SkipNow() + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + nDHTs := 30 + dhts := setupDHTS(t, ctx, nDHTs) + defer func() { + for i := 0; i < nDHTs; i++ { + dhts[i].Close() + defer dhts[i].host.Close() + } + }() + + t.Logf("connecting %d dhts in a ring", nDHTs) + for i := 0; i < nDHTs; i++ { + connect(t, ctx, dhts[i], dhts[(i+1)%len(dhts)]) + } + + <-time.After(100 * time.Millisecond) + // bootstrap a few times until we get good tables. + t.Logf("bootstrapping them so they find each other %d", nDHTs) + + for { + bootstrap(t, ctx, dhts) + + if checkForWellFormedTablesOnce(t, dhts, 7, 10) { + break + } + + time.Sleep(time.Microsecond * 50) + } + + if u.Debug { + // the routing tables should be full now. let's inspect them. + printRoutingTables(dhts) + } +} + +func TestRefreshBelowMinRTThreshold(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + host, err := bhost.NewHost(swarmt.GenSwarm(t, swarmt.OptDisableReuseport), new(bhost.HostOpts)) + require.NoError(t, err) + host.Start() + + // enable auto bootstrap on A + dhtA, err := New( + ctx, + host, + testPrefix, + Mode(ModeServer), + NamespacedValidator("v", blankValidator{}), + ) + if err != nil { + t.Fatal(err) + } + + dhtB := setupDHT(ctx, t, false) + dhtC := setupDHT(ctx, t, false) + + defer func() { + dhtA.Close() + dhtA.host.Close() + + dhtB.Close() + dhtB.host.Close() + + dhtC.Close() + dhtC.host.Close() + }() + + connect(t, ctx, dhtA, dhtB) + connect(t, ctx, dhtB, dhtC) + + // we ONLY init bootstrap on A + dhtA.RefreshRoutingTable() + // and wait for one round to complete i.e. 
A should be connected to both B & C + waitForWellFormedTables(t, []*IpfsDHT{dhtA}, 2, 2, 20*time.Second) + + // now we create two new peers + dhtD := setupDHT(ctx, t, false) + dhtE := setupDHT(ctx, t, false) + + // connect them to each other + connect(t, ctx, dhtD, dhtE) + defer func() { + dhtD.Close() + dhtD.host.Close() + + dhtE.Close() + dhtE.host.Close() + }() + + // and then, on connecting the peer D to A, the min RT threshold gets triggered on A which leads to a bootstrap. + // since the default bootstrap scan interval is 30 mins - 1 hour, we can be sure that if bootstrap happens, + // it is because of the min RT threshold getting triggered (since default min value is 4 & we only have 2 peers in the RT when D gets connected) + connect(t, ctx, dhtA, dhtD) + + // and because of the above bootstrap, A also discovers E ! + waitForWellFormedTables(t, []*IpfsDHT{dhtA}, 4, 4, 10*time.Second) + time.Sleep(100 * time.Millisecond) + assert.Equal(t, dhtE.self, dhtA.routingTable.Find(dhtE.self), "A's routing table should have peer E!") +} + +func TestQueryWithEmptyRTShouldNotPanic(t *testing.T) { + ctx := context.Background() + d := setupDHT(ctx, t, false) + + // TODO This swallows the error for now, should we change it ? + // FindProviders + ps, _ := d.FindProviders(ctx, testCaseCids[0]) + require.Empty(t, ps) + + // GetClosestPeers + pc, err := d.GetClosestPeers(ctx, "key") + require.Nil(t, pc) + require.Equal(t, kb.ErrLookupFailure, err) + + // GetValue + best, err := d.GetValue(ctx, "key") + require.Empty(t, best) + require.Error(t, err) + + // SearchValue + bchan, err := d.SearchValue(ctx, "key") + require.Empty(t, bchan) + require.NoError(t, err) + + // Provide + err = d.Provide(ctx, testCaseCids[0], true) + require.Equal(t, kb.ErrLookupFailure, err) +} + +func TestPeriodicRefresh(t *testing.T) { + if testing.Short() { + t.SkipNow() + } + if runtime.GOOS == "windows" { + t.Skip("skipping due to #760") + } + if detectrace.WithRace() { + t.Skip("skipping due to race detector max goroutines") + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + nDHTs := 30 + dhts := setupDHTS(t, ctx, nDHTs) + defer func() { + for i := 0; i < nDHTs; i++ { + dhts[i].Close() + defer dhts[i].host.Close() + } + }() + + t.Logf("dhts are not connected. %d", nDHTs) + for _, dht := range dhts { + rtlen := dht.routingTable.Size() + if rtlen > 0 { + t.Errorf("routing table for %s should have 0 peers. has %d", dht.self, rtlen) + } + } + + for i := 0; i < nDHTs; i++ { + connect(t, ctx, dhts[i], dhts[(i+1)%len(dhts)]) + } + + t.Logf("DHTs are now connected to 1-2 others. %d", nDHTs) + for _, dht := range dhts { + rtlen := dht.routingTable.Size() + if rtlen > 2 { + t.Errorf("routing table for %s should have at most 2 peers. has %d", dht.self, rtlen) + } + } + + if u.Debug { + printRoutingTables(dhts) + } + + t.Logf("bootstrapping them so they find each other. %d", nDHTs) + var wg sync.WaitGroup + for _, dht := range dhts { + wg.Add(1) + go func(d *IpfsDHT) { + <-d.RefreshRoutingTable() + wg.Done() + }(dht) + } + + wg.Wait() + // this is async, and we dont know when it's finished with one cycle, so keep checking + // until the routing tables look better, or some long timeout for the failure case. 
+ waitForWellFormedTables(t, dhts, 7, 10, 20*time.Second) + + if u.Debug { + printRoutingTables(dhts) + } +} + +func TestProvidesMany(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("skipping due to #760") + } + if detectrace.WithRace() { + t.Skip("skipping due to race detector max goroutines") + } + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + nDHTs := 40 + dhts := setupDHTS(t, ctx, nDHTs) + defer func() { + for i := 0; i < nDHTs; i++ { + dhts[i].Close() + defer dhts[i].host.Close() + } + }() + + t.Logf("connecting %d dhts in a ring", nDHTs) + for i := 0; i < nDHTs; i++ { + connect(t, ctx, dhts[i], dhts[(i+1)%len(dhts)]) + } + + <-time.After(100 * time.Millisecond) + t.Logf("bootstrapping them so they find each other. %d", nDHTs) + ctxT, cancel := context.WithTimeout(ctx, 20*time.Second) + defer cancel() + bootstrap(t, ctxT, dhts) + + if u.Debug { + // the routing tables should be full now. let's inspect them. + t.Logf("checking routing table of %d", nDHTs) + for _, dht := range dhts { + fmt.Printf("checking routing table of %s\n", dht.self) + dht.routingTable.Print() + fmt.Println("") + } + } + + providers := make(map[cid.Cid]peer.ID) + + d := 0 + for _, c := range testCaseCids { + d = (d + 1) % len(dhts) + dht := dhts[d] + providers[c] = dht.self + + t.Logf("announcing provider for %s", c) + if err := dht.Provide(ctx, c, true); err != nil { + t.Fatal(err) + } + } + + // what is this timeout for? was 60ms before. + time.Sleep(time.Millisecond * 6) + + errchan := make(chan error) + + ctxT, cancel = context.WithTimeout(ctx, 5*time.Second) + defer cancel() + + var wg sync.WaitGroup + getProvider := func(dht *IpfsDHT, k cid.Cid) { + defer wg.Done() + + expected := providers[k] + + provchan := dht.FindProvidersAsync(ctxT, k, 1) + select { + case prov := <-provchan: + actual := prov.ID + if actual == "" { + errchan <- fmt.Errorf("Got back nil provider (%s at %s)", k, dht.self) + } else if actual != expected { + errchan <- fmt.Errorf("Got back wrong provider (%s != %s) (%s at %s)", + expected, actual, k, dht.self) + } + case <-ctxT.Done(): + errchan <- fmt.Errorf("Did not get a provider back (%s at %s)", k, dht.self) + } + } + + for _, c := range testCaseCids { + // everyone should be able to find it... + for _, dht := range dhts { + logger.Debugf("getting providers for %s at %s", c, dht.self) + wg.Add(1) + go getProvider(dht, c) + } + } + + // we need this because of printing errors + go func() { + wg.Wait() + close(errchan) + }() + + for err := range errchan { + t.Error(err) + } +} + +func TestProvidesAsync(t *testing.T) { + // t.Skip("skipping test to debug another") + if testing.Short() { + t.SkipNow() + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + dhts := setupDHTS(t, ctx, 4) + defer func() { + for i := 0; i < 4; i++ { + dhts[i].Close() + defer dhts[i].host.Close() + } + }() + + connect(t, ctx, dhts[0], dhts[1]) + connect(t, ctx, dhts[1], dhts[2]) + connect(t, ctx, dhts[1], dhts[3]) + + err := dhts[3].Provide(ctx, testCaseCids[0], true) + if err != nil { + t.Fatal(err) + } + + time.Sleep(time.Millisecond * 60) + + ctxT, cancel := context.WithTimeout(ctx, time.Millisecond*300) + defer cancel() + provs := dhts[0].FindProvidersAsync(ctxT, testCaseCids[0], 5) + select { + case p, ok := <-provs: + if !ok { + t.Fatal("Provider channel was closed...") + } + if p.ID == "" { + t.Fatal("Got back nil provider!") + } + if p.ID != dhts[3].self { + t.Fatalf("got a provider, but not the right one. 
%s", p) + } + case <-ctxT.Done(): + t.Fatal("Didnt get back providers") + } +} + +func TestLayeredGet(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + dhts := setupDHTS(t, ctx, 4) + defer func() { + for i := 0; i < 4; i++ { + dhts[i].Close() + defer dhts[i].host.Close() + } + }() + + connect(t, ctx, dhts[0], dhts[1]) + connect(t, ctx, dhts[1], dhts[2]) + connect(t, ctx, dhts[2], dhts[3]) + + err := dhts[3].PutValue(ctx, "/v/hello", []byte("world")) + if err != nil { + t.Fatal(err) + } + + time.Sleep(time.Millisecond * 6) + + ctxT, cancel := context.WithTimeout(ctx, time.Second) + defer cancel() + val, err := dhts[0].GetValue(ctxT, "/v/hello") + if err != nil { + t.Fatal(err) + } + + if string(val) != "world" { + t.Error("got wrong value") + } +} + +func TestUnfindablePeer(t *testing.T) { + if testing.Short() { + t.SkipNow() + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + dhts := setupDHTS(t, ctx, 4) + defer func() { + for i := 0; i < 4; i++ { + dhts[i].Close() + dhts[i].Host().Close() + } + }() + + connect(t, ctx, dhts[0], dhts[1]) + connect(t, ctx, dhts[1], dhts[2]) + connect(t, ctx, dhts[2], dhts[3]) + + // Give DHT 1 a bad addr for DHT 2. + dhts[1].host.Peerstore().ClearAddrs(dhts[2].PeerID()) + dhts[1].host.Peerstore().AddAddr(dhts[2].PeerID(), dhts[0].Host().Addrs()[0], time.Minute) + + ctxT, cancel := context.WithTimeout(ctx, time.Second) + defer cancel() + _, err := dhts[0].FindPeer(ctxT, dhts[3].PeerID()) + if err == nil { + t.Error("should have failed to find peer") + } + if ctxT.Err() != nil { + t.Error("FindPeer should have failed before context expired") + } +} + +func TestFindPeer(t *testing.T) { + // t.Skip("skipping test to debug another") + if testing.Short() { + t.SkipNow() + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + dhts := setupDHTS(t, ctx, 4) + defer func() { + for i := 0; i < 4; i++ { + dhts[i].Close() + dhts[i].host.Close() + } + }() + + connect(t, ctx, dhts[0], dhts[1]) + connect(t, ctx, dhts[1], dhts[2]) + connect(t, ctx, dhts[1], dhts[3]) + + ctxT, cancel := context.WithTimeout(ctx, time.Second) + defer cancel() + p, err := dhts[0].FindPeer(ctxT, dhts[2].PeerID()) + if err != nil { + t.Fatal(err) + } + + if p.ID == "" { + t.Fatal("Failed to find peer.") + } + + if p.ID != dhts[2].PeerID() { + t.Fatal("Didnt find expected peer.") + } +} + +func TestFindPeerWithQueryFilter(t *testing.T) { + // t.Skip("skipping test to debug another") + if testing.Short() { + t.SkipNow() + } + if runtime.GOOS == "windows" { + t.Skip("skipping due to #760") + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + filteredPeer, err := bhost.NewHost(swarmt.GenSwarm(t, swarmt.OptDisableReuseport), new(bhost.HostOpts)) + require.NoError(t, err) + filteredPeer.Start() + defer filteredPeer.Close() + dhts := setupDHTS(t, ctx, 4, QueryFilter(func(_ interface{}, ai peer.AddrInfo) bool { + return ai.ID != filteredPeer.ID() + })) + defer func() { + for i := 0; i < 4; i++ { + dhts[i].Close() + dhts[i].host.Close() + } + }() + + connect(t, ctx, dhts[0], dhts[1]) + connect(t, ctx, dhts[1], dhts[2]) + connect(t, ctx, dhts[1], dhts[3]) + + err = filteredPeer.Connect(ctx, peer.AddrInfo{ + ID: dhts[2].host.ID(), + Addrs: dhts[2].host.Addrs(), + }) + require.NoError(t, err) + + require.Eventually(t, func() bool { + return len(dhts[2].host.Network().ConnsToPeer(filteredPeer.ID())) > 0 + }, 5*time.Millisecond, time.Millisecond, "failed to connect to peer") 
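+
+	// The query filter only stops us from dialing filteredPeer directly;
+	// dhts[2] can still report its addresses, so FindPeer should succeed.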
+
+	ctxT, cancel := context.WithTimeout(ctx, time.Second)
+	defer cancel()
+
+	p, err := dhts[0].FindPeer(ctxT, filteredPeer.ID())
+	require.NoError(t, err)
+
+	require.NotEmpty(t, p.ID, "Failed to find peer.")
+	require.Equal(t, filteredPeer.ID(), p.ID, "Didn't find expected peer.")
+}
+
+func TestConnectCollision(t *testing.T) {
+	if testing.Short() {
+		t.SkipNow()
+	}
+
+	runTimes := 10
+
+	for rtime := 0; rtime < runTimes; rtime++ {
+		logger.Info("Running Time: ", rtime)
+
+		ctx, cancel := context.WithCancel(context.Background())
+
+		dhtA := setupDHT(ctx, t, false)
+		dhtB := setupDHT(ctx, t, false)
+
+		addrA := dhtA.peerstore.Addrs(dhtA.self)[0]
+		addrB := dhtB.peerstore.Addrs(dhtB.self)[0]
+
+		peerA := dhtA.self
+		peerB := dhtB.self
+
+		errs := make(chan error)
+		go func() {
+			dhtA.peerstore.AddAddr(peerB, addrB, peerstore.TempAddrTTL)
+			pi := peer.AddrInfo{ID: peerB}
+			err := dhtA.host.Connect(ctx, pi)
+			errs <- err
+		}()
+		go func() {
+			dhtB.peerstore.AddAddr(peerA, addrA, peerstore.TempAddrTTL)
+			pi := peer.AddrInfo{ID: peerA}
+			err := dhtB.host.Connect(ctx, pi)
+			errs <- err
+		}()
+
+		timeout := time.After(5 * time.Second)
+		select {
+		case e := <-errs:
+			if e != nil {
+				t.Fatal(e)
+			}
+		case <-timeout:
+			t.Fatal("Timeout received!")
+		}
+		select {
+		case e := <-errs:
+			if e != nil {
+				t.Fatal(e)
+			}
+		case <-timeout:
+			t.Fatal("Timeout received!")
+		}
+
+		dhtA.Close()
+		dhtB.Close()
+		dhtA.host.Close()
+		dhtB.host.Close()
+		cancel()
+	}
+}
+
+func TestBadProtoMessages(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	d := setupDHT(ctx, t, false)
+
+	nilrec := new(pb.Message)
+	if _, err := d.handlePutValue(ctx, "testpeer", nilrec); err == nil {
+		t.Fatal("should have errored on nil record")
+	}
+}
+
+func TestAtomicPut(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	d := setupDHT(ctx, t, false)
+	d.Validator = testAtomicPutValidator{}
+
+	// helper to put a record
+	key := "testkey"
+	putRecord := func(value []byte) error {
+		rec := record.MakePutRecord(key, value)
+		pmes := pb.NewMessage(pb.Message_PUT_VALUE, rec.Key, 0)
+		pmes.Record = rec
+		_, err := d.handlePutValue(ctx, "testpeer", pmes)
+		return err
+	}
+
+	// put a valid record
+	if err := putRecord([]byte("valid")); err != nil {
+		t.Fatal("should not have errored on a valid record")
+	}
+
+	// simultaneous puts for old & new values
+	values := [][]byte{[]byte("newer1"), []byte("newer7"), []byte("newer3"), []byte("newer5")}
+	var wg sync.WaitGroup
+	for _, v := range values {
+		wg.Add(1)
+		go func(v []byte) {
+			defer wg.Done()
+			_ = putRecord(v) // we expect some of these to fail
+		}(v)
+	}
+	wg.Wait()
+
+	// get should return the newest value
+	pmes := pb.NewMessage(pb.Message_GET_VALUE, []byte(key), 0)
+	msg, err := d.handleGetValue(ctx, "testkey", pmes)
+	if err != nil {
+		t.Fatalf("should not have errored on final get, but got %+v", err)
+	}
+	if string(msg.GetRecord().Value) != "newer7" {
+		t.Fatalf("Expected 'newer7' got '%s'", string(msg.GetRecord().Value))
+	}
+}
+
+func TestClientModeConnect(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	a := setupDHT(ctx, t, false)
+	b := setupDHT(ctx, t, true)
+
+	connectNoSync(t, ctx, a, b)
+
+	c := testCaseCids[0]
+	p := peer.ID("TestPeer")
+	a.ProviderStore().AddProvider(ctx, c.Hash(), peer.AddrInfo{ID: p})
+	time.Sleep(time.Millisecond * 5) // just in case...
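The testAtomicPutValidator installed in TestAtomicPut above is defined elsewhere in this file. A minimal sketch consistent with the test's expectation that "newer7" beats the other "newerN" values — assuming selection by the highest trailing byte, and assuming bytes/errors are imported — might look like:

	// Sketch only: prefer the candidate whose last byte is largest; the
	// seed record "valid" loses to any numbered value.
	func (testAtomicPutValidator) Select(_ string, bs [][]byte) (int, error) {
		best, max := -1, byte(0)
		for i, b := range bs {
			if bytes.Equal(b, []byte("valid")) {
				if best == -1 {
					best = i
				}
				continue
			}
			if len(b) > 0 && b[len(b)-1] > max {
				best, max = i, b[len(b)-1]
			}
		}
		if best == -1 {
			return -1, errors.New("no record found")
		}
		return best, nil
	}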
+
+	provs, err := b.FindProviders(ctx, c)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if len(provs) == 0 {
+		t.Fatal("Expected to get a provider back")
+	}
+
+	if provs[0].ID != p {
+		t.Fatal("expected it to be our test peer")
+	}
+	if a.routingTable.Find(b.self) != "" {
+		t.Fatal("DHT clients should not be added to routing tables")
+	}
+	if b.routingTable.Find(a.self) == "" {
+		t.Fatal("DHT server should have been added to the dht client's routing table")
+	}
+}
+
+func TestInvalidServer(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	s0 := setupDHT(ctx, t, false, BucketSize(2)) // server
+	s1 := setupDHT(ctx, t, false, BucketSize(2)) // server
+	m0 := setupDHT(ctx, t, false, BucketSize(2)) // misbehaving server
+	m1 := setupDHT(ctx, t, false, BucketSize(2)) // misbehaving server
+
+	// make m0 and m1 advertise all dht server protocols, but answer every
+	// request with an empty response message
+	for _, proto := range s0.serverProtocols {
+		for _, m := range []*IpfsDHT{m0, m1} {
+			// Answer every request with an empty response message.
+			m.host.SetStreamHandler(proto, func(s network.Stream) {
+				r := msgio.NewVarintReaderSize(s, network.MessageSizeMax)
+				msgbytes, err := r.ReadMsg()
+				if err != nil {
+					t.Fatal(err)
+				}
+				var req pb.Message
+				err = req.Unmarshal(msgbytes)
+				if err != nil {
+					t.Fatal(err)
+				}
+
+				// answer with an empty response message
+				resp := pb.NewMessage(req.GetType(), nil, req.GetClusterLevel())
+
+				// send out response msg
+				err = net.WriteMsg(s, resp)
+				if err != nil {
+					t.Fatal(err)
+				}
+			})
+		}
+	}
+
+	// connect s0 and m0
+	connectNoSync(t, ctx, s0, m0)
+
+	// add a provider (p) for a key (k) to s0
+	k := testCaseCids[0]
+	p := peer.ID("TestPeer")
+	s0.ProviderStore().AddProvider(ctx, k.Hash(), peer.AddrInfo{ID: p})
+	time.Sleep(time.Millisecond * 5) // just in case...
+
+	// find the provider for k from m0
+	provs, err := m0.FindProviders(ctx, k)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(provs) == 0 {
+		t.Fatal("Expected to get a provider back")
+	}
+	if provs[0].ID != p {
+		t.Fatal("expected it to be our test peer")
+	}
+
+	// verify that m0 and s0 contain each other in their routing tables
+	if s0.routingTable.Find(m0.self) == "" {
+		// m0 is added to s0 routing table even though it is misbehaving, because
+		// s0's routing table is not well populated, so s0 isn't picky about who it adds.
+		t.Fatal("Misbehaving DHT servers should be added to routing table if not well populated")
+	}
+	if m0.routingTable.Find(s0.self) == "" {
+		t.Fatal("DHT server should have been added to the misbehaving server routing table")
+	}
+
+	// connect s0 to both s1 and m1
+	connectNoSync(t, ctx, s0, s1)
+	connectNoSync(t, ctx, s0, m1)
+
+	// s1 should be added to s0's routing table. Then, because s0's routing table
+	// contains more than bucketSize (2) entries, lookupCheck is enabled and m1
+	// shouldn't be added, because it fails the lookupCheck (it answers every
+	// request with an empty response).
+	if s0.routingTable.Find(s1.self) == "" {
+		t.Fatal("Well behaving DHT server should have been added to the server routing table")
+	}
+	if s0.routingTable.Find(m1.self) != "" {
+		t.Fatal("Misbehaving DHT servers should not be added to routing table if well populated")
+	}
+}
+
+func TestClientModeFindPeer(t *testing.T) {
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	defer cancel()
+
+	a := setupDHT(ctx, t, false)
+	b := setupDHT(ctx, t, true)
+	c := setupDHT(ctx, t, true)
+
+	connectNoSync(t, ctx, b, a)
+	connectNoSync(t, ctx, c, a)
+
+	// Can't use `connect` because b and c are only clients.
wait(t, ctx, b, a)
+	wait(t, ctx, c, a)
+
+	pi, err := c.FindPeer(ctx, b.self)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(pi.Addrs) == 0 {
+		t.Fatal("should have found addresses for node b")
+	}
+
+	err = c.host.Connect(ctx, pi)
+	if err != nil {
+		t.Fatal(err)
+	}
+}
+
+func minInt(a, b int) int {
+	if a < b {
+		return a
+	}
+	return b
+}
+
+func TestFindPeerQueryMinimal(t *testing.T) {
+	testFindPeerQuery(t, 2, 22, 1)
+}
+
+func TestFindPeerQuery(t *testing.T) {
+	if detectrace.WithRace() {
+		t.Skip("skipping due to race detector max goroutines")
+	}
+
+	if testing.Short() {
+		t.Skip("skipping test in short mode")
+	}
+	testFindPeerQuery(t, 5, 40, 3)
+}
+
+// NOTE: You must have AT LEAST (minRTRefreshThreshold+1) test peers before using this.
+func testFindPeerQuery(t *testing.T,
+	bootstrappers, // Number of nodes connected to the querying node
+	leafs, // Number of nodes that might be connected to from the bootstrappers
+	bootstrapConns int, // Number of bootstrappers each leaf should connect to.
+) {
+	if runtime.GOOS == "windows" {
+		t.Skip("skipping due to #760")
+	}
+
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	dhts := setupDHTS(t, ctx, 1+bootstrappers+leafs, BucketSize(4))
+	defer func() {
+		for _, d := range dhts {
+			d.Close()
+			d.host.Close()
+		}
+	}()
+
+	t.Log("connecting")
+
+	mrand := rand.New(rand.NewSource(42))
+	guy := dhts[0]
+	others := dhts[1:]
+	for i := 0; i < leafs; i++ {
+		for _, v := range mrand.Perm(bootstrappers)[:bootstrapConns] {
+			connectNoSync(t, ctx, others[v], others[bootstrappers+i])
+		}
+	}
+
+	for i := 0; i < bootstrappers; i++ {
+		connectNoSync(t, ctx, guy, others[i])
+	}
+
+	t.Log("waiting for routing tables")
+
+	// give some time for things to settle down
+	waitForWellFormedTables(t, dhts, bootstrapConns, bootstrapConns, 5*time.Second)
+
+	t.Log("refreshing")
+
+	var wg sync.WaitGroup
+	for _, dht := range dhts {
+		wg.Add(1)
+		go func(d *IpfsDHT) {
+			<-d.RefreshRoutingTable()
+			wg.Done()
+		}(dht)
+	}
+
+	wg.Wait()
+
+	t.Log("waiting for routing tables again")
+
+	// Wait for refresh to work. At least one bucket should be full.
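waitForWellFormedTables, used heavily in these tests, is a helper defined elsewhere in this file. A plausible sketch of its contract — the exact min/max semantics are an assumption here, with maxPeers == 0 read as "no upper bound" — is:

	// Sketch: poll every routing table until each holds at least minPeers
	// (and, when maxPeers > 0, at most maxPeers) entries, failing after timeout.
	func waitForWellFormedTablesSketch(t *testing.T, dhts []*IpfsDHT, minPeers, maxPeers int, timeout time.Duration) {
		t.Helper()
		deadline := time.After(timeout)
		for {
			ok := true
			for _, d := range dhts {
				if n := d.routingTable.Size(); n < minPeers || (maxPeers > 0 && n > maxPeers) {
					ok = false
					break
				}
			}
			if ok {
				return
			}
			select {
			case <-deadline:
				t.Fatal("routing tables did not settle in time")
			case <-time.After(5 * time.Millisecond):
			}
		}
	}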
+ waitForWellFormedTables(t, dhts, 4, 0, 5*time.Second) + + var peers []peer.ID + for _, d := range others { + peers = append(peers, d.PeerID()) + } + + t.Log("querying") + + val := "foobar" + rtval := kb.ConvertKey(val) + + outpeers, err := guy.GetClosestPeers(ctx, val) + require.NoError(t, err) + + sort.Sort(peer.IDSlice(outpeers)) + + exp := kb.SortClosestPeers(peers, rtval)[:minInt(guy.bucketSize, len(peers))] + t.Logf("got %d peers", len(outpeers)) + got := kb.SortClosestPeers(outpeers, rtval) + + assert.EqualValues(t, exp, got) +} + +func TestFindClosestPeers(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + nDHTs := 30 + dhts := setupDHTS(t, ctx, nDHTs) + defer func() { + for i := 0; i < nDHTs; i++ { + dhts[i].Close() + defer dhts[i].host.Close() + } + }() + + t.Logf("connecting %d dhts in a ring", nDHTs) + for i := 0; i < nDHTs; i++ { + connect(t, ctx, dhts[i], dhts[(i+1)%len(dhts)]) + } + + querier := dhts[1] + peers, err := querier.GetClosestPeers(ctx, "foo") + if err != nil { + t.Fatal(err) + } + + if len(peers) < querier.beta { + t.Fatalf("got wrong number of peers (got %d, expected at least %d)", len(peers), querier.beta) + } +} + +func TestFixLowPeers(t *testing.T) { + ctx := context.Background() + + dhts := setupDHTS(t, ctx, minRTRefreshThreshold+5) + + defer func() { + for _, d := range dhts { + d.Close() + d.Host().Close() + } + }() + + mainD := dhts[0] + + // connect it to everyone else + for _, d := range dhts[1:] { + mainD.peerstore.AddAddrs(d.self, d.peerstore.Addrs(d.self), peerstore.TempAddrTTL) + require.NoError(t, mainD.Host().Connect(ctx, peer.AddrInfo{ID: d.self})) + } + + waitForWellFormedTables(t, []*IpfsDHT{mainD}, minRTRefreshThreshold, minRTRefreshThreshold+4, 5*time.Second) + + // run a refresh on all of them + for _, d := range dhts { + err := <-d.RefreshRoutingTable() + require.NoError(t, err) + } + + // now remove peers from RT so threshold gets hit + for _, d := range dhts[3:] { + mainD.routingTable.RemovePeer(d.self) + } + + // but we will still get enough peers in the RT because of fix low Peers + waitForWellFormedTables(t, []*IpfsDHT{mainD}, minRTRefreshThreshold, minRTRefreshThreshold, 5*time.Second) +} + +func TestProvideDisabled(t *testing.T) { + k := testCaseCids[0] + kHash := k.Hash() + for i := 0; i < 3; i++ { + enabledA := (i & 0x1) > 0 + enabledB := (i & 0x2) > 0 + t.Run(fmt.Sprintf("a=%v/b=%v", enabledA, enabledB), func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + var optsA, optsB []Option + optsA = append(optsA, ProtocolPrefix("/provMaybeDisabled")) + optsB = append(optsB, ProtocolPrefix("/provMaybeDisabled")) + + if !enabledA { + optsA = append(optsA, DisableProviders()) + } + if !enabledB { + optsB = append(optsB, DisableProviders()) + } + + dhtA := setupDHT(ctx, t, false, optsA...) + dhtB := setupDHT(ctx, t, false, optsB...) 
+ + defer dhtA.Close() + defer dhtB.Close() + defer dhtA.host.Close() + defer dhtB.host.Close() + + connect(t, ctx, dhtA, dhtB) + + err := dhtB.Provide(ctx, k, true) + if enabledB { + if err != nil { + t.Fatal("put should have succeeded on node B", err) + } + } else { + if err != routing.ErrNotSupported { + t.Fatal("should not have put the value to node B", err) + } + _, err = dhtB.FindProviders(ctx, k) + if err != routing.ErrNotSupported { + t.Fatal("get should have failed on node B") + } + provs, _ := dhtB.ProviderStore().GetProviders(ctx, kHash) + if len(provs) != 0 { + t.Fatal("node B should not have found local providers") + } + } + + provs, err := dhtA.FindProviders(ctx, k) + if enabledA { + if len(provs) != 0 { + t.Fatal("node A should not have found providers") + } + } else { + if err != routing.ErrNotSupported { + t.Fatal("node A should not have found providers") + } + } + provAddrs, _ := dhtA.ProviderStore().GetProviders(ctx, kHash) + if len(provAddrs) != 0 { + t.Fatal("node A should not have found local providers") + } + }) + } +} + +func TestHandleRemotePeerProtocolChanges(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + os := []Option{ + testPrefix, + Mode(ModeServer), + NamespacedValidator("v", blankValidator{}), + DisableAutoRefresh(), + } + + // start host 1 that speaks dht v1 + hA, err := bhost.NewHost(swarmt.GenSwarm(t, swarmt.OptDisableReuseport), new(bhost.HostOpts)) + require.NoError(t, err) + hA.Start() + defer hA.Close() + dhtA, err := New(ctx, hA, os...) + require.NoError(t, err) + defer dhtA.Close() + + // start host 2 that also speaks dht v1 + hB, err := bhost.NewHost(swarmt.GenSwarm(t, swarmt.OptDisableReuseport), new(bhost.HostOpts)) + require.NoError(t, err) + hB.Start() + defer hB.Close() + dhtB, err := New(ctx, hB, os...) + require.NoError(t, err) + defer dhtB.Close() + + connect(t, ctx, dhtA, dhtB) + + // now assert both have each other in their RT + waitForWellFormedTables(t, []*IpfsDHT{dhtA, dhtB}, 1, 1, 10*time.Second) + + // dhtB becomes a client + require.NoError(t, dhtB.setMode(modeClient)) + + // which means that dhtA should evict it from it's RT + waitForWellFormedTables(t, []*IpfsDHT{dhtA}, 0, 0, 10*time.Second) + + // dhtB becomes a server + require.NoError(t, dhtB.setMode(modeServer)) + + // which means dhtA should have it in the RT again because of fixLowPeers + waitForWellFormedTables(t, []*IpfsDHT{dhtA}, 1, 1, 10*time.Second) +} + +func TestGetSetPluggedProtocol(t *testing.T) { + t.Run("PutValue/GetValue - same protocol", func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + os := []Option{ + ProtocolPrefix("/esh"), + Mode(ModeServer), + NamespacedValidator("v", blankValidator{}), + DisableAutoRefresh(), + } + + hA, err := bhost.NewHost(swarmt.GenSwarm(t, swarmt.OptDisableReuseport), new(bhost.HostOpts)) + require.NoError(t, err) + hA.Start() + defer hA.Close() + dhtA, err := New(ctx, hA, os...) + require.NoError(t, err) + defer dhtA.Close() + + hB, err := bhost.NewHost(swarmt.GenSwarm(t, swarmt.OptDisableReuseport), new(bhost.HostOpts)) + require.NoError(t, err) + hB.Start() + defer hB.Close() + dhtB, err := New(ctx, hB, os...) 
+ require.NoError(t, err) + defer dhtB.Close() + + connect(t, ctx, dhtA, dhtB) + + ctxT, cancel := context.WithTimeout(ctx, time.Second) + defer cancel() + err = dhtA.PutValue(ctxT, "/v/cat", []byte("meow")) + require.NoError(t, err) + + value, err := dhtB.GetValue(ctxT, "/v/cat") + require.NoError(t, err) + + require.Equal(t, "meow", string(value)) + }) + + t.Run("DHT routing table for peer A won't contain B if A and B don't use same protocol", func(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + hA, err := bhost.NewHost(swarmt.GenSwarm(t, swarmt.OptDisableReuseport), new(bhost.HostOpts)) + require.NoError(t, err) + hA.Start() + defer hA.Close() + dhtA, err := New(ctx, hA, []Option{ + ProtocolPrefix("/esh"), + Mode(ModeServer), + NamespacedValidator("v", blankValidator{}), + DisableAutoRefresh(), + }...) + require.NoError(t, err) + defer dhtA.Close() + + hB, err := bhost.NewHost(swarmt.GenSwarm(t, swarmt.OptDisableReuseport), new(bhost.HostOpts)) + require.NoError(t, err) + hB.Start() + defer hB.Close() + dhtB, err := New(ctx, hB, []Option{ + ProtocolPrefix("/lsr"), + Mode(ModeServer), + NamespacedValidator("v", blankValidator{}), + DisableAutoRefresh(), + }...) + require.NoError(t, err) + defer dhtB.Close() + + connectNoSync(t, ctx, dhtA, dhtB) + + // We don't expect connection notifications for A to reach B (or vice-versa), given + // that they've been configured with different protocols - but we'll give them a + // chance, anyhow. + time.Sleep(time.Second * 2) + + err = dhtA.PutValue(ctx, "/v/cat", []byte("meow")) + if err == nil || !strings.Contains(err.Error(), "failed to find any peer in table") { + t.Fatalf("put should not have been able to find any peers in routing table, err:'%v'", err) + } + + v, err := dhtB.GetValue(ctx, "/v/cat") + if v != nil || err != routing.ErrNotFound { + t.Fatalf("get should have failed from not being able to find the value, err: '%v'", err) + } + }) +} + +func TestPing(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ds := setupDHTS(t, ctx, 2) + ds[0].Host().Peerstore().AddAddrs(ds[1].PeerID(), ds[1].Host().Addrs(), peerstore.AddressTTL) + assert.NoError(t, ds[0].Ping(context.Background(), ds[1].PeerID())) +} + +func TestClientModeAtInit(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + pinger := setupDHT(ctx, t, false) + client := setupDHT(ctx, t, true) + pinger.Host().Peerstore().AddAddrs(client.PeerID(), client.Host().Addrs(), peerstore.AddressTTL) + err := pinger.Ping(context.Background(), client.PeerID()) + assert.True(t, errors.Is(err, multistream.ErrNotSupported[protocol.ID]{})) +} + +func TestModeChange(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + clientOnly := setupDHT(ctx, t, true) + clientToServer := setupDHT(ctx, t, true) + clientOnly.Host().Peerstore().AddAddrs(clientToServer.PeerID(), clientToServer.Host().Addrs(), peerstore.AddressTTL) + err := clientOnly.Ping(ctx, clientToServer.PeerID()) + assert.True(t, errors.Is(err, multistream.ErrNotSupported[protocol.ID]{})) + err = clientToServer.setMode(modeServer) + assert.Nil(t, err) + err = clientOnly.Ping(ctx, clientToServer.PeerID()) + assert.Nil(t, err) + err = clientToServer.setMode(modeClient) + assert.Nil(t, err) + err = clientOnly.Ping(ctx, clientToServer.PeerID()) + assert.NotNil(t, err) +} + +func TestDynamicModeSwitching(t *testing.T) { + ctx, cancel := 
context.WithCancel(context.Background())
+	defer cancel()
+
+	prober := setupDHT(ctx, t, true)               // our test harness
+	node := setupDHT(ctx, t, true, Mode(ModeAuto)) // the node under test
+	prober.Host().Peerstore().AddAddrs(node.PeerID(), node.Host().Addrs(), peerstore.AddressTTL)
+	if _, err := prober.Host().Network().DialPeer(ctx, node.PeerID()); err != nil {
+		t.Fatal(err)
+	}
+
+	emitter, err := node.host.EventBus().Emitter(new(event.EvtLocalReachabilityChanged))
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	assertDHTClient := func() {
+		err = prober.Ping(ctx, node.PeerID())
+		assert.True(t, errors.Is(err, multistream.ErrNotSupported[protocol.ID]{}))
+		if l := len(prober.RoutingTable().ListPeers()); l != 0 {
+			t.Errorf("expected routing table length to be 0; instead is %d", l)
+		}
+	}
+
+	assertDHTServer := func() {
+		err = prober.Ping(ctx, node.PeerID())
+		assert.Nil(t, err)
+		// the node should be in the RT for the prober
+		// because the prober will call fixLowPeers when the node updates its protocols
+		if l := len(prober.RoutingTable().ListPeers()); l != 1 {
+			t.Errorf("expected routing table length to be 1; instead is %d", l)
+		}
+	}
+
+	err = emitter.Emit(event.EvtLocalReachabilityChanged{Reachability: network.ReachabilityPrivate})
+	if err != nil {
+		t.Fatal(err)
+	}
+	time.Sleep(500 * time.Millisecond)
+
+	assertDHTClient()
+
+	err = emitter.Emit(event.EvtLocalReachabilityChanged{Reachability: network.ReachabilityPublic})
+	if err != nil {
+		t.Fatal(err)
+	}
+	time.Sleep(500 * time.Millisecond)
+
+	assertDHTServer()
+
+	err = emitter.Emit(event.EvtLocalReachabilityChanged{Reachability: network.ReachabilityUnknown})
+	if err != nil {
+		t.Fatal(err)
+	}
+	time.Sleep(500 * time.Millisecond)
+
+	assertDHTClient()
+}
+
+func TestInvalidKeys(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	nDHTs := 2
+	dhts := setupDHTS(t, ctx, nDHTs)
+	defer func() {
+		for i := 0; i < nDHTs; i++ {
+			dhts[i].Close()
+			defer dhts[i].host.Close()
+		}
+	}()
+
+	t.Logf("connecting %d dhts in a ring", nDHTs)
+	for i := 0; i < nDHTs; i++ {
+		connect(t, ctx, dhts[i], dhts[(i+1)%len(dhts)])
+	}
+
+	querier := dhts[0]
+	_, err := querier.GetClosestPeers(ctx, "")
+	if err == nil {
+		t.Fatal("get closest peers should have failed")
+	}
+
+	_, err = querier.FindProviders(ctx, cid.Cid{})
+	switch err {
+	case routing.ErrNotFound, routing.ErrNotSupported, kb.ErrLookupFailure:
+		// expected failure modes for an invalid key
+	case nil:
+		t.Fatal("find providers should have failed")
+	default:
+		t.Fatal("failed with the wrong error: ", err)
+	}
+
+	_, err = querier.FindPeer(ctx, peer.ID(""))
+	if err != peer.ErrEmptyPeerID {
+		t.Fatal("expected to fail due to the empty peer ID")
+	}
+
+	_, err = querier.GetValue(ctx, "")
+	if err == nil {
+		t.Fatal("expected to have failed")
+	}
+
+	err = querier.PutValue(ctx, "", []byte("foobar"))
+	if err == nil {
+		t.Fatal("expected to have failed")
+	}
+}
+
+func TestV1ProtocolOverride(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	d1 := setupDHT(ctx, t, false, V1ProtocolOverride("/myproto"))
+	d2 := setupDHT(ctx, t, false, V1ProtocolOverride("/myproto"))
+	d3 := setupDHT(ctx, t, false, V1ProtocolOverride("/myproto2"))
+	d4 := setupDHT(ctx, t, false)
+
+	dhts := []*IpfsDHT{d1, d2, d3, d4}
+
+	for i, dout := range dhts {
+		for _, din := range dhts[i+1:] {
+			connectNoSync(t, ctx, dout, din)
+		}
+	}
+
+	wait(t, ctx, d1, d2)
+	wait(t, ctx, d2, d1)
+
+	time.Sleep(time.Second)
+
+	if d1.RoutingTable().Size() != 1 || d2.routingTable.Size() != 1 {
t.Fatal("should have one peer in the routing table") + } + + if d3.RoutingTable().Size() > 0 || d4.RoutingTable().Size() > 0 { + t.Fatal("should have an empty routing table") + } +} + +func TestRoutingFilter(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + nDHTs := 2 + dhts := setupDHTS(t, ctx, nDHTs) + defer func() { + for i := 0; i < nDHTs; i++ { + dhts[i].Close() + defer dhts[i].host.Close() + } + }() + dhts[0].routingTablePeerFilter = PublicRoutingTableFilter + + connectNoSync(t, ctx, dhts[0], dhts[1]) + wait(t, ctx, dhts[1], dhts[0]) + + select { + case <-ctx.Done(): + t.Fatal(ctx.Err()) + case <-time.After(time.Millisecond * 200): + } +} + +func TestBootStrapWhenRTIsEmpty(t *testing.T) { + if detectrace.WithRace() { + t.Skip("skipping timing dependent test when race detector is running") + } + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // create three boostrap peers each of which is connected to 1 other peer. + nBootStraps := 3 + bootstrappers := setupDHTS(t, ctx, nBootStraps) + defer func() { + for i := 0; i < nBootStraps; i++ { + bootstrappers[i].Close() + defer bootstrappers[i].host.Close() + } + }() + + bootstrapcons := setupDHTS(t, ctx, nBootStraps) + defer func() { + for i := 0; i < nBootStraps; i++ { + bootstrapcons[i].Close() + defer bootstrapcons[i].host.Close() + } + }() + for i := 0; i < nBootStraps; i++ { + connect(t, ctx, bootstrappers[i], bootstrapcons[i]) + } + + // convert the bootstrap addresses to a p2p address + bootstrapAddrs := make([]peer.AddrInfo, nBootStraps) + for i := 0; i < nBootStraps; i++ { + b := peer.AddrInfo{ + ID: bootstrappers[i].self, + Addrs: bootstrappers[i].host.Addrs(), + } + bootstrapAddrs[i] = b + } + + { + + // ---------------- + // We will initialize a DHT with 1 bootstrapper, connect it to another DHT, + // then remove the latter from the Routing Table + // This should add the bootstrap peer and the peer that the bootstrap peer is conencted to + // to it's Routing Table. + // AutoRefresh needs to be enabled for this. + + h1, err := bhost.NewHost(swarmt.GenSwarm(t, swarmt.OptDisableReuseport), new(bhost.HostOpts)) + require.NoError(t, err) + h1.Start() + dht1, err := New( + ctx, + h1, + testPrefix, + NamespacedValidator("v", blankValidator{}), + Mode(ModeServer), + BootstrapPeers(bootstrapAddrs[0]), + ) + require.NoError(t, err) + dht2 := setupDHT(ctx, t, false) + defer func() { + dht1.host.Close() + dht2.host.Close() + dht1.Close() + dht2.Close() + }() + connect(t, ctx, dht1, dht2) + require.NoError(t, dht2.Close()) + require.NoError(t, dht2.host.Close()) + require.NoError(t, dht1.host.Network().ClosePeer(dht2.self)) + dht1.routingTable.RemovePeer(dht2.self) + require.NotContains(t, dht2.self, dht1.routingTable.ListPeers()) + require.Eventually(t, func() bool { + return dht1.routingTable.Size() == 2 && dht1.routingTable.Find(bootstrappers[0].self) != "" && + dht1.routingTable.Find(bootstrapcons[0].self) != "" + }, 5*time.Second, 500*time.Millisecond) + + } + + { + + // ---------------- + // We will initialize a DHT with 2 bootstrappers, connect it to another DHT, + // then remove the DHT handler from the other DHT which should make the first DHT's + // routing table empty. + // This should add the bootstrap peers and the peer thats the bootstrap peers are connected to + // to it's Routing Table. + // AutoRefresh needs to be enabled for this. 
+		h1, err := bhost.NewHost(swarmt.GenSwarm(t, swarmt.OptDisableReuseport), new(bhost.HostOpts))
+		require.NoError(t, err)
+		h1.Start()
+		dht1, err := New(
+			ctx,
+			h1,
+			testPrefix,
+			NamespacedValidator("v", blankValidator{}),
+			Mode(ModeServer),
+			BootstrapPeers(bootstrapAddrs[1], bootstrapAddrs[2]),
+		)
+		require.NoError(t, err)
+
+		dht2 := setupDHT(ctx, t, false)
+		defer func() {
+			dht1.host.Close()
+			dht2.host.Close()
+			dht1.Close()
+			dht2.Close()
+		}()
+		connect(t, ctx, dht1, dht2)
+		require.NoError(t, dht2.setMode(modeClient))
+
+		require.Eventually(t, func() bool {
+			rt := dht1.routingTable
+
+			return rt.Size() == 4 && rt.Find(bootstrappers[1].self) != "" &&
+				rt.Find(bootstrappers[2].self) != "" && rt.Find(bootstrapcons[1].self) != "" && rt.Find(bootstrapcons[2].self) != ""
+		}, 5*time.Second, 500*time.Millisecond)
+	}
+}
+
+func TestBootstrapPeersFunc(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	var lock sync.Mutex
+
+	bootstrapFuncA := func() []peer.AddrInfo {
+		return []peer.AddrInfo{}
+	}
+	dhtA := setupDHT(ctx, t, false, BootstrapPeersFunc(bootstrapFuncA))
+
+	bootstrapPeersB := []peer.AddrInfo{}
+	bootstrapFuncB := func() []peer.AddrInfo {
+		lock.Lock()
+		defer lock.Unlock()
+		return bootstrapPeersB
+	}
+
+	dhtB := setupDHT(ctx, t, false, BootstrapPeersFunc(bootstrapFuncB))
+	require.Equal(t, 0, len(dhtB.host.Network().Peers()))
+
+	addrA := peer.AddrInfo{
+		ID:    dhtA.self,
+		Addrs: dhtA.host.Addrs(),
+	}
+
+	lock.Lock()
+	bootstrapPeersB = []peer.AddrInfo{addrA}
+	lock.Unlock()
+
+	dhtB.fixLowPeers()
+	require.NotEqual(t, 0, len(dhtB.host.Network().Peers()))
+}
+
+func TestPreconnectedNodes(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	opts := []Option{
+		testPrefix,
+		DisableAutoRefresh(),
+		Mode(ModeServer),
+	}
+
+	// Create hosts
+	h1, err := bhost.NewHost(swarmt.GenSwarm(t, swarmt.OptDisableReuseport), new(bhost.HostOpts))
+	require.NoError(t, err)
+	h1.Start()
+	defer h1.Close()
+	h2, err := bhost.NewHost(swarmt.GenSwarm(t, swarmt.OptDisableReuseport), new(bhost.HostOpts))
+	require.NoError(t, err)
+	h2.Start()
+	defer h2.Close()
+
+	// Setup first DHT
+	d1, err := New(ctx, h1, opts...)
+	require.NoError(t, err)
+	defer d1.Close()
+
+	// Connect the first host to the second
+	err = h1.Connect(ctx, peer.AddrInfo{ID: h2.ID(), Addrs: h2.Addrs()})
+	require.NoError(t, err)
+
+	// Wait until we know identify has completed by checking for supported protocols
+	// TODO: Is this needed? Could we do h2.Connect(h1) and have that wait for identify to complete?
+	require.Eventually(t, func() bool {
+		h1Protos, err := h2.Peerstore().SupportsProtocols(h1.ID(), d1.protocols...)
+		require.NoError(t, err)
+
+		return len(h1Protos) > 0
+	}, 10*time.Second, time.Millisecond)
+
+	// Setup the second DHT
+	d2, err := New(ctx, h2, opts...)
+
+	require.NoError(t, err)
+	defer d2.Close()
+
+	connect(t, ctx, d1, d2)
+
+	// See if it works
+	peers, err := d2.GetClosestPeers(ctx, "testkey")
+	require.NoError(t, err)
+
+	require.Equal(t, 1, len(peers), "why is there more than one peer?")
+	require.Equal(t, h1.ID(), peers[0], "could not find peer")
+}
+
+func TestAddrFilter(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	pub1, _ := ma.StringCast("/ip4/1.2.3.1/tcp/123")
+	pub2, _ := ma.StringCast("/ip4/160.160.160.160/tcp/1600")
+	pub3, _ := ma.StringCast("/ip6/2001::10/tcp/123")
+	priv1, _ := ma.StringCast("/ip4/192.168.1.100/tcp/123")
+	priv2, _ := ma.StringCast("/ip4/172.16.10.10/tcp/123")
+	priv3, _ := ma.StringCast("/ip4/10.10.10.10/tcp/123")
+	priv4, _ := ma.StringCast("/ip6/fc00::10/tcp/123")
+	l1, _ := ma.StringCast("/ip4/127.0.0.100/tcp/123")
+	l2, _ := ma.StringCast("/ip6/::1/tcp/123")
+	// generate a bunch of addresses
+	publicAddrs := []ma.Multiaddr{
+		pub1,
+		pub2,
+		pub3,
+	}
+	privAddrs := []ma.Multiaddr{
+		priv1,
+		priv2,
+		priv3,
+		priv4,
+	}
+	loopbackAddrs := []ma.Multiaddr{
+		l1,
+		l2,
+	}
+
+	allAddrs := append(publicAddrs, privAddrs...)
+	allAddrs = append(allAddrs, loopbackAddrs...)
+
+	// generate different address filters
+	acceptAllFilter := AddressFilter(func(addrs []ma.Multiaddr) []ma.Multiaddr {
+		return addrs
+	})
+	rejectAllFilter := AddressFilter(func(addrs []ma.Multiaddr) []ma.Multiaddr {
+		return []ma.Multiaddr{}
+	})
+	publicIpFilter := AddressFilter(func(addrs []ma.Multiaddr) []ma.Multiaddr {
+		return ma.FilterAddrs(addrs, func(a ma.Multiaddr) bool { is, err := manet.IsPublicAddr(a); return is && err == nil })
+	})
+	localIpFilter := AddressFilter(func(addrs []ma.Multiaddr) []ma.Multiaddr {
+		return ma.FilterAddrs(addrs, func(a ma.Multiaddr) bool { return !manet.IsIPLoopback(a) })
+	})
+
+	// generate a peer ID for the "remote" peer
+	_, pub, err := crypto.GenerateKeyPair(
+		crypto.Ed25519, // Select your key type. Ed25519 keys are nice and short.
+		-1,             // Select key length when possible (i.e. RSA).
+ ) + require.NoError(t, err) + peerid, err := peer.IDFromPublicKey(pub) + require.NoError(t, err) + + // DHT accepting all addresses + d0 := setupDHT(ctx, t, false, acceptAllFilter) + + // peerstore should only contain self + require.Equal(t, 1, d0.host.Peerstore().Peers().Len()) + + d0.maybeAddAddrs(peerid, allAddrs, time.Minute) + require.Equal(t, 2, d0.host.Peerstore().Peers().Len()) + for _, a := range allAddrs { + // check that the peerstore contains all addresses of the remote peer + require.Contains(t, d0.host.Peerstore().Addrs(peerid), a) + } + + // DHT rejecting all addresses + d1 := setupDHT(ctx, t, false, rejectAllFilter) + d1.maybeAddAddrs(peerid, allAddrs, time.Minute) + // remote peer should not be added to peerstore (all addresses rejected) + require.Equal(t, 1, d1.host.Peerstore().Peers().Len()) + + // DHT accepting only public addresses + d2 := setupDHT(ctx, t, false, publicIpFilter) + d2.maybeAddAddrs(peerid, allAddrs, time.Minute) + for _, a := range publicAddrs { + // check that the peerstore contains only public addresses of the remote peer + require.Contains(t, d2.host.Peerstore().Addrs(peerid), a) + } + require.Equal(t, len(publicAddrs), len(d2.host.Peerstore().Addrs(peerid))) + + // DHT accepting only non-loopback addresses + d3 := setupDHT(ctx, t, false, localIpFilter) + d3.maybeAddAddrs(peerid, allAddrs, time.Minute) + for _, a := range publicAddrs { + // check that the peerstore contains only non-loopback addresses of the remote peer + require.Contains(t, d3.host.Peerstore().Addrs(peerid), a) + } + for _, a := range privAddrs { + // check that the peerstore contains only non-loopback addresses of the remote peer + require.Contains(t, d3.host.Peerstore().Addrs(peerid), a) + } + require.Equal(t, len(publicAddrs)+len(privAddrs), len(d3.host.Peerstore().Addrs(peerid))) +} diff --git a/go-libp2p-kad-dht/doc.go b/go-libp2p-kad-dht/doc.go new file mode 100644 index 0000000..acbb181 --- /dev/null +++ b/go-libp2p-kad-dht/doc.go @@ -0,0 +1,3 @@ +// Package dht implements a distributed hash table that satisfies the ipfs routing +// interface. This DHT is modeled after kademlia with S/Kademlia modifications. +package dht diff --git a/go-libp2p-kad-dht/dual/dual.go b/go-libp2p-kad-dht/dual/dual.go new file mode 100644 index 0000000..afad4ca --- /dev/null +++ b/go-libp2p-kad-dht/dual/dual.go @@ -0,0 +1,394 @@ +// Package dual provides an implementation of a split or "dual" dht, where two parallel instances +// are maintained for the global internet and the local LAN respectively. 
+package dual
+
+import (
+	"context"
+	"fmt"
+	"sync"
+
+	dht "github.com/libp2p/go-libp2p-kad-dht"
+	"github.com/libp2p/go-libp2p-kad-dht/internal"
+	"github.com/libp2p/go-libp2p-routing-helpers/tracing"
+
+	"github.com/ipfs/go-cid"
+	kb "github.com/libp2p/go-libp2p-kbucket"
+	"github.com/libp2p/go-libp2p-kbucket/peerdiversity"
+	helper "github.com/libp2p/go-libp2p-routing-helpers"
+	ci "github.com/libp2p/go-libp2p/core/crypto"
+	"github.com/libp2p/go-libp2p/core/host"
+	"github.com/libp2p/go-libp2p/core/peer"
+	"github.com/libp2p/go-libp2p/core/protocol"
+	"github.com/libp2p/go-libp2p/core/routing"
+	ma "github.com/multiformats/go-multiaddr"
+	manet "github.com/multiformats/go-multiaddr/net"
+
+	"github.com/hashicorp/go-multierror"
+)
+
+const tracer = tracing.Tracer("go-libp2p-kad-dht/dual")
+const dualName = "Dual"
+
+// DHT implements the routing interface to provide two concrete DHT implementations for use
+// in IPFS that are used to support both global network users and disjoint LAN use cases.
+type DHT struct {
+	WAN *dht.IpfsDHT
+	LAN *dht.IpfsDHT
+}
+
+// LanExtension is used to differentiate local protocol requests from those on the WAN DHT.
+const LanExtension protocol.ID = "/lan"
+
+// Assert that IPFS assumptions about interfaces aren't broken. These aren't a
+// guarantee, but we can use them to aid refactoring.
+var (
+	_ routing.ContentRouting = (*DHT)(nil)
+	_ routing.Routing        = (*DHT)(nil)
+	_ routing.PeerRouting    = (*DHT)(nil)
+	_ routing.PubKeyFetcher  = (*DHT)(nil)
+	_ routing.ValueStore     = (*DHT)(nil)
+)
+
+var (
+	maxPrefixCountPerCpl = 2
+	maxPrefixCount       = 3
+)
+
+type config struct {
+	wan, lan []dht.Option
+}
+
+func (cfg *config) apply(opts ...Option) error {
+	for i, o := range opts {
+		if err := o(cfg); err != nil {
+			return fmt.Errorf("dual dht option %d failed: %w", i, err)
+		}
+	}
+	return nil
+}
+
+// Option is an option used to configure the Dual DHT.
+type Option func(*config) error
+
+// WanDHTOption applies the given DHT options to the WAN DHT.
+func WanDHTOption(opts ...dht.Option) Option {
+	return func(c *config) error {
+		c.wan = append(c.wan, opts...)
+		return nil
+	}
+}
+
+// LanDHTOption applies the given DHT options to the LAN DHT.
+func LanDHTOption(opts ...dht.Option) Option {
+	return func(c *config) error {
+		c.lan = append(c.lan, opts...)
+		return nil
+	}
+}
+
+// DHTOption applies the given DHT options to both the WAN and the LAN DHTs.
+func DHTOption(opts ...dht.Option) Option {
+	return func(c *config) error {
+		c.lan = append(c.lan, opts...)
+		c.wan = append(c.wan, opts...)
+		return nil
+	}
+}
+
+// New creates a new DualDHT instance. Options provided are forwarded on to the two concrete
+// IpfsDHT internal constructions, modulo additional options used by the Dual DHT to enforce
+// the LAN-vs-WAN distinction.
+// Note: query or routing table functional options provided as arguments to this function
+// will be overridden by this constructor.
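For orientation, a caller-side construction using the option forwarding above might look like this (a sketch; ctx, h and bootstrappers are assumed to exist, and the specific options are illustrative):

	d, err := dual.New(ctx, h,
		dual.DHTOption(dht.BootstrapPeers(bootstrappers...)), // applied to both WAN and LAN
		dual.WanDHTOption(dht.BucketSize(20)),                // WAN only
		dual.LanDHTOption(dht.DisableAutoRefresh()),          // LAN only
	)
	if err != nil {
		return err
	}
	defer d.Close()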
+func New(ctx context.Context, h host.Host, options ...Option) (*DHT, error) { + var cfg config + err := cfg.apply( + WanDHTOption( + dht.QueryFilter(dht.PublicQueryFilter), + dht.RoutingTableFilter(dht.PublicRoutingTableFilter), + dht.RoutingTablePeerDiversityFilter(dht.NewRTPeerDiversityFilter(h, maxPrefixCountPerCpl, maxPrefixCount)), + // filter out all private addresses + dht.AddressFilter(func(addrs []ma.Multiaddr) []ma.Multiaddr { + return ma.FilterAddrs(addrs, func(a ma.Multiaddr) bool { is, err := manet.IsPublicAddr(a); return is && err == nil }) + }), + ), + ) + if err != nil { + return nil, err + } + err = cfg.apply( + LanDHTOption( + dht.ProtocolExtension(LanExtension), + dht.QueryFilter(dht.PrivateQueryFilter), + dht.RoutingTableFilter(dht.PrivateRoutingTableFilter), + // filter out localhost IP addresses + dht.AddressFilter(func(addrs []ma.Multiaddr) []ma.Multiaddr { + return ma.FilterAddrs(addrs, func(a ma.Multiaddr) bool { return !manet.IsIPLoopback(a) }) + }), + ), + ) + if err != nil { + return nil, err + } + err = cfg.apply(options...) + if err != nil { + return nil, err + } + + wan, err := dht.New(ctx, h, cfg.wan...) + if err != nil { + return nil, err + } + + // Unless overridden by user supplied options, the LAN DHT should default + // to 'AutoServer' mode. + if wan.Mode() != dht.ModeClient { + cfg.lan = append(cfg.lan, dht.Mode(dht.ModeServer)) + } + lan, err := dht.New(ctx, h, cfg.lan...) + if err != nil { + return nil, err + } + + impl := DHT{wan, lan} + return &impl, nil +} + +// Close closes the DHT context. +func (dht *DHT) Close() error { + return combineErrors(dht.WAN.Close(), dht.LAN.Close()) +} + +// WANActive returns true when the WAN DHT is active (has peers). +func (dht *DHT) WANActive() bool { + return dht.WAN.RoutingTable().Size() > 0 +} + +// Provide adds the given cid to the content routing system. +func (dht *DHT) Provide(ctx context.Context, key cid.Cid, announce bool) (err error) { + ctx, end := tracer.Provide(dualName, ctx, key, announce) + defer func() { end(err) }() + + if dht.WANActive() { + return dht.WAN.Provide(ctx, key, announce) + } + return dht.LAN.Provide(ctx, key, announce) +} + +// GetRoutingTableDiversityStats fetches the Routing Table Diversity Stats. +func (dht *DHT) GetRoutingTableDiversityStats() []peerdiversity.CplDiversityStats { + if dht.WANActive() { + return dht.WAN.GetRoutingTableDiversityStats() + } + return nil +} + +// FindProvidersAsync searches for peers who are able to provide a given key +func (dht *DHT) FindProvidersAsync(ctx context.Context, key cid.Cid, count int) (ch <-chan peer.AddrInfo) { + ctx, end := tracer.FindProvidersAsync(dualName, ctx, key, count) + defer func() { ch = end(ch, nil) }() + + reqCtx, cancel := context.WithCancel(ctx) + outCh := make(chan peer.AddrInfo) + + // Register for and merge query events if we care about them. 
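From the caller's perspective the WAN/LAN split below is invisible: FindProvidersAsync returns a single merged, deduplicated channel, consumed as usual (sketch; d, ctx and key — a cid.Cid — are assumed):

	for ai := range d.FindProvidersAsync(ctx, key, 10) {
		fmt.Println("provider:", ai.ID) // results may come from either DHT
	}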
+	subCtx := reqCtx
+	var evtCh <-chan *routing.QueryEvent
+	if routing.SubscribesToQueryEvents(ctx) {
+		subCtx, evtCh = routing.RegisterForQueryEvents(reqCtx)
+	}
+
+	subCtx, span := internal.StartSpan(subCtx, "Dual.worker")
+	wanCh := dht.WAN.FindProvidersAsync(subCtx, key, count)
+	lanCh := dht.LAN.FindProvidersAsync(subCtx, key, count)
+	zeroCount := (count == 0)
+	go func() {
+		defer span.End()
+
+		defer cancel()
+		defer close(outCh)
+
+		found := make(map[peer.ID]struct{}, count)
+		var pi peer.AddrInfo
+		var qEv *routing.QueryEvent
+		for (zeroCount || count > 0) && (wanCh != nil || lanCh != nil) {
+			var ok bool
+			select {
+			case qEv, ok = <-evtCh:
+				if !ok {
+					evtCh = nil
+				} else if qEv != nil && qEv.Type != routing.QueryError {
+					routing.PublishQueryEvent(reqCtx, qEv)
+				}
+				continue
+			case pi, ok = <-wanCh:
+				if !ok {
+					span.AddEvent("wan finished")
+					wanCh = nil
+					continue
+				}
+			case pi, ok = <-lanCh:
+				if !ok {
+					span.AddEvent("lan finished")
+					lanCh = nil
+					continue
+				}
+			}
+			// already found
+			if _, ok = found[pi.ID]; ok {
+				continue
+			}
+
+			select {
+			case outCh <- pi:
+				found[pi.ID] = struct{}{}
+				count--
+			case <-ctx.Done():
+				return
+			}
+		}
+		if qEv != nil && qEv.Type == routing.QueryError && len(found) == 0 {
+			routing.PublishQueryEvent(reqCtx, qEv)
+		}
+	}()
+	return outCh
+}
+
+// FindPeer searches for a peer with the given ID.
+// Note: with signed peer records, we can change this to short circuit once either DHT returns.
+func (dht *DHT) FindPeer(ctx context.Context, pid peer.ID) (pi peer.AddrInfo, err error) {
+	ctx, end := tracer.FindPeer(dualName, ctx, pid)
+	defer func() { end(pi, err) }()
+
+	var wg sync.WaitGroup
+	wg.Add(2)
+	var wanInfo, lanInfo peer.AddrInfo
+	var wanErr, lanErr error
+	go func() {
+		defer wg.Done()
+		wanInfo, wanErr = dht.WAN.FindPeer(ctx, pid)
+	}()
+	go func() {
+		defer wg.Done()
+		lanInfo, lanErr = dht.LAN.FindPeer(ctx, pid)
+	}()
+
+	wg.Wait()
+
+	// Combine addresses. Try to avoid doing unnecessary work while we're at
+	// it. Note: We're ignoring the errors for now as many of our DHT
+	// commands can return both a result and an error.
+	ai := peer.AddrInfo{ID: pid}
+	if len(wanInfo.Addrs) == 0 {
+		ai.Addrs = lanInfo.Addrs
+	} else if len(lanInfo.Addrs) == 0 {
+		ai.Addrs = wanInfo.Addrs
+	} else {
+		// combine addresses
+		deduped := make(map[string]ma.Multiaddr, len(wanInfo.Addrs)+len(lanInfo.Addrs))
+		for _, addr := range wanInfo.Addrs {
+			deduped[string(addr.Bytes())] = addr
+		}
+		for _, addr := range lanInfo.Addrs {
+			deduped[string(addr.Bytes())] = addr
+		}
+		ai.Addrs = make([]ma.Multiaddr, 0, len(deduped))
+		for _, addr := range deduped {
+			ai.Addrs = append(ai.Addrs, addr)
+		}
+	}
+
+	// If one of the commands succeeded, don't return an error.
+	if wanErr == nil || lanErr == nil {
+		return ai, nil
+	}
+
+	// Otherwise, return what we have _and_ return the error.
+	return ai, combineErrors(wanErr, lanErr)
+}
+
+func combineErrors(erra, errb error) error {
+	// if the errors are the same, just return one.
+	if erra == errb {
+		return erra
+	}
+
+	// If one of the errors is a kb lookup failure (no peers in routing
+	// table), return the other.
+	if erra == kb.ErrLookupFailure {
+		return errb
+	} else if errb == kb.ErrLookupFailure {
+		return erra
+	}
+	return multierror.Append(erra, errb).ErrorOrNil()
+}
+
+// Bootstrap allows callers to hint to the routing system to get into a
+// Bootstrapped state and remain there.
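Bootstrap below fans out to both sides and merges the outcomes through combineErrors above. Two worked cases, read directly off that function (sketch; errA and errB are assumed distinct, non-lookup errors):

	_ = combineErrors(kb.ErrLookupFailure, nil) // nil: an empty routing table on one side is not an error
	_ = combineErrors(errA, errB)               // a *multierror.Error wrapping both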
+func (dht *DHT) Bootstrap(ctx context.Context) (err error) { + ctx, end := tracer.Bootstrap(dualName, ctx) + defer func() { end(err) }() + + erra := dht.WAN.Bootstrap(ctx) + errb := dht.LAN.Bootstrap(ctx) + return combineErrors(erra, errb) +} + +// PutValue adds value corresponding to given Key. +func (dht *DHT) PutValue(ctx context.Context, key string, val []byte, opts ...routing.Option) (err error) { + ctx, end := tracer.PutValue(dualName, ctx, key, val, opts...) + defer func() { end(err) }() + + if dht.WANActive() { + return dht.WAN.PutValue(ctx, key, val, opts...) + } + return dht.LAN.PutValue(ctx, key, val, opts...) +} + +// GetValue searches for the value corresponding to given Key. +func (d *DHT) GetValue(ctx context.Context, key string, opts ...routing.Option) (result []byte, err error) { + ctx, end := tracer.GetValue(dualName, ctx, key, opts...) + defer func() { end(result, err) }() + + lanCtx, cancelLan := context.WithCancel(ctx) + defer cancelLan() + + var ( + lanVal []byte + lanErr error + lanWaiter sync.WaitGroup + ) + lanWaiter.Add(1) + go func() { + defer lanWaiter.Done() + lanVal, lanErr = d.LAN.GetValue(lanCtx, key, opts...) + }() + + wanVal, wanErr := d.WAN.GetValue(ctx, key, opts...) + if wanErr == nil { + cancelLan() + } + lanWaiter.Wait() + if wanErr == nil { + return wanVal, nil + } + if lanErr == nil { + return lanVal, nil + } + return nil, combineErrors(wanErr, lanErr) +} + +// SearchValue searches for better values from this value +func (dht *DHT) SearchValue(ctx context.Context, key string, opts ...routing.Option) (ch <-chan []byte, err error) { + ctx, end := tracer.SearchValue(dualName, ctx, key, opts...) + defer func() { ch, err = end(ch, err) }() + + p := helper.Parallel{Routers: []routing.Routing{dht.WAN, dht.LAN}, Validator: dht.WAN.Validator} + return p.SearchValue(ctx, key, opts...) +} + +// GetPublicKey returns the public key for the given peer. 
+func (dht *DHT) GetPublicKey(ctx context.Context, pid peer.ID) (ci.PubKey, error) { + p := helper.Parallel{Routers: []routing.Routing{dht.WAN, dht.LAN}, Validator: dht.WAN.Validator} + return p.GetPublicKey(ctx, pid) +} diff --git a/go-libp2p-kad-dht/dual/dual_test.go b/go-libp2p-kad-dht/dual/dual_test.go new file mode 100644 index 0000000..721f605 --- /dev/null +++ b/go-libp2p-kad-dht/dual/dual_test.go @@ -0,0 +1,399 @@ +package dual + +import ( + "context" + "testing" + "time" + + u "github.com/ipfs/boxo/util" + "github.com/ipfs/go-cid" + dht "github.com/libp2p/go-libp2p-kad-dht" + test "github.com/libp2p/go-libp2p-kad-dht/internal/testing" + record "github.com/libp2p/go-libp2p-record" + "github.com/libp2p/go-libp2p/core/host" + "github.com/libp2p/go-libp2p/core/peer" + peerstore "github.com/libp2p/go-libp2p/core/peerstore" + bhost "github.com/libp2p/go-libp2p/p2p/host/basic" + swarmt "github.com/libp2p/go-libp2p/p2p/net/swarm/testing" + "github.com/multiformats/go-multiaddr" + "github.com/stretchr/testify/require" +) + +var wancid, lancid cid.Cid + +func init() { + wancid = cid.NewCidV1(cid.DagCBOR, u.Hash([]byte("wan cid -- value"))) + lancid = cid.NewCidV1(cid.DagCBOR, u.Hash([]byte("lan cid -- value"))) +} + +type blankValidator struct{} + +func (blankValidator) Validate(_ string, _ []byte) error { return nil } +func (blankValidator) Select(_ string, _ [][]byte) (int, error) { return 0, nil } + +type customRtHelper struct { + allow peer.ID +} + +func MkFilterForPeer() (func(_ interface{}, p peer.ID) bool, *customRtHelper) { + helper := customRtHelper{} + + type hasHost interface { + Host() host.Host + } + + f := func(dht interface{}, p peer.ID) bool { + d := dht.(hasHost) + conns := d.Host().Network().ConnsToPeer(p) + + for _, c := range conns { + if c.RemotePeer() == helper.allow { + return true + } + } + return false + } + return f, &helper +} + +func setupDHTWithFilters(ctx context.Context, t *testing.T, options ...dht.Option) (*DHT, []*customRtHelper) { + h, err := bhost.NewHost(swarmt.GenSwarm(t, swarmt.OptDisableReuseport), new(bhost.HostOpts)) + require.NoError(t, err) + h.Start() + t.Cleanup(func() { h.Close() }) + + wanFilter, wanRef := MkFilterForPeer() + wanOpts := []dht.Option{ + dht.NamespacedValidator("v", blankValidator{}), + dht.ProtocolPrefix("/test"), + dht.DisableAutoRefresh(), + dht.RoutingTableFilter(wanFilter), + } + wan, err := dht.New(ctx, h, wanOpts...) + require.NoError(t, err) + + lanFilter, lanRef := MkFilterForPeer() + lanOpts := []dht.Option{ + dht.NamespacedValidator("v", blankValidator{}), + dht.ProtocolPrefix("/test"), + dht.ProtocolExtension(LanExtension), + dht.DisableAutoRefresh(), + dht.RoutingTableFilter(lanFilter), + dht.Mode(dht.ModeServer), + } + lan, err := dht.New(ctx, h, lanOpts...) 
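dht.RoutingTableFilter, as used in the WAN and LAN option sets above, receives the DHT instance (as an interface{}) and the candidate peer; returning false keeps that peer out of the routing table. MkFilterForPeer narrows this to a single allowed peer, but the general shape is (sketch; allowed is an assumed set):

	dht.RoutingTableFilter(func(_ interface{}, p peer.ID) bool {
		return allowed[p] // admit only peers explicitly allowed
	})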
+ require.NoError(t, err) + + impl := DHT{wan, lan} + return &impl, []*customRtHelper{wanRef, lanRef} +} + +func setupDHT(ctx context.Context, t *testing.T, options ...dht.Option) *DHT { + t.Helper() + + host, err := bhost.NewHost(swarmt.GenSwarm(t, swarmt.OptDisableReuseport), new(bhost.HostOpts)) + require.NoError(t, err) + host.Start() + t.Cleanup(func() { host.Close() }) + + baseOpts := []dht.Option{ + dht.NamespacedValidator("v", blankValidator{}), + dht.ProtocolPrefix("/test"), + dht.DisableAutoRefresh(), + } + + d, err := New( + ctx, + host, + append([]Option{DHTOption(baseOpts...)}, DHTOption(options...))..., + ) + require.NoError(t, err) + + return d +} + +func connect(ctx context.Context, t *testing.T, a, b *dht.IpfsDHT) { + t.Helper() + bid := b.PeerID() + baddr := b.Host().Peerstore().Addrs(bid) + if len(baddr) == 0 { + t.Fatal("no addresses for connection.") + } + a.Host().Peerstore().AddAddrs(bid, baddr, peerstore.TempAddrTTL) + if err := a.Host().Connect(ctx, peer.AddrInfo{ID: bid}); err != nil { + t.Fatal(err) + } + wait(ctx, t, a, b) +} + +func wait(ctx context.Context, t *testing.T, a, b *dht.IpfsDHT) { + t.Helper() + for a.RoutingTable().Find(b.PeerID()) == "" { + // fmt.Fprintf(os.Stderr, "%v\n", a.RoutingTable().GetPeerInfos()) + select { + case <-ctx.Done(): + t.Fatal(ctx.Err()) + case <-time.After(time.Millisecond * 5): + } + } +} + +func setupTier(ctx context.Context, t *testing.T) (*DHT, *dht.IpfsDHT, *dht.IpfsDHT) { + t.Helper() + baseOpts := []dht.Option{ + dht.NamespacedValidator("v", blankValidator{}), + dht.ProtocolPrefix("/test"), + dht.DisableAutoRefresh(), + } + + d, hlprs := setupDHTWithFilters(ctx, t) + + whost, err := bhost.NewHost(swarmt.GenSwarm(t, swarmt.OptDisableReuseport), new(bhost.HostOpts)) + require.NoError(t, err) + whost.Start() + t.Cleanup(func() { whost.Close() }) + + wan, err := dht.New( + ctx, + whost, + append(baseOpts, dht.Mode(dht.ModeServer))..., + ) + if err != nil { + t.Fatal(err) + } + hlprs[0].allow = wan.PeerID() + connect(ctx, t, d.WAN, wan) + + lhost, err := bhost.NewHost(swarmt.GenSwarm(t, swarmt.OptDisableReuseport), new(bhost.HostOpts)) + require.NoError(t, err) + lhost.Start() + t.Cleanup(func() { lhost.Close() }) + + lan, err := dht.New( + ctx, + lhost, + append(baseOpts, dht.Mode(dht.ModeServer), dht.ProtocolExtension("/lan"))..., + ) + if err != nil { + t.Fatal(err) + } + hlprs[1].allow = lan.PeerID() + connect(ctx, t, d.LAN, lan) + + return d, wan, lan +} + +func TestDualModes(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) + defer cancel() + + d := setupDHT(ctx, t) + defer d.Close() + + if d.WAN.Mode() != dht.ModeAuto { + t.Fatal("wrong default mode for wan") + } else if d.LAN.Mode() != dht.ModeServer { + t.Fatal("wrong default mode for lan") + } + + d2 := setupDHT(ctx, t, dht.Mode(dht.ModeClient)) + defer d2.Close() + if d2.WAN.Mode() != dht.ModeClient || + d2.LAN.Mode() != dht.ModeClient { + t.Fatal("wrong client mode operation") + } +} + +func TestFindProviderAsync(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) + defer cancel() + + d, wan, lan := setupTier(ctx, t) + defer d.Close() + defer wan.Close() + defer lan.Close() + + time.Sleep(5 * time.Millisecond) + + if err := wan.Provide(ctx, wancid, false); err != nil { + t.Fatal(err) + } + + if err := lan.Provide(ctx, lancid, true); err != nil { + t.Fatal(err) + } + + wpc := d.FindProvidersAsync(ctx, wancid, 1) + select { + case p := <-wpc: + if p.ID != wan.PeerID() { + 
t.Fatal("wrong wan provider") + } + case <-ctx.Done(): + t.Fatal("find provider timeout.") + } + + lpc := d.FindProvidersAsync(ctx, lancid, 1) + select { + case p := <-lpc: + if p.ID != lan.PeerID() { + t.Fatal("wrong lan provider") + } + case <-ctx.Done(): + t.Fatal("find provider timeout.") + } +} + +func TestValueGetSet(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) + defer cancel() + + d, wan, lan := setupTier(ctx, t) + defer d.Close() + defer wan.Close() + defer lan.Close() + + time.Sleep(5 * time.Millisecond) + + err := d.PutValue(ctx, "/v/hello", []byte("valid")) + if err != nil { + t.Fatal(err) + } + val, err := wan.GetValue(ctx, "/v/hello") + if err != nil { + t.Fatal(err) + } + if string(val) != "valid" { + t.Fatal("failed to get expected string.") + } + + _, err = lan.GetValue(ctx, "/v/hello") + if err == nil { + t.Fatal(err) + } +} + +func TestSearchValue(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) + defer cancel() + + d, wan, lan := setupTier(ctx, t) + defer d.Close() + defer wan.Close() + defer lan.Close() + + d.WAN.Validator.(record.NamespacedValidator)["v"] = test.TestValidator{} + d.LAN.Validator.(record.NamespacedValidator)["v"] = test.TestValidator{} + + _ = wan.PutValue(ctx, "/v/hello", []byte("valid")) + + valCh, err := d.SearchValue(ctx, "/v/hello", dht.Quorum(0)) + if err != nil { + t.Fatal(err) + } + + select { + case v := <-valCh: + if string(v) != "valid" { + t.Errorf("expected 'valid', got '%s'", string(v)) + } + case <-ctx.Done(): + t.Fatal(ctx.Err()) + } + + select { + case _, ok := <-valCh: + if ok { + t.Errorf("chan should close") + } + case <-ctx.Done(): + t.Fatal(ctx.Err()) + } + + err = lan.PutValue(ctx, "/v/hello", []byte("newer")) + if err != nil { + t.Error(err) + } + + valCh, err = d.SearchValue(ctx, "/v/hello", dht.Quorum(0)) + if err != nil { + t.Fatal(err) + } + + var lastVal []byte + for c := range valCh { + lastVal = c + } + if string(lastVal) != "newer" { + t.Fatal("incorrect best search value") + } +} + +func TestGetPublicKey(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) + defer cancel() + + d, wan, lan := setupTier(ctx, t) + defer d.Close() + defer wan.Close() + defer lan.Close() + + time.Sleep(5 * time.Millisecond) + + pk, err := d.GetPublicKey(ctx, wan.PeerID()) + if err != nil { + t.Fatal(err) + } + id, err := peer.IDFromPublicKey(pk) + if err != nil { + t.Fatal(err) + } + if id != wan.PeerID() { + t.Fatal("incorrect PK") + } + + pk, err = d.GetPublicKey(ctx, lan.PeerID()) + if err != nil { + t.Fatal(err) + } + id, err = peer.IDFromPublicKey(pk) + if err != nil { + t.Fatal(err) + } + if id != lan.PeerID() { + t.Fatal("incorrect PK") + } +} + +func TestFindPeer(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) + defer cancel() + + d, wan, lan := setupTier(ctx, t) + defer d.Close() + defer wan.Close() + defer lan.Close() + + time.Sleep(5 * time.Millisecond) + + p, err := d.FindPeer(ctx, lan.PeerID()) + if err != nil { + t.Fatal(err) + } + assertUniqueMultiaddrs(t, p.Addrs) + p, err = d.FindPeer(ctx, wan.PeerID()) + if err != nil { + t.Fatal(err) + } + assertUniqueMultiaddrs(t, p.Addrs) +} + +func assertUniqueMultiaddrs(t *testing.T, addrs []multiaddr.Multiaddr) { + set := make(map[string]bool) + for _, addr := range addrs { + if set[string(addr.Bytes())] { + t.Errorf("duplicate address %s", addr) + } + set[string(addr.Bytes())] = true + } +} diff --git 
new file mode 100644
index 0000000..d6fe2e7
--- /dev/null
+++ b/go-libp2p-kad-dht/events.go
@@ -0,0 +1,247 @@
+package dht
+
+import (
+	"context"
+	"encoding/json"
+	"sync"
+
+	"github.com/google/uuid"
+
+	kbucket "github.com/libp2p/go-libp2p-kbucket"
+	"github.com/libp2p/go-libp2p/core/peer"
+)
+
+// KeyKadID contains the Kademlia key in string and binary form.
+type KeyKadID struct {
+	Key string
+	Kad kbucket.ID
+}
+
+// NewKeyKadID creates a KeyKadID from a string Kademlia ID.
+func NewKeyKadID(k string) *KeyKadID {
+	return &KeyKadID{
+		Key: k,
+		Kad: kbucket.ConvertKey(k),
+	}
+}
+
+// PeerKadID contains a libp2p Peer ID and a binary Kademlia ID.
+type PeerKadID struct {
+	Peer peer.ID
+	Kad  kbucket.ID
+}
+
+// NewPeerKadID creates a PeerKadID from a libp2p Peer ID.
+func NewPeerKadID(p peer.ID) *PeerKadID {
+	return &PeerKadID{
+		Peer: p,
+		Kad:  kbucket.ConvertPeerID(p),
+	}
+}
+
+// NewPeerKadIDSlice creates a slice of PeerKadID from the passed slice of libp2p Peer IDs.
+func NewPeerKadIDSlice(p []peer.ID) []*PeerKadID {
+	r := make([]*PeerKadID, len(p))
+	for i := range p {
+		r[i] = NewPeerKadID(p[i])
+	}
+	return r
+}
+
+// OptPeerKadID returns a pointer to a PeerKadID, or nil if the passed Peer ID is its default value.
+func OptPeerKadID(p peer.ID) *PeerKadID {
+	if p == "" {
+		return nil
+	}
+	return NewPeerKadID(p)
+}
+
+// NewLookupEvent creates a LookupEvent automatically converting the node
+// libp2p Peer ID to a PeerKadID and the string Kademlia key to a KeyKadID.
+func NewLookupEvent(
+	node peer.ID,
+	id uuid.UUID,
+	key string,
+	request *LookupUpdateEvent,
+	response *LookupUpdateEvent,
+	terminate *LookupTerminateEvent,
+) *LookupEvent {
+	return &LookupEvent{
+		Node:      NewPeerKadID(node),
+		ID:        id,
+		Key:       NewKeyKadID(key),
+		Request:   request,
+		Response:  response,
+		Terminate: terminate,
+	}
+}
+
+// LookupEvent is emitted for every notable event that happens during a DHT lookup.
+// LookupEvent supports JSON marshalling because all of its fields do, recursively.
+type LookupEvent struct {
+	// Node is the ID of the node performing the lookup.
+	Node *PeerKadID
+	// ID is a unique identifier for the lookup instance.
+	ID uuid.UUID
+	// Key is the Kademlia key used as a lookup target.
+	Key *KeyKadID
+	// Request, if not nil, describes a state update event, associated with an outgoing query request.
+	Request *LookupUpdateEvent
+	// Response, if not nil, describes a state update event, associated with an outgoing query response.
+	Response *LookupUpdateEvent
+	// Terminate, if not nil, describes a termination event.
+	Terminate *LookupTerminateEvent
+}
+
+// NewLookupUpdateEvent creates a new lookup update event, automatically converting the passed peer IDs to peer Kad IDs.
+func NewLookupUpdateEvent(
+	cause peer.ID,
+	source peer.ID,
+	heard []peer.ID,
+	waiting []peer.ID,
+	queried []peer.ID,
+	unreachable []peer.ID,
+) *LookupUpdateEvent {
+	return &LookupUpdateEvent{
+		Cause:       OptPeerKadID(cause),
+		Source:      OptPeerKadID(source),
+		Heard:       NewPeerKadIDSlice(heard),
+		Waiting:     NewPeerKadIDSlice(waiting),
+		Queried:     NewPeerKadIDSlice(queried),
+		Unreachable: NewPeerKadIDSlice(unreachable),
+	}
+}
+
+// LookupUpdateEvent describes a lookup state update event.
+type LookupUpdateEvent struct {
+	// Cause is the peer whose response (or lack of response) caused the update event.
+	// If Cause is nil, this is the first update event in the lookup, caused by the seeding.
+ Cause *PeerKadID + // Source is the peer who informed us about the peer IDs in this update (below). + Source *PeerKadID + // Heard is a set of peers whose state in the lookup's peerset is being set to "heard". + Heard []*PeerKadID + // Waiting is a set of peers whose state in the lookup's peerset is being set to "waiting". + Waiting []*PeerKadID + // Queried is a set of peers whose state in the lookup's peerset is being set to "queried". + Queried []*PeerKadID + // Unreachable is a set of peers whose state in the lookup's peerset is being set to "unreachable". + Unreachable []*PeerKadID +} + +// LookupTerminateEvent describes a lookup termination event. +type LookupTerminateEvent struct { + // Reason is the reason for lookup termination. + Reason LookupTerminationReason +} + +// NewLookupTerminateEvent creates a new lookup termination event with a given reason. +func NewLookupTerminateEvent(reason LookupTerminationReason) *LookupTerminateEvent { + return &LookupTerminateEvent{Reason: reason} +} + +// LookupTerminationReason captures reasons for terminating a lookup. +type LookupTerminationReason int + +// MarshalJSON returns the JSON encoding of the passed lookup termination reason. +func (r LookupTerminationReason) MarshalJSON() ([]byte, error) { + return json.Marshal(r.String()) +} + +func (r LookupTerminationReason) String() string { + switch r { + case LookupStopped: + return "stopped" + case LookupCancelled: + return "cancelled" + case LookupStarvation: + return "starvation" + case LookupCompleted: + return "completed" + } + panic("unreachable") +} + +const ( + // LookupStopped indicates that the lookup was aborted by the user's stopFn. + LookupStopped LookupTerminationReason = iota + // LookupCancelled indicates that the lookup was aborted by the context. + LookupCancelled + // LookupStarvation indicates that the lookup terminated due to lack of unqueried peers. + LookupStarvation + // LookupCompleted indicates that the lookup terminated successfully, reaching the Kademlia end condition. + LookupCompleted +) + +type routingLookupKey struct{} + +// TODO: lookupEventChannel copies the implementation of eventChannel. +// The two should be refactored to use a common event channel implementation. +// A common implementation needs to rethink the signature of RegisterForEvents, +// because returning a typed channel cannot be made polymorphic without creating +// additional "adapter" channels. This has become easier to handle now that Go +// supports generics. +type lookupEventChannel struct { + mu sync.Mutex + ctx context.Context + ch chan<- *LookupEvent +} + +// waitThenClose is spawned in a goroutine when the channel is registered. This +// safely cleans up the channel when the context has been canceled. +func (e *lookupEventChannel) waitThenClose() { + <-e.ctx.Done() + e.mu.Lock() + close(e.ch) + // 1. Signals that we're done. + // 2. Frees memory (in case we end up hanging on to this for a while). + e.ch = nil + e.mu.Unlock() +} + +// send sends an event on the event channel, aborting if either the passed or +// the internal context expires. +func (e *lookupEventChannel) send(ctx context.Context, ev *LookupEvent) { + e.mu.Lock() + // Closed. + if e.ch == nil { + e.mu.Unlock() + return + } + // In case the passed context is unrelated, wait on both. + select { + case e.ch <- ev: + case <-e.ctx.Done(): + case <-ctx.Done(): + } + e.mu.Unlock() +} + +// RegisterForLookupEvents registers a lookup event channel with the given context. 
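+// A minimal usage sketch (hedged: the consuming goroutine shown is
+// illustrative, not part of this API):
+//
+//	lctx, events := RegisterForLookupEvents(ctx)
+//	go func() {
+//		for ev := range events {
+//			_ = ev // inspect or log each lookup event
+//		}
+//	}()
+//	// ...run DHT queries with lctx...
+//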
+// The returned context can be passed to DHT queries to receive lookup events on +// the returned channel. +// +// The passed context MUST be canceled when the caller is no longer interested +// in query events. +func RegisterForLookupEvents(ctx context.Context) (context.Context, <-chan *LookupEvent) { + ch := make(chan *LookupEvent, LookupEventBufferSize) + ech := &lookupEventChannel{ch: ch, ctx: ctx} + go ech.waitThenClose() + return context.WithValue(ctx, routingLookupKey{}, ech), ch +} + +// LookupEventBufferSize is the number of events to buffer. +var LookupEventBufferSize = 16 + +// PublishLookupEvent publishes a lookup event to the lookup event channel +// associated with the given context, if any. +func PublishLookupEvent(ctx context.Context, ev *LookupEvent) { + ich := ctx.Value(routingLookupKey{}) + if ich == nil { + return + } + + // We *want* to panic here. + ech := ich.(*lookupEventChannel) + ech.send(ctx, ev) +} diff --git a/go-libp2p-kad-dht/ext_test.go b/go-libp2p-kad-dht/ext_test.go new file mode 100644 index 0000000..36d3ca9 --- /dev/null +++ b/go-libp2p-kad-dht/ext_test.go @@ -0,0 +1,48 @@ +package dht + +import ( + "context" + "testing" + "time" + + "github.com/libp2p/go-libp2p/core/network" + "github.com/stretchr/testify/require" + + mocknet "github.com/libp2p/go-libp2p/p2p/net/mock" +) + +func TestInvalidRemotePeers(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + mn, err := mocknet.FullMeshLinked(5) + if err != nil { + t.Fatal(err) + } + defer mn.Close() + hosts := mn.Hosts() + + os := []Option{testPrefix, DisableAutoRefresh(), Mode(ModeServer)} + d, err := New(ctx, hosts[0], os...) + if err != nil { + t.Fatal(err) + } + for _, proto := range d.serverProtocols { + // Hang on every request. 
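+		// (The handler below deliberately blocks until the test context is
+		// canceled and only then resets the stream, so hosts[0] never gets a
+		// DHT response from hosts[1] and must not add it to its routing table.)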
+ hosts[1].SetStreamHandler(proto, func(s network.Stream) { + defer s.Reset() // nolint + <-ctx.Done() + }) + } + + err = mn.ConnectAllButSelf() + if err != nil { + t.Fatal("failed to connect peers", err) + } + + time.Sleep(100 * time.Millisecond) + + // hosts[1] isn't added to the routing table because it isn't responding to + // the DHT request + require.Equal(t, 0, d.routingTable.Size()) +} diff --git a/go-libp2p-kad-dht/fullrt/dht.go b/go-libp2p-kad-dht/fullrt/dht.go new file mode 100644 index 0000000..2242fbf --- /dev/null +++ b/go-libp2p-kad-dht/fullrt/dht.go @@ -0,0 +1,1555 @@ +package fullrt + +import ( + "bytes" + "context" + "errors" + "fmt" + "math/rand" + "sync" + "sync/atomic" + "time" + + "github.com/multiformats/go-base32" + "github.com/multiformats/go-multiaddr" + "github.com/multiformats/go-multihash" + + "github.com/libp2p/go-libp2p-routing-helpers/tracing" + "github.com/libp2p/go-libp2p/core/host" + "github.com/libp2p/go-libp2p/core/network" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/peerstore" + "github.com/libp2p/go-libp2p/core/protocol" + "github.com/libp2p/go-libp2p/core/routing" + swarm "github.com/libp2p/go-libp2p/p2p/net/swarm" + + "github.com/gogo/protobuf/proto" + u "github.com/ipfs/boxo/util" + "github.com/ipfs/go-cid" + ds "github.com/ipfs/go-datastore" + dssync "github.com/ipfs/go-datastore/sync" + logging "github.com/ipfs/go-log/v2" + + kaddht "github.com/libp2p/go-libp2p-kad-dht" + "github.com/libp2p/go-libp2p-kad-dht/crawler" + "github.com/libp2p/go-libp2p-kad-dht/internal" + internalConfig "github.com/libp2p/go-libp2p-kad-dht/internal/config" + "github.com/libp2p/go-libp2p-kad-dht/internal/net" + dht_pb "github.com/libp2p/go-libp2p-kad-dht/pb" + "github.com/libp2p/go-libp2p-kad-dht/providers" + kb "github.com/libp2p/go-libp2p-kbucket" + + record "github.com/libp2p/go-libp2p-record" + recpb "github.com/libp2p/go-libp2p-record/pb" + + "github.com/libp2p/go-libp2p-xor/kademlia" + kadkey "github.com/libp2p/go-libp2p-xor/key" + "github.com/libp2p/go-libp2p-xor/trie" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" +) + +var logger = logging.Logger("fullrtdht") + +const tracer = tracing.Tracer("go-libp2p-kad-dht/fullrt") +const dhtName = "FullRT" + +const rtRefreshLimitsMsg = `Accelerated DHT client was unable to fully refresh its routing table due to Resource Manager limits, which may degrade content routing. Consider increasing resource limits. See debug logs for the "dht-crawler" subsystem for details.` + +// FullRT is an experimental DHT client that is under development. Expect breaking changes to occur in this client +// until it stabilizes. 
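+//
+// A minimal construction sketch (hedged: the host h and the option values
+// here are illustrative assumptions, not requirements of this package):
+//
+//	frt, err := NewFullRT(h, "/ipfs",
+//		DHTOption(kaddht.BootstrapPeers(kaddht.GetDefaultBootstrapPeerAddrInfos()...)),
+//		WithCrawlInterval(time.Hour),
+//	)
+//	if err != nil {
+//		// handle the error
+//	}
+//	defer frt.Close()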
+type FullRT struct { + ctx context.Context + cancel context.CancelFunc + wg sync.WaitGroup + + enableValues, enableProviders bool + Validator record.Validator + ProviderManager *providers.ProviderManager + datastore ds.Datastore + h host.Host + + crawlerInterval time.Duration + lastCrawlTime time.Time + + crawler crawler.Crawler + protoMessenger *dht_pb.ProtocolMessenger + messageSender dht_pb.MessageSender + + filterFromTable kaddht.QueryFilterFunc + rtLk sync.RWMutex + rt *trie.Trie + + kMapLk sync.RWMutex + keyToPeerMap map[string]peer.ID + + peerAddrsLk sync.RWMutex + peerAddrs map[peer.ID][]multiaddr.Multiaddr + + bootstrapPeers []*peer.AddrInfo + + bucketSize int + + triggerRefresh chan struct{} + + waitFrac float64 + timeoutPerOp time.Duration + + bulkSendParallelism int + + self peer.ID +} + +// NewFullRT creates a DHT client that tracks the full network. It takes a protocol prefix for the given network. +// For example, the protocol /ipfs/kad/1.0.0 has the prefix /ipfs. + +// FullRT is an experimental DHT client that is under development. Expect breaking changes to occur in this client +// until it stabilizes. +// +// Not all of the standard DHT options are supported in this DHT. +func NewFullRT(h host.Host, protocolPrefix protocol.ID, options ...Option) (*FullRT, error) { + fullrtcfg := config{ + crawlInterval: time.Hour, + bulkSendParallelism: 20, + waitFrac: 0.3, + timeoutPerOp: 5 * time.Second, + } + if err := fullrtcfg.apply(options...); err != nil { + return nil, err + } + + dhtcfg := &internalConfig.Config{ + Datastore: dssync.MutexWrap(ds.NewMapDatastore()), + Validator: record.NamespacedValidator{}, + ValidatorChanged: false, + EnableProviders: true, + EnableValues: true, + ProtocolPrefix: protocolPrefix, + } + + if err := dhtcfg.Apply(fullrtcfg.dhtOpts...); err != nil { + return nil, err + } + if err := dhtcfg.ApplyFallbacks(h); err != nil { + return nil, err + } + + if err := dhtcfg.Validate(); err != nil { + return nil, err + } + + ms := net.NewMessageSenderImpl(h, []protocol.ID{dhtcfg.ProtocolPrefix + "/kad/1.0.0"}) + protoMessenger, err := dht_pb.NewProtocolMessenger(ms) + if err != nil { + return nil, err + } + + if fullrtcfg.crawler == nil { + fullrtcfg.crawler, err = crawler.NewDefaultCrawler(h, crawler.WithParallelism(200)) + if err != nil { + return nil, err + } + } + + ctx, cancel := context.WithCancel(context.Background()) + + self := h.ID() + pm, err := providers.NewProviderManager(self, h.Peerstore(), dhtcfg.Datastore, fullrtcfg.pmOpts...) 
+ if err != nil { + cancel() + return nil, err + } + + var bsPeers []*peer.AddrInfo + + for _, ai := range dhtcfg.BootstrapPeers() { + tmpai := ai + bsPeers = append(bsPeers, &tmpai) + } + + rt := &FullRT{ + ctx: ctx, + cancel: cancel, + + enableValues: dhtcfg.EnableValues, + enableProviders: dhtcfg.EnableProviders, + Validator: dhtcfg.Validator, + ProviderManager: pm, + datastore: dhtcfg.Datastore, + h: h, + crawler: fullrtcfg.crawler, + messageSender: ms, + protoMessenger: protoMessenger, + filterFromTable: kaddht.PublicQueryFilter, + rt: trie.New(), + keyToPeerMap: make(map[string]peer.ID), + bucketSize: dhtcfg.BucketSize, + + peerAddrs: make(map[peer.ID][]multiaddr.Multiaddr), + bootstrapPeers: bsPeers, + + triggerRefresh: make(chan struct{}), + + waitFrac: fullrtcfg.waitFrac, + timeoutPerOp: fullrtcfg.timeoutPerOp, + + crawlerInterval: fullrtcfg.crawlInterval, + + bulkSendParallelism: fullrtcfg.bulkSendParallelism, + + self: self, + } + + rt.wg.Add(1) + go rt.runCrawler(ctx) + + return rt, nil +} + +type crawlVal struct { + addrs []multiaddr.Multiaddr + key kadkey.Key +} + +func (dht *FullRT) TriggerRefresh(ctx context.Context) error { + select { + case <-ctx.Done(): + return ctx.Err() + case dht.triggerRefresh <- struct{}{}: + return nil + case <-dht.ctx.Done(): + return fmt.Errorf("dht is closed") + } +} + +func (dht *FullRT) Stat() map[string]peer.ID { + newMap := make(map[string]peer.ID) + + dht.kMapLk.RLock() + for k, v := range dht.keyToPeerMap { + newMap[k] = v + } + dht.kMapLk.RUnlock() + return newMap +} + +func (dht *FullRT) Ready() bool { + dht.rtLk.RLock() + lastCrawlTime := dht.lastCrawlTime + dht.rtLk.RUnlock() + + if time.Since(lastCrawlTime) > dht.crawlerInterval { + return false + } + + // TODO: This function needs to be better defined. Perhaps based on going through the peer map and seeing when the + // last time we were connected to any of them was. + dht.peerAddrsLk.RLock() + rtSize := len(dht.keyToPeerMap) + dht.peerAddrsLk.RUnlock() + + return rtSize > len(dht.bootstrapPeers)+1 +} + +func (dht *FullRT) Host() host.Host { + return dht.h +} + +func (dht *FullRT) runCrawler(ctx context.Context) { + defer dht.wg.Done() + t := time.NewTicker(dht.crawlerInterval) + + m := make(map[peer.ID]*crawlVal) + mxLk := sync.Mutex{} + + initialTrigger := make(chan struct{}, 1) + initialTrigger <- struct{}{} + + for { + select { + case <-t.C: + case <-initialTrigger: + case <-dht.triggerRefresh: + case <-ctx.Done(): + return + } + + var addrs []*peer.AddrInfo + dht.peerAddrsLk.Lock() + for k := range m { + addrs = append(addrs, &peer.AddrInfo{ID: k}) // Addrs: v.addrs + } + + addrs = append(addrs, dht.bootstrapPeers...) + dht.peerAddrsLk.Unlock() + + for k := range m { + delete(m, k) + } + + start := time.Now() + limitErrOnce := sync.Once{} + dht.crawler.Run(ctx, addrs, + func(p peer.ID, rtPeers []*peer.AddrInfo) { + conns := dht.h.Network().ConnsToPeer(p) + var addrs []multiaddr.Multiaddr + for _, conn := range conns { + addr := conn.RemoteMultiaddr() + addrs = append(addrs, addr) + } + + if len(addrs) == 0 { + logger.Debugf("no connections to %v after successful query. 
keeping addresses from the peerstore", p) + addrs = dht.h.Peerstore().Addrs(p) + } + + keep := kaddht.PublicRoutingTableFilter(dht, p) + if !keep { + return + } + + mxLk.Lock() + defer mxLk.Unlock() + m[p] = &crawlVal{ + addrs: addrs, + } + }, + func(p peer.ID, err error) { + dialErr, ok := err.(*swarm.DialError) + if ok { + for _, transportErr := range dialErr.DialErrors { + if errors.Is(transportErr.Cause, network.ErrResourceLimitExceeded) { + limitErrOnce.Do(func() { logger.Errorf(rtRefreshLimitsMsg) }) + } + } + } + // note that DialError implements Unwrap() which returns the Cause, so this covers that case + if errors.Is(err, network.ErrResourceLimitExceeded) { + limitErrOnce.Do(func() { logger.Errorf(rtRefreshLimitsMsg) }) + } + }) + dur := time.Since(start) + logger.Infof("crawl took %v", dur) + + peerAddrs := make(map[peer.ID][]multiaddr.Multiaddr) + kPeerMap := make(map[string]peer.ID) + newRt := trie.New() + for k, v := range m { + v.key = kadkey.KbucketIDToKey(kb.ConvertPeerID(k)) + peerAddrs[k] = v.addrs + kPeerMap[string(v.key)] = k + newRt.Add(v.key) + } + + dht.peerAddrsLk.Lock() + dht.peerAddrs = peerAddrs + dht.peerAddrsLk.Unlock() + + dht.kMapLk.Lock() + dht.keyToPeerMap = kPeerMap + dht.kMapLk.Unlock() + + dht.rtLk.Lock() + dht.rt = newRt + dht.lastCrawlTime = time.Now() + dht.rtLk.Unlock() + } +} + +func (dht *FullRT) Close() error { + dht.cancel() + dht.wg.Wait() + return dht.ProviderManager.Close() +} + +func (dht *FullRT) Bootstrap(ctx context.Context) (err error) { + _, end := tracer.Bootstrap(dhtName, ctx) + defer func() { end(err) }() + + // TODO: This should block until the first crawl finish. + + return nil +} + +// CheckPeers return (success, total) +func (dht *FullRT) CheckPeers(ctx context.Context, peers ...peer.ID) (int, int) { + ctx, span := internal.StartSpan(ctx, "FullRT.CheckPeers", trace.WithAttributes(attribute.Int("NumPeers", len(peers)))) + defer span.End() + + var peerAddrs chan interface{} + var total int + if len(peers) == 0 { + dht.peerAddrsLk.RLock() + total = len(dht.peerAddrs) + peerAddrs = make(chan interface{}, total) + for k, v := range dht.peerAddrs { + peerAddrs <- peer.AddrInfo{ + ID: k, + Addrs: v, + } + } + close(peerAddrs) + dht.peerAddrsLk.RUnlock() + } else { + total = len(peers) + peerAddrs = make(chan interface{}, total) + dht.peerAddrsLk.RLock() + for _, p := range peers { + peerAddrs <- peer.AddrInfo{ + ID: p, + Addrs: dht.peerAddrs[p], + } + } + close(peerAddrs) + dht.peerAddrsLk.RUnlock() + } + + var success uint64 + + workers(100, func(i interface{}) { + a := i.(peer.AddrInfo) + dialctx, dialcancel := context.WithTimeout(ctx, time.Second*3) + if err := dht.h.Connect(dialctx, a); err == nil { + atomic.AddUint64(&success, 1) + } + dialcancel() + }, peerAddrs) + return int(success), total +} + +func workers(numWorkers int, fn func(interface{}), inputs <-chan interface{}) { + jobs := make(chan interface{}) + defer close(jobs) + for i := 0; i < numWorkers; i++ { + go func() { + for j := range jobs { + fn(j) + } + }() + } + for i := range inputs { + jobs <- i + } +} + +func (dht *FullRT) GetClosestPeers(ctx context.Context, key string) ([]peer.ID, error) { + _, span := internal.StartSpan(ctx, "FullRT.GetClosestPeers", trace.WithAttributes(internal.KeyAsAttribute("Key", key))) + defer span.End() + + kbID := kb.ConvertKey(key) + kadKey := kadkey.KbucketIDToKey(kbID) + dht.rtLk.RLock() + closestKeys := kademlia.ClosestN(kadKey, dht.rt, dht.bucketSize) + dht.rtLk.RUnlock() + + peers := make([]peer.ID, 0, len(closestKeys)) + for _, k := 
range closestKeys { + dht.kMapLk.RLock() + p, ok := dht.keyToPeerMap[string(k)] + if !ok { + logger.Errorf("key not found in map") + } + dht.kMapLk.RUnlock() + dht.peerAddrsLk.RLock() + peerAddrs := dht.peerAddrs[p] + dht.peerAddrsLk.RUnlock() + + dht.h.Peerstore().AddAddrs(p, peerAddrs, peerstore.TempAddrTTL) + peers = append(peers, p) + } + return peers, nil +} + +// PutValue adds value corresponding to given Key. +// This is the top level "Store" operation of the DHT +func (dht *FullRT) PutValue(ctx context.Context, key string, value []byte, opts ...routing.Option) (err error) { + ctx, end := tracer.PutValue(dhtName, ctx, key, value, opts...) + defer func() { end(err) }() + + if !dht.enableValues { + return routing.ErrNotSupported + } + + logger.Debugw("putting value", "key", internal.LoggableRecordKeyString(key)) + + // don't even allow local users to put bad values. + if err := dht.Validator.Validate(key, value); err != nil { + return err + } + + old, err := dht.getLocal(ctx, key) + if err != nil { + // Means something is wrong with the datastore. + return err + } + + // Check if we have an old value that's not the same as the new one. + if old != nil && !bytes.Equal(old.GetValue(), value) { + // Check to see if the new one is better. + i, err := dht.Validator.Select(key, [][]byte{value, old.GetValue()}) + if err != nil { + return err + } + if i != 0 { + return fmt.Errorf("can't replace a newer value with an older value") + } + } + + rec := record.MakePutRecord(key, value) + rec.TimeReceived = u.FormatRFC3339(time.Now()) + err = dht.putLocal(ctx, key, rec) + if err != nil { + return err + } + + peers, err := dht.GetClosestPeers(ctx, key) + if err != nil { + return err + } + + successes := dht.execOnMany(ctx, func(ctx context.Context, p peer.ID) error { + routing.PublishQueryEvent(ctx, &routing.QueryEvent{ + Type: routing.Value, + ID: p, + }) + err := dht.protoMessenger.PutValue(ctx, p, rec) + return err + }, peers, true) + + if successes == 0 { + return fmt.Errorf("failed to complete put") + } + + return nil +} + +// RecvdVal stores a value and the peer from which we got the value. +type RecvdVal struct { + Val []byte + From peer.ID +} + +// GetValue searches for the value corresponding to given Key. +func (dht *FullRT) GetValue(ctx context.Context, key string, opts ...routing.Option) (result []byte, err error) { + ctx, end := tracer.GetValue(dhtName, ctx, key, opts...) + defer func() { end(result, err) }() + + if !dht.enableValues { + return nil, routing.ErrNotSupported + } + + // apply defaultQuorum if relevant + var cfg routing.Options + if err := cfg.Apply(opts...); err != nil { + return nil, err + } + opts = append(opts, kaddht.Quorum(internalConfig.GetQuorum(&cfg))) + + responses, err := dht.SearchValue(ctx, key, opts...) + if err != nil { + return nil, err + } + var best []byte + + for r := range responses { + best = r + } + + if ctx.Err() != nil { + return best, ctx.Err() + } + + if best == nil { + return nil, routing.ErrNotFound + } + logger.Debugf("GetValue %v %x", internal.LoggableRecordKeyString(key), best) + return best, nil +} + +// SearchValue searches for the value corresponding to given Key and streams the results. +func (dht *FullRT) SearchValue(ctx context.Context, key string, opts ...routing.Option) (ch <-chan []byte, err error) { + ctx, end := tracer.SearchValue(dhtName, ctx, key, opts...) 
+ defer func() { ch, err = end(ch, err) }() + + if !dht.enableValues { + return nil, routing.ErrNotSupported + } + + var cfg routing.Options + if err := cfg.Apply(opts...); err != nil { + return nil, err + } + + responsesNeeded := 0 + if !cfg.Offline { + responsesNeeded = internalConfig.GetQuorum(&cfg) + } + + stopCh := make(chan struct{}) + valCh, lookupRes := dht.getValues(ctx, key, stopCh) + + out := make(chan []byte) + go func() { + defer close(out) + + best, peersWithBest, aborted := dht.searchValueQuorum(ctx, key, valCh, stopCh, out, responsesNeeded) + if best == nil || aborted { + return + } + + updatePeers := make([]peer.ID, 0, dht.bucketSize) + select { + case l := <-lookupRes: + if l == nil { + return + } + + for _, p := range l.peers { + if _, ok := peersWithBest[p]; !ok { + updatePeers = append(updatePeers, p) + } + } + case <-ctx.Done(): + return + } + + ctx, cancel := context.WithTimeout(ctx, time.Second*5) + dht.updatePeerValues(ctx, key, best, updatePeers) + cancel() + }() + + return out, nil +} + +func (dht *FullRT) searchValueQuorum(ctx context.Context, key string, valCh <-chan RecvdVal, stopCh chan struct{}, + out chan<- []byte, nvals int) ([]byte, map[peer.ID]struct{}, bool) { + numResponses := 0 + return dht.processValues(ctx, key, valCh, + func(ctx context.Context, v RecvdVal, better bool) bool { + numResponses++ + if better { + select { + case out <- v.Val: + case <-ctx.Done(): + return false + } + } + + if nvals > 0 && numResponses > nvals { + close(stopCh) + return true + } + return false + }) +} + +func (dht *FullRT) processValues(ctx context.Context, key string, vals <-chan RecvdVal, + newVal func(ctx context.Context, v RecvdVal, better bool) bool) (best []byte, peersWithBest map[peer.ID]struct{}, aborted bool) { +loop: + for { + if aborted { + return + } + + select { + case v, ok := <-vals: + if !ok { + break loop + } + + // Select best value + if best != nil { + if bytes.Equal(best, v.Val) { + peersWithBest[v.From] = struct{}{} + aborted = newVal(ctx, v, false) + continue + } + sel, err := dht.Validator.Select(key, [][]byte{best, v.Val}) + if err != nil { + logger.Warnw("failed to select best value", "key", internal.LoggableRecordKeyString(key), "error", err) + continue + } + if sel != 1 { + aborted = newVal(ctx, v, false) + continue + } + } + peersWithBest = make(map[peer.ID]struct{}) + peersWithBest[v.From] = struct{}{} + best = v.Val + aborted = newVal(ctx, v, true) + case <-ctx.Done(): + return + } + } + + return +} + +func (dht *FullRT) updatePeerValues(ctx context.Context, key string, val []byte, peers []peer.ID) { + fixupRec := record.MakePutRecord(key, val) + for _, p := range peers { + go func(p peer.ID) { + // TODO: Is this possible? 
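+			// (Presumably yes: GetClosestPeers draws from the crawled
+			// key-to-peer map, which can include the local peer, so the
+			// local-put branch below is kept as a safeguard.)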
+ if p == dht.h.ID() { + err := dht.putLocal(ctx, key, fixupRec) + if err != nil { + logger.Error("Error correcting local dht entry:", err) + } + return + } + ctx, cancel := context.WithTimeout(ctx, time.Second*5) + defer cancel() + err := dht.protoMessenger.PutValue(ctx, p, fixupRec) + if err != nil { + logger.Debug("Error correcting DHT entry: ", err) + } + }(p) + } +} + +type lookupWithFollowupResult struct { + peers []peer.ID // the top K not unreachable peers at the end of the query +} + +func (dht *FullRT) getValues(ctx context.Context, key string, stopQuery chan struct{}) (<-chan RecvdVal, <-chan *lookupWithFollowupResult) { + valCh := make(chan RecvdVal, 1) + lookupResCh := make(chan *lookupWithFollowupResult, 1) + + logger.Debugw("finding value", "key", internal.LoggableRecordKeyString(key)) + + if rec, err := dht.getLocal(ctx, key); rec != nil && err == nil { + select { + case valCh <- RecvdVal{ + Val: rec.GetValue(), + From: dht.h.ID(), + }: + case <-ctx.Done(): + } + } + peers, err := dht.GetClosestPeers(ctx, key) + if err != nil { + lookupResCh <- &lookupWithFollowupResult{} + close(valCh) + close(lookupResCh) + return valCh, lookupResCh + } + + go func() { + defer close(valCh) + defer close(lookupResCh) + queryFn := func(ctx context.Context, p peer.ID) error { + // For DHT query command + routing.PublishQueryEvent(ctx, &routing.QueryEvent{ + Type: routing.SendingQuery, + ID: p, + }) + + rec, peers, err := dht.protoMessenger.GetValue(ctx, p, key) + if err != nil { + return err + } + + // For DHT query command + routing.PublishQueryEvent(ctx, &routing.QueryEvent{ + Type: routing.PeerResponse, + ID: p, + Responses: peers, + }) + + if rec == nil { + return nil + } + + val := rec.GetValue() + if val == nil { + logger.Debug("received a nil record value") + return nil + } + if err := dht.Validator.Validate(key, val); err != nil { + // make sure record is valid + logger.Debugw("received invalid record (discarded)", "error", err) + return nil + } + + // the record is present and valid, send it out for processing + select { + case valCh <- RecvdVal{ + Val: val, + From: p, + }: + case <-ctx.Done(): + return ctx.Err() + } + + return nil + } + + dht.execOnMany(ctx, queryFn, peers, false) + lookupResCh <- &lookupWithFollowupResult{peers: peers} + }() + return valCh, lookupResCh +} + +// Provider abstraction for indirect stores. +// Some DHTs store values directly, while an indirect store stores pointers to +// locations of the value, similarly to Coral and Mainline DHT. + +// Provide makes this node announce that it can provide a value for the given key +func (dht *FullRT) Provide(ctx context.Context, key cid.Cid, brdcst bool) (err error) { + ctx, end := tracer.Provide(dhtName, ctx, key, brdcst) + defer func() { end(err) }() + + if !dht.enableProviders { + return routing.ErrNotSupported + } else if !key.Defined() { + return fmt.Errorf("invalid cid: undefined") + } + keyMH := key.Hash() + logger.Debugw("providing", "cid", key, "mh", internal.LoggableProviderRecordBytes(keyMH)) + + // add self locally + dht.ProviderManager.AddProvider(ctx, keyMH, peer.AddrInfo{ID: dht.h.ID()}) + if !brdcst { + return nil + } + + closerCtx := ctx + if deadline, ok := ctx.Deadline(); ok { + now := time.Now() + timeout := deadline.Sub(now) + + if timeout < 0 { + // timed out + return context.DeadlineExceeded + } else if timeout < 10*time.Second { + // Reserve 10% for the final put. 
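+			// (For example, with 8s left on the outer context, the inner
+			// deadline moves 0.8s earlier, leaving ~0.8s for the final
+			// put messages.)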
+ deadline = deadline.Add(-timeout / 10) + } else { + // Otherwise, reserve a second (we'll already be + // connected so this should be fast). + deadline = deadline.Add(-time.Second) + } + var cancel context.CancelFunc + closerCtx, cancel = context.WithDeadline(ctx, deadline) + defer cancel() + } + + var exceededDeadline bool + peers, err := dht.GetClosestPeers(closerCtx, string(keyMH)) + switch err { + case context.DeadlineExceeded: + // If the _inner_ deadline has been exceeded but the _outer_ + // context is still fine, provide the value to the closest peers + // we managed to find, even if they're not the _actual_ closest peers. + if ctx.Err() != nil { + return ctx.Err() + } + exceededDeadline = true + case nil: + default: + return err + } + + successes := dht.execOnMany(ctx, func(ctx context.Context, p peer.ID) error { + err := dht.protoMessenger.PutProviderAddrs(ctx, p, keyMH, peer.AddrInfo{ + ID: dht.self, + Addrs: dht.h.Addrs(), + }) + return err + }, peers, true) + + if exceededDeadline { + return context.DeadlineExceeded + } + + if successes == 0 { + return fmt.Errorf("failed to complete provide") + } + + return ctx.Err() +} + +// execOnMany executes the given function on each of the peers, although it may only wait for a certain chunk of peers +// to respond before considering the results "good enough" and returning. +// +// If sloppyExit is true then this function will return without waiting for all of its internal goroutines to close. +// If sloppyExit is true then the passed in function MUST be able to safely complete an arbitrary amount of time after +// execOnMany has returned (e.g. do not write to resources that might get closed or set to nil and therefore result in +// a panic instead of just returning an error). +func (dht *FullRT) execOnMany(ctx context.Context, fn func(context.Context, peer.ID) error, peers []peer.ID, sloppyExit bool) int { + if len(peers) == 0 { + return 0 + } + + // having a buffer that can take all of the elements is basically a hack to allow for sloppy exits that clean up + // the goroutines after the function is done rather than before + errCh := make(chan error, len(peers)) + numSuccessfulToWaitFor := int(float64(len(peers)) * dht.waitFrac) + + putctx, cancel := context.WithTimeout(ctx, dht.timeoutPerOp) + defer cancel() + + for _, p := range peers { + go func(p peer.ID) { + errCh <- fn(putctx, p) + }(p) + } + + var numDone, numSuccess, successSinceLastTick int + var ticker *time.Ticker + var tickChan <-chan time.Time + + for numDone < len(peers) { + select { + case err := <-errCh: + numDone++ + if err == nil { + numSuccess++ + if numSuccess >= numSuccessfulToWaitFor && ticker == nil { + // Once there are enough successes, wait a little longer + ticker = time.NewTicker(time.Millisecond * 500) + defer ticker.Stop() + tickChan = ticker.C + successSinceLastTick = numSuccess + } + // This is equivalent to numSuccess * 2 + numFailures >= len(peers) and is a heuristic that seems to be + // performing reasonably. 
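+				// (numDone counts successes and failures alike, so
+				// numSuccess+numDone >= len(peers) is the same as
+				// 2*numSuccess + numFailures >= len(peers).)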
+ // TODO: Make this metric more configurable + // TODO: Have better heuristics in this function whether determined from observing static network + // properties or dynamically calculating them + if numSuccess+numDone >= len(peers) { + cancel() + if sloppyExit { + return numSuccess + } + } + } + case <-tickChan: + if numSuccess > successSinceLastTick { + // If there were additional successes, then wait another tick + successSinceLastTick = numSuccess + } else { + cancel() + if sloppyExit { + return numSuccess + } + } + } + } + return numSuccess +} + +func (dht *FullRT) ProvideMany(ctx context.Context, keys []multihash.Multihash) (err error) { + ctx, end := tracer.ProvideMany(dhtName, ctx, keys) + defer func() { end(err) }() + + if !dht.enableProviders { + return routing.ErrNotSupported + } + + // Compute addresses once for all provides + pi := peer.AddrInfo{ + ID: dht.h.ID(), + Addrs: dht.h.Addrs(), + } + pbPeers := dht_pb.RawPeerInfosToPBPeers([]peer.AddrInfo{pi}) + + // TODO: We may want to limit the type of addresses in our provider records + // For example, in a WAN-only DHT prohibit sharing non-WAN addresses (e.g. 192.168.0.100) + if len(pi.Addrs) < 1 { + return fmt.Errorf("no known addresses for self, cannot put provider") + } + + fn := func(ctx context.Context, p, k peer.ID) error { + pmes := dht_pb.NewMessage(dht_pb.Message_ADD_PROVIDER, multihash.Multihash(k), 0) + pmes.ProviderPeers = pbPeers + + return dht.messageSender.SendMessage(ctx, p, pmes) + } + + keysAsPeerIDs := make([]peer.ID, 0, len(keys)) + for _, k := range keys { + keysAsPeerIDs = append(keysAsPeerIDs, peer.ID(k)) + } + + return dht.bulkMessageSend(ctx, keysAsPeerIDs, fn, true) +} + +func (dht *FullRT) PutMany(ctx context.Context, keys []string, values [][]byte) error { + ctx, span := internal.StartSpan(ctx, "FullRT.PutMany", trace.WithAttributes(attribute.Int("NumKeys", len(keys)))) + defer span.End() + + if !dht.enableValues { + return routing.ErrNotSupported + } + + if len(keys) != len(values) { + return fmt.Errorf("number of keys does not match the number of values") + } + + keysAsPeerIDs := make([]peer.ID, 0, len(keys)) + keyRecMap := make(map[string][]byte) + for i, k := range keys { + keysAsPeerIDs = append(keysAsPeerIDs, peer.ID(k)) + keyRecMap[k] = values[i] + } + + if len(keys) != len(keyRecMap) { + return fmt.Errorf("does not support duplicate keys") + } + + fn := func(ctx context.Context, p, k peer.ID) error { + keyStr := string(k) + return dht.protoMessenger.PutValue(ctx, p, record.MakePutRecord(keyStr, keyRecMap[keyStr])) + } + + return dht.bulkMessageSend(ctx, keysAsPeerIDs, fn, false) +} + +func (dht *FullRT) bulkMessageSend(ctx context.Context, keys []peer.ID, fn func(ctx context.Context, target, k peer.ID) error, isProvRec bool) error { + ctx, span := internal.StartSpan(ctx, "FullRT.BulkMessageSend") + defer span.End() + + if len(keys) == 0 { + return nil + } + + type report struct { + successes int + failures int + lastSuccess time.Time + mx sync.RWMutex + } + + keySuccesses := make(map[peer.ID]*report, len(keys)) + var numSkipped int64 + + for _, k := range keys { + keySuccesses[k] = &report{} + } + + logger.Infof("bulk send: number of keys %d, unique %d", len(keys), len(keySuccesses)) + numSuccessfulToWaitFor := int(float64(dht.bucketSize) * dht.waitFrac * 1.2) + + sortedKeys := make([]peer.ID, 0, len(keySuccesses)) + for k := range keySuccesses { + sortedKeys = append(sortedKeys, k) + } + + sortedKeys = kb.SortClosestPeers(sortedKeys, kb.ID(make([]byte, 32))) + + dht.kMapLk.RLock() + numPeers 
:= len(dht.keyToPeerMap) + dht.kMapLk.RUnlock() + + chunkSize := (len(sortedKeys) * dht.bucketSize * 2) / numPeers + if chunkSize == 0 { + chunkSize = 1 + } + + connmgrTag := fmt.Sprintf("dht-bulk-provide-tag-%d", rand.Int()) + + type workMessage struct { + p peer.ID + keys []peer.ID + } + + workCh := make(chan workMessage, 1) + wg := sync.WaitGroup{} + wg.Add(dht.bulkSendParallelism) + for i := 0; i < dht.bulkSendParallelism; i++ { + go func() { + defer wg.Done() + defer logger.Debugf("bulk send goroutine done") + for wmsg := range workCh { + p, workKeys := wmsg.p, wmsg.keys + dht.peerAddrsLk.RLock() + peerAddrs := dht.peerAddrs[p] + dht.peerAddrsLk.RUnlock() + dialCtx, dialCancel := context.WithTimeout(ctx, dht.timeoutPerOp) + if err := dht.h.Connect(dialCtx, peer.AddrInfo{ID: p, Addrs: peerAddrs}); err != nil { + dialCancel() + atomic.AddInt64(&numSkipped, 1) + continue + } + dialCancel() + dht.h.ConnManager().Protect(p, connmgrTag) + for _, k := range workKeys { + keyReport := keySuccesses[k] + + queryTimeout := dht.timeoutPerOp + keyReport.mx.RLock() + if keyReport.successes >= numSuccessfulToWaitFor { + if time.Since(keyReport.lastSuccess) > time.Millisecond*500 { + keyReport.mx.RUnlock() + continue + } + queryTimeout = time.Millisecond * 500 + } + keyReport.mx.RUnlock() + + fnCtx, fnCancel := context.WithTimeout(ctx, queryTimeout) + if err := fn(fnCtx, p, k); err == nil { + keyReport.mx.Lock() + keyReport.successes++ + if keyReport.successes >= numSuccessfulToWaitFor { + keyReport.lastSuccess = time.Now() + } + keyReport.mx.Unlock() + } else { + keyReport.mx.Lock() + keyReport.failures++ + keyReport.mx.Unlock() + if ctx.Err() != nil { + fnCancel() + break + } + } + fnCancel() + } + + dht.h.ConnManager().Unprotect(p, connmgrTag) + } + }() + } + + keyGroups := divideByChunkSize(sortedKeys, chunkSize) + sendsSoFar := 0 + for _, g := range keyGroups { + if ctx.Err() != nil { + break + } + + keysPerPeer := make(map[peer.ID][]peer.ID) + for _, k := range g { + peers, err := dht.GetClosestPeers(ctx, string(k)) + if err == nil { + for _, p := range peers { + keysPerPeer[p] = append(keysPerPeer[p], k) + } + } + } + + logger.Debugf("bulk send: %d peers for group size %d", len(keysPerPeer), len(g)) + + keyloop: + for p, workKeys := range keysPerPeer { + select { + case workCh <- workMessage{p: p, keys: workKeys}: + case <-ctx.Done(): + break keyloop + } + } + sendsSoFar += len(g) + logger.Infof("bulk sending: %.1f%% done - %d/%d done", 100*float64(sendsSoFar)/float64(len(keySuccesses)), sendsSoFar, len(keySuccesses)) + } + + close(workCh) + + logger.Debugf("bulk send complete, waiting on goroutines to close") + + wg.Wait() + + numSendsSuccessful := 0 + numFails := 0 + // generate a histogram of how many successful sends occurred per key + successHist := make(map[int]int) + // generate a histogram of how many failed sends occurred per key + // this does not include sends to peers that were skipped and had no messages sent to them at all + failHist := make(map[int]int) + for _, v := range keySuccesses { + if v.successes > 0 { + numSendsSuccessful++ + } + successHist[v.successes]++ + failHist[v.failures]++ + numFails += v.failures + } + + if numSendsSuccessful == 0 { + logger.Infof("bulk send failed") + return fmt.Errorf("failed to complete bulk sending") + } + + logger.Infof("bulk send complete: %d keys, %d unique, %d successful, %d skipped peers, %d fails", + len(keys), len(keySuccesses), numSendsSuccessful, numSkipped, numFails) + + logger.Infof("bulk send summary: successHist %v, failHist 
%v", successHist, failHist) + + return nil +} + +// divideByChunkSize divides the set of keys into groups of (at most) chunkSize. Chunk size must be greater than 0. +func divideByChunkSize(keys []peer.ID, chunkSize int) [][]peer.ID { + if len(keys) == 0 { + return nil + } + + if chunkSize < 1 { + panic(fmt.Sprintf("fullrt: divide into groups: invalid chunk size %d", chunkSize)) + } + + var keyChunks [][]peer.ID + var nextChunk []peer.ID + chunkProgress := 0 + for _, k := range keys { + nextChunk = append(nextChunk, k) + chunkProgress++ + if chunkProgress == chunkSize { + keyChunks = append(keyChunks, nextChunk) + chunkProgress = 0 + nextChunk = make([]peer.ID, 0, len(nextChunk)) + } + } + if chunkProgress != 0 { + keyChunks = append(keyChunks, nextChunk) + } + return keyChunks +} + +// FindProviders searches until the context expires. +func (dht *FullRT) FindProviders(ctx context.Context, c cid.Cid) ([]peer.AddrInfo, error) { + if !dht.enableProviders { + return nil, routing.ErrNotSupported + } else if !c.Defined() { + return nil, fmt.Errorf("invalid cid: undefined") + } + + var providers []peer.AddrInfo + for p := range dht.FindProvidersAsync(ctx, c, dht.bucketSize) { + providers = append(providers, p) + } + return providers, nil +} + +// FindProvidersAsync is the same thing as FindProviders, but returns a channel. +// Peers will be returned on the channel as soon as they are found, even before +// the search query completes. If count is zero then the query will run until it +// completes. Note: not reading from the returned channel may block the query +// from progressing. +func (dht *FullRT) FindProvidersAsync(ctx context.Context, key cid.Cid, count int) (ch <-chan peer.AddrInfo) { + ctx, end := tracer.FindProvidersAsync(dhtName, ctx, key, count) + defer func() { ch = end(ch, nil) }() + + if !dht.enableProviders || !key.Defined() { + peerOut := make(chan peer.AddrInfo) + close(peerOut) + return peerOut + } + + peerOut := make(chan peer.AddrInfo) + + keyMH := key.Hash() + + logger.Debugw("finding providers", "cid", key, "mh", internal.LoggableProviderRecordBytes(keyMH)) + go dht.findProvidersAsyncRoutine(ctx, keyMH, count, peerOut) + return peerOut +} + +func (dht *FullRT) findProvidersAsyncRoutine(ctx context.Context, key multihash.Multihash, count int, peerOut chan peer.AddrInfo) { + // use a span here because unlike tracer.FindProvidersAsync we know who told us about it, and that is interesting to log. + ctx, span := internal.StartSpan(ctx, "FullRT.FindProvidersAsyncRoutine") + defer span.End() + + defer close(peerOut) + + findAll := count == 0 + ps := make(map[peer.ID]struct{}) + psLock := &sync.Mutex{} + psTryAdd := func(p peer.ID) bool { + psLock.Lock() + defer psLock.Unlock() + _, ok := ps[p] + if !ok && (len(ps) < count || findAll) { + ps[p] = struct{}{} + return true + } + return false + } + psSize := func() int { + psLock.Lock() + defer psLock.Unlock() + return len(ps) + } + + provs, err := dht.ProviderManager.GetProviders(ctx, key) + if err != nil { + return + } + for _, p := range provs { + // NOTE: Assuming that this list of peers is unique + if psTryAdd(p.ID) { + select { + case peerOut <- p: + span.AddEvent("found provider", trace.WithAttributes( + attribute.Stringer("peer", p.ID), + attribute.Stringer("from", dht.self), + )) + case <-ctx.Done(): + return + } + } + + // If we have enough peers locally, don't bother with remote RPC + // TODO: is this a DOS vector? 
+ if !findAll && psSize() >= count { + return + } + } + + peers, err := dht.GetClosestPeers(ctx, string(key)) + if err != nil { + return + } + + queryctx, cancelquery := context.WithCancel(ctx) + defer cancelquery() + + fn := func(ctx context.Context, p peer.ID) error { + // For DHT query command + routing.PublishQueryEvent(ctx, &routing.QueryEvent{ + Type: routing.SendingQuery, + ID: p, + }) + + provs, closest, err := dht.protoMessenger.GetProviders(ctx, p, key) + if err != nil { + return err + } + + logger.Debugf("%d provider entries", len(provs)) + + // Add unique providers from request, up to 'count' + for _, prov := range provs { + dht.maybeAddAddrs(prov.ID, prov.Addrs, peerstore.TempAddrTTL) + logger.Debugf("got provider: %s", prov) + if psTryAdd(prov.ID) { + logger.Debugf("using provider: %s", prov) + select { + case peerOut <- *prov: + span.AddEvent("found provider", trace.WithAttributes( + attribute.Stringer("peer", prov.ID), + attribute.Stringer("from", p), + )) + case <-ctx.Done(): + logger.Debug("context timed out sending more providers") + return ctx.Err() + } + } + if !findAll && psSize() >= count { + logger.Debugf("got enough providers (%d/%d)", psSize(), count) + cancelquery() + return nil + } + } + + // Give closer peers back to the query to be queried + logger.Debugf("got closer peers: %d %s", len(closest), closest) + + routing.PublishQueryEvent(ctx, &routing.QueryEvent{ + Type: routing.PeerResponse, + ID: p, + Responses: closest, + }) + return nil + } + + dht.execOnMany(queryctx, fn, peers, false) +} + +// FindPeer searches for a peer with a given ID. +func (dht *FullRT) FindPeer(ctx context.Context, id peer.ID) (pi peer.AddrInfo, err error) { + ctx, end := tracer.FindPeer(dhtName, ctx, id) + defer func() { end(pi, err) }() + + if err := id.Validate(); err != nil { + return peer.AddrInfo{}, err + } + + logger.Debugw("finding peer", "peer", id) + + // Check if we're already connected to them + if pi := dht.FindLocal(id); pi.ID != "" { + return pi, nil + } + + peers, err := dht.GetClosestPeers(ctx, string(id)) + if err != nil { + return peer.AddrInfo{}, err + } + + queryctx, cancelquery := context.WithCancel(ctx) + defer cancelquery() + + addrsCh := make(chan *peer.AddrInfo, 1) + newAddrs := make([]multiaddr.Multiaddr, 0) + + wg := sync.WaitGroup{} + wg.Add(1) + go func() { + defer wg.Done() + addrsSoFar := make(map[multiaddr.Multiaddr]struct{}) + for { + select { + case ai, ok := <-addrsCh: + if !ok { + return + } + + for _, a := range ai.Addrs { + _, found := addrsSoFar[a] + if !found { + newAddrs = append(newAddrs, a) + addrsSoFar[a] = struct{}{} + } + } + case <-ctx.Done(): + return + } + } + }() + + fn := func(ctx context.Context, p peer.ID) error { + // For DHT query command + routing.PublishQueryEvent(ctx, &routing.QueryEvent{ + Type: routing.SendingQuery, + ID: p, + }) + + peers, err := dht.protoMessenger.GetClosestPeers(ctx, p, id) + if err != nil { + logger.Debugf("error getting closer peers: %s", err) + return err + } + + // For DHT query command + routing.PublishQueryEvent(ctx, &routing.QueryEvent{ + Type: routing.PeerResponse, + ID: p, + Responses: peers, + }) + + for _, a := range peers { + if a.ID == id { + select { + case addrsCh <- a: + case <-ctx.Done(): + return ctx.Err() + } + return nil + } + } + return nil + } + + dht.execOnMany(queryctx, fn, peers, false) + + close(addrsCh) + wg.Wait() + + if len(newAddrs) > 0 { + connctx, cancelconn := context.WithTimeout(ctx, time.Second*5) + defer cancelconn() + _ = dht.h.Connect(connctx, peer.AddrInfo{ + ID: id, + 
Addrs: newAddrs, + }) + } + + // Return peer information if we tried to dial the peer during the query or we are (or recently were) connected + // to the peer. + connectedness := dht.h.Network().Connectedness(id) + if connectedness == network.Connected || connectedness == network.CanConnect { + return dht.h.Peerstore().PeerInfo(id), nil + } + + return peer.AddrInfo{}, routing.ErrNotFound +} + +var _ routing.Routing = (*FullRT)(nil) + +// getLocal attempts to retrieve the value from the datastore. +// +// returns nil, nil when either nothing is found or the value found doesn't properly validate. +// returns nil, some_error when there's a *datastore* error (i.e., something goes very wrong) +func (dht *FullRT) getLocal(ctx context.Context, key string) (*recpb.Record, error) { + logger.Debugw("finding value in datastore", "key", internal.LoggableRecordKeyString(key)) + + rec, err := dht.getRecordFromDatastore(ctx, mkDsKey(key)) + if err != nil { + logger.Warnw("get local failed", "key", internal.LoggableRecordKeyString(key), "error", err) + return nil, err + } + + // Double check the key. Can't hurt. + if rec != nil && string(rec.GetKey()) != key { + logger.Errorw("BUG: found a DHT record that didn't match its key", "expected", internal.LoggableRecordKeyString(key), "got", rec.GetKey()) + return nil, nil + + } + return rec, nil +} + +// putLocal stores the key value pair in the datastore +func (dht *FullRT) putLocal(ctx context.Context, key string, rec *recpb.Record) error { + data, err := proto.Marshal(rec) + if err != nil { + logger.Warnw("failed to marshal record for local put", "error", err, "key", internal.LoggableRecordKeyString(key)) + return err + } + + return dht.datastore.Put(ctx, mkDsKey(key), data) +} + +func mkDsKey(s string) ds.Key { + return ds.NewKey(base32.RawStdEncoding.EncodeToString([]byte(s))) +} + +// returns nil, nil when either nothing is found or the value found doesn't properly validate. +// returns nil, some_error when there's a *datastore* error (i.e., something goes very wrong) +func (dht *FullRT) getRecordFromDatastore(ctx context.Context, dskey ds.Key) (*recpb.Record, error) { + buf, err := dht.datastore.Get(ctx, dskey) + if err == ds.ErrNotFound { + return nil, nil + } + if err != nil { + logger.Errorw("error retrieving record from datastore", "key", dskey, "error", err) + return nil, err + } + rec := new(recpb.Record) + err = proto.Unmarshal(buf, rec) + if err != nil { + // Bad data in datastore, log it but don't return an error, we'll just overwrite it + logger.Errorw("failed to unmarshal record from datastore", "key", dskey, "error", err) + return nil, nil + } + + err = dht.Validator.Validate(string(rec.GetKey()), rec.GetValue()) + if err != nil { + // Invalid record in datastore, probably expired but don't return an error, + // we'll just overwrite it + logger.Debugw("local record verify failed", "key", rec.GetKey(), "error", err) + return nil, nil + } + + return rec, nil +} + +// FindLocal looks for a peer with a given ID connected to this dht and returns its address info if found. +func (dht *FullRT) FindLocal(id peer.ID) peer.AddrInfo { + switch dht.h.Network().Connectedness(id) { + case network.Connected, network.CanConnect: + return dht.h.Peerstore().PeerInfo(id) + default: + return peer.AddrInfo{} + } +} + +func (dht *FullRT) maybeAddAddrs(p peer.ID, addrs []multiaddr.Multiaddr, ttl time.Duration) { + // Don't add addresses for self or our connected peers. We have better ones. 
+ if p == dht.h.ID() || dht.h.Network().Connectedness(p) == network.Connected { + return + } + dht.h.Peerstore().AddAddrs(p, addrs, ttl) +} diff --git a/go-libp2p-kad-dht/fullrt/dht_test.go b/go-libp2p-kad-dht/fullrt/dht_test.go new file mode 100644 index 0000000..23eef30 --- /dev/null +++ b/go-libp2p-kad-dht/fullrt/dht_test.go @@ -0,0 +1,86 @@ +package fullrt + +import ( + "strconv" + "testing" + + "github.com/libp2p/go-libp2p/core/peer" +) + +func TestDivideByChunkSize(t *testing.T) { + var keys []peer.ID + for i := 0; i < 10; i++ { + keys = append(keys, peer.ID(strconv.Itoa(i))) + } + + convertToStrings := func(peers []peer.ID) []string { + var out []string + for _, p := range peers { + out = append(out, string(p)) + } + return out + } + + pidsEquals := func(a, b []string) bool { + if len(a) != len(b) { + return false + } + for i, v := range a { + if v != b[i] { + return false + } + } + return true + } + + t.Run("Divides", func(t *testing.T) { + gr := divideByChunkSize(keys, 5) + if len(gr) != 2 { + t.Fatal("incorrect number of groups") + } + if g1, expected := convertToStrings(gr[0]), []string{"0", "1", "2", "3", "4"}; !pidsEquals(g1, expected) { + t.Fatalf("expected %v, got %v", expected, g1) + } + if g2, expected := convertToStrings(gr[1]), []string{"5", "6", "7", "8", "9"}; !pidsEquals(g2, expected) { + t.Fatalf("expected %v, got %v", expected, g2) + } + }) + t.Run("Remainder", func(t *testing.T) { + gr := divideByChunkSize(keys, 3) + if len(gr) != 4 { + t.Fatal("incorrect number of groups") + } + if g, expected := convertToStrings(gr[0]), []string{"0", "1", "2"}; !pidsEquals(g, expected) { + t.Fatalf("expected %v, got %v", expected, g) + } + if g, expected := convertToStrings(gr[1]), []string{"3", "4", "5"}; !pidsEquals(g, expected) { + t.Fatalf("expected %v, got %v", expected, g) + } + if g, expected := convertToStrings(gr[2]), []string{"6", "7", "8"}; !pidsEquals(g, expected) { + t.Fatalf("expected %v, got %v", expected, g) + } + if g, expected := convertToStrings(gr[3]), []string{"9"}; !pidsEquals(g, expected) { + t.Fatalf("expected %v, got %v", expected, g) + } + }) + t.Run("OneEach", func(t *testing.T) { + gr := divideByChunkSize(keys, 1) + if len(gr) != 10 { + t.Fatal("incorrect number of groups") + } + for i := 0; i < 10; i++ { + if g, expected := convertToStrings(gr[i]), []string{strconv.Itoa(i)}; !pidsEquals(g, expected) { + t.Fatalf("expected %v, got %v", expected, g) + } + } + }) + t.Run("ChunkSizeLargerThanKeys", func(t *testing.T) { + gr := divideByChunkSize(keys, 11) + if len(gr) != 1 { + t.Fatal("incorrect number of groups") + } + if g, expected := convertToStrings(gr[0]), convertToStrings(keys); !pidsEquals(g, expected) { + t.Fatalf("expected %v, got %v", expected, g) + } + }) +} diff --git a/go-libp2p-kad-dht/fullrt/options.go b/go-libp2p-kad-dht/fullrt/options.go new file mode 100644 index 0000000..275792f --- /dev/null +++ b/go-libp2p-kad-dht/fullrt/options.go @@ -0,0 +1,98 @@ +package fullrt + +import ( + "fmt" + "time" + + kaddht "github.com/libp2p/go-libp2p-kad-dht" + "github.com/libp2p/go-libp2p-kad-dht/crawler" + "github.com/libp2p/go-libp2p-kad-dht/providers" +) + +type config struct { + dhtOpts []kaddht.Option + + crawlInterval time.Duration + waitFrac float64 + bulkSendParallelism int + timeoutPerOp time.Duration + crawler crawler.Crawler + pmOpts []providers.Option +} + +func (cfg *config) apply(opts ...Option) error { + for i, o := range opts { + if err := o(cfg); err != nil { + return fmt.Errorf("fullrt dht option %d failed: %w", i, err) + } + } + 
return nil +} + +type Option func(opt *config) error + +func DHTOption(opts ...kaddht.Option) Option { + return func(c *config) error { + c.dhtOpts = append(c.dhtOpts, opts...) + return nil + } +} + +// WithCrawler sets the crawler.Crawler to use in order to crawl the DHT network. +// Defaults to crawler.DefaultCrawler with parallelism of 200. +func WithCrawler(c crawler.Crawler) Option { + return func(opt *config) error { + opt.crawler = c + return nil + } +} + +// WithCrawlInterval sets the interval at which the DHT is crawled to refresh the peer store. +// Defaults to 1 hour if unspecified. +func WithCrawlInterval(i time.Duration) Option { + return func(opt *config) error { + opt.crawlInterval = i + return nil + } +} + +// WithSuccessWaitFraction sets the fraction of peers to wait for before considering an operation a success. +// The fraction must be a number in the range (0, 1]. +// Defaults to 30% if unspecified. +func WithSuccessWaitFraction(f float64) Option { + return func(opt *config) error { + if f <= 0 || f > 1 { + return fmt.Errorf("success wait fraction must be larger than 0 and smaller or equal to 1; got: %f", f) + } + opt.waitFrac = f + return nil + } +} + +// WithBulkSendParallelism sets the maximum degree of parallelism at which messages are sent to other peers. It must be at least 1. +// Defaults to 20 if unspecified. +func WithBulkSendParallelism(b int) Option { + return func(opt *config) error { + if b < 1 { + return fmt.Errorf("bulk send parallelism must be at least 1; got: %d", b) + } + opt.bulkSendParallelism = b + return nil + } +} + +// WithTimeoutPerOperation sets the timeout per operation, where operations include putting providers and querying the DHT. +// Defaults to 5 seconds if unspecified. +func WithTimeoutPerOperation(t time.Duration) Option { + return func(opt *config) error { + opt.timeoutPerOp = t + return nil + } +} + +// WithProviderManagerOptions sets the options to use when instantiating providers.ProviderManager. 
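+//
+// A hedged usage sketch (providers.CacheSize and the value 1024 are
+// illustrative assumptions, not recommended settings):
+//
+//	WithProviderManagerOptions(providers.CacheSize(1024))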
+func WithProviderManagerOptions(pmOpts ...providers.Option) Option { + return func(opt *config) error { + opt.pmOpts = pmOpts + return nil + } +} diff --git a/go-libp2p-kad-dht/go.mod b/go-libp2p-kad-dht/go.mod new file mode 100644 index 0000000..5340f85 --- /dev/null +++ b/go-libp2p-kad-dht/go.mod @@ -0,0 +1,144 @@ +module github.com/libp2p/go-libp2p-kad-dht + +go 1.21 + +retract v0.24.3 // this includes a breaking change and should have been released as v0.25.0 + +replace github.com/multiformats/go-multiaddr => ../go-multiaddr + +replace github.com/multiformats/go-multiaddr-dns => ../go-multiaddr-dns + +replace github.com/libp2p/go-libp2p => ../go-libp2p + +require ( + github.com/gogo/protobuf v1.3.2 + github.com/google/gopacket v1.1.19 + github.com/google/uuid v1.4.0 + github.com/hashicorp/go-multierror v1.1.1 + github.com/hashicorp/golang-lru v0.5.4 + github.com/ipfs/boxo v0.10.0 + github.com/ipfs/go-cid v0.4.1 + github.com/ipfs/go-datastore v0.6.0 + github.com/ipfs/go-detect-race v0.0.1 + github.com/ipfs/go-log/v2 v2.5.1 + github.com/libp2p/go-libp2p v0.30.0 + github.com/libp2p/go-libp2p-kbucket v0.6.3 + github.com/libp2p/go-libp2p-record v0.2.0 + github.com/libp2p/go-libp2p-routing-helpers v0.7.2 + github.com/libp2p/go-libp2p-testing v0.12.0 + github.com/libp2p/go-libp2p-xor v0.1.0 + github.com/libp2p/go-msgio v0.3.0 + github.com/libp2p/go-netroute v0.2.1 + github.com/multiformats/go-base32 v0.1.0 + github.com/multiformats/go-multiaddr v0.12.4 + github.com/multiformats/go-multibase v0.2.0 + github.com/multiformats/go-multihash v0.2.3 + github.com/multiformats/go-multistream v0.5.0 + github.com/stretchr/testify v1.9.0 + github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 + go.opencensus.io v0.24.0 + go.opentelemetry.io/otel v1.16.0 + go.opentelemetry.io/otel/trace v1.16.0 + go.uber.org/multierr v1.11.0 + go.uber.org/zap v1.27.0 + gonum.org/v1/gonum v0.13.0 +) + +require ( + github.com/Jorropo/jsync v1.0.1 // indirect + github.com/benbjohnson/clock v1.3.5 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/cloudflare/circl v1.3.9 // indirect + github.com/containerd/cgroups v1.1.0 // indirect + github.com/coreos/go-systemd/v22 v22.5.0 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect + github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 // indirect + github.com/docker/go-units v0.5.0 // indirect + github.com/elastic/gosigar v0.14.2 // indirect + github.com/flynn/noise v1.1.0 // indirect + github.com/francoispqt/gojay v1.2.13 // indirect + github.com/go-logr/logr v1.3.0 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect + github.com/godbus/dbus/v5 v5.1.0 // indirect + github.com/google/pprof v0.0.0-20240207164012-fb44976bdcd5 // indirect + github.com/gorilla/websocket v1.5.1 // indirect + github.com/hashicorp/errwrap v1.1.0 // indirect + github.com/huin/goupnp v1.3.0 // indirect + github.com/ipfs/go-log v1.0.5 // indirect + github.com/ipld/go-ipld-prime v0.20.0 // indirect + github.com/jackpal/go-nat-pmp v1.0.2 // indirect + github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect + github.com/jbenet/goprocess v0.1.4 // indirect + github.com/klauspost/compress v1.17.8 // indirect + github.com/klauspost/cpuid/v2 v2.2.7 // indirect + github.com/koron/go-ssdp v0.0.4 // indirect + github.com/libp2p/go-buffer-pool v0.1.0 // indirect + 
github.com/libp2p/go-cidranger v1.1.0 // indirect + github.com/libp2p/go-flow-metrics v0.1.0 // indirect + github.com/libp2p/go-libp2p-asn-util v0.4.1 // indirect + github.com/libp2p/go-nat v0.2.0 // indirect + github.com/libp2p/go-reuseport v0.4.0 // indirect + github.com/libp2p/go-yamux/v4 v4.0.1 // indirect + github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect + github.com/mattn/go-isatty v0.0.20 // indirect + github.com/miekg/dns v1.1.58 // indirect + github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect + github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect + github.com/minio/sha256-simd v1.0.1 // indirect + github.com/mr-tron/base58 v1.2.0 // indirect + github.com/multiformats/go-base36 v0.2.0 // indirect + github.com/multiformats/go-multiaddr-dns v0.3.1 // indirect + github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect + github.com/multiformats/go-multicodec v0.9.0 // indirect + github.com/multiformats/go-varint v0.0.7 // indirect + github.com/onsi/ginkgo/v2 v2.15.0 // indirect + github.com/opencontainers/runtime-spec v1.2.0 // indirect + github.com/opentracing/opentracing-go v1.2.0 // indirect + github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect + github.com/pion/datachannel v1.5.6 // indirect + github.com/pion/dtls/v2 v2.2.11 // indirect + github.com/pion/ice/v2 v2.3.24 // indirect + github.com/pion/interceptor v0.1.29 // indirect + github.com/pion/logging v0.2.2 // indirect + github.com/pion/mdns v0.0.12 // indirect + github.com/pion/randutil v0.1.0 // indirect + github.com/pion/rtcp v1.2.14 // indirect + github.com/pion/rtp v1.8.6 // indirect + github.com/pion/sctp v1.8.16 // indirect + github.com/pion/sdp/v3 v3.0.9 // indirect + github.com/pion/srtp/v2 v2.0.18 // indirect + github.com/pion/stun v0.6.1 // indirect + github.com/pion/transport/v2 v2.2.5 // indirect + github.com/pion/turn/v2 v2.1.6 // indirect + github.com/pion/webrtc/v3 v3.2.40 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/polydawn/refmt v0.89.0 // indirect + github.com/prometheus/client_golang v1.19.1 // indirect + github.com/prometheus/client_model v0.6.1 // indirect + github.com/prometheus/common v0.48.0 // indirect + github.com/prometheus/procfs v0.12.0 // indirect + github.com/quic-go/qpack v0.4.0 // indirect + github.com/quic-go/quic-go v0.44.0 // indirect + github.com/quic-go/webtransport-go v0.8.0 // indirect + github.com/raulk/go-watchdog v1.3.0 // indirect + github.com/spaolacci/murmur3 v1.1.0 // indirect + go.opentelemetry.io/otel/metric v1.16.0 // indirect + go.uber.org/dig v1.17.1 // indirect + go.uber.org/fx v1.21.1 // indirect + go.uber.org/mock v0.4.0 // indirect + golang.org/x/crypto v0.23.0 // indirect + golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 // indirect + golang.org/x/mod v0.17.0 // indirect + golang.org/x/net v0.25.0 // indirect + golang.org/x/sync v0.7.0 // indirect + golang.org/x/sys v0.20.0 // indirect + golang.org/x/text v0.15.0 // indirect + golang.org/x/tools v0.21.0 // indirect + google.golang.org/protobuf v1.34.1 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + lukechampine.com/blake3 v1.2.1 // indirect +) diff --git a/go-libp2p-kad-dht/go.sum b/go-libp2p-kad-dht/go.sum new file mode 100644 index 0000000..1a20f93 --- /dev/null +++ b/go-libp2p-kad-dht/go.sum @@ -0,0 +1,820 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.31.0/go.mod 
h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.37.0/go.mod h1:TS1dMSSfndXH133OKGwekG838Om/cQT0BUHV3HcBgoo= +dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU= +dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU= +dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4= +dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU= +git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= +github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/Jorropo/jsync v1.0.1 h1:6HgRolFZnsdfzRUj+ImB9og1JYOxQoReSywkHOGSaUU= +github.com/Jorropo/jsync v1.0.1/go.mod h1:jCOZj3vrBCri3bSU3ErUYvevKlnbssrXeCivybS5ABQ= +github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= +github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o= +github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= +github.com/btcsuite/btcd v0.0.0-20190824003749-130ea5bddde3/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI= +github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= +github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= +github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= +github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg= +github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY= +github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= +github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= +github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= +github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= 
+github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cloudflare/circl v1.3.9 h1:QFrlgFYf2Qpi8bSpVPK1HBvWpx16v/1TZivyo7pGuBE= +github.com/cloudflare/circl v1.3.9/go.mod h1:PDRU+oXvdD7KCtgKxW95M5Z8BpSCJXQORiZFnBQS5QU= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= +github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM= +github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= +github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= +github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c h1:pFUpOrbxDR6AkioZ1ySsx5yxlDQZ8stG2b88gTPxgJU= +github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c/go.mod h1:6UhI8N9EjYm1c2odKpFpAYeR8dsBeM7PtzQhRgxRr9U= +github.com/decred/dcrd/crypto/blake256 v1.0.1 h1:7PltbUIQB7u/FfZ39+DGa/ShuMyJ5ilcvdfma9wOH6Y= +github.com/decred/dcrd/crypto/blake256 v1.0.1/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 h1:rpfIENRNNilwHwZeG5+P150SMrnNEcHYvcCuK6dPZSg= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0= +github.com/dgraph-io/badger v1.6.0/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4= +github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= +github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= 
+github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/elastic/gosigar v0.12.0/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= +github.com/elastic/gosigar v0.14.2 h1:Dg80n8cr90OZ7x+bAax/QjoW/XqTI11RmA79ZwIm9/4= +github.com/elastic/gosigar v0.14.2/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= +github.com/flynn/noise v1.1.0 h1:KjPQoQCEFdZDiP03phOvGi11+SVVhBG2wOWAorLsstg= +github.com/flynn/noise v1.1.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag= +github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk= +github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY= +github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY= +github.com/frankban/quicktest v1.14.4/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= +github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98= +github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= +github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= +github.com/go-yaml/yaml v2.1.0+incompatible/go.mod h1:w2MrLa16VYP0jy6N7M5kHaCkaLENm+P+Tv+MfurjSw0= +github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= +github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= 
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= +github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= +github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8= +github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo= +github.com/google/martian 
v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20240207164012-fb44976bdcd5 h1:E/LAvt58di64hlYjx7AsNS6C/ysHWYo+2qPCZKTQhRo= +github.com/google/pprof v0.0.0-20240207164012-fb44976bdcd5/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4= +github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= +github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gopherjs/gopherjs v0.0.0-20190430165422-3e4dfb77656c h1:7lF+Vz0LqiRidnzC1Oq86fpX1q/iEv2KJdrCtttYjT4= +github.com/gopherjs/gopherjs v0.0.0-20190430165422-3e4dfb77656c/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY= +github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= +github.com/gxed/hashland/keccakpg v0.0.1/go.mod h1:kRzw3HkwxFU1mpmPP8v1WyQzwdGfmKFJ6tItnhQ67kU= +github.com/gxed/hashland/murmur3 v0.0.1/go.mod h1:KjXop02n4/ckmZSnY2+HKcLud/tcmvhST0bie/0lS48= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= +github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc= +github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/ipfs/boxo v0.10.0 h1:tdDAxq8jrsbRkYoF+5Rcqyeb91hgWe2hp7iLu7ORZLY= +github.com/ipfs/boxo v0.10.0/go.mod h1:Fg+BnfxZ0RPzR0nOodzdIq3A7KgoWAOWsEIImrIQdBM= +github.com/ipfs/go-cid v0.0.3/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= +github.com/ipfs/go-cid v0.0.7/go.mod 
h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I= +github.com/ipfs/go-cid v0.4.1 h1:A/T3qGvxi4kpKWWcPC/PgbvDA2bjVLO7n4UeVwnbs/s= +github.com/ipfs/go-cid v0.4.1/go.mod h1:uQHwDeX4c6CtyrFwdqyhpNcxVewur1M7l7fNU7LKwZk= +github.com/ipfs/go-datastore v0.1.0/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE= +github.com/ipfs/go-datastore v0.1.1/go.mod h1:w38XXW9kVFNp57Zj5knbKWM2T+KOZCGDRVNdgPHtbHw= +github.com/ipfs/go-datastore v0.6.0 h1:JKyz+Gvz1QEZw0LsX1IBn+JFCJQH4SJVFtM4uWU0Myk= +github.com/ipfs/go-datastore v0.6.0/go.mod h1:rt5M3nNbSO/8q1t4LNkLyUwRs8HupMeN/8O4Vn9YAT8= +github.com/ipfs/go-detect-race v0.0.1 h1:qX/xay2W3E4Q1U7d9lNs1sU9nvguX0a7319XbyQ6cOk= +github.com/ipfs/go-detect-race v0.0.1/go.mod h1:8BNT7shDZPo99Q74BpGMK+4D8Mn4j46UU0LZ723meps= +github.com/ipfs/go-ds-badger v0.0.7/go.mod h1:qt0/fWzZDoPW6jpQeqUjR5kBfhDNB65jd9YlmAvpQBk= +github.com/ipfs/go-ds-leveldb v0.1.0/go.mod h1:hqAW8y4bwX5LWcCtku2rFNX3vjDZCy5LZCg+cSZvYb8= +github.com/ipfs/go-ipfs-delay v0.0.0-20181109222059-70721b86a9a8/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw= +github.com/ipfs/go-ipfs-util v0.0.1/go.mod h1:spsl5z8KUnrve+73pOhSVZND1SIxPW5RyBCNzQxlJBc= +github.com/ipfs/go-ipfs-util v0.0.2 h1:59Sswnk1MFaiq+VcaknX7aYEyGyGDAA73ilhEK2POp8= +github.com/ipfs/go-ipfs-util v0.0.2/go.mod h1:CbPtkWJzjLdEcezDns2XYaehFVNXG9zrdrtMecczcsQ= +github.com/ipfs/go-log v0.0.1/go.mod h1:kL1d2/hzSpI0thNYjiKfjanbVNU+IIGA/WnNESY9leM= +github.com/ipfs/go-log v1.0.5 h1:2dOuUCB1Z7uoczMWgAyDck5JLb72zHzrMnGnCNNbvY8= +github.com/ipfs/go-log v1.0.5/go.mod h1:j0b8ZoR+7+R99LD9jZ6+AJsrzkPbSXbZfGakb5JPtIo= +github.com/ipfs/go-log/v2 v2.1.3/go.mod h1:/8d0SH3Su5Ooc31QlL1WysJhvyOTDCjcCZ9Axpmri6g= +github.com/ipfs/go-log/v2 v2.5.1 h1:1XdUzF7048prq4aBjDQQ4SL5RxftpRGdXhNRwKSAlcY= +github.com/ipfs/go-log/v2 v2.5.1/go.mod h1:prSpmC1Gpllc9UYWxDiZDreBYw7zp4Iqp1kOLU9U5UI= +github.com/ipld/go-ipld-prime v0.20.0 h1:Ud3VwE9ClxpO2LkCYP7vWPc0Fo+dYdYzgxUJZ3uRG4g= +github.com/ipld/go-ipld-prime v0.20.0/go.mod h1:PzqZ/ZR981eKbgdr3y2DJYeD/8bgMawdGVlJDE8kK+M= +github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus= +github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= +github.com/jbenet/go-cienv v0.1.0/go.mod h1:TqNnHUmJgXau0nCzC7kXWeotg3J9W34CUv5Djy1+FlA= +github.com/jbenet/go-temp-err-catcher v0.1.0 h1:zpb3ZH6wIE8Shj2sKS+khgRvf7T7RABoLk/+KKHggpk= +github.com/jbenet/go-temp-err-catcher v0.1.0/go.mod h1:0kJRvmDZXNMIiJirNPEYfhpPwbGVtZVWC34vc5WLsDk= +github.com/jbenet/goprocess v0.0.0-20160826012719-b497e2f366b8/go.mod h1:Ly/wlsjFq/qrU3Rar62tu1gASgGw6chQbSh/XgIIXCY= +github.com/jbenet/goprocess v0.1.3/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4= +github.com/jbenet/goprocess v0.1.4 h1:DRGOFReOMqqDNXwW70QkacFW0YN9QnwLV0Vqk+3oU0o= +github.com/jbenet/goprocess v0.1.4/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4= +github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU= +github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jtolds/gls 
v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/kami-zh/go-capturer v0.0.0-20171211120116-e492ea43421d/go.mod h1:P2viExyCEfeWGU259JnaQ34Inuec4R38JCyBx2edgD0= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= +github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU= +github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= +github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= +github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= +github.com/klauspost/cpuid/v2 v2.2.3/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= +github.com/klauspost/cpuid/v2 v2.2.6/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= +github.com/klauspost/cpuid/v2 v2.2.7 h1:ZWSB3igEs+d0qvnxR/ZBzXVmxkgt8DdzP6m9pfuVLDM= +github.com/klauspost/cpuid/v2 v2.2.7/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/koron/go-ssdp v0.0.4 h1:1IDwrghSKYM7yLf7XCzbByg2sJ/JcNOZRXS2jczTwz0= +github.com/koron/go-ssdp v0.0.4/go.mod h1:oDXq+E5IL5q0U8uSBcoAXzTzInwy5lEgC91HoKtbmZk= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/libp2p/go-buffer-pool v0.0.1/go.mod h1:xtyIz9PMobb13WaxR6Zo1Pd1zXJKYg0a8KiIvDp3TzQ= +github.com/libp2p/go-buffer-pool v0.0.2/go.mod h1:MvaB6xw5vOrDl8rYZGLFdKAuk/hRoRZd1Vi32+RXyFM= +github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8= +github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg= +github.com/libp2p/go-cidranger v1.1.0 h1:ewPN8EZ0dd1LSnrtuwd4709PXVcITVeuwbag38yPW7c= +github.com/libp2p/go-cidranger v1.1.0/go.mod h1:KWZTfSr+r9qEo9OkI9/SIEeAtw+NNoU0dXIXt15Okic= +github.com/libp2p/go-flow-metrics v0.0.1/go.mod h1:Iv1GH0sG8DtYN3SVJ2eG221wMiNpZxBdp967ls1g+k8= +github.com/libp2p/go-flow-metrics v0.0.3/go.mod h1:HeoSNUrOJVK1jEpDqVEiUOIXqhbnS27omG0uWU5slZs= +github.com/libp2p/go-flow-metrics v0.1.0 h1:0iPhMI8PskQwzh57jB9WxIuIOQ0r+15PChFGkx3Q3WM= +github.com/libp2p/go-flow-metrics v0.1.0/go.mod h1:4Xi8MX8wj5aWNDAZttg6UPmc0ZrnFNsMtpsYUClFtro= +github.com/libp2p/go-libp2p-asn-util v0.4.1 h1:xqL7++IKD9TBFMgnLPZR6/6iYhawHKHl950SO9L6n94= 
+github.com/libp2p/go-libp2p-asn-util v0.4.1/go.mod h1:d/NI6XZ9qxw67b4e+NgpQexCIiFYJjErASrYW4PFDN8= +github.com/libp2p/go-libp2p-core v0.2.4/go.mod h1:STh4fdfa5vDYr0/SzYYeqnt+E6KfEV5VxfIrm0bcI0g= +github.com/libp2p/go-libp2p-core v0.3.0/go.mod h1:ACp3DmS3/N64c2jDzcV429ukDpicbL6+TrrxANBjPGw= +github.com/libp2p/go-libp2p-kbucket v0.3.1/go.mod h1:oyjT5O7tS9CQurok++ERgc46YLwEpuGoFq9ubvoUOio= +github.com/libp2p/go-libp2p-kbucket v0.6.3 h1:p507271wWzpy2f1XxPzCQG9NiN6R6lHL9GiSErbQQo0= +github.com/libp2p/go-libp2p-kbucket v0.6.3/go.mod h1:RCseT7AH6eJWxxk2ol03xtP9pEHetYSPXOaJnOiD8i0= +github.com/libp2p/go-libp2p-peerstore v0.1.4/go.mod h1:+4BDbDiiKf4PzpANZDAT+knVdLxvqh7hXOujessqdzs= +github.com/libp2p/go-libp2p-record v0.2.0 h1:oiNUOCWno2BFuxt3my4i1frNrt7PerzB3queqa1NkQ0= +github.com/libp2p/go-libp2p-record v0.2.0/go.mod h1:I+3zMkvvg5m2OcSdoL0KPljyJyvNDFGKX7QdlpYUcwk= +github.com/libp2p/go-libp2p-routing-helpers v0.7.2 h1:xJMFyhQ3Iuqnk9Q2dYE1eUTzsah7NLw3Qs2zjUV78T0= +github.com/libp2p/go-libp2p-routing-helpers v0.7.2/go.mod h1:cN4mJAD/7zfPKXBcs9ze31JGYAZgzdABEm+q/hkswb8= +github.com/libp2p/go-libp2p-testing v0.12.0 h1:EPvBb4kKMWO29qP4mZGyhVzUyR25dvfUIK5WDu6iPUA= +github.com/libp2p/go-libp2p-testing v0.12.0/go.mod h1:KcGDRXyN7sQCllucn1cOOS+Dmm7ujhfEyXQL5lvkcPg= +github.com/libp2p/go-libp2p-xor v0.1.0 h1:hhQwT4uGrBcuAkUGXADuPltalOdpf9aag9kaYNT2tLA= +github.com/libp2p/go-libp2p-xor v0.1.0/go.mod h1:LSTM5yRnjGZbWNTA/hRwq2gGFrvRIbQJscoIL/u6InY= +github.com/libp2p/go-msgio v0.0.4/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= +github.com/libp2p/go-msgio v0.3.0 h1:mf3Z8B1xcFN314sWX+2vOTShIE0Mmn2TXn3YCUQGNj0= +github.com/libp2p/go-msgio v0.3.0/go.mod h1:nyRM819GmVaF9LX3l03RMh10QdOroF++NBbxAb0mmDM= +github.com/libp2p/go-nat v0.2.0 h1:Tyz+bUFAYqGyJ/ppPPymMGbIgNRH+WqC5QrT5fKrrGk= +github.com/libp2p/go-nat v0.2.0/go.mod h1:3MJr+GRpRkyT65EpVPBstXLvOlAPzUVlG6Pwg9ohLJk= +github.com/libp2p/go-netroute v0.2.1 h1:V8kVrpD8GK0Riv15/7VN6RbUQ3URNZVosw7H2v9tksU= +github.com/libp2p/go-netroute v0.2.1/go.mod h1:hraioZr0fhBjG0ZRXJJ6Zj2IVEVNx6tDTFQfSmcq7mQ= +github.com/libp2p/go-openssl v0.0.3/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= +github.com/libp2p/go-openssl v0.0.4/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= +github.com/libp2p/go-reuseport v0.4.0 h1:nR5KU7hD0WxXCJbmw7r2rhRYruNRl2koHw8fQscQm2s= +github.com/libp2p/go-reuseport v0.4.0/go.mod h1:ZtI03j/wO5hZVDFo2jKywN6bYKWLOy8Se6DrI2E1cLU= +github.com/libp2p/go-yamux/v4 v4.0.1 h1:FfDR4S1wj6Bw2Pqbc8Uz7pCxeRBPbwsBbEdfwiCypkQ= +github.com/libp2p/go-yamux/v4 v4.0.1/go.mod h1:NWjl8ZTLOGlozrXSOZ/HlfG++39iKNnM5wwmtQP1YB4= +github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8u83wA0rVZ8ttrq5CpaPZdvrK0LP2lOk= +github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= +github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= 
+github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= +github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4= +github.com/miekg/dns v1.1.58 h1:ca2Hdkz+cDg/7eNF6V56jjzuZ4aCAE+DbVkILdQWG/4= +github.com/miekg/dns v1.1.58/go.mod h1:Ypv+3b/KadlvW9vJfXOTf300O4UqaHFzFCuHz+rPkBY= +github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c h1:bzE/A84HN25pxAuk9Eej1Kz9OUelF97nAc82bDquQI8= +github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c/go.mod h1:0SQS9kMwD2VsyFEB++InYyBJroV/FRmBgcydeSUcJms= +github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b h1:z78hV3sbSMAUoyUMM0I83AUIT6Hu17AWfgjzIbtrYFc= +github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b/go.mod h1:lxPUiZwKoFL8DUUmalo2yJJUCxbPKtm8OKfqr2/FTNU= +github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc h1:PTfri+PuQmWDqERdnNMiD9ZejrlswWrCpBEZgWOiTrc= +github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc/go.mod h1:cGKTAVKx4SxOuR/czcZ/E2RSJ3sfHs8FpHhQ5CWMf9s= +github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ= +github.com/minio/sha256-simd v0.0.0-20190131020904-2d45a736cd16/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= +github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= +github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= +github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM= +github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM= +github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/mr-tron/base58 v1.1.0/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= +github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= +github.com/mr-tron/base58 v1.1.3/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= +github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o= +github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= +github.com/multiformats/go-base32 v0.0.3/go.mod h1:pLiuGC8y0QR3Ue4Zug5UzK9LjgbkL8NSQj0zQ5Nz/AA= +github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE= +github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI= +github.com/multiformats/go-base36 v0.1.0/go.mod h1:kFGE83c6s80PklsHO9sRn2NCoffoRdUUOENyW/Vv6sM= +github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0= 
+github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4= +github.com/multiformats/go-multiaddr-fmt v0.1.0 h1:WLEFClPycPkp4fnIzoFoV9FVd49/eQsuaL3/CWe167E= +github.com/multiformats/go-multiaddr-fmt v0.1.0/go.mod h1:hGtDIW4PU4BqJ50gW2quDuPVjyWNZxToGUh/HwTZYJo= +github.com/multiformats/go-multiaddr-net v0.1.1/go.mod h1:5JNbcfBOP4dnhoZOv10JJVkJO0pCCEf8mTnipAo2UZQ= +github.com/multiformats/go-multibase v0.0.1/go.mod h1:bja2MqRZ3ggyXtZSEDKpl0uO/gviWFaSteVbWT51qgs= +github.com/multiformats/go-multibase v0.0.3/go.mod h1:5+1R4eQrT3PkYZ24C3W2Ue2tPwIdYQD509ZjSb5y9Oc= +github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g= +github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk= +github.com/multiformats/go-multicodec v0.9.0 h1:pb/dlPnzee/Sxv/j4PmkDRxCOi3hXTz3IbPKOXWJkmg= +github.com/multiformats/go-multicodec v0.9.0/go.mod h1:L3QTQvMIaVBkXOXXtVmYE+LI16i14xuaojr/H7Ai54k= +github.com/multiformats/go-multihash v0.0.1/go.mod h1:w/5tugSrLEbWqlcgJabL3oHFKTwfvkofsjW2Qa1ct4U= +github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= +github.com/multiformats/go-multihash v0.0.10/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= +github.com/multiformats/go-multihash v0.0.13/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= +github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U= +github.com/multiformats/go-multihash v0.2.3/go.mod h1:dXgKXCXjBzdscBLk9JkjINiEsCKRVch90MdaGiKsvSM= +github.com/multiformats/go-multistream v0.5.0 h1:5htLSLl7lvJk3xx3qT/8Zm9J4K8vEOf/QGkvOGQAyiE= +github.com/multiformats/go-multistream v0.5.0/go.mod h1:n6tMZiwiP2wUsR8DgfDWw1dydlEqV3l6N3/GBsX6ILA= +github.com/multiformats/go-varint v0.0.5/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= +github.com/multiformats/go-varint v0.0.6/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= +github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8= +github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU= +github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= +github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo/v2 v2.15.0 h1:79HwNRBAZHOEwrczrgSOPy+eFTTlIGELKy5as+ClttY= +github.com/onsi/ginkgo/v2 v2.15.0/go.mod h1:HlxMHtYF57y6Dpf+mc5529KKmSq9h2FpCF+/ZkwUxKM= +github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.30.0 h1:hvMK7xYz4D3HapigLTeGdId/NcfQx1VHMJc60ew99+8= +github.com/onsi/gomega v1.30.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= +github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE7dzrbT927iTk= +github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= +github.com/opentracing/opentracing-go v1.2.0/go.mod 
h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= +github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= +github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0= +github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pion/datachannel v1.5.6 h1:1IxKJntfSlYkpUj8LlYRSWpYiTTC02nUrOE8T3DqGeg= +github.com/pion/datachannel v1.5.6/go.mod h1:1eKT6Q85pRnr2mHiWHxJwO50SfZRtWHTsNIVb/NfGW4= +github.com/pion/dtls/v2 v2.2.7/go.mod h1:8WiMkebSHFD0T+dIU+UeBaoV7kDhOW5oDCzZ7WZ/F9s= +github.com/pion/dtls/v2 v2.2.11 h1:9U/dpCYl1ySttROPWJgqWKEylUdT0fXp/xst6JwY5Ks= +github.com/pion/dtls/v2 v2.2.11/go.mod h1:d9SYc9fch0CqK90mRk1dC7AkzzpwJj6u2GU3u+9pqFE= +github.com/pion/ice/v2 v2.3.24 h1:RYgzhH/u5lH0XO+ABatVKCtRd+4U1GEaCXSMjNr13tI= +github.com/pion/ice/v2 v2.3.24/go.mod h1:KXJJcZK7E8WzrBEYnV4UtqEZsGeWfHxsNqhVcVvgjxw= +github.com/pion/interceptor v0.1.29 h1:39fsnlP1U8gw2JzOFWdfCU82vHvhW9o0rZnZF56wF+M= +github.com/pion/interceptor v0.1.29/go.mod h1:ri+LGNjRUc5xUNtDEPzfdkmSqISixVTBF/z/Zms/6T4= +github.com/pion/logging v0.2.2 h1:M9+AIj/+pxNsDfAT64+MAVgJO0rsyLnoJKCqf//DoeY= +github.com/pion/logging v0.2.2/go.mod h1:k0/tDVsRCX2Mb2ZEmTqNa7CWsQPc+YYCB7Q+5pahoms= +github.com/pion/mdns v0.0.12 h1:CiMYlY+O0azojWDmxdNr7ADGrnZ+V6Ilfner+6mSVK8= +github.com/pion/mdns v0.0.12/go.mod h1:VExJjv8to/6Wqm1FXK+Ii/Z9tsVk/F5sD/N70cnYFbk= +github.com/pion/randutil v0.1.0 h1:CFG1UdESneORglEsnimhUjf33Rwjubwj6xfiOXBa3mA= +github.com/pion/randutil v0.1.0/go.mod h1:XcJrSMMbbMRhASFVOlj/5hQial/Y8oH/HVo7TBZq+j8= +github.com/pion/rtcp v1.2.12/go.mod h1:sn6qjxvnwyAkkPzPULIbVqSKI5Dv54Rv7VG0kNxh9L4= +github.com/pion/rtcp v1.2.14 h1:KCkGV3vJ+4DAJmvP0vaQShsb0xkRfWkO540Gy102KyE= +github.com/pion/rtcp v1.2.14/go.mod h1:sn6qjxvnwyAkkPzPULIbVqSKI5Dv54Rv7VG0kNxh9L4= +github.com/pion/rtp v1.8.3/go.mod h1:pBGHaFt/yW7bf1jjWAoUjpSNoDnw98KTMg+jWWvziqU= +github.com/pion/rtp v1.8.6 h1:MTmn/b0aWWsAzux2AmP8WGllusBVw4NPYPVFFd7jUPw= +github.com/pion/rtp v1.8.6/go.mod h1:pBGHaFt/yW7bf1jjWAoUjpSNoDnw98KTMg+jWWvziqU= +github.com/pion/sctp v1.8.13/go.mod h1:YKSgO/bO/6aOMP9LCie1DuD7m+GamiK2yIiPM6vH+GA= +github.com/pion/sctp v1.8.16 h1:PKrMs+o9EMLRvFfXq59WFsC+V8mN1wnKzqrv+3D/gYY= +github.com/pion/sctp v1.8.16/go.mod h1:P6PbDVA++OJMrVNg2AL3XtYHV4uD6dvfyOovCgMs0PE= +github.com/pion/sdp/v3 v3.0.9 h1:pX++dCHoHUwq43kuwf3PyJfHlwIj4hXA7Vrifiq0IJY= +github.com/pion/sdp/v3 v3.0.9/go.mod h1:B5xmvENq5IXJimIO4zfp6LAe1fD9N+kFv+V/1lOdz8M= +github.com/pion/srtp/v2 v2.0.18 h1:vKpAXfawO9RtTRKZJbG4y0v1b11NZxQnxRl85kGuUlo= +github.com/pion/srtp/v2 v2.0.18/go.mod h1:0KJQjA99A6/a0DOVTu1PhDSw0CXF2jTkqOoMg3ODqdA= +github.com/pion/stun v0.6.1 h1:8lp6YejULeHBF8NmV8e2787BogQhduZugh5PdhDyyN4= +github.com/pion/stun v0.6.1/go.mod h1:/hO7APkX4hZKu/D0f2lHzNyvdkTGtIy3NDmLR7kSz/8= +github.com/pion/transport/v2 v2.2.1/go.mod h1:cXXWavvCnFF6McHTft3DWS9iic2Mftcz1Aq29pGcU5g= +github.com/pion/transport/v2 v2.2.2/go.mod h1:OJg3ojoBJopjEeECq2yJdXH9YVrUJ1uQ++NjXLOUorc= +github.com/pion/transport/v2 v2.2.3/go.mod h1:q2U/tf9FEfnSBGSW6w5Qp5PFWRLRj3NjLhCCgpRK4p0= +github.com/pion/transport/v2 v2.2.4/go.mod h1:q2U/tf9FEfnSBGSW6w5Qp5PFWRLRj3NjLhCCgpRK4p0= +github.com/pion/transport/v2 v2.2.5 h1:iyi25i/21gQck4hfRhomF6SktmUQjRsRW4WJdhfc3Kc= +github.com/pion/transport/v2 v2.2.5/go.mod h1:q2U/tf9FEfnSBGSW6w5Qp5PFWRLRj3NjLhCCgpRK4p0= +github.com/pion/transport/v3 
v3.0.1/go.mod h1:UY7kiITrlMv7/IKgd5eTUcaahZx5oUN3l9SzK5f5xE0= +github.com/pion/transport/v3 v3.0.2 h1:r+40RJR25S9w3jbA6/5uEPTzcdn7ncyU44RWCbHkLg4= +github.com/pion/transport/v3 v3.0.2/go.mod h1:nIToODoOlb5If2jF9y2Igfx3PFYWfuXi37m0IlWa/D0= +github.com/pion/turn/v2 v2.1.3/go.mod h1:huEpByKKHix2/b9kmTAM3YoX6MKP+/D//0ClgUYR2fY= +github.com/pion/turn/v2 v2.1.6 h1:Xr2niVsiPTB0FPtt+yAWKFUkU1eotQbGgpTIld4x1Gc= +github.com/pion/turn/v2 v2.1.6/go.mod h1:huEpByKKHix2/b9kmTAM3YoX6MKP+/D//0ClgUYR2fY= +github.com/pion/webrtc/v3 v3.2.40 h1:Wtfi6AZMQg+624cvCXUuSmrKWepSB7zfgYDOYqsSOVU= +github.com/pion/webrtc/v3 v3.2.40/go.mod h1:M1RAe3TNTD1tzyvqHrbVODfwdPGSXOUo/OgpoGGJqFY= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/polydawn/refmt v0.89.0 h1:ADJTApkvkeBZsN0tBTx8QjpD9JkmxbKp0cxfr9qszm4= +github.com/polydawn/refmt v0.89.0/go.mod h1:/zvteZs/GwLtCgZ4BL6CBsk9IKIlexP43ObX9AxTqTw= +github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= +github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= +github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= +github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.48.0 h1:QO8U2CdOzSn1BBsmXJXduaaW+dY/5QLjfB8svtSzKKE= +github.com/prometheus/common v0.48.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc= +github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= +github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= +github.com/quic-go/qpack v0.4.0 h1:Cr9BXA1sQS2SmDUWjSofMPNKmvF6IiIfDRmgU0w1ZCo= +github.com/quic-go/qpack v0.4.0/go.mod h1:UZVnYIfi5GRk+zI9UMaCPsmZ2xKJP7XBUvVyT1Knj9A= +github.com/quic-go/quic-go v0.44.0 h1:So5wOr7jyO4vzL2sd8/pD9Kesciv91zSk8BoFngItQ0= +github.com/quic-go/quic-go v0.44.0/go.mod h1:z4cx/9Ny9UtGITIPzmPTXh1ULfOyWh4qGQlpnPcWmek= +github.com/quic-go/webtransport-go v0.8.0 h1:HxSrwun11U+LlmwpgM1kEqIqH90IT4N8auv/cD7QFJg= +github.com/quic-go/webtransport-go v0.8.0/go.mod h1:N99tjprW432Ut5ONql/aUhSLT0YVSlwHohQsuac9WaM= +github.com/raulk/go-watchdog v1.3.0 h1:oUmdlHxdkXRJlwfG0O9omj8ukerm8MEQavSiDTEtBsk= +github.com/raulk/go-watchdog v1.3.0/go.mod h1:fIvOnLbF0b0ZwkB9YU4mOW9Did//4vPZtDqv66NfsMU= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/rogpeppe/go-internal v1.10.0/go.mod 
h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= +github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY= +github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM= +github.com/shurcooL/github_flavored_markdown v0.0.0-20181002035957-2122de532470/go.mod h1:2dOwnU2uBioM+SGy2aZoq1f/Sd1l9OkAeAUvjSyvgU0= +github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= +github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= +github.com/shurcooL/gofontwoff v0.0.0-20180329035133-29b52fc0a18d/go.mod h1:05UtEgK5zq39gLST6uB0cf3NEHjETfB4Fgr3Gx5R9Vw= +github.com/shurcooL/gopherjslib v0.0.0-20160914041154-feb6d3990c2c/go.mod h1:8d3azKNyqcHP1GaQE/c6dDgjkgSx2BZ4IoEi4F1reUI= +github.com/shurcooL/highlight_diff v0.0.0-20170515013008-09bb4053de1b/go.mod h1:ZpfEhSmds4ytuByIcDnOLkTHGUI6KNqRNPDLHDk+mUU= +github.com/shurcooL/highlight_go v0.0.0-20181028180052-98c3abbbae20/go.mod h1:UDKB5a1T23gOMUJrI+uSuH0VRDStOiUVSjBTRDVBVag= +github.com/shurcooL/home v0.0.0-20181020052607-80b7ffcb30f9/go.mod h1:+rgNQw2P9ARFAs37qieuu7ohDNQ3gds9msbT2yn85sg= +github.com/shurcooL/htmlg v0.0.0-20170918183704-d01228ac9e50/go.mod h1:zPn1wHpTIePGnXSHpsVPWEktKXHr6+SS6x/IKRb7cpw= +github.com/shurcooL/httperror v0.0.0-20170206035902-86b7830d14cc/go.mod h1:aYMfkZ6DWSJPJ6c4Wwz3QtW22G7mf/PEgaB9k/ik5+Y= +github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= +github.com/shurcooL/httpgzip v0.0.0-20180522190206-b1c53ac65af9/go.mod h1:919LwcH0M7/W4fcZ0/jy0qGght1GIhqyS/EgWGH2j5Q= +github.com/shurcooL/issues v0.0.0-20181008053335-6292fdc1e191/go.mod h1:e2qWDig5bLteJ4fwvDAc2NHzqFEthkqn7aOZAOpj+PQ= +github.com/shurcooL/issuesapp v0.0.0-20180602232740-048589ce2241/go.mod h1:NPpHK2TI7iSaM0buivtFUc9offApnI0Alt/K8hcHy0I= +github.com/shurcooL/notifications v0.0.0-20181007000457-627ab5aea122/go.mod h1:b5uSkrEVM1jQUspwbixRBhaIjIzL2xazXp6kntxYle0= +github.com/shurcooL/octicon v0.0.0-20181028054416-fa4f57f9efb2/go.mod h1:eWdoE5JD4R5UVWDucdOPg1g2fqQRq78IQa9zlOV1vpQ= +github.com/shurcooL/reactions v0.0.0-20181006231557-f2e0b4ca5b82/go.mod h1:TCR1lToEk4d2s07G3XGfz2QrgHXg4RJBvjrOozvoWfk= +github.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4= +github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/smartystreets/assertions v1.2.0 h1:42S6lae5dvLc7BrLu/0ugRtcFVjoJNMC/N3yZFZkDFs= +github.com/smartystreets/assertions v1.2.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo= +github.com/smartystreets/goconvey v1.7.2 
h1:9RBaZCeXEQ3UselpuwUQHltGVXvdwm6cv1hgR6gDIPg= +github.com/smartystreets/goconvey v1.7.2/go.mod h1:Vw0tHAZW6lzCRk3xgdin6fKYcG+G3Pg9vgXWeJpQFMM= +github.com/smola/gocompat v0.2.0/go.mod h1:1B0MlxbmoZNo3h8guHp8HztB3BSYR5itql9qtVc0ypY= +github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE= +github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA= +github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572/go.mod h1:w0SWMsp6j9O/dk4/ZpIhL+3CkG8ofA2vuv7k+ltqUMc= +github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= +github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= +github.com/src-d/envconfig v1.0.0/go.mod h1:Q9YQZ7BKITldTBnoxsE5gOeB5y66RyPXeue/R4aaNBc= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ= +github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= +github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/urfave/cli v1.22.10/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU= +github.com/viant/toolbox 
v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM= +github.com/wangjia184/sortedset v0.0.0-20160527075905-f5d03557ba30/go.mod h1:YkocrP2K2tcw938x9gCOmT5G5eCD6jsTz0SZuyAqwIE= +github.com/warpfork/go-wish v0.0.0-20220906213052-39a1cc7a02d0 h1:GDDkbFiaK8jsSDJfjId/PEGEShv6ugrt4kYsC5UIDaQ= +github.com/warpfork/go-wish v0.0.0-20220906213052-39a1cc7a02d0/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= +github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 h1:EKhdznlJHPMoKr0XTrX+IlJs1LH3lyx2nfr1dOlZ79k= +github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1/go.mod h1:8UvriyWtv5Q5EOgjHaSseUEdkQfvwFv1I/In/O2M9gc= +github.com/whyrusleeping/go-logging v0.0.0-20170515211332-0457bb6b88fc/go.mod h1:bopw91TMyo8J3tvftk8xmU2kPmlrt4nScJQZU2hE5EM= +github.com/x-cray/logrus-prefixed-formatter v0.5.2/go.mod h1:2duySbKsL6M18s5GU7VPsoEPHyzalCE06qoARUCeBBE= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= +go.opencensus.io v0.22.1/go.mod h1:Ap50jQcDJrx6rB6VgeeFPtuPIf3wMRvRfrfYDO6+BmA= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/otel v1.16.0 h1:Z7GVAX/UkAXPKsy94IU+i6thsQS4nb7LviLpnaNeW8s= +go.opentelemetry.io/otel v1.16.0/go.mod h1:vl0h9NUa1D5s1nv3A5vZOYWn8av4K8Ml6JDeHrT/bx4= +go.opentelemetry.io/otel/metric v1.16.0 h1:RbrpwVG1Hfv85LgnZ7+txXioPDoh6EdbZHo26Q3hqOo= +go.opentelemetry.io/otel/metric v1.16.0/go.mod h1:QE47cpOmkwipPiefDwo2wDzwJrlfxxNYodqc4xnGCo4= +go.opentelemetry.io/otel/trace v1.16.0 h1:8JRpaObFoW0pxuVPapkgH8UhHQj+bJW8jJsCZEu5MQs= +go.opentelemetry.io/otel/trace v1.16.0/go.mod h1:Yt9vYq1SdNz3xdjZZK7wcXv1qv2pwLkqr2QVwea0ef0= +go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/dig v1.17.1 h1:Tga8Lz8PcYNsWsyHMZ1Vm0OQOUaJNDyvPImgbAu9YSc= +go.uber.org/dig v1.17.1/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE= +go.uber.org/fx v1.21.1 h1:RqBh3cYdzZS0uqwVeEjOX2p73dddLpym315myy/Bpb0= +go.uber.org/fx v1.21.1/go.mod h1:HT2M7d7RHo+ebKGh9NRcrsrHHfpZ60nW3QRubMRfv48= +go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/mock v0.4.0 h1:VcM4ZOtdbR4f6VXfiOpwpVJDL6lCReaZ6mw31wqh7KU= +go.uber.org/mock v0.4.0/go.mod h1:a6FSlNadKUHUa9IP5Vyt1zh4fC7uAwxMutEAscFbkZc= +go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= 
+go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ= +go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= +golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw= +golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200602180216-279210d13fed/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= +golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE= +golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= +golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= +golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= +golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= +golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= +golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI= +golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20230725012225-302865e7556b/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc= +golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 h1:vr/HnozRka3pE4EsMEg1lgkXJkTFJCVUX+S/ZT6wYzM= +golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842/go.mod h1:XtvwrStGgqGPLc4cjQfWqZHG1YFdYs6swckp8vpsjnc= +golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod 
h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.11.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190227160552-c95aed5357e7/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190313220215-9f648a60d977/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net 
v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.13.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= +golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= +golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= +golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= +golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= +golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac= +golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys 
v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190219092855-153ac476189d/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190316082340-a2f829d7f35f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= +golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.5.0/go.mod 
h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o= +golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= +golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= +golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= +golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk= +golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= +golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181130052023-1c3d964395ce/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod 
h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/tools v0.21.0 h1:qc0xYgIbsSDt9EyWz05J5wfa7LOVW0YTLOXrqdLAWIw= +golang.org/x/tools v0.21.0/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gonum.org/v1/gonum v0.13.0 h1:a0T3bh+7fhRyqeNbiC3qVHYmkiQgit3wnNan/2c0HMM= +gonum.org/v1/gonum v0.13.0/go.mod h1:/WPYRckkfWrhWefxyYTfrTtQR0KH4iyHNuzxqXAKyAU= +google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg= +google.golang.org/genproto v0.0.0-20190306203927-b5d61aea6440/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= +google.golang.org/grpc v1.17.0/go.mod 
h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg= +google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/src-d/go-cli.v0 v0.0.0-20181105080154-d492247bbc0d/go.mod h1:z+K8VcOYVYcSwSjGebuDL6176A1XskgbtNl64NSg+n8= +gopkg.in/src-d/go-log.v1 v1.0.1/go.mod h1:GN34hKP0g305ysm2/hctJ0Y8nWP3zxXXJ8GFabTyABE= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 
h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= +honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +lukechampine.com/blake3 v1.1.6/go.mod h1:tkKEOtDkNtklkXtLNEOGNq5tcV90tJiA1vAA12R78LA= +lukechampine.com/blake3 v1.2.1 h1:YuqqRuaqsGV71BV/nm9xlI0MKUv4QC54jQnBChWbGnI= +lukechampine.com/blake3 v1.2.1/go.mod h1:0OFRp7fBtAylGVCO40o87sbupkyIGgbpv1+M1k1LM6k= +sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck= +sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0= diff --git a/go-libp2p-kad-dht/handlers.go b/go-libp2p-kad-dht/handlers.go new file mode 100644 index 0000000..c2d4967 --- /dev/null +++ b/go-libp2p-kad-dht/handlers.go @@ -0,0 +1,378 @@ +package dht + +import ( + "bytes" + "context" + "errors" + "fmt" + "time" + + "github.com/libp2p/go-libp2p/core/peer" + pstore "github.com/libp2p/go-libp2p/p2p/host/peerstore" + + "github.com/gogo/protobuf/proto" + u "github.com/ipfs/boxo/util" + ds "github.com/ipfs/go-datastore" + "github.com/libp2p/go-libp2p-kad-dht/internal" + pb "github.com/libp2p/go-libp2p-kad-dht/pb" + recpb "github.com/libp2p/go-libp2p-record/pb" + "github.com/multiformats/go-base32" +) + +// dhthandler specifies the signature of functions that handle DHT messages. +type dhtHandler func(context.Context, peer.ID, *pb.Message) (*pb.Message, error) + +func (dht *IpfsDHT) handlerForMsgType(t pb.Message_MessageType) dhtHandler { + switch t { + case pb.Message_FIND_NODE: + return dht.handleFindPeer + case pb.Message_PING: + return dht.handlePing + } + + if dht.enableValues { + switch t { + case pb.Message_GET_VALUE: + return dht.handleGetValue + case pb.Message_PUT_VALUE: + return dht.handlePutValue + } + } + + if dht.enableProviders { + switch t { + case pb.Message_ADD_PROVIDER: + return dht.handleAddProvider + case pb.Message_GET_PROVIDERS: + return dht.handleGetProviders + } + } + + return nil +} + +func (dht *IpfsDHT) handleGetValue(ctx context.Context, p peer.ID, pmes *pb.Message) (_ *pb.Message, err error) { + // first, is there even a key? + k := pmes.GetKey() + if len(k) == 0 { + return nil, errors.New("handleGetValue but no key was provided") + } + + // setup response + resp := pb.NewMessage(pmes.GetType(), pmes.GetKey(), pmes.GetClusterLevel()) + + rec, err := dht.checkLocalDatastore(ctx, k) + if err != nil { + return nil, err + } + resp.Record = rec + + // Find closest peer on given cluster to desired key and reply with that info + closer := dht.betterPeersToQuery(pmes, p, dht.bucketSize) + if len(closer) > 0 { + // TODO: pstore.PeerInfos should move to core (=> peerstore.AddrInfos). 
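+		// pstore.PeerInfos batch-resolves each peer ID against the local
+		// peerstore; it is roughly equivalent to this illustrative sketch:
+		//
+		//	infos := make([]peer.AddrInfo, 0, len(closer))
+		//	for _, id := range closer {
+		//		infos = append(infos, dht.peerstore.PeerInfo(id))
+		//	}
+		//
+		// Entries may therefore come back with an empty Addrs list; they are
+		// only logged below, not filtered out.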
+		closerinfos := pstore.PeerInfos(dht.peerstore, closer)
+		for _, pi := range closerinfos {
+			logger.Debugf("handleGetValue returning closer peer: '%s'", pi.ID)
+			if len(pi.Addrs) < 1 {
+				logger.Warnw("no addresses on peer being sent",
+					"local", dht.self,
+					"to", p,
+					"sending", pi.ID,
+				)
+			}
+		}
+
+		resp.CloserPeers = pb.PeerInfosToPBPeers(dht.host.Network(), closerinfos)
+	}
+
+	return resp, nil
+}
+
+func (dht *IpfsDHT) checkLocalDatastore(ctx context.Context, k []byte) (*recpb.Record, error) {
+	logger.Debugf("%s handleGetValue looking into ds", dht.self)
+	dskey := convertToDsKey(k)
+	buf, err := dht.datastore.Get(ctx, dskey)
+	logger.Debugf("%s handleGetValue looking into ds GOT %v", dht.self, buf)
+
+	if err == ds.ErrNotFound {
+		return nil, nil
+	}
+
+	// if we got an unexpected error, bail.
+	if err != nil {
+		return nil, err
+	}
+
+	// if we have the value, send it back
+	logger.Debugf("%s handleGetValue success!", dht.self)
+
+	rec := new(recpb.Record)
+	err = proto.Unmarshal(buf, rec)
+	if err != nil {
+		logger.Debug("failed to unmarshal DHT record from datastore")
+		return nil, err
+	}
+
+	var recordIsBad bool
+	recvtime, err := u.ParseRFC3339(rec.GetTimeReceived())
+	if err != nil {
+		logger.Info("either no receive time set on record, or it was invalid: ", err)
+		recordIsBad = true
+	}
+
+	if time.Since(recvtime) > dht.maxRecordAge {
+		logger.Debug("old record found, tossing.")
+		recordIsBad = true
+	}
+
+	// NOTE: We do not verify the record here beyond checking these timestamps.
+	// we put the burden of checking the records on the requester as checking a record
+	// may be computationally expensive
+
+	if recordIsBad {
+		err := dht.datastore.Delete(ctx, dskey)
+		if err != nil {
+			logger.Error("Failed to delete bad record from datastore: ", err)
+		}
+
+		return nil, nil // can treat this as not having the record at all
+	}
+
+	return rec, nil
+}
+
+// Cleans the record (to avoid storing arbitrary data).
+func cleanRecord(rec *recpb.Record) {
+	rec.TimeReceived = ""
+}
+
+// Store a value in this peer's local storage
+func (dht *IpfsDHT) handlePutValue(ctx context.Context, p peer.ID, pmes *pb.Message) (_ *pb.Message, err error) {
+	if len(pmes.GetKey()) == 0 {
+		return nil, errors.New("handlePutValue but no key was provided")
+	}
+
+	rec := pmes.GetRecord()
+	if rec == nil {
+		logger.Debugw("got nil record from", "from", p)
+		return nil, errors.New("nil record")
+	}
+
+	if !bytes.Equal(pmes.GetKey(), rec.GetKey()) {
+		return nil, errors.New("put key doesn't match record key")
+	}
+
+	cleanRecord(rec)
+
+	// Make sure the record is valid (not expired, valid signature etc)
+	if err = dht.Validator.Validate(string(rec.GetKey()), rec.GetValue()); err != nil {
+		logger.Infow("bad dht record in PUT", "from", p, "key", internal.LoggableRecordKeyBytes(rec.GetKey()), "error", err)
+		return nil, err
+	}
+
+	dskey := convertToDsKey(rec.GetKey())
+
+	// fetch the striped lock for this key
+	var indexForLock byte
+	if len(rec.GetKey()) == 0 {
+		indexForLock = 0
+	} else {
+		indexForLock = rec.GetKey()[len(rec.GetKey())-1]
+	}
+	lk := &dht.stripedPutLocks[indexForLock]
+	lk.Lock()
+	defer lk.Unlock()
+
+	// Make sure the new record is "better" than the record we have locally.
+	// This prevents a record with for example a lower sequence number from
+	// overwriting a record with a higher sequence number.
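+	// Validator.Select returns the index of the preferred record. The
+	// incoming record is placed at index 0 below, so any other result means
+	// the record we already have wins (e.g. the default IPNS validator
+	// prefers the record with the higher sequence number).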
+ existing, err := dht.getRecordFromDatastore(ctx, dskey) + if err != nil { + return nil, err + } + + if existing != nil { + recs := [][]byte{rec.GetValue(), existing.GetValue()} + i, err := dht.Validator.Select(string(rec.GetKey()), recs) + if err != nil { + logger.Warnw("dht record passed validation but failed select", "from", p, "key", internal.LoggableRecordKeyBytes(rec.GetKey()), "error", err) + return nil, err + } + if i != 0 { + logger.Infow("DHT record in PUT older than existing record (ignoring)", "peer", p, "key", internal.LoggableRecordKeyBytes(rec.GetKey())) + return nil, errors.New("old record") + } + } + + // record the time we receive every record + rec.TimeReceived = u.FormatRFC3339(time.Now()) + + data, err := proto.Marshal(rec) + if err != nil { + return nil, err + } + + err = dht.datastore.Put(ctx, dskey, data) + return pmes, err +} + +// returns nil, nil when either nothing is found or the value found doesn't properly validate. +// returns nil, some_error when there's a *datastore* error (i.e., something goes very wrong) +func (dht *IpfsDHT) getRecordFromDatastore(ctx context.Context, dskey ds.Key) (*recpb.Record, error) { + buf, err := dht.datastore.Get(ctx, dskey) + if err == ds.ErrNotFound { + return nil, nil + } + if err != nil { + logger.Errorw("error retrieving record from datastore", "key", dskey, "error", err) + return nil, err + } + rec := new(recpb.Record) + err = proto.Unmarshal(buf, rec) + if err != nil { + // Bad data in datastore, log it but don't return an error, we'll just overwrite it + logger.Errorw("failed to unmarshal record from datastore", "key", dskey, "error", err) + return nil, nil + } + + err = dht.Validator.Validate(string(rec.GetKey()), rec.GetValue()) + if err != nil { + // Invalid record in datastore, probably expired but don't return an error, + // we'll just overwrite it + logger.Debugw("local record verify failed", "key", rec.GetKey(), "error", err) + return nil, nil + } + + return rec, nil +} + +func (dht *IpfsDHT) handlePing(_ context.Context, p peer.ID, pmes *pb.Message) (*pb.Message, error) { + logger.Debugf("%s Responding to ping from %s!\n", dht.self, p) + return pmes, nil +} + +func (dht *IpfsDHT) handleFindPeer(ctx context.Context, from peer.ID, pmes *pb.Message) (_ *pb.Message, _err error) { + resp := pb.NewMessage(pmes.GetType(), nil, pmes.GetClusterLevel()) + var closest []peer.ID + + if len(pmes.GetKey()) == 0 { + return nil, fmt.Errorf("handleFindPeer with empty key") + } + + // if looking for self... special case where we send it on CloserPeers. + targetPid := peer.ID(pmes.GetKey()) + closest = dht.betterPeersToQuery(pmes, from, dht.bucketSize) + + // Never tell a peer about itself. + if targetPid != from { + // Add the target peer to the set of closest peers if + // not already present in our routing table. + // + // Later, when we lookup known addresses for all peers + // in this set, we'll prune this peer if we don't + // _actually_ know where it is. + found := false + for _, p := range closest { + if targetPid == p { + found = true + break + } + } + if !found { + closest = append(closest, targetPid) + } + } + + if closest == nil { + return resp, nil + } + + // TODO: pstore.PeerInfos should move to core (=> peerstore.AddrInfos). + closestinfos := pstore.PeerInfos(dht.peerstore, closest) + // possibly an over-allocation but this array is temporary anyways. 
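+	// Keep only peers whose addresses we actually know: this is the pruning
+	// step mentioned above, since a CloserPeers entry without addresses is
+	// undialable for the requester.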
+	withAddresses := make([]peer.AddrInfo, 0, len(closestinfos))
+	for _, pi := range closestinfos {
+		if len(pi.Addrs) > 0 {
+			withAddresses = append(withAddresses, pi)
+		}
+	}
+
+	resp.CloserPeers = pb.PeerInfosToPBPeers(dht.host.Network(), withAddresses)
+	return resp, nil
+}
+
+func (dht *IpfsDHT) handleGetProviders(ctx context.Context, p peer.ID, pmes *pb.Message) (_ *pb.Message, _err error) {
+	key := pmes.GetKey()
+	if len(key) > 80 {
+		return nil, fmt.Errorf("handleGetProviders key size too large")
+	} else if len(key) == 0 {
+		return nil, fmt.Errorf("handleGetProviders key is empty")
+	}
+
+	resp := pb.NewMessage(pmes.GetType(), pmes.GetKey(), pmes.GetClusterLevel())
+
+	// setup providers
+	providers, err := dht.providerStore.GetProviders(ctx, key)
+	if err != nil {
+		return nil, err
+	}
+
+	filtered := make([]peer.AddrInfo, len(providers))
+	for i, provider := range providers {
+		filtered[i] = peer.AddrInfo{
+			ID:    provider.ID,
+			Addrs: dht.filterAddrs(provider.Addrs),
+		}
+	}
+
+	resp.ProviderPeers = pb.PeerInfosToPBPeers(dht.host.Network(), filtered)
+
+	// Also send closer peers.
+	closer := dht.betterPeersToQuery(pmes, p, dht.bucketSize)
+	if closer != nil {
+		// TODO: pstore.PeerInfos should move to core (=> peerstore.AddrInfos).
+		infos := pstore.PeerInfos(dht.peerstore, closer)
+		resp.CloserPeers = pb.PeerInfosToPBPeers(dht.host.Network(), infos)
+	}
+
+	return resp, nil
+}
+
+func (dht *IpfsDHT) handleAddProvider(ctx context.Context, p peer.ID, pmes *pb.Message) (_ *pb.Message, _err error) {
+	key := pmes.GetKey()
+	if len(key) > 80 {
+		return nil, fmt.Errorf("handleAddProvider key size too large")
+	} else if len(key) == 0 {
+		return nil, fmt.Errorf("handleAddProvider key is empty")
+	}
+
+	logger.Debugw("adding provider", "from", p, "key", internal.LoggableProviderRecordBytes(key))
+
+	// add provider should use the address given in the message
+	pinfos := pb.PBPeersToPeerInfos(pmes.GetProviderPeers())
+	for _, pi := range pinfos {
+		if pi.ID != p {
+			// we should ignore this provider record! not from originator.
+			// (we should sign them and check signature later...)
+			logger.Debugw("received provider from wrong peer", "from", p, "peer", pi.ID)
+			continue
+		}
+
+		if len(pi.Addrs) < 1 {
+			logger.Debugw("no valid addresses for provider", "from", p)
+			continue
+		}
+
+		// We run the addrs filter after checking for the length,
+		// this allows transient nodes with varying /p2p-circuit addresses to still have their announcement go through.
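+		// filterAddrs applies the DHT's configured address filter (the
+		// AddressFilter config option, if any); note that AddProvider is
+		// still called even if the filtered address list comes back empty.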
+ addrs := dht.filterAddrs(pi.Addrs) + dht.providerStore.AddProvider(ctx, key, peer.AddrInfo{ID: pi.ID, Addrs: addrs}) + } + + return nil, nil +} + +func convertToDsKey(s []byte) ds.Key { + return ds.NewKey(base32.RawStdEncoding.EncodeToString(s)) +} diff --git a/go-libp2p-kad-dht/handlers_test.go b/go-libp2p-kad-dht/handlers_test.go new file mode 100644 index 0000000..35959df --- /dev/null +++ b/go-libp2p-kad-dht/handlers_test.go @@ -0,0 +1,141 @@ +package dht + +import ( + "bytes" + "context" + "fmt" + "math/rand" + "testing" + "time" + + proto "github.com/gogo/protobuf/proto" + "github.com/libp2p/go-libp2p" + pb "github.com/libp2p/go-libp2p-kad-dht/pb" + recpb "github.com/libp2p/go-libp2p-record/pb" + crypto "github.com/libp2p/go-libp2p/core/crypto" + peer "github.com/libp2p/go-libp2p/core/peer" + ma "github.com/multiformats/go-multiaddr" +) + +func TestCleanRecordSigned(t *testing.T) { + actual := new(recpb.Record) + actual.TimeReceived = "time" + actual.Value = []byte("value") + actual.Key = []byte("key") + + cleanRecord(actual) + actualBytes, err := proto.Marshal(actual) + if err != nil { + t.Fatal(err) + } + + expected := new(recpb.Record) + expected.Value = []byte("value") + expected.Key = []byte("key") + expectedBytes, err := proto.Marshal(expected) + if err != nil { + t.Fatal(err) + } + + if !bytes.Equal(actualBytes, expectedBytes) { + t.Error("failed to clean record") + } +} + +func TestCleanRecord(t *testing.T) { + actual := new(recpb.Record) + actual.TimeReceived = "time" + actual.Key = []byte("key") + actual.Value = []byte("value") + + cleanRecord(actual) + actualBytes, err := proto.Marshal(actual) + if err != nil { + t.Fatal(err) + } + + expected := new(recpb.Record) + expected.Key = []byte("key") + expected.Value = []byte("value") + expectedBytes, err := proto.Marshal(expected) + if err != nil { + t.Fatal(err) + } + + if !bytes.Equal(actualBytes, expectedBytes) { + t.Error("failed to clean record") + } +} + +func TestBadMessage(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + dht := setupDHT(ctx, t, false) + + for _, typ := range []pb.Message_MessageType{ + pb.Message_PUT_VALUE, pb.Message_GET_VALUE, pb.Message_ADD_PROVIDER, + pb.Message_GET_PROVIDERS, pb.Message_FIND_NODE, + } { + msg := &pb.Message{ + Type: typ, + // explicitly avoid the key. 
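+			// (every handler rejects a message whose key is missing, which
+			// is exactly what this loop exercises)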
+		}
+		_, err := dht.handlerForMsgType(typ)(ctx, dht.Host().ID(), msg)
+		if err == nil {
+			t.Fatalf("expected processing message to fail for type %s", typ)
+		}
+	}
+}
+
+func BenchmarkHandleFindPeer(b *testing.B) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	h, err := libp2p.New()
+	if err != nil {
+		b.Fatal(err)
+	}
+	defer h.Close()
+
+	d, err := New(ctx, h)
+	if err != nil {
+		b.Fatal(err)
+	}
+
+	rng := rand.New(rand.NewSource(150))
+	var peers []peer.ID
+	for i := 0; i < 1000; i++ {
+		_, pubk, _ := crypto.GenerateEd25519Key(rng)
+		id, err := peer.IDFromPublicKey(pubk)
+		if err != nil {
+			panic(err)
+		}
+
+		d.peerFound(id)
+
+		peers = append(peers, id)
+		a, err := ma.NewMultiaddr(fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", 2000+i))
+		if err != nil {
+			panic(err)
+		}
+
+		d.host.Peerstore().AddAddr(id, a, time.Minute*50)
+	}
+
+	var reqs []*pb.Message
+	for i := 0; i < b.N; i++ {
+		reqs = append(reqs, &pb.Message{
+			Key: []byte("asdasdasd"),
+		})
+	}
+	b.ReportAllocs()
+	b.ResetTimer()
+
+	for i := 0; i < b.N; i++ {
+		_, err = d.handleFindPeer(ctx, peers[0], reqs[i])
+		if err != nil {
+			b.Error(err)
+		}
+	}
+
+}
diff --git a/go-libp2p-kad-dht/internal/config/config.go b/go-libp2p-kad-dht/internal/config/config.go
new file mode 100644
index 0000000..bacd2e4
--- /dev/null
+++ b/go-libp2p-kad-dht/internal/config/config.go
@@ -0,0 +1,172 @@
+package config
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/ipfs/boxo/ipns"
+	ds "github.com/ipfs/go-datastore"
+	dssync "github.com/ipfs/go-datastore/sync"
+	"github.com/libp2p/go-libp2p-kad-dht/providers"
+	"github.com/libp2p/go-libp2p-kbucket/peerdiversity"
+	record "github.com/libp2p/go-libp2p-record"
+	"github.com/libp2p/go-libp2p/core/host"
+	"github.com/libp2p/go-libp2p/core/peer"
+	"github.com/libp2p/go-libp2p/core/protocol"
+	ma "github.com/multiformats/go-multiaddr"
+)
+
+// DefaultPrefix is the application specific prefix attached to all DHT protocols by default.
+const DefaultPrefix protocol.ID = "/ipfs"
+
+const defaultBucketSize = 20
+
+// ModeOpt describes what mode the dht should operate in
+type ModeOpt int
+
+// QueryFilterFunc is a filter applied when considering peers to dial when querying
+type QueryFilterFunc func(dht interface{}, ai peer.AddrInfo) bool
+
+// RouteTableFilterFunc is a filter applied when considering connections to keep in
+// the local route table.
+type RouteTableFilterFunc func(dht interface{}, p peer.ID) bool
+
+// Config is a structure containing all the options that can be used when constructing a DHT.
+type Config struct {
+	Datastore              ds.Batching
+	Validator              record.Validator
+	ValidatorChanged       bool // if true implies that the validator has been changed and that Defaults should not be used
+	Mode                   ModeOpt
+	ProtocolPrefix         protocol.ID
+	V1ProtocolOverride     protocol.ID
+	BucketSize             int
+	Concurrency            int
+	Resiliency             int
+	MaxRecordAge           time.Duration
+	EnableProviders        bool
+	EnableValues           bool
+	ProviderStore          providers.ProviderStore
+	QueryPeerFilter        QueryFilterFunc
+	LookupCheckConcurrency int
+
+	RoutingTable struct {
+		RefreshQueryTimeout time.Duration
+		RefreshInterval     time.Duration
+		AutoRefresh         bool
+		LatencyTolerance    time.Duration
+		CheckInterval       time.Duration
+		PeerFilter          RouteTableFilterFunc
+		DiversityFilter     peerdiversity.PeerIPGroupFilter
+	}
+
+	BootstrapPeers func() []peer.AddrInfo
+	AddressFilter  func([]ma.Multiaddr) []ma.Multiaddr
+
+	// test specific Config options
+	DisableFixLowPeers          bool
+	TestAddressUpdateProcessing bool
+
+	EnableOptimisticProvide       bool
+	OptimisticProvideJobsPoolSize int
+}
+
+func EmptyQueryFilter(_ interface{}, ai peer.AddrInfo) bool { return true }
+func EmptyRTFilter(_ interface{}, p peer.ID) bool           { return true }
+
+// Apply applies the given options to this Config
+func (c *Config) Apply(opts ...Option) error {
+	for i, opt := range opts {
+		if err := opt(c); err != nil {
+			return fmt.Errorf("dht option %d failed: %s", i, err)
+		}
+	}
+	return nil
+}
+
+// ApplyFallbacks sets default values that could not be applied during config creation since they are dependent
+// on other configuration parameters (e.g. optA is by default 2x optB) and/or on the Host
+func (c *Config) ApplyFallbacks(h host.Host) error {
+	if !c.ValidatorChanged {
+		nsval, ok := c.Validator.(record.NamespacedValidator)
+		if ok {
+			if _, pkFound := nsval["pk"]; !pkFound {
+				nsval["pk"] = record.PublicKeyValidator{}
+			}
+			if _, ipnsFound := nsval["ipns"]; !ipnsFound {
+				nsval["ipns"] = ipns.Validator{KeyBook: h.Peerstore()}
+			}
+		} else {
+			return fmt.Errorf("the default Validator was changed without being marked as changed")
+		}
+	}
+	return nil
+}
+
+// Option DHT option type.
+type Option func(*Config) error
+
+// Defaults are the default DHT options. This option will be automatically
+// prepended to any options you pass to the DHT constructor.
+var Defaults = func(o *Config) error {
+	o.Validator = record.NamespacedValidator{}
+	o.Datastore = dssync.MutexWrap(ds.NewMapDatastore())
+	o.ProtocolPrefix = DefaultPrefix
+	o.EnableProviders = true
+	o.EnableValues = true
+	o.QueryPeerFilter = EmptyQueryFilter
+
+	o.RoutingTable.LatencyTolerance = 10 * time.Second
+	o.RoutingTable.RefreshQueryTimeout = 10 * time.Second
+	o.RoutingTable.RefreshInterval = 10 * time.Minute
+	o.RoutingTable.AutoRefresh = true
+	o.RoutingTable.PeerFilter = EmptyRTFilter
+
+	o.MaxRecordAge = providers.ProvideValidity
+
+	o.BucketSize = defaultBucketSize
+	o.Concurrency = 10
+	o.Resiliency = 3
+	o.LookupCheckConcurrency = 256
+
+	// MAGIC: It makes sense to set it to a multiple of OptProvReturnRatio * BucketSize. We chose a multiple of 4.
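+	// With the defaults above: OptProvReturnRatio (0.75) * BucketSize (20)
+	// = 15 ADD_PROVIDER RPCs per optimistic provide, and 15 * 4 = 60.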
+ o.OptimisticProvideJobsPoolSize = 60 + + return nil +} + +func (c *Config) Validate() error { + if c.ProtocolPrefix != DefaultPrefix { + return nil + } + if c.BucketSize != defaultBucketSize { + return fmt.Errorf("protocol prefix %s must use bucket size %d", DefaultPrefix, defaultBucketSize) + } + if !c.EnableProviders { + return fmt.Errorf("protocol prefix %s must have providers enabled", DefaultPrefix) + } + if !c.EnableValues { + return fmt.Errorf("protocol prefix %s must have values enabled", DefaultPrefix) + } + + nsval, isNSVal := c.Validator.(record.NamespacedValidator) + if !isNSVal { + return fmt.Errorf("protocol prefix %s must use a namespaced Validator", DefaultPrefix) + } + + if len(nsval) != 2 { + return fmt.Errorf("protocol prefix %s must have exactly two namespaced validators - /pk and /ipns", DefaultPrefix) + } + + if pkVal, pkValFound := nsval["pk"]; !pkValFound { + return fmt.Errorf("protocol prefix %s must support the /pk namespaced Validator", DefaultPrefix) + } else if _, ok := pkVal.(record.PublicKeyValidator); !ok { + return fmt.Errorf("protocol prefix %s must use the record.PublicKeyValidator for the /pk namespace", DefaultPrefix) + } + + if ipnsVal, ipnsValFound := nsval["ipns"]; !ipnsValFound { + return fmt.Errorf("protocol prefix %s must support the /ipns namespaced Validator", DefaultPrefix) + } else if _, ok := ipnsVal.(ipns.Validator); !ok { + return fmt.Errorf("protocol prefix %s must use ipns.Validator for the /ipns namespace", DefaultPrefix) + } + return nil +} diff --git a/go-libp2p-kad-dht/internal/config/quorum.go b/go-libp2p-kad-dht/internal/config/quorum.go new file mode 100644 index 0000000..63e3992 --- /dev/null +++ b/go-libp2p-kad-dht/internal/config/quorum.go @@ -0,0 +1,16 @@ +package config + +import "github.com/libp2p/go-libp2p/core/routing" + +type QuorumOptionKey struct{} + +const defaultQuorum = 0 + +// GetQuorum defaults to 0 if no option is found +func GetQuorum(opts *routing.Options) int { + responsesNeeded, ok := opts.Other[QuorumOptionKey{}].(int) + if !ok { + responsesNeeded = defaultQuorum + } + return responsesNeeded +} diff --git a/go-libp2p-kad-dht/internal/ctx_mutex.go b/go-libp2p-kad-dht/internal/ctx_mutex.go new file mode 100644 index 0000000..4e923f6 --- /dev/null +++ b/go-libp2p-kad-dht/internal/ctx_mutex.go @@ -0,0 +1,28 @@ +package internal + +import ( + "context" +) + +type CtxMutex chan struct{} + +func NewCtxMutex() CtxMutex { + return make(CtxMutex, 1) +} + +func (m CtxMutex) Lock(ctx context.Context) error { + select { + case m <- struct{}{}: + return nil + case <-ctx.Done(): + return ctx.Err() + } +} + +func (m CtxMutex) Unlock() { + select { + case <-m: + default: + panic("not locked") + } +} diff --git a/go-libp2p-kad-dht/internal/errors.go b/go-libp2p-kad-dht/internal/errors.go new file mode 100644 index 0000000..4f8453c --- /dev/null +++ b/go-libp2p-kad-dht/internal/errors.go @@ -0,0 +1,5 @@ +package internal + +import "errors" + +var ErrIncorrectRecord = errors.New("received incorrect record") diff --git a/go-libp2p-kad-dht/internal/logging.go b/go-libp2p-kad-dht/internal/logging.go new file mode 100644 index 0000000..981f728 --- /dev/null +++ b/go-libp2p-kad-dht/internal/logging.go @@ -0,0 +1,92 @@ +package internal + +import ( + "fmt" + "strings" + + "github.com/ipfs/go-cid" + "github.com/multiformats/go-multibase" + "github.com/multiformats/go-multihash" +) + +func multibaseB32Encode(k []byte) string { + res, err := multibase.Encode(multibase.Base32, k) + if err != nil { + // Should be unreachable + panic(err) 
+	}
+	return res
+}
+
+func tryFormatLoggableRecordKey(k string) (string, error) {
+	if len(k) == 0 {
+		return "", fmt.Errorf("LoggableRecordKey is empty")
+	}
+	var proto, cstr string
+	if k[0] == '/' {
+		// it's a path (probably)
+		protoEnd := strings.IndexByte(k[1:], '/')
+		if protoEnd < 0 {
+			return "", fmt.Errorf("LoggableRecordKey starts with '/' but is not a path: %s", multibaseB32Encode([]byte(k)))
+		}
+		proto = k[1 : protoEnd+1]
+		cstr = k[protoEnd+2:]
+
+		encStr := multibaseB32Encode([]byte(cstr))
+		return fmt.Sprintf("/%s/%s", proto, encStr), nil
+	}
+
+	return "", fmt.Errorf("LoggableRecordKey is not a path: %s", multibaseB32Encode([]byte(k)))
+}
+
+type LoggableRecordKeyString string
+
+func (lk LoggableRecordKeyString) String() string {
+	k := string(lk)
+	newKey, err := tryFormatLoggableRecordKey(k)
+	if err == nil {
+		return newKey
+	}
+	return err.Error()
+}
+
+type LoggableRecordKeyBytes []byte
+
+func (lk LoggableRecordKeyBytes) String() string {
+	k := string(lk)
+	newKey, err := tryFormatLoggableRecordKey(k)
+	if err == nil {
+		return newKey
+	}
+	return err.Error()
+}
+
+type LoggableProviderRecordBytes []byte
+
+func (lk LoggableProviderRecordBytes) String() string {
+	newKey, err := tryFormatLoggableProviderKey(lk)
+	if err == nil {
+		return newKey
+	}
+	return err.Error()
+}
+
+func tryFormatLoggableProviderKey(k []byte) (string, error) {
+	if len(k) == 0 {
+		return "", fmt.Errorf("LoggableProviderKey is empty")
+	}
+
+	encodedKey := multibaseB32Encode(k)
+
+	// The DHT used to provide CIDs, but now provides multihashes
+	// TODO: Drop this when enough of the network has upgraded
+	if _, err := cid.Cast(k); err == nil {
+		return encodedKey, nil
+	}
+
+	if _, err := multihash.Cast(k); err == nil {
+		return encodedKey, nil
+	}
+
+	return "", fmt.Errorf("LoggableProviderKey is not a Multihash or CID: %s", encodedKey)
+}
diff --git a/go-libp2p-kad-dht/internal/logging_test.go b/go-libp2p-kad-dht/internal/logging_test.go
new file mode 100644
index 0000000..25bd033
--- /dev/null
+++ b/go-libp2p-kad-dht/internal/logging_test.go
@@ -0,0 +1,76 @@
+package internal
+
+import (
+	"testing"
+
+	cid "github.com/ipfs/go-cid"
+)
+
+func TestLoggableRecordKey(t *testing.T) {
+	c, err := cid.Decode("QmfUvYQhL2GinafMbPDYz7VFoZv4iiuLuR33aRsPurXGag")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	k, err := tryFormatLoggableRecordKey("/proto/" + string(c.Bytes()))
+	if err != nil {
+		t.Errorf("failed to format key: %s", err)
+	}
+	if k != "/proto/"+multibaseB32Encode(c.Bytes()) {
+		t.Error("expected path to be preserved as a loggable key")
+	}
+
+	for _, s := range []string{"/bla", "", "bla bla"} {
+		if _, err := tryFormatLoggableRecordKey(s); err == nil {
+			t.Errorf("expected to fail formatting: %s", s)
+		}
+	}
+
+	for _, s := range []string{"/bla/asdf", "/a/b/c"} {
+		if _, err := tryFormatLoggableRecordKey(s); err != nil {
+			t.Errorf("expected to be formattable: %s", s)
+		}
+	}
+}
+
+func TestLoggableProviderKey(t *testing.T) {
+	c0, err := cid.Decode("QmfUvYQhL2GinafMbPDYz7VFoZv4iiuLuR33aRsPurXGag")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Test logging CIDv0 provider
+	b32MH := multibaseB32Encode(c0.Hash())
+	k, err := tryFormatLoggableProviderKey(c0.Bytes())
+	if err != nil {
+		t.Errorf("failed to format key: %s", err)
+	}
+	if k != b32MH {
+		t.Error("expected cidv0 to be converted into base32 multihash")
+	}
+
+	// Test logging CIDv1 provider (from older DHT implementations)
+	c1 := cid.NewCidV1(cid.DagProtobuf, c0.Hash())
+	k, err = tryFormatLoggableProviderKey(c1.Hash())
+	if err
!= nil { + t.Errorf("failed to format key: %s", err) + } + if k != b32MH { + t.Error("expected cidv1 to be converted into base32 multihash") + } + + // Test logging multihash provider + k, err = tryFormatLoggableProviderKey(c1.Hash()) + if err != nil { + t.Errorf("failed to format key: %s", err) + } + if k != b32MH { + t.Error("expected multihash to be displayed in base32") + } + + for _, s := range []string{"/bla", "", "bla bla", "/bla/asdf", "/a/b/c"} { + if _, err := tryFormatLoggableProviderKey([]byte(s)); err == nil { + t.Errorf("expected to fail formatting: %s", s) + } + } +} diff --git a/go-libp2p-kad-dht/internal/net/message_manager.go b/go-libp2p-kad-dht/internal/net/message_manager.go new file mode 100644 index 0000000..294425f --- /dev/null +++ b/go-libp2p-kad-dht/internal/net/message_manager.go @@ -0,0 +1,387 @@ +package net + +import ( + "bufio" + "context" + "fmt" + "io" + "sync" + "time" + + "github.com/libp2p/go-libp2p/core/host" + "github.com/libp2p/go-libp2p/core/network" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/protocol" + + logging "github.com/ipfs/go-log/v2" + "github.com/libp2p/go-msgio" + + //lint:ignore SA1019 TODO migrate away from gogo pb + "github.com/libp2p/go-msgio/protoio" + + "go.opencensus.io/stats" + "go.opencensus.io/tag" + + "github.com/libp2p/go-libp2p-kad-dht/internal" + "github.com/libp2p/go-libp2p-kad-dht/metrics" + pb "github.com/libp2p/go-libp2p-kad-dht/pb" +) + +var dhtReadMessageTimeout = 10 * time.Second + +// ErrReadTimeout is an error that occurs when no message is read within the timeout period. +var ErrReadTimeout = fmt.Errorf("timed out reading response") + +var logger = logging.Logger("dht") + +// messageSenderImpl is responsible for sending requests and messages to peers efficiently, including reuse of streams. +// It also tracks metrics for sent requests and messages. +type messageSenderImpl struct { + host host.Host // the network services we need + smlk sync.Mutex + strmap map[peer.ID]*peerMessageSender + protocols []protocol.ID +} + +func NewMessageSenderImpl(h host.Host, protos []protocol.ID) pb.MessageSenderWithDisconnect { + return &messageSenderImpl{ + host: h, + strmap: make(map[peer.ID]*peerMessageSender), + protocols: protos, + } +} + +func (m *messageSenderImpl) OnDisconnect(ctx context.Context, p peer.ID) { + m.smlk.Lock() + defer m.smlk.Unlock() + ms, ok := m.strmap[p] + if !ok { + return + } + delete(m.strmap, p) + + // Do this asynchronously as ms.lk can block for a while. + go func() { + if err := ms.lk.Lock(ctx); err != nil { + return + } + defer ms.lk.Unlock() + ms.invalidate() + }() +} + +// SendRequest sends out a request, but also makes sure to +// measure the RTT for latency measurements. 
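+// The measured round-trip time is recorded in the host's peerstore via
+// Peerstore().RecordLatency, so the latency estimate is available to the
+// rest of the stack.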
+func (m *messageSenderImpl) SendRequest(ctx context.Context, p peer.ID, pmes *pb.Message) (*pb.Message, error) { + ctx, _ = tag.New(ctx, metrics.UpsertMessageType(pmes)) + + ms, err := m.messageSenderForPeer(ctx, p) + if err != nil { + stats.Record(ctx, + metrics.SentRequests.M(1), + metrics.SentRequestErrors.M(1), + ) + logger.Debugw("request failed to open message sender", "error", err, "to", p) + return nil, err + } + + start := time.Now() + + rpmes, err := ms.SendRequest(ctx, pmes) + if err != nil { + stats.Record(ctx, + metrics.SentRequests.M(1), + metrics.SentRequestErrors.M(1), + ) + logger.Debugw("request failed", "error", err, "to", p) + return nil, err + } + + stats.Record(ctx, + metrics.SentRequests.M(1), + metrics.SentBytes.M(int64(pmes.Size())), + metrics.OutboundRequestLatency.M(float64(time.Since(start))/float64(time.Millisecond)), + ) + m.host.Peerstore().RecordLatency(p, time.Since(start)) + return rpmes, nil +} + +// SendMessage sends out a message +func (m *messageSenderImpl) SendMessage(ctx context.Context, p peer.ID, pmes *pb.Message) error { + ctx, _ = tag.New(ctx, metrics.UpsertMessageType(pmes)) + + ms, err := m.messageSenderForPeer(ctx, p) + if err != nil { + stats.Record(ctx, + metrics.SentMessages.M(1), + metrics.SentMessageErrors.M(1), + ) + logger.Debugw("message failed to open message sender", "error", err, "to", p) + return err + } + + if err := ms.SendMessage(ctx, pmes); err != nil { + stats.Record(ctx, + metrics.SentMessages.M(1), + metrics.SentMessageErrors.M(1), + ) + logger.Debugw("message failed", "error", err, "to", p) + return err + } + + stats.Record(ctx, + metrics.SentMessages.M(1), + metrics.SentBytes.M(int64(pmes.Size())), + ) + return nil +} + +func (m *messageSenderImpl) messageSenderForPeer(ctx context.Context, p peer.ID) (*peerMessageSender, error) { + m.smlk.Lock() + ms, ok := m.strmap[p] + if ok { + m.smlk.Unlock() + return ms, nil + } + ms = &peerMessageSender{p: p, m: m, lk: internal.NewCtxMutex()} + m.strmap[p] = ms + m.smlk.Unlock() + + if err := ms.prepOrInvalidate(ctx); err != nil { + m.smlk.Lock() + defer m.smlk.Unlock() + + if msCur, ok := m.strmap[p]; ok { + // Changed. Use the new one, old one is invalid and + // not in the map so we can just throw it away. + if ms != msCur { + return msCur, nil + } + // Not changed, remove the now invalid stream from the + // map. + delete(m.strmap, p) + } + // Invalid but not in map. Must have been removed by a disconnect. + return nil, err + } + // All ready to go. + return ms, nil +} + +// peerMessageSender is responsible for sending requests and messages to a particular peer +type peerMessageSender struct { + s network.Stream + r msgio.ReadCloser + lk internal.CtxMutex + p peer.ID + m *messageSenderImpl + + invalid bool + singleMes int +} + +// invalidate is called before this peerMessageSender is removed from the strmap. +// It prevents the peerMessageSender from being reused/reinitialized and then +// forgotten (leaving the stream open). 
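+// Callers must hold ms.lk: both call sites (OnDisconnect and
+// prepOrInvalidate) take the lock before invoking it.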
+func (ms *peerMessageSender) invalidate() { + ms.invalid = true + if ms.s != nil { + _ = ms.s.Reset() + ms.s = nil + } +} + +func (ms *peerMessageSender) prepOrInvalidate(ctx context.Context) error { + if err := ms.lk.Lock(ctx); err != nil { + return err + } + defer ms.lk.Unlock() + + if err := ms.prep(ctx); err != nil { + ms.invalidate() + return err + } + return nil +} + +func (ms *peerMessageSender) prep(ctx context.Context) error { + if ms.invalid { + return fmt.Errorf("message sender has been invalidated") + } + if ms.s != nil { + return nil + } + + // We only want to speak to peers using our primary protocols. We do not want to query any peer that only speaks + // one of the secondary "server" protocols that we happen to support (e.g. older nodes that we can respond to for + // backwards compatibility reasons). + nstr, err := ms.m.host.NewStream(ctx, ms.p, ms.m.protocols...) + if err != nil { + return err + } + + ms.r = msgio.NewVarintReaderSize(nstr, network.MessageSizeMax) + ms.s = nstr + + return nil +} + +// streamReuseTries is the number of times we will try to reuse a stream to a +// given peer before giving up and reverting to the old one-message-per-stream +// behaviour. +const streamReuseTries = 3 + +func (ms *peerMessageSender) SendMessage(ctx context.Context, pmes *pb.Message) error { + if err := ms.lk.Lock(ctx); err != nil { + return err + } + defer ms.lk.Unlock() + + retry := false + for { + if err := ms.prep(ctx); err != nil { + return err + } + + if err := ms.writeMsg(pmes); err != nil { + _ = ms.s.Reset() + ms.s = nil + + if retry { + logger.Debugw("error writing message", "error", err) + return err + } + logger.Debugw("error writing message", "error", err, "retrying", true) + retry = true + continue + } + + var err error + if ms.singleMes > streamReuseTries { + err = ms.s.Close() + ms.s = nil + } else if retry { + ms.singleMes++ + } + + return err + } +} + +func (ms *peerMessageSender) SendRequest(ctx context.Context, pmes *pb.Message) (*pb.Message, error) { + if err := ms.lk.Lock(ctx); err != nil { + return nil, err + } + defer ms.lk.Unlock() + + retry := false + for { + if err := ms.prep(ctx); err != nil { + return nil, err + } + + if err := ms.writeMsg(pmes); err != nil { + _ = ms.s.Reset() + ms.s = nil + + if retry { + logger.Debugw("error writing message", "error", err) + return nil, err + } + logger.Debugw("error writing message", "error", err, "retrying", true) + retry = true + continue + } + + mes := new(pb.Message) + if err := ms.ctxReadMsg(ctx, mes); err != nil { + _ = ms.s.Reset() + ms.s = nil + if err == context.Canceled { + // retry would be same error + return nil, err + } + if retry { + logger.Debugw("error reading message", "error", err) + return nil, err + } + logger.Debugw("error reading message", "error", err, "retrying", true) + retry = true + continue + } + + var err error + if ms.singleMes > streamReuseTries { + err = ms.s.Close() + ms.s = nil + } else if retry { + ms.singleMes++ + } + + return mes, err + } +} + +func (ms *peerMessageSender) writeMsg(pmes *pb.Message) error { + return WriteMsg(ms.s, pmes) +} + +func (ms *peerMessageSender) ctxReadMsg(ctx context.Context, mes *pb.Message) error { + errc := make(chan error, 1) + go func(r msgio.ReadCloser) { + defer close(errc) + bytes, err := r.ReadMsg() + defer r.ReleaseMsg(bytes) + if err != nil { + errc <- err + return + } + errc <- mes.Unmarshal(bytes) + }(ms.r) + + t := time.NewTimer(dhtReadMessageTimeout) + defer t.Stop() + + select { + case err := <-errc: + return err + case <-ctx.Done(): 
+ return ctx.Err() + case <-t.C: + return ErrReadTimeout + } +} + +// The Protobuf writer performs multiple small writes when writing a message. +// We need to buffer those writes, to make sure that we're not sending a new +// packet for every single write. +type bufferedDelimitedWriter struct { + *bufio.Writer + protoio.WriteCloser +} + +var writerPool = sync.Pool{ + New: func() interface{} { + w := bufio.NewWriter(nil) + return &bufferedDelimitedWriter{ + Writer: w, + WriteCloser: protoio.NewDelimitedWriter(w), + } + }, +} + +func WriteMsg(w io.Writer, mes *pb.Message) error { + bw := writerPool.Get().(*bufferedDelimitedWriter) + bw.Reset(w) + err := bw.WriteMsg(mes) + if err == nil { + err = bw.Flush() + } + bw.Reset(nil) + writerPool.Put(bw) + return err +} + +func (w *bufferedDelimitedWriter) Flush() error { + return w.Writer.Flush() +} diff --git a/go-libp2p-kad-dht/internal/net/message_manager_test.go b/go-libp2p-kad-dht/internal/net/message_manager_test.go new file mode 100644 index 0000000..5c61ec2 --- /dev/null +++ b/go-libp2p-kad-dht/internal/net/message_manager_test.go @@ -0,0 +1,39 @@ +package net + +import ( + "context" + "testing" + + "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/protocol" + + bhost "github.com/libp2p/go-libp2p/p2p/host/basic" + swarmt "github.com/libp2p/go-libp2p/p2p/net/swarm/testing" + + "github.com/stretchr/testify/require" +) + +func TestInvalidMessageSenderTracking(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + foo := peer.ID("asdasd") + + h, err := bhost.NewHost(swarmt.GenSwarm(t, swarmt.OptDisableReuseport), new(bhost.HostOpts)) + require.NoError(t, err) + h.Start() + defer h.Close() + + msgSender := NewMessageSenderImpl(h, []protocol.ID{"/test/kad/1.0.0"}).(*messageSenderImpl) + + _, err = msgSender.messageSenderForPeer(ctx, foo) + require.Error(t, err, "should have failed to find message sender") + + msgSender.smlk.Lock() + mscnt := len(msgSender.strmap) + msgSender.smlk.Unlock() + + if mscnt > 0 { + t.Fatal("should have no message senders in map") + } +} diff --git a/go-libp2p-kad-dht/internal/testing/helper.go b/go-libp2p-kad-dht/internal/testing/helper.go new file mode 100644 index 0000000..52961f3 --- /dev/null +++ b/go-libp2p-kad-dht/internal/testing/helper.go @@ -0,0 +1,31 @@ +package testing + +import ( + "bytes" + "errors" +) + +type TestValidator struct{} + +func (TestValidator) Select(_ string, bs [][]byte) (int, error) { + index := -1 + for i, b := range bs { + if bytes.Equal(b, []byte("newer")) { + index = i + } else if bytes.Equal(b, []byte("valid")) { + if index == -1 { + index = i + } + } + } + if index == -1 { + return -1, errors.New("no rec found") + } + return index, nil +} +func (TestValidator) Validate(_ string, b []byte) error { + if bytes.Equal(b, []byte("expired")) { + return errors.New("expired") + } + return nil +} diff --git a/go-libp2p-kad-dht/internal/tracing.go b/go-libp2p-kad-dht/internal/tracing.go new file mode 100644 index 0000000..6b707f9 --- /dev/null +++ b/go-libp2p-kad-dht/internal/tracing.go @@ -0,0 +1,32 @@ +package internal + +import ( + "context" + "fmt" + "unicode/utf8" + + "github.com/multiformats/go-multibase" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" +) + +func StartSpan(ctx context.Context, name string, opts ...trace.SpanStartOption) (context.Context, trace.Span) { + return otel.Tracer("go-libp2p-kad-dht").Start(ctx, fmt.Sprintf("KademliaDHT.%s", name), opts...) 
+}
+
+// KeyAsAttribute formats a DHT key into a suitable tracing attribute.
+// DHT keys can be either valid utf-8 or binary, e.g. when they are derived from a multihash.
+// Tracing (and notably the OpenTelemetry+grpc exporter) requires valid utf-8 for string attributes.
+func KeyAsAttribute(name string, key string) attribute.KeyValue {
+	b := []byte(key)
+	if utf8.Valid(b) {
+		return attribute.String(name, key)
+	}
+	encoded, err := multibase.Encode(multibase.Base58BTC, b)
+	if err != nil {
+		// should be unreachable
+		panic(err)
+	}
+	return attribute.String(name, encoded)
+}
diff --git a/go-libp2p-kad-dht/log_test.go b/go-libp2p-kad-dht/log_test.go
new file mode 100644
index 0000000..511951d
--- /dev/null
+++ b/go-libp2p-kad-dht/log_test.go
@@ -0,0 +1,7 @@
+package dht
+
+import "log"
+
+func init() {
+	log.SetFlags(log.Flags() | log.Llongfile)
+}
diff --git a/go-libp2p-kad-dht/lookup.go b/go-libp2p-kad-dht/lookup.go
new file mode 100644
index 0000000..0380132
--- /dev/null
+++ b/go-libp2p-kad-dht/lookup.go
@@ -0,0 +1,85 @@
+package dht
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"github.com/libp2p/go-libp2p-kad-dht/internal"
+	"github.com/libp2p/go-libp2p-kad-dht/metrics"
+	"github.com/libp2p/go-libp2p-kad-dht/qpeerset"
+	kb "github.com/libp2p/go-libp2p-kbucket"
+	"github.com/libp2p/go-libp2p/core/peer"
+	"github.com/libp2p/go-libp2p/core/routing"
+	"go.opentelemetry.io/otel/trace"
+)
+
+// GetClosestPeers is a Kademlia 'node lookup' operation. It returns the K
+// closest peers to the given key.
+//
+// If the context is canceled, this function will return the context error along
+// with the closest K peers it has found so far.
+func (dht *IpfsDHT) GetClosestPeers(ctx context.Context, key string) ([]peer.ID, error) {
+	ctx, span := internal.StartSpan(ctx, "IpfsDHT.GetClosestPeers", trace.WithAttributes(internal.KeyAsAttribute("Key", key)))
+	defer span.End()
+
+	if key == "" {
+		return nil, fmt.Errorf("can't lookup empty key")
+	}
+
+	//TODO: I can break the interface! return []peer.ID
+	lookupRes, err := dht.runLookupWithFollowup(ctx, key, dht.pmGetClosestPeers(key), func(*qpeerset.QueryPeerset) bool { return false })
+
+	if err != nil {
+		return nil, err
+	}
+
+	if err := ctx.Err(); err != nil || !lookupRes.completed {
+		return lookupRes.peers, err
+	}
+
+	// tracking lookup results for network size estimator
+	if err = dht.nsEstimator.Track(key, lookupRes.closest); err != nil {
+		logger.Warnf("network size estimator track peers: %s", err)
+	}
+
+	if ns, err := dht.nsEstimator.NetworkSize(); err == nil {
+		metrics.NetworkSize.M(int64(ns))
+	}
+
+	// refresh the cpl for this key as the query was successful
+	dht.routingTable.ResetCplRefreshedAtForID(kb.ConvertKey(key), time.Now())
+
+	return lookupRes.peers, nil
+}
+
+// pmGetClosestPeers is the protocol messenger version of the GetClosestPeers queryFn.
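+//
+// (For reference: a queryFn, as inferred from the closure below, has the shape
+// func(ctx context.Context, p peer.ID) ([]*peer.AddrInfo, error); it is invoked
+// once for every peer the lookup decides to query.)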
+func (dht *IpfsDHT) pmGetClosestPeers(key string) queryFn {
+	return func(ctx context.Context, p peer.ID) ([]*peer.AddrInfo, error) {
+		// For DHT query command
+		routing.PublishQueryEvent(ctx, &routing.QueryEvent{
+			Type: routing.SendingQuery,
+			ID:   p,
+		})
+
+		peers, err := dht.protoMessenger.GetClosestPeers(ctx, p, peer.ID(key))
+		if err != nil {
+			logger.Debugf("error getting closer peers: %s", err)
+			routing.PublishQueryEvent(ctx, &routing.QueryEvent{
+				Type:  routing.QueryError,
+				ID:    p,
+				Extra: err.Error(),
+			})
+			return nil, err
+		}
+
+		// For DHT query command
+		routing.PublishQueryEvent(ctx, &routing.QueryEvent{
+			Type:      routing.PeerResponse,
+			ID:        p,
+			Responses: peers,
+		})
+
+		return peers, err
+	}
+}
diff --git a/go-libp2p-kad-dht/lookup_optim.go b/go-libp2p-kad-dht/lookup_optim.go
new file mode 100644
index 0000000..428e86f
--- /dev/null
+++ b/go-libp2p-kad-dht/lookup_optim.go
@@ -0,0 +1,313 @@
+package dht
+
+import (
+	"context"
+	"fmt"
+	"math"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/libp2p/go-libp2p-kad-dht/metrics"
+	"github.com/libp2p/go-libp2p-kad-dht/netsize"
+	"github.com/libp2p/go-libp2p-kad-dht/qpeerset"
+	kb "github.com/libp2p/go-libp2p-kbucket"
+	"github.com/libp2p/go-libp2p/core/peer"
+	"github.com/multiformats/go-multihash"
+	ks "github.com/whyrusleeping/go-keyspace"
+	"gonum.org/v1/gonum/mathext"
+)
+
+const (
+	// optProvIndividualThresholdCertainty describes how sure we want to be that an individual peer that
+	// we find during walking the DHT actually belongs to the k-closest peers based on the current network size
+	// estimation.
+	optProvIndividualThresholdCertainty = 0.9
+
+	// optProvSetThresholdStrictness describes the probability that the set of closest peers is actually further
+	// away than the calculated set threshold. Put differently: what is the probability that we are too strict and
+	// don't terminate the process early because we can't find any closer peers?
+	optProvSetThresholdStrictness = 0.1
+
+	// optProvReturnRatio corresponds to how many ADD_PROVIDER RPCs must have completed (regardless of success)
+	// before we return to the user. A ratio of 0.75 equals 15 RPCs, as it is based on the Kademlia bucket size.
+	optProvReturnRatio = 0.75
+)
+
+type addProviderRPCState int
+
+const (
+	scheduled addProviderRPCState = iota + 1
+	success
+	failure
+)
+
+type optimisticState struct {
+	// context for all ADD_PROVIDER RPCs
+	putCtx context.Context
+
+	// reference to the DHT
+	dht *IpfsDHT
+
+	// the most recent network size estimation
+	networkSize int32
+
+	// a channel indicating when an ADD_PROVIDER RPC completed (successful or not)
+	doneChan chan struct{}
+
+	// tracks which peers we have stored the provider records with
+	peerStatesLk sync.RWMutex
+	peerStates   map[peer.ID]addProviderRPCState
+
+	// the key to provide
+	key string
+
+	// the key to provide transformed into the Kademlia key space
+	ksKey ks.Key
+
+	// distance threshold for individual peers. If peers are closer than this number we store
+	// the provider records right away.
+	individualThreshold float64
+
+	// distance threshold for the set of bucketSize closest peers. If the average distance of the bucketSize
+	// closest peers is below this number we stop the DHT walk and store the remaining provider records.
+	// "remaining" because we have likely already stored some on peers that were below the individualThreshold.
+	setThreshold float64
+
+	// number of completed (regardless of success) ADD_PROVIDER RPCs before we return control back to the user.
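+	// With the default bucket size of 20 and optProvReturnRatio of 0.75 this
+	// works out to ceil(20 * 0.75) = 15 RPCs.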
+	returnThreshold int
+
+	// putProvDone counts the ADD_PROVIDER RPCs that have completed (successful and unsuccessful)
+	putProvDone atomic.Int32
+}
+
+func (dht *IpfsDHT) newOptimisticState(ctx context.Context, key string) (*optimisticState, error) {
+	// get network size and err out if there is no reasonable estimate
+	networkSize, err := dht.nsEstimator.NetworkSize()
+	if err != nil {
+		return nil, err
+	}
+
+	individualThreshold := mathext.GammaIncRegInv(float64(dht.bucketSize), 1-optProvIndividualThresholdCertainty) / float64(networkSize)
+	setThreshold := mathext.GammaIncRegInv(float64(dht.bucketSize)/2.0+1, 1-optProvSetThresholdStrictness) / float64(networkSize)
+	returnThreshold := int(math.Ceil(float64(dht.bucketSize) * optProvReturnRatio))
+
+	return &optimisticState{
+		putCtx:              ctx,
+		dht:                 dht,
+		key:                 key,
+		doneChan:            make(chan struct{}, returnThreshold), // buffered channel to not miss events
+		ksKey:               ks.XORKeySpace.Key([]byte(key)),
+		networkSize:         networkSize,
+		peerStates:          map[peer.ID]addProviderRPCState{},
+		individualThreshold: individualThreshold,
+		setThreshold:        setThreshold,
+		returnThreshold:     returnThreshold,
+		putProvDone:         atomic.Int32{},
+	}, nil
+}
+
+func (dht *IpfsDHT) optimisticProvide(outerCtx context.Context, keyMH multihash.Multihash) error {
+	key := string(keyMH)
+
+	if key == "" {
+		return fmt.Errorf("can't lookup empty key")
+	}
+
+	// initialize new context for all putProvider operations.
+	// We don't want to give the outer context to the put operations as we return early before all
+	// put operations have finished to avoid the long tail of the latency distribution. If we
+	// provided the outer context the put operations may be cancelled depending on what happens
+	// with the context on the user side.
+	putCtx, putCtxCancel := context.WithTimeout(context.Background(), time.Minute)
+
+	es, err := dht.newOptimisticState(putCtx, key)
+	if err != nil {
+		putCtxCancel()
+		return err
+	}
+
+	// initialize context that finishes when this function returns
+	innerCtx, innerCtxCancel := context.WithCancel(outerCtx)
+	defer innerCtxCancel()
+
+	go func() {
+		select {
+		case <-outerCtx.Done():
+			// If the outer context gets cancelled while we're still in this function, stop all
+			// pending put operations.
+			putCtxCancel()
+		case <-innerCtx.Done():
+			// We have returned from this function. Ignore cancellations of the outer context and continue
+			// with the remaining put operations.
+		}
+	}()
+
+	lookupRes, err := dht.runLookupWithFollowup(outerCtx, key, dht.pmGetClosestPeers(key), es.stopFn)
+	if err != nil {
+		return err
+	}
+
+	// Store the provider records with all the closest peers we haven't already contacted/scheduled interaction with.
+	es.peerStatesLk.Lock()
+	for _, p := range lookupRes.peers {
+		if _, found := es.peerStates[p]; found {
+			continue
+		}
+
+		go es.putProviderRecord(p)
+		es.peerStates[p] = scheduled
+	}
+	es.peerStatesLk.Unlock()
+
+	// wait until a threshold number of RPCs have completed
+	es.waitForRPCs()
+
+	if err := outerCtx.Err(); err != nil || !lookupRes.completed { // likely the "completed" field is false but that's not a given
+		return err
+	}
+
+	// tracking lookup results for network size estimator as "completed" is true
+	if err = dht.nsEstimator.Track(key, lookupRes.closest); err != nil {
+		logger.Warnf("network size estimator track peers: %s", err)
+	}
+
+	if ns, err := dht.nsEstimator.NetworkSize(); err == nil {
+		metrics.NetworkSize.M(int64(ns))
+	}
+
+	// refresh the cpl for this key as the query was successful
+	dht.routingTable.ResetCplRefreshedAtForID(kb.ConvertKey(key), time.Now())
+
+	return nil
+}
+
+func (os *optimisticState) stopFn(qps *qpeerset.QueryPeerset) bool {
+	os.peerStatesLk.Lock()
+	defer os.peerStatesLk.Unlock()
+
+	// get currently known closest peers and check if any of them is already very close.
+	// If so -> store provider records straight away.
+	closest := qps.GetClosestNInStates(os.dht.bucketSize, qpeerset.PeerHeard, qpeerset.PeerWaiting, qpeerset.PeerQueried)
+	distances := make([]float64, os.dht.bucketSize)
+	for i, p := range closest {
+		// calculate distance of peer p to the target key
+		distances[i] = netsize.NormedDistance(p, os.ksKey)
+
+		// Check if we have already scheduled interaction or have actually interacted with that peer
+		if _, found := os.peerStates[p]; found {
+			continue
+		}
+
+		// Check if peer is close enough to store the provider record with
+		if distances[i] > os.individualThreshold {
+			continue
+		}
+
+		// peer is indeed very close already -> store the provider record directly with it!
+		go os.putProviderRecord(p)
+
+		// keep track that we've scheduled storing a provider record with that peer
+		os.peerStates[p] = scheduled
+	}
+
+	// count number of peers we have scheduled to contact or have already successfully contacted via the above method
+	scheduledAndSuccessCount := 0
+	for _, s := range os.peerStates {
+		if s == scheduled || s == success {
+			scheduledAndSuccessCount += 1
+		}
+	}
+
+	// if we have already contacted/scheduled the RPC for at least bucketSize peers stop the procedure
+	if scheduledAndSuccessCount >= os.dht.bucketSize {
+		return true
+	}
+
+	// calculate average distance of the set of closest peers
+	sum := 0.0
+	for _, d := range distances {
+		sum += d
+	}
+	avg := sum / float64(len(distances))
+
+	// if the average is below the set threshold stop the procedure
+	return avg < os.setThreshold
+}
+
+func (os *optimisticState) putProviderRecord(pid peer.ID) {
+	err := os.dht.protoMessenger.PutProviderAddrs(os.putCtx, pid, []byte(os.key), peer.AddrInfo{
+		ID:    os.dht.self,
+		Addrs: os.dht.filterAddrs(os.dht.host.Addrs()),
+	})
+	os.peerStatesLk.Lock()
+	if err != nil {
+		os.peerStates[pid] = failure
+	} else {
+		os.peerStates[pid] = success
+	}
+	os.peerStatesLk.Unlock()

+	// indicate that this ADD_PROVIDER RPC has completed
+	os.doneChan <- struct{}{}
+}
+
+// waitForRPCs waits for a subset of the ADD_PROVIDER RPCs to complete, then acquires a lease on
+// a bounded channel so that we can return to the user early while preventing unbounded
+// asynchronicity. If there are already too many requests in-flight, we just wait for our
+// current set to finish.
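+//
+// The optProvJobsPool channel acts as a counting semaphore. In isolation the
+// pattern looks roughly like this (illustrative sketch; the pool size here is
+// hypothetical):
+//
+//	pool := make(chan struct{}, 16) // capacity bounds concurrent background waiters
+//	pool <- struct{}{}              // acquire a lease (blocks while the pool is full)
+//	<-pool                          // release the lease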
+func (os *optimisticState) waitForRPCs() {
+	os.peerStatesLk.RLock()
+	rpcCount := len(os.peerStates)
+	os.peerStatesLk.RUnlock()
+
+	// returnThreshold can't be larger than the total number of issued RPCs
+	if os.returnThreshold > rpcCount {
+		os.returnThreshold = rpcCount
+	}
+
+	// Wait until returnThreshold ADD_PROVIDER RPCs have returned
+	for range os.doneChan {
+		if int(os.putProvDone.Add(1)) == os.returnThreshold {
+			break
+		}
+	}
+	// At this point only a subset of all ADD_PROVIDER RPCs have completed.
+	// We want to give control back to the user as soon as possible because
+	// it is highly likely that at least one of the remaining RPCs will time
+	// out and thus slow down the whole process. The provider records will
+	// already be available with less than the total number of RPCs having
+	// finished. This has been investigated here:
+	// https://github.com/protocol/network-measurements/blob/master/results/rfm17-provider-record-liveness.md
+
+	// For the remaining ADD_PROVIDER RPCs try to acquire a lease on the optProvJobsPool channel.
+	// If that worked we need to consume the doneChan and release the acquired lease on the
+	// optProvJobsPool channel.
+	remaining := rpcCount - int(os.putProvDone.Load())
+	for i := 0; i < remaining; i++ {
+		select {
+		case os.dht.optProvJobsPool <- struct{}{}:
+			// We were able to acquire a lease on the optProvJobsPool channel.
+			// Consume doneChan to release the acquired lease again.
+			go os.consumeDoneChan(rpcCount)
+		case <-os.doneChan:
+			// We were not able to acquire a lease but an ADD_PROVIDER RPC resolved.
+			if int(os.putProvDone.Add(1)) == rpcCount {
+				close(os.doneChan)
+			}
+		}
+	}
+}
+
+func (os *optimisticState) consumeDoneChan(until int) {
+	// Wait for an RPC to finish
+	<-os.doneChan
+
+	// Release the acquired lease for others to get a spot
+	<-os.dht.optProvJobsPool
+
+	// If all RPCs have finished, close the channel.
+	if int(os.putProvDone.Add(1)) == until {
+		close(os.doneChan)
+	}
+}
diff --git a/go-libp2p-kad-dht/lookup_optim_test.go b/go-libp2p-kad-dht/lookup_optim_test.go
new file mode 100644
index 0000000..e77dd0c
--- /dev/null
+++ b/go-libp2p-kad-dht/lookup_optim_test.go
@@ -0,0 +1,106 @@
+package dht
+
+import (
+	"context"
+	"math/rand"
+	"testing"
+	"time"
+
+	"github.com/libp2p/go-libp2p-kad-dht/netsize"
+	"github.com/libp2p/go-libp2p/core/peer"
+)
+
+func randInt(rng *rand.Rand, n, except int) int {
+	for {
+		r := rng.Intn(n)
+		if r != except {
+			return r
+		}
+	}
+}
+
+func TestOptimisticProvide(t *testing.T) {
+	rng := rand.New(rand.NewSource(time.Now().UnixNano()))
+
+	// Order of events:
+	// 1. setup DHTs
+	// 2. connect each DHT with three others (but not to itself)
+	// 3. select random DHT to be the privileged one (performs the optimistic provide)
+	// 4. initialize network size estimator of privileged DHT
+	// 5. perform provides
+	// 6. let all other DHTs perform the lookup for all provided CIDs
+
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	dhtCount := 21
+
+	dhts := setupDHTS(t, ctx, dhtCount, EnableOptimisticProvide())
+	defer func() {
+		for i := 0; i < dhtCount; i++ {
+			dhts[i].Close()
+			defer dhts[i].host.Close()
+		}
+	}()
+
+	// connect each DHT with three random others
+	for i, dht := range dhts {
+		for j := 0; j < 3; j++ {
+			r := randInt(rng, dhtCount, i)
+			connect(t, ctx, dhts[r], dht)
+		}
+	}
+
+	// select privileged DHT that will perform the provide operation
+	privIdx := rng.Intn(dhtCount)
+	privDHT := dhts[privIdx]
+
+	// gather the peer IDs of all DHTs except the privileged one
+	peerIDs := make([]peer.ID, 20)
+	for i := 0; i < dhtCount; i++ {
+		if i == privIdx {
+			continue
+		}
+
+		if i > privIdx {
+			peerIDs[i-1] = dhts[i].self
+		} else {
+			peerIDs[i] = dhts[i].self
+		}
+	}
+	nse := netsize.NewEstimator(privDHT.self, privDHT.routingTable, privDHT.bucketSize)
+
+	for i := 0; i < 20; i++ {
+		err := nse.Track(string(testCaseCids[i].Bytes()), peerIDs)
+		if err != nil {
+			t.Fatal(err)
+		}
+	}
+	privDHT.nsEstimator = nse
+
+	for _, k := range testCaseCids {
+		logger.Debugf("announcing provider for %s", k)
+		if err := privDHT.optimisticProvide(ctx, k.Hash()); err != nil {
+			t.Fatal(err)
+		}
+	}
+
+	for _, c := range testCaseCids {
+		n := randInt(rng, dhtCount, privIdx)
+
+		ctxT, cancel := context.WithTimeout(ctx, time.Second)
+		defer cancel()
+		provchan := dhts[n].FindProvidersAsync(ctxT, c, 1)
+
+		select {
+		case prov := <-provchan:
+			if prov.ID == "" {
+				t.Fatal("Got back nil provider")
+			}
+			if prov.ID != privDHT.self {
+				t.Fatal("Got back wrong provider")
+			}
+		case <-ctxT.Done():
+			t.Fatal("Did not get a provider back.")
+		}
+	}
+}
diff --git a/go-libp2p-kad-dht/metrics/metrics.go b/go-libp2p-kad-dht/metrics/metrics.go
new file mode 100644
index 0000000..8ff6311
--- /dev/null
+++ b/go-libp2p-kad-dht/metrics/metrics.go
@@ -0,0 +1,117 @@
+package metrics
+
+import (
+	pb "github.com/libp2p/go-libp2p-kad-dht/pb"
+	"go.opencensus.io/stats"
+	"go.opencensus.io/stats/view"
+	"go.opencensus.io/tag"
+)
+
+var (
+	defaultBytesDistribution        = view.Distribution(1024, 2048, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216, 67108864, 268435456, 1073741824, 4294967296)
+	defaultMillisecondsDistribution = view.Distribution(0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000)
+)
+
+// Keys
+var (
+	KeyMessageType, _ = tag.NewKey("message_type")
+	KeyPeerID, _      = tag.NewKey("peer_id")
+	// KeyInstanceID identifies a dht instance by the pointer address.
+	// Useful for differentiating between different dhts that have the same peer id.
+	KeyInstanceID, _ = tag.NewKey("instance_id")
+)
+
+// UpsertMessageType is a convenience function that upserts the message type
+// of a pb.Message into the KeyMessageType tag.
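+//
+// For example, a handler might record a received message like so (an
+// illustrative sketch, not part of this package's API):
+//
+//	ctx, _ = tag.New(ctx, metrics.UpsertMessageType(msg))
+//	stats.Record(ctx, metrics.ReceivedMessages.M(1))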
+func UpsertMessageType(m *pb.Message) tag.Mutator { + return tag.Upsert(KeyMessageType, m.Type.String()) +} + +// Measures +var ( + ReceivedMessages = stats.Int64("libp2p.io/dht/kad/received_messages", "Total number of messages received per RPC", stats.UnitDimensionless) + ReceivedMessageErrors = stats.Int64("libp2p.io/dht/kad/received_message_errors", "Total number of errors for messages received per RPC", stats.UnitDimensionless) + ReceivedBytes = stats.Int64("libp2p.io/dht/kad/received_bytes", "Total received bytes per RPC", stats.UnitBytes) + InboundRequestLatency = stats.Float64("libp2p.io/dht/kad/inbound_request_latency", "Latency per RPC", stats.UnitMilliseconds) + OutboundRequestLatency = stats.Float64("libp2p.io/dht/kad/outbound_request_latency", "Latency per RPC", stats.UnitMilliseconds) + SentMessages = stats.Int64("libp2p.io/dht/kad/sent_messages", "Total number of messages sent per RPC", stats.UnitDimensionless) + SentMessageErrors = stats.Int64("libp2p.io/dht/kad/sent_message_errors", "Total number of errors for messages sent per RPC", stats.UnitDimensionless) + SentRequests = stats.Int64("libp2p.io/dht/kad/sent_requests", "Total number of requests sent per RPC", stats.UnitDimensionless) + SentRequestErrors = stats.Int64("libp2p.io/dht/kad/sent_request_errors", "Total number of errors for requests sent per RPC", stats.UnitDimensionless) + SentBytes = stats.Int64("libp2p.io/dht/kad/sent_bytes", "Total sent bytes per RPC", stats.UnitBytes) + NetworkSize = stats.Int64("libp2p.io/dht/kad/network_size", "Network size estimation", stats.UnitDimensionless) +) + +// Views +var ( + ReceivedMessagesView = &view.View{ + Measure: ReceivedMessages, + TagKeys: []tag.Key{KeyMessageType, KeyPeerID, KeyInstanceID}, + Aggregation: view.Count(), + } + ReceivedMessageErrorsView = &view.View{ + Measure: ReceivedMessageErrors, + TagKeys: []tag.Key{KeyMessageType, KeyPeerID, KeyInstanceID}, + Aggregation: view.Count(), + } + ReceivedBytesView = &view.View{ + Measure: ReceivedBytes, + TagKeys: []tag.Key{KeyMessageType, KeyPeerID, KeyInstanceID}, + Aggregation: defaultBytesDistribution, + } + InboundRequestLatencyView = &view.View{ + Measure: InboundRequestLatency, + TagKeys: []tag.Key{KeyMessageType, KeyPeerID, KeyInstanceID}, + Aggregation: defaultMillisecondsDistribution, + } + OutboundRequestLatencyView = &view.View{ + Measure: OutboundRequestLatency, + TagKeys: []tag.Key{KeyMessageType, KeyPeerID, KeyInstanceID}, + Aggregation: defaultMillisecondsDistribution, + } + SentMessagesView = &view.View{ + Measure: SentMessages, + TagKeys: []tag.Key{KeyMessageType, KeyPeerID, KeyInstanceID}, + Aggregation: view.Count(), + } + SentMessageErrorsView = &view.View{ + Measure: SentMessageErrors, + TagKeys: []tag.Key{KeyMessageType, KeyPeerID, KeyInstanceID}, + Aggregation: view.Count(), + } + SentRequestsView = &view.View{ + Measure: SentRequests, + TagKeys: []tag.Key{KeyMessageType, KeyPeerID, KeyInstanceID}, + Aggregation: view.Count(), + } + SentRequestErrorsView = &view.View{ + Measure: SentRequestErrors, + TagKeys: []tag.Key{KeyMessageType, KeyPeerID, KeyInstanceID}, + Aggregation: view.Count(), + } + SentBytesView = &view.View{ + Measure: SentBytes, + TagKeys: []tag.Key{KeyMessageType, KeyPeerID, KeyInstanceID}, + Aggregation: defaultBytesDistribution, + } + NetworkSizeView = &view.View{ + Measure: NetworkSize, + TagKeys: []tag.Key{KeyPeerID, KeyInstanceID}, + Aggregation: view.Count(), + } +) + +// DefaultViews with all views in it. 
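+//
+// Consumers typically register them once at startup (illustrative):
+//
+//	if err := view.Register(metrics.DefaultViews...); err != nil {
+//		// handle the registration error
+//	}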
+var DefaultViews = []*view.View{
+	ReceivedMessagesView,
+	ReceivedMessageErrorsView,
+	ReceivedBytesView,
+	InboundRequestLatencyView,
+	OutboundRequestLatencyView,
+	SentMessagesView,
+	SentMessageErrorsView,
+	SentRequestsView,
+	SentRequestErrorsView,
+	SentBytesView,
+	NetworkSizeView,
+}
diff --git a/go-libp2p-kad-dht/netsize/netsize.go b/go-libp2p-kad-dht/netsize/netsize.go
new file mode 100644
index 0000000..02a0e67
--- /dev/null
+++ b/go-libp2p-kad-dht/netsize/netsize.go
@@ -0,0 +1,284 @@
+package netsize
+
+import (
+	"fmt"
+	"math"
+	"math/big"
+	"sort"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	logging "github.com/ipfs/go-log/v2"
+	kbucket "github.com/libp2p/go-libp2p-kbucket"
+	"github.com/libp2p/go-libp2p/core/peer"
+	ks "github.com/whyrusleeping/go-keyspace"
+)
+
+// invalidEstimate indicates that we currently have no valid estimate cached.
+const invalidEstimate int32 = -1
+
+var (
+	ErrNotEnoughData   = fmt.Errorf("not enough data")
+	ErrWrongNumOfPeers = fmt.Errorf("expected bucket size number of peers")
+)
+
+var (
+	logger                   = logging.Logger("dht/netsize")
+	MaxMeasurementAge        = 2 * time.Hour
+	MinMeasurementsThreshold = 5
+	MaxMeasurementsThreshold = 150
+	keyspaceMaxInt, _        = new(big.Int).SetString(strings.Repeat("1", 256), 2)
+	keyspaceMaxFloat         = new(big.Float).SetInt(keyspaceMaxInt)
+)
+
+type Estimator struct {
+	localID    kbucket.ID
+	rt         *kbucket.RoutingTable
+	bucketSize int
+
+	measurementsLk sync.RWMutex
+	measurements   map[int][]measurement
+
+	netSizeCache int32
+}
+
+func NewEstimator(localID peer.ID, rt *kbucket.RoutingTable, bucketSize int) *Estimator {
+	// initialize map to hold measurement observations
+	measurements := map[int][]measurement{}
+	for i := 0; i < bucketSize; i++ {
+		measurements[i] = []measurement{}
+	}
+
+	return &Estimator{
+		localID:      kbucket.ConvertPeerID(localID),
+		rt:           rt,
+		bucketSize:   bucketSize,
+		measurements: measurements,
+		netSizeCache: invalidEstimate,
+	}
+}
+
+// NormedDistance calculates the normed XOR distance of the given keys (from 0 to 1).
+func NormedDistance(p peer.ID, k ks.Key) float64 {
+	pKey := ks.XORKeySpace.Key([]byte(p))
+	ksDistance := new(big.Float).SetInt(pKey.Distance(k))
+	normedDist, _ := new(big.Float).Quo(ksDistance, keyspaceMaxFloat).Float64()
+	return normedDist
+}
+
+type measurement struct {
+	distance  float64
+	weight    float64
+	timestamp time.Time
+}
+
+// Track tracks the list of peers for the given key to incorporate in the next network size estimate.
+// key is expected **NOT** to be in the kademlia keyspace and peers is expected to be a sorted list of
+// the closest peers to the given key (the closest first).
+// This function expects peers to have the same length as the routing table bucket size. It also
+// strips old data points and caps the number retained (favouring newer ones).
+func (e *Estimator) Track(key string, peers []peer.ID) error {
+	e.measurementsLk.Lock()
+	defer e.measurementsLk.Unlock()
+
+	// sanity check
+	if len(peers) != e.bucketSize {
+		return ErrWrongNumOfPeers
+	}
+
+	logger.Debugw("Tracking peers for key", "key", key)
+
+	now := time.Now()
+
+	// invalidate cache
+	atomic.StoreInt32(&e.netSizeCache, invalidEstimate)
+
+	// Calculate weight for the peer distances.
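+	// (calcWeight below returns 2^(bucketLevel-bucketSize), so observations backed
+	// by a full bucket weigh 1 and emptier buckets weigh exponentially less.)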
+	weight := e.calcWeight(key, peers)
+
+	// Map given key to the Kademlia key space (hash it)
+	ksKey := ks.XORKeySpace.Key([]byte(key))
+
+	// the maximum age timestamp of the measurement data points
+	maxAgeTs := now.Add(-MaxMeasurementAge)
+
+	for i, p := range peers {
+		// Construct measurement struct
+		m := measurement{
+			distance:  NormedDistance(p, ksKey),
+			weight:    weight,
+			timestamp: now,
+		}
+
+		measurements := append(e.measurements[i], m)
+
+		// find the smallest index of a measurement that is still in the allowed time window
+		// all measurements with a lower index should be discarded as they are too old
+		n := len(measurements)
+		idx := sort.Search(n, func(j int) bool {
+			return measurements[j].timestamp.After(maxAgeTs)
+		})
+
+		// if measurements are outside the allowed time window remove them.
+		// idx == n - there is no measurement in the allowed time window -> reset slice
+		// idx == 0 - the normal case where we only have valid entries
+		// idx != 0 - there is a mix of valid and obsolete entries
+		if idx != 0 {
+			x := make([]measurement, n-idx)
+			copy(x, measurements[idx:])
+			measurements = x
+		}
+
+		// if the number of data points exceeds the max threshold, strip oldest measurement data points.
+		if len(measurements) > MaxMeasurementsThreshold {
+			measurements = measurements[len(measurements)-MaxMeasurementsThreshold:]
+		}
+
+		e.measurements[i] = measurements
+	}
+
+	return nil
+}
+
+// NetworkSize instructs the Estimator to calculate the current network size estimate.
+func (e *Estimator) NetworkSize() (int32, error) {
+
+	// return cached calculation lock-free (fast path)
+	if estimate := atomic.LoadInt32(&e.netSizeCache); estimate != invalidEstimate {
+		logger.Debugw("Cached network size estimation", "estimate", estimate)
+		return estimate, nil
+	}
+
+	e.measurementsLk.Lock()
+	defer e.measurementsLk.Unlock()
+
+	// Check a second time: another goroutine may have been computing the estimate while
+	// we waited for the lock. If it just finished, there is no need to redo the computation.
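+	// (For reference: the code below fits a weighted least-squares line y = slope*x
+	// through the origin over (x_i, y_i) = (rank i+1, weighted average normed distance
+	// of the i-th closest peer). With N uniformly distributed peers the i-th closest
+	// is expected at distance roughly i/(N+1), so the final estimate is 1/slope - 1.)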
+ if estimate := e.netSizeCache; estimate != invalidEstimate { + logger.Debugw("Cached network size estimation", "estimate", estimate) + return estimate, nil + } + + // remove obsolete data points + e.garbageCollect() + + // initialize slices for linear fit + xs := make([]float64, e.bucketSize) + ys := make([]float64, e.bucketSize) + yerrs := make([]float64, e.bucketSize) + + for i := 0; i < e.bucketSize; i++ { + observationCount := len(e.measurements[i]) + + // If we don't have enough data to reasonably calculate the network size, return early + if observationCount < MinMeasurementsThreshold { + return 0, ErrNotEnoughData + } + + // Calculate Average Distance + sumDistances := 0.0 + sumWeights := 0.0 + for _, m := range e.measurements[i] { + sumDistances += m.weight * m.distance + sumWeights += m.weight + } + distanceAvg := sumDistances / sumWeights + + // Calculate standard deviation + sumWeightedDiffs := 0.0 + for _, m := range e.measurements[i] { + diff := m.distance - distanceAvg + sumWeightedDiffs += m.weight * diff * diff + } + variance := sumWeightedDiffs / (float64(observationCount-1) / float64(observationCount) * sumWeights) + distanceStd := math.Sqrt(variance) + + // Track calculations + xs[i] = float64(i + 1) + ys[i] = distanceAvg + yerrs[i] = distanceStd + } + + // Calculate linear regression (assumes the line goes through the origin) + var x2Sum, xySum float64 + for i, xi := range xs { + yi := ys[i] + xySum += yerrs[i] * xi * yi + x2Sum += yerrs[i] * xi * xi + } + slope := xySum / x2Sum + + // calculate final network size + netSize := int32(1/slope - 1) + + // cache network size estimation + atomic.StoreInt32(&e.netSizeCache, netSize) + + logger.Debugw("New network size estimation", "estimate", netSize) + return netSize, nil +} + +// calcWeight weighs data points exponentially less if they fall into a non-full bucket. +// It weighs distance estimates based on their CPLs and bucket levels. +// Bucket Level: 20 -> 1/2^0 -> weight: 1 +// Bucket Level: 17 -> 1/2^3 -> weight: 1/8 +// Bucket Level: 10 -> 1/2^10 -> weight: 1/1024 +// +// It can happen that the routing table doesn't have a full bucket, but we are tracking here +// a list of peers that would theoretically have been suitable for that bucket. Let's imagine +// there are only 13 peers in bucket 3 although there is space for 20. Now, the Track function +// gets a peers list (len 20) where all peers fall into bucket 3. The weight of this set of peers +// should be 1 instead of 1/2^7. +// I actually thought this cannot happen as peers would have been added to the routing table before +// the Track function gets called. But they seem sometimes not to be added. +func (e *Estimator) calcWeight(key string, peers []peer.ID) float64 { + + cpl := kbucket.CommonPrefixLen(kbucket.ConvertKey(key), e.localID) + bucketLevel := e.rt.NPeersForCpl(uint(cpl)) + + if bucketLevel < e.bucketSize { + // routing table doesn't have a full bucket. Check how many peers would fit into that bucket + peerLevel := 0 + for _, p := range peers { + if cpl == kbucket.CommonPrefixLen(kbucket.ConvertPeerID(p), e.localID) { + peerLevel += 1 + } + } + + if peerLevel > bucketLevel { + return math.Pow(2, float64(peerLevel-e.bucketSize)) + } + } + + return math.Pow(2, float64(bucketLevel-e.bucketSize)) +} + +// garbageCollect removes all measurements from the list that fell out of the measurement time window. 
+func (e *Estimator) garbageCollect() {
+	logger.Debug("Running garbage collection")
+
+	// the maximum age timestamp of the measurement data points
+	maxAgeTs := time.Now().Add(-MaxMeasurementAge)
+
+	for i := 0; i < e.bucketSize; i++ {
+
+		// find the smallest index of a measurement that is still in the allowed time window
+		// all measurements with a lower index should be discarded as they are too old
+		n := len(e.measurements[i])
+		idx := sort.Search(n, func(j int) bool {
+			return e.measurements[i][j].timestamp.After(maxAgeTs)
+		})
+
+		// if measurements are outside the allowed time window remove them.
+		// idx == n - there is no measurement in the allowed time window -> reset slice
+		// idx == 0 - the normal case where we only have valid entries
+		// idx != 0 - there is a mix of valid and obsolete entries
+		if idx == n {
+			e.measurements[i] = []measurement{}
+		} else if idx != 0 {
+			e.measurements[i] = e.measurements[i][idx:]
+		}
+	}
+}
diff --git a/go-libp2p-kad-dht/netsize/netsize_test.go b/go-libp2p-kad-dht/netsize/netsize_test.go
new file mode 100644
index 0000000..c6d3307
--- /dev/null
+++ b/go-libp2p-kad-dht/netsize/netsize_test.go
@@ -0,0 +1,44 @@
+package netsize
+
+import (
+	"testing"
+	"time"
+
+	kbucket "github.com/libp2p/go-libp2p-kbucket"
+	pt "github.com/libp2p/go-libp2p/core/test"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+	ks "github.com/whyrusleeping/go-keyspace"
+)
+
+func TestNewEstimator(t *testing.T) {
+	bucketSize := 20
+
+	pid, err := pt.RandPeerID()
+	require.NoError(t, err)
+
+	rt, err := kbucket.NewRoutingTable(bucketSize, kbucket.ConvertPeerID(pid), time.Second, nil, time.Second, nil)
+	require.NoError(t, err)
+
+	e := NewEstimator(pid, rt, bucketSize)
+
+	assert.Equal(t, rt, e.rt)
+	assert.Equal(t, kbucket.ConvertPeerID(pid), e.localID)
+	assert.Len(t, e.measurements, bucketSize)
+	assert.Equal(t, invalidEstimate, e.netSizeCache)
+}
+
+func TestNormedDistance(t *testing.T) {
+	pid, err := pt.RandPeerID()
+	require.NoError(t, err)
+
+	dist := NormedDistance(pid, ks.XORKeySpace.Key([]byte(pid)))
+	assert.Zero(t, dist)
+
+	pid2, err := pt.RandPeerID()
+	require.NoError(t, err)
+
+	// the distance between two distinct random peers lies strictly between 0 and 1
+	dist = NormedDistance(pid, ks.XORKeySpace.Key([]byte(pid2)))
+	assert.Greater(t, dist, 0.0)
+	assert.Less(t, dist, 1.0)
+}
diff --git a/go-libp2p-kad-dht/nofile_test.go b/go-libp2p-kad-dht/nofile_test.go
new file mode 100644
index 0000000..10dca55
--- /dev/null
+++ b/go-libp2p-kad-dht/nofile_test.go
@@ -0,0 +1,23 @@
+//go:build !windows && !wasm
+
+package dht
+
+import (
+	"fmt"
+	"os"
+	"testing"
+
+	"syscall"
+)
+
+func TestMain(m *testing.M) {
+	err := syscall.Setrlimit(syscall.RLIMIT_NOFILE, &syscall.Rlimit{
+		Cur: 4096,
+		Max: 4096,
+	})
+	if err != nil {
+		fmt.Println("failed to increase open file descriptor limit, can't run tests")
+		os.Exit(1)
+	}
+	os.Exit(m.Run())
+}
diff --git a/go-libp2p-kad-dht/optimizations.md b/go-libp2p-kad-dht/optimizations.md
new file mode 100644
index 0000000..214c179
--- /dev/null
+++ b/go-libp2p-kad-dht/optimizations.md
@@ -0,0 +1,7 @@
+# Client-side optimizations
+
+This document reflects client-side optimizations that are implemented in this repository. Client-side optimizations are not part of the [Kademlia spec](https://github.com/libp2p/specs/tree/master/kad-dht), and are not required to be implemented on all clients.
+
+## Checking before Adding
+
+A Kademlia server should try to add remote peers querying it to its routing table.
However, the Kademlia server has no guarantee that remote peers issuing requests are able to answer Kademlia requests correctly, even though they advertise speaking the Kademlia server protocol. It is important that only server nodes able to answer Kademlia requests end up in other peers' routing tables. Hence, before adding a remote peer to the Kademlia server's routing table, the Kademlia server will send a trivial `FIND_NODE` request to the remote peer, and add it to its routing table only if it is able to provide a valid response. \ No newline at end of file diff --git a/go-libp2p-kad-dht/opts/options.go b/go-libp2p-kad-dht/opts/options.go new file mode 100644 index 0000000..a8619ad --- /dev/null +++ b/go-libp2p-kad-dht/opts/options.go @@ -0,0 +1,68 @@ +// Deprecated: Options are now defined in the root package. + +package dhtopts + +import ( + "time" + + ds "github.com/ipfs/go-datastore" + dht "github.com/libp2p/go-libp2p-kad-dht" + record "github.com/libp2p/go-libp2p-record" +) + +type Option = dht.Option + +// Deprecated: use dht.RoutingTableLatencyTolerance +func RoutingTableLatencyTolerance(latency time.Duration) dht.Option { + return dht.RoutingTableLatencyTolerance(latency) +} + +// Deprecated: use dht.RoutingTableRefreshQueryTimeout +func RoutingTableRefreshQueryTimeout(timeout time.Duration) dht.Option { + return dht.RoutingTableRefreshQueryTimeout(timeout) +} + +// Deprecated: use dht.RoutingTableRefreshPeriod +func RoutingTableRefreshPeriod(period time.Duration) dht.Option { + return dht.RoutingTableRefreshPeriod(period) +} + +// Deprecated: use dht.Datastore +func Datastore(ds ds.Batching) dht.Option { return dht.Datastore(ds) } + +// Client configures whether or not the DHT operates in client-only mode. +// +// Defaults to false (which is ModeAuto). +// Deprecated: use dht.Mode(ModeClient) +func Client(only bool) dht.Option { + if only { + return dht.Mode(dht.ModeClient) + } + return dht.Mode(dht.ModeAuto) +} + +// Deprecated: use dht.Mode +func Mode(m dht.ModeOpt) dht.Option { return dht.Mode(m) } + +// Deprecated: use dht.Validator +func Validator(v record.Validator) dht.Option { return dht.Validator(v) } + +// Deprecated: use dht.NamespacedValidator +func NamespacedValidator(ns string, v record.Validator) dht.Option { + return dht.NamespacedValidator(ns, v) +} + +// Deprecated: use dht.BucketSize +func BucketSize(bucketSize int) dht.Option { return dht.BucketSize(bucketSize) } + +// Deprecated: use dht.MaxRecordAge +func MaxRecordAge(maxAge time.Duration) dht.Option { return dht.MaxRecordAge(maxAge) } + +// Deprecated: use dht.DisableAutoRefresh +func DisableAutoRefresh() dht.Option { return dht.DisableAutoRefresh() } + +// Deprecated: use dht.DisableProviders +func DisableProviders() dht.Option { return dht.DisableProviders() } + +// Deprecated: use dht.DisableValues +func DisableValues() dht.Option { return dht.DisableValues() } diff --git a/go-libp2p-kad-dht/pb/Makefile b/go-libp2p-kad-dht/pb/Makefile new file mode 100644 index 0000000..eb14b57 --- /dev/null +++ b/go-libp2p-kad-dht/pb/Makefile @@ -0,0 +1,11 @@ +PB = $(wildcard *.proto) +GO = $(PB:.proto=.pb.go) + +all: $(GO) + +%.pb.go: %.proto + protoc --proto_path=$(GOPATH)/src:. --gogofast_out=. 
$< + +clean: + rm -f *.pb.go + rm -f *.go diff --git a/go-libp2p-kad-dht/pb/bytestring.go b/go-libp2p-kad-dht/pb/bytestring.go new file mode 100644 index 0000000..f20f197 --- /dev/null +++ b/go-libp2p-kad-dht/pb/bytestring.go @@ -0,0 +1,42 @@ +package dht_pb + +import ( + "encoding/json" +) + +type byteString string + +func (b byteString) Marshal() ([]byte, error) { + return []byte(b), nil +} + +func (b *byteString) MarshalTo(data []byte) (int, error) { + return copy(data, *b), nil +} + +func (b *byteString) Unmarshal(data []byte) error { + *b = byteString(data) + return nil +} + +func (b *byteString) Size() int { + return len(*b) +} + +func (b byteString) MarshalJSON() ([]byte, error) { + return json.Marshal([]byte(b)) +} + +func (b *byteString) UnmarshalJSON(data []byte) error { + var buf []byte + err := json.Unmarshal(data, &buf) + if err != nil { + return err + } + *b = byteString(buf) + return nil +} + +func (b byteString) Equal(other byteString) bool { + return b == other +} diff --git a/go-libp2p-kad-dht/pb/dht.pb.go b/go-libp2p-kad-dht/pb/dht.pb.go new file mode 100644 index 0000000..cdf9da0 --- /dev/null +++ b/go-libp2p-kad-dht/pb/dht.pb.go @@ -0,0 +1,959 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: dht.proto + +package dht_pb + +import ( + fmt "fmt" + io "io" + math "math" + math_bits "math/bits" + + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + pb "github.com/libp2p/go-libp2p-record/pb" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type Message_MessageType int32 + +const ( + Message_PUT_VALUE Message_MessageType = 0 + Message_GET_VALUE Message_MessageType = 1 + Message_ADD_PROVIDER Message_MessageType = 2 + Message_GET_PROVIDERS Message_MessageType = 3 + Message_FIND_NODE Message_MessageType = 4 + Message_PING Message_MessageType = 5 +) + +var Message_MessageType_name = map[int32]string{ + 0: "PUT_VALUE", + 1: "GET_VALUE", + 2: "ADD_PROVIDER", + 3: "GET_PROVIDERS", + 4: "FIND_NODE", + 5: "PING", +} + +var Message_MessageType_value = map[string]int32{ + "PUT_VALUE": 0, + "GET_VALUE": 1, + "ADD_PROVIDER": 2, + "GET_PROVIDERS": 3, + "FIND_NODE": 4, + "PING": 5, +} + +func (x Message_MessageType) String() string { + return proto.EnumName(Message_MessageType_name, int32(x)) +} + +func (Message_MessageType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_616a434b24c97ff4, []int{0, 0} +} + +type Message_ConnectionType int32 + +const ( + // sender does not have a connection to peer, and no extra information (default) + Message_NOT_CONNECTED Message_ConnectionType = 0 + // sender has a live connection to peer + Message_CONNECTED Message_ConnectionType = 1 + // sender recently connected to peer + Message_CAN_CONNECT Message_ConnectionType = 2 + // sender recently tried to connect to peer repeatedly but failed to connect + // ("try" here is loose, but this should signal "made strong effort, failed") + Message_CANNOT_CONNECT Message_ConnectionType = 3 +) + +var Message_ConnectionType_name = map[int32]string{ + 0: "NOT_CONNECTED", + 1: "CONNECTED", + 2: "CAN_CONNECT", + 3: "CANNOT_CONNECT", +} + +var Message_ConnectionType_value = map[string]int32{ + "NOT_CONNECTED": 0, + "CONNECTED": 1, + "CAN_CONNECT": 2, + "CANNOT_CONNECT": 3, +} + +func (x Message_ConnectionType) String() string { + return proto.EnumName(Message_ConnectionType_name, int32(x)) +} + +func (Message_ConnectionType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_616a434b24c97ff4, []int{0, 1} +} + +type Message struct { + // defines what type of message it is. + Type Message_MessageType `protobuf:"varint,1,opt,name=type,proto3,enum=dht.pb.Message_MessageType" json:"type,omitempty"` + // defines what coral cluster level this query/response belongs to. + // in case we want to implement coral's cluster rings in the future. + ClusterLevelRaw int32 `protobuf:"varint,10,opt,name=clusterLevelRaw,proto3" json:"clusterLevelRaw,omitempty"` + // Used to specify the key associated with this message. 
+ // PUT_VALUE, GET_VALUE, ADD_PROVIDER, GET_PROVIDERS + Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` + // Used to return a value + // PUT_VALUE, GET_VALUE + Record *pb.Record `protobuf:"bytes,3,opt,name=record,proto3" json:"record,omitempty"` + // Used to return peers closer to a key in a query + // GET_VALUE, GET_PROVIDERS, FIND_NODE + CloserPeers []Message_Peer `protobuf:"bytes,8,rep,name=closerPeers,proto3" json:"closerPeers"` + // Used to return Providers + // GET_VALUE, ADD_PROVIDER, GET_PROVIDERS + ProviderPeers []Message_Peer `protobuf:"bytes,9,rep,name=providerPeers,proto3" json:"providerPeers"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Message) Reset() { *m = Message{} } +func (m *Message) String() string { return proto.CompactTextString(m) } +func (*Message) ProtoMessage() {} +func (*Message) Descriptor() ([]byte, []int) { + return fileDescriptor_616a434b24c97ff4, []int{0} +} +func (m *Message) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Message) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Message.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Message) XXX_Merge(src proto.Message) { + xxx_messageInfo_Message.Merge(m, src) +} +func (m *Message) XXX_Size() int { + return m.Size() +} +func (m *Message) XXX_DiscardUnknown() { + xxx_messageInfo_Message.DiscardUnknown(m) +} + +var xxx_messageInfo_Message proto.InternalMessageInfo + +func (m *Message) GetType() Message_MessageType { + if m != nil { + return m.Type + } + return Message_PUT_VALUE +} + +func (m *Message) GetClusterLevelRaw() int32 { + if m != nil { + return m.ClusterLevelRaw + } + return 0 +} + +func (m *Message) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *Message) GetRecord() *pb.Record { + if m != nil { + return m.Record + } + return nil +} + +func (m *Message) GetCloserPeers() []Message_Peer { + if m != nil { + return m.CloserPeers + } + return nil +} + +func (m *Message) GetProviderPeers() []Message_Peer { + if m != nil { + return m.ProviderPeers + } + return nil +} + +type Message_Peer struct { + // ID of a given peer. 
+ Id byteString `protobuf:"bytes,1,opt,name=id,proto3,customtype=byteString" json:"id"` + // multiaddrs for a given peer + Addrs [][]byte `protobuf:"bytes,2,rep,name=addrs,proto3" json:"addrs,omitempty"` + // used to signal the sender's connection capabilities to the peer + Connection Message_ConnectionType `protobuf:"varint,3,opt,name=connection,proto3,enum=dht.pb.Message_ConnectionType" json:"connection,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Message_Peer) Reset() { *m = Message_Peer{} } +func (m *Message_Peer) String() string { return proto.CompactTextString(m) } +func (*Message_Peer) ProtoMessage() {} +func (*Message_Peer) Descriptor() ([]byte, []int) { + return fileDescriptor_616a434b24c97ff4, []int{0, 0} +} +func (m *Message_Peer) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Message_Peer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Message_Peer.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Message_Peer) XXX_Merge(src proto.Message) { + xxx_messageInfo_Message_Peer.Merge(m, src) +} +func (m *Message_Peer) XXX_Size() int { + return m.Size() +} +func (m *Message_Peer) XXX_DiscardUnknown() { + xxx_messageInfo_Message_Peer.DiscardUnknown(m) +} + +var xxx_messageInfo_Message_Peer proto.InternalMessageInfo + +func (m *Message_Peer) GetAddrs() [][]byte { + if m != nil { + return m.Addrs + } + return nil +} + +func (m *Message_Peer) GetConnection() Message_ConnectionType { + if m != nil { + return m.Connection + } + return Message_NOT_CONNECTED +} + +func init() { + proto.RegisterEnum("dht.pb.Message_MessageType", Message_MessageType_name, Message_MessageType_value) + proto.RegisterEnum("dht.pb.Message_ConnectionType", Message_ConnectionType_name, Message_ConnectionType_value) + proto.RegisterType((*Message)(nil), "dht.pb.Message") + proto.RegisterType((*Message_Peer)(nil), "dht.pb.Message.Peer") +} + +func init() { proto.RegisterFile("dht.proto", fileDescriptor_616a434b24c97ff4) } + +var fileDescriptor_616a434b24c97ff4 = []byte{ + // 469 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x92, 0xb1, 0x6f, 0x9b, 0x40, + 0x18, 0xc5, 0x73, 0x80, 0xdd, 0xf8, 0x03, 0x3b, 0xe4, 0x94, 0x01, 0xb9, 0x92, 0x83, 0x3c, 0xd1, + 0xc1, 0x20, 0xd1, 0xb5, 0xaa, 0x6a, 0x03, 0x8d, 0x2c, 0xa5, 0xd8, 0xba, 0x38, 0xe9, 0x68, 0x19, + 0xb8, 0x12, 0x54, 0xd7, 0x87, 0x00, 0xa7, 0xf2, 0xd6, 0x3f, 0x2f, 0x63, 0xe7, 0x0e, 0x51, 0xe5, + 0xa9, 0x7f, 0x46, 0xc5, 0x11, 0x5a, 0xec, 0x25, 0x13, 0xef, 0x7d, 0xf7, 0x7e, 0xe2, 0xdd, 0xa7, + 0x83, 0x4e, 0x74, 0x5f, 0x98, 0x69, 0xc6, 0x0a, 0x86, 0xdb, 0x5c, 0x06, 0x7d, 0x3b, 0x4e, 0x8a, + 0xfb, 0x6d, 0x60, 0x86, 0xec, 0x9b, 0xb5, 0x4e, 0x82, 0xd4, 0x4e, 0xad, 0x98, 0x8d, 0x2a, 0x35, + 0xca, 0x68, 0xc8, 0xb2, 0xc8, 0x4a, 0x03, 0xab, 0x52, 0x15, 0xdb, 0x1f, 0x35, 0x98, 0x98, 0xc5, + 0xcc, 0xe2, 0xe3, 0x60, 0xfb, 0x85, 0x3b, 0x6e, 0xb8, 0xaa, 0xe2, 0xc3, 0x3f, 0x12, 0xbc, 0xfa, + 0x44, 0xf3, 0x7c, 0x15, 0x53, 0x6c, 0x81, 0x54, 0xec, 0x52, 0xaa, 0x21, 0x1d, 0x19, 0x3d, 0xfb, + 0xb5, 0x59, 0xb5, 0x30, 0x9f, 0x8f, 0xeb, 0xef, 0x62, 0x97, 0x52, 0xc2, 0x83, 0xd8, 0x80, 0xb3, + 0x70, 0xbd, 0xcd, 0x0b, 0x9a, 0x5d, 0xd3, 0x07, 0xba, 0x26, 0xab, 0xef, 0x1a, 0xe8, 0xc8, 0x68, + 0x91, 0xe3, 0x31, 0x56, 0x41, 0xfc, 0x4a, 0x77, 0x9a, 
0xa0, 0x23, 0x43, 0x21, 0xa5, 0xc4, 0x6f, + 0xa0, 0x5d, 0xf5, 0xd6, 0x44, 0x1d, 0x19, 0xb2, 0x7d, 0x6e, 0xd6, 0xd7, 0x08, 0x4c, 0xc2, 0x15, + 0x79, 0x0e, 0xe0, 0x77, 0x20, 0x87, 0x6b, 0x96, 0xd3, 0x6c, 0x4e, 0x69, 0x96, 0x6b, 0xa7, 0xba, + 0x68, 0xc8, 0xf6, 0xc5, 0x71, 0xbd, 0xf2, 0x70, 0x22, 0x3d, 0x3e, 0x5d, 0x9e, 0x90, 0x66, 0x1c, + 0x7f, 0x80, 0x6e, 0x9a, 0xb1, 0x87, 0x24, 0xaa, 0xf9, 0xce, 0x8b, 0xfc, 0x21, 0xd0, 0xff, 0x81, + 0x40, 0x2a, 0x15, 0x1e, 0x82, 0x90, 0x44, 0x7c, 0x3d, 0xca, 0x04, 0x97, 0xc9, 0x5f, 0x4f, 0x97, + 0x10, 0xec, 0x0a, 0x7a, 0x53, 0x64, 0xc9, 0x26, 0x26, 0x42, 0x12, 0xe1, 0x0b, 0x68, 0xad, 0xa2, + 0x28, 0xcb, 0x35, 0x41, 0x17, 0x0d, 0x85, 0x54, 0x06, 0xbf, 0x07, 0x08, 0xd9, 0x66, 0x43, 0xc3, + 0x22, 0x61, 0x1b, 0x7e, 0xe3, 0x9e, 0x3d, 0x38, 0x6e, 0xe0, 0xfc, 0x4b, 0xf0, 0x1d, 0x37, 0x88, + 0x61, 0x02, 0x72, 0x63, 0xfd, 0xb8, 0x0b, 0x9d, 0xf9, 0xed, 0x62, 0x79, 0x37, 0xbe, 0xbe, 0xf5, + 0xd4, 0x93, 0xd2, 0x5e, 0x79, 0xb5, 0x45, 0x58, 0x05, 0x65, 0xec, 0xba, 0xcb, 0x39, 0x99, 0xdd, + 0x4d, 0x5d, 0x8f, 0xa8, 0x02, 0x3e, 0x87, 0x6e, 0x19, 0xa8, 0x27, 0x37, 0xaa, 0x58, 0x32, 0x1f, + 0xa7, 0xbe, 0xbb, 0xf4, 0x67, 0xae, 0xa7, 0x4a, 0xf8, 0x14, 0xa4, 0xf9, 0xd4, 0xbf, 0x52, 0x5b, + 0xc3, 0xcf, 0xd0, 0x3b, 0x2c, 0x52, 0xd2, 0xfe, 0x6c, 0xb1, 0x74, 0x66, 0xbe, 0xef, 0x39, 0x0b, + 0xcf, 0xad, 0xfe, 0xf8, 0xdf, 0x22, 0x7c, 0x06, 0xb2, 0x33, 0xf6, 0xeb, 0x84, 0x2a, 0x60, 0x0c, + 0x3d, 0x67, 0xec, 0x37, 0x28, 0x55, 0x9c, 0x28, 0x8f, 0xfb, 0x01, 0xfa, 0xb9, 0x1f, 0xa0, 0xdf, + 0xfb, 0x01, 0x0a, 0xda, 0xfc, 0xfd, 0xbd, 0xfd, 0x1b, 0x00, 0x00, 0xff, 0xff, 0x8d, 0x1a, 0xa1, + 0xbe, 0xf7, 0x02, 0x00, 0x00, +} + +func (m *Message) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Message) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Message) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.ClusterLevelRaw != 0 { + i = encodeVarintDht(dAtA, i, uint64(m.ClusterLevelRaw)) + i-- + dAtA[i] = 0x50 + } + if len(m.ProviderPeers) > 0 { + for iNdEx := len(m.ProviderPeers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ProviderPeers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDht(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + } + if len(m.CloserPeers) > 0 { + for iNdEx := len(m.CloserPeers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.CloserPeers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDht(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + } + if m.Record != nil { + { + size, err := m.Record.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDht(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if len(m.Key) > 0 { + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintDht(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0x12 + } + if m.Type != 0 { + i = encodeVarintDht(dAtA, i, uint64(m.Type)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Message_Peer) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err 
!= nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Message_Peer) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Message_Peer) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Connection != 0 { + i = encodeVarintDht(dAtA, i, uint64(m.Connection)) + i-- + dAtA[i] = 0x18 + } + if len(m.Addrs) > 0 { + for iNdEx := len(m.Addrs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Addrs[iNdEx]) + copy(dAtA[i:], m.Addrs[iNdEx]) + i = encodeVarintDht(dAtA, i, uint64(len(m.Addrs[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + { + size := m.Id.Size() + i -= size + if _, err := m.Id.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintDht(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintDht(dAtA []byte, offset int, v uint64) int { + offset -= sovDht(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Message) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Type != 0 { + n += 1 + sovDht(uint64(m.Type)) + } + l = len(m.Key) + if l > 0 { + n += 1 + l + sovDht(uint64(l)) + } + if m.Record != nil { + l = m.Record.Size() + n += 1 + l + sovDht(uint64(l)) + } + if len(m.CloserPeers) > 0 { + for _, e := range m.CloserPeers { + l = e.Size() + n += 1 + l + sovDht(uint64(l)) + } + } + if len(m.ProviderPeers) > 0 { + for _, e := range m.ProviderPeers { + l = e.Size() + n += 1 + l + sovDht(uint64(l)) + } + } + if m.ClusterLevelRaw != 0 { + n += 1 + sovDht(uint64(m.ClusterLevelRaw)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Message_Peer) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Id.Size() + n += 1 + l + sovDht(uint64(l)) + if len(m.Addrs) > 0 { + for _, b := range m.Addrs { + l = len(b) + n += 1 + l + sovDht(uint64(l)) + } + } + if m.Connection != 0 { + n += 1 + sovDht(uint64(m.Connection)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovDht(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozDht(x uint64) (n int) { + return sovDht(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Message) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDht + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Message: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Message: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDht + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= Message_MessageType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDht + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthDht + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthDht + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) + if m.Key == nil { + m.Key = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Record", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDht + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDht + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDht + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Record == nil { + m.Record = &pb.Record{} + } + if err := m.Record.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CloserPeers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDht + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDht + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDht + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CloserPeers = append(m.CloserPeers, Message_Peer{}) + if err := m.CloserPeers[len(m.CloserPeers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProviderPeers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDht + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDht + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDht + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ProviderPeers = append(m.ProviderPeers, Message_Peer{}) + if err := m.ProviderPeers[len(m.ProviderPeers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterLevelRaw", wireType) + } + m.ClusterLevelRaw = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDht + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ClusterLevelRaw |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipDht(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthDht + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Message_Peer) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDht + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Peer: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Peer: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDht + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthDht + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthDht + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Id.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Addrs", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDht + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthDht + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthDht + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Addrs = append(m.Addrs, make([]byte, postIndex-iNdEx)) + copy(m.Addrs[len(m.Addrs)-1], dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Connection", wireType) + } + m.Connection = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDht + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Connection |= Message_ConnectionType(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipDht(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthDht + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipDht(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDht + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDht + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDht + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthDht + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupDht + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthDht + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthDht = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowDht = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupDht = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go-libp2p-kad-dht/pb/dht.proto b/go-libp2p-kad-dht/pb/dht.proto new file mode 100644 index 0000000..18bfd74 --- /dev/null +++ b/go-libp2p-kad-dht/pb/dht.proto @@ -0,0 +1,72 @@ +// In order to re-generate the golang packages for `Message` you will need... +// 1. Protobuf binary (tested with protoc 3.0.0). - https://github.com/gogo/protobuf/releases +// 2. Gogo Protobuf (tested with gogo 0.3). - https://github.com/gogo/protobuf +// 3. To have cloned `libp2p/go-libp2p-{record,kad-dht}` under the same directory. +// Now from `libp2p/go-libp2p-kad-dht/pb` you can run... +// `protoc --gogo_out=. --proto_path=../../go-libp2p-record/pb/ --proto_path=./ dht.proto` + +syntax = "proto3"; +package dht.pb; + +import "github.com/libp2p/go-libp2p-record/pb/record.proto"; +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; + +message Message { + enum MessageType { + PUT_VALUE = 0; + GET_VALUE = 1; + ADD_PROVIDER = 2; + GET_PROVIDERS = 3; + FIND_NODE = 4; + PING = 5; + } + + enum ConnectionType { + // sender does not have a connection to peer, and no extra information (default) + NOT_CONNECTED = 0; + + // sender has a live connection to peer + CONNECTED = 1; + + // sender recently connected to peer + CAN_CONNECT = 2; + + // sender recently tried to connect to peer repeatedly but failed to connect + // ("try" here is loose, but this should signal "made strong effort, failed") + CANNOT_CONNECT = 3; + } + + message Peer { + // ID of a given peer. + bytes id = 1 [(gogoproto.customtype) = "byteString", (gogoproto.nullable) = false]; + + // multiaddrs for a given peer + repeated bytes addrs = 2; + + // used to signal the sender's connection capabilities to the peer + ConnectionType connection = 3; + } + + // defines what type of message it is. + MessageType type = 1; + + // defines what coral cluster level this query/response belongs to. 
+ // in case we want to implement coral's cluster rings in the future. + int32 clusterLevelRaw = 10; + + // Used to specify the key associated with this message. + // PUT_VALUE, GET_VALUE, ADD_PROVIDER, GET_PROVIDERS + bytes key = 2; + + // Used to return a value + // PUT_VALUE, GET_VALUE + record.pb.Record record = 3; + + // Used to return peers closer to a key in a query + // GET_VALUE, GET_PROVIDERS, FIND_NODE + repeated Peer closerPeers = 8 [(gogoproto.nullable) = false]; + + // Used to return Providers + // GET_VALUE, ADD_PROVIDER, GET_PROVIDERS + repeated Peer providerPeers = 9 [(gogoproto.nullable) = false]; +} diff --git a/go-libp2p-kad-dht/pb/message.go b/go-libp2p-kad-dht/pb/message.go new file mode 100644 index 0000000..2e4f3d6 --- /dev/null +++ b/go-libp2p-kad-dht/pb/message.go @@ -0,0 +1,171 @@ +package dht_pb + +import ( + "github.com/libp2p/go-libp2p/core/network" + "github.com/libp2p/go-libp2p/core/peer" + + logging "github.com/ipfs/go-log/v2" + ma "github.com/multiformats/go-multiaddr" +) + +var log = logging.Logger("dht.pb") + +type PeerRoutingInfo struct { + peer.AddrInfo + network.Connectedness +} + +// NewMessage constructs a new DHT message with the given type, key, and level +func NewMessage(typ Message_MessageType, key []byte, level int) *Message { + m := &Message{ + Type: typ, + Key: key, + } + m.SetClusterLevel(level) + return m +} + +func peerRoutingInfoToPBPeer(p PeerRoutingInfo) Message_Peer { + var pbp Message_Peer + + pbp.Addrs = make([][]byte, len(p.Addrs)) + for i, maddr := range p.Addrs { + pbp.Addrs[i] = maddr.Bytes() // Bytes, not String. Compressed. + } + pbp.Id = byteString(p.ID) + pbp.Connection = ConnectionType(p.Connectedness) + return pbp +} + +func peerInfoToPBPeer(p peer.AddrInfo) Message_Peer { + var pbp Message_Peer + + pbp.Addrs = make([][]byte, len(p.Addrs)) + for i, maddr := range p.Addrs { + pbp.Addrs[i] = maddr.Bytes() // Bytes, not String. Compressed. + } + pbp.Id = byteString(p.ID) + return pbp +} + +// PBPeerToPeerInfo turns a Message_Peer into its peer.AddrInfo counterpart +func PBPeerToPeerInfo(pbp Message_Peer) peer.AddrInfo { + return peer.AddrInfo{ + ID: peer.ID(pbp.Id), + Addrs: pbp.Addresses(), + } +} + +// RawPeerInfosToPBPeers converts a slice of peer.AddrInfos into a slice of Message_Peers, +// ready to go out on the wire. +func RawPeerInfosToPBPeers(peers []peer.AddrInfo) []Message_Peer { + pbpeers := make([]Message_Peer, len(peers)) + for i, p := range peers { + pbpeers[i] = peerInfoToPBPeer(p) + } + return pbpeers +} + +// PeerInfosToPBPeers converts the given []peer.AddrInfo into a set of []Message_Peer, +// which can be written to a message and sent out. The key thing this function +// does (in addition to RawPeerInfosToPBPeers) is set the ConnectionType with +// information from the given network.Network. +func PeerInfosToPBPeers(n network.Network, peers []peer.AddrInfo) []Message_Peer { + pbps := RawPeerInfosToPBPeers(peers) + for i := range pbps { + // assign through the index: ranging by value would set the field on a copy and the update would be lost + pbps[i].Connection = ConnectionType(n.Connectedness(peers[i].ID)) + } + return pbps +} + +func PeerRoutingInfosToPBPeers(peers []PeerRoutingInfo) []Message_Peer { + pbpeers := make([]Message_Peer, len(peers)) + for i, p := range peers { + pbpeers[i] = peerRoutingInfoToPBPeer(p) + } + return pbpeers +} + +// PBPeersToPeerInfos converts the given []Message_Peer into []*peer.AddrInfo. +// Invalid addresses will be silently omitted.
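+// For example, a response's closer peers can be flattened into dialable +// AddrInfos (a sketch; respMsg is a hypothetical received *Message): +// +//	for _, ai := range PBPeersToPeerInfos(respMsg.GetCloserPeers()) { +//		fmt.Println(ai.ID, ai.Addrs) +//	}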
+func PBPeersToPeerInfos(pbps []Message_Peer) []*peer.AddrInfo { + peers := make([]*peer.AddrInfo, 0, len(pbps)) + for _, pbp := range pbps { + ai := PBPeerToPeerInfo(pbp) + peers = append(peers, &ai) + } + return peers +} + +// Addresses returns the multiaddrs associated with the Message_Peer entry +func (m *Message_Peer) Addresses() []ma.Multiaddr { + if m == nil { + return nil + } + + maddrs := make([]ma.Multiaddr, 0, len(m.Addrs)) + for _, addr := range m.Addrs { + maddr, err := ma.NewMultiaddrBytes(addr) + if err != nil { + log.Debugw("error decoding multiaddr for peer", "peer", peer.ID(m.Id), "error", err) + continue + } + + maddrs = append(maddrs, maddr) + } + return maddrs +} + +// GetClusterLevel gets and adjusts the cluster level on the message. +// a +/- 1 adjustment is needed to distinguish a valid first level (1) and +// default "no value" protobuf behavior (0) +func (m *Message) GetClusterLevel() int { + level := m.GetClusterLevelRaw() - 1 + if level < 0 { + return 0 + } + return int(level) +} + +// SetClusterLevel adjusts and sets the cluster level on the message. +// a +/- 1 adjustment is needed to distinguish a valid first level (1) and +// default "no value" protobuf behavior (0) +func (m *Message) SetClusterLevel(level int) { + lvl := int32(level) + m.ClusterLevelRaw = lvl + 1 +} + +// ConnectionType returns a Message_ConnectionType associated with the +// network.Connectedness. +func ConnectionType(c network.Connectedness) Message_ConnectionType { + switch c { + default: + return Message_NOT_CONNECTED + case network.NotConnected: + return Message_NOT_CONNECTED + case network.Connected: + return Message_CONNECTED + case network.CanConnect: + return Message_CAN_CONNECT + case network.CannotConnect: + return Message_CANNOT_CONNECT + } +} + +// Connectedness returns a network.Connectedness associated with the +// Message_ConnectionType. +func Connectedness(c Message_ConnectionType) network.Connectedness { + switch c { + default: + return network.NotConnected + case Message_NOT_CONNECTED: + return network.NotConnected + case Message_CONNECTED: + return network.Connected + case Message_CAN_CONNECT: + return network.CanConnect + case Message_CANNOT_CONNECT: + return network.CannotConnect + } +} diff --git a/go-libp2p-kad-dht/pb/message_test.go b/go-libp2p-kad-dht/pb/message_test.go new file mode 100644 index 0000000..71f4abd --- /dev/null +++ b/go-libp2p-kad-dht/pb/message_test.go @@ -0,0 +1,15 @@ +package dht_pb + +import ( + "testing" +) + +func TestBadAddrsDontReturnNil(t *testing.T) { + mp := new(Message_Peer) + mp.Addrs = [][]byte{[]byte("NOT A VALID MULTIADDR")} + + addrs := mp.Addresses() + if len(addrs) > 0 { + t.Fatal("shouldn't have any multiaddrs") + } +} diff --git a/go-libp2p-kad-dht/pb/protocol_messenger.go b/go-libp2p-kad-dht/pb/protocol_messenger.go new file mode 100644 index 0000000..7971db6 --- /dev/null +++ b/go-libp2p-kad-dht/pb/protocol_messenger.go @@ -0,0 +1,261 @@ +package dht_pb + +import ( + "bytes" + "context" + "errors" + "fmt" + + logging "github.com/ipfs/go-log/v2" + recpb "github.com/libp2p/go-libp2p-record/pb" + "github.com/libp2p/go-libp2p/core/host" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/multiformats/go-multihash" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + + "github.com/libp2p/go-libp2p-kad-dht/internal" +) + +var logger = logging.Logger("dht") + +// ProtocolMessenger can be used for sending DHT messages to peers and processing their responses.
+// This decouples the wire protocol format from both the DHT protocol implementation and the +// routing.Routing interface. +// +// Note: the ProtocolMessenger's MessageSender still needs to deal with some wire protocol details such as using +// varint-delimited protobufs +type ProtocolMessenger struct { + m MessageSender +} + +type ProtocolMessengerOption func(*ProtocolMessenger) error + +// NewProtocolMessenger creates a new ProtocolMessenger that is used for sending DHT messages to peers and processing +// their responses. +func NewProtocolMessenger(msgSender MessageSender, opts ...ProtocolMessengerOption) (*ProtocolMessenger, error) { + pm := &ProtocolMessenger{ + m: msgSender, + } + + for _, o := range opts { + if err := o(pm); err != nil { + return nil, err + } + } + + return pm, nil +} + +type MessageSenderWithDisconnect interface { + MessageSender + + OnDisconnect(context.Context, peer.ID) +} + +// MessageSender handles sending wire protocol messages to a given peer +type MessageSender interface { + // SendRequest sends a peer a message and waits for its response + SendRequest(ctx context.Context, p peer.ID, pmes *Message) (*Message, error) + // SendMessage sends a peer a message without waiting on a response + SendMessage(ctx context.Context, p peer.ID, pmes *Message) error +} + +// PutValue asks a peer to store the given key/value pair. +func (pm *ProtocolMessenger) PutValue(ctx context.Context, p peer.ID, rec *recpb.Record) (err error) { + ctx, span := internal.StartSpan(ctx, "ProtocolMessenger.PutValue") + defer span.End() + if span.IsRecording() { + span.SetAttributes(attribute.Stringer("to", p), attribute.Stringer("record", rec)) + defer func() { + if err != nil { + span.SetStatus(codes.Error, err.Error()) + } + }() + } + + pmes := NewMessage(Message_PUT_VALUE, rec.Key, 0) + pmes.Record = rec + rpmes, err := pm.m.SendRequest(ctx, p, pmes) + if err != nil { + logger.Debugw("failed to put value to peer", "to", p, "key", internal.LoggableRecordKeyBytes(rec.Key), "error", err) + return err + } + + if !bytes.Equal(rpmes.GetRecord().Value, pmes.GetRecord().Value) { + const errStr = "value not put correctly" + logger.Infow(errStr, "put-message", pmes, "get-message", rpmes) + return errors.New(errStr) + } + + return nil +} + +// GetValue asks a peer for the value corresponding to the given key. Also returns the K closest peers to the key +// as described in GetClosestPeers. +func (pm *ProtocolMessenger) GetValue(ctx context.Context, p peer.ID, key string) (record *recpb.Record, closerPeers []*peer.AddrInfo, err error) { + ctx, span := internal.StartSpan(ctx, "ProtocolMessenger.GetValue") + defer span.End() + if span.IsRecording() { + span.SetAttributes(attribute.Stringer("to", p), internal.KeyAsAttribute("key", key)) + defer func() { + if err != nil { + span.SetStatus(codes.Error, err.Error()) + } else { + peers := make([]string, len(closerPeers)) + for i, v := range closerPeers { + peers[i] = v.String() + } + span.SetAttributes( + attribute.Stringer("record", record), + attribute.StringSlice("closestPeers", peers), + ) + } + }() + } + + pmes := NewMessage(Message_GET_VALUE, []byte(key), 0) + respMsg, err := pm.m.SendRequest(ctx, p, pmes) + if err != nil { + return nil, nil, err + } + + // Perhaps we were given closer peers + peers := PBPeersToPeerInfos(respMsg.GetCloserPeers()) + + if rec := respMsg.GetRecord(); rec != nil { + // Success!
We were given the value + logger.Debug("got value") + + // Check that the record matches the one we are looking for (validation of the record does not happen here) + if !bytes.Equal([]byte(key), rec.GetKey()) { + logger.Debug("received incorrect record") + return nil, nil, internal.ErrIncorrectRecord + } + + return rec, peers, err + } + + return nil, peers, nil +} + +// GetClosestPeers asks a peer to return the K (a DHT-wide parameter) DHT server peers closest in XOR space to the id. +// Note: If the peer happens to know another peer whose peerID exactly matches the given id it will return that peer +// even if that peer is not a DHT server node. +func (pm *ProtocolMessenger) GetClosestPeers(ctx context.Context, p peer.ID, id peer.ID) (closerPeers []*peer.AddrInfo, err error) { + ctx, span := internal.StartSpan(ctx, "ProtocolMessenger.GetClosestPeers") + defer span.End() + if span.IsRecording() { + span.SetAttributes(attribute.Stringer("to", p), attribute.Stringer("key", id)) + defer func() { + if err != nil { + span.SetStatus(codes.Error, err.Error()) + } else { + peers := make([]string, len(closerPeers)) + for i, v := range closerPeers { + peers[i] = v.String() + } + span.SetAttributes(attribute.StringSlice("peers", peers)) + } + }() + } + + pmes := NewMessage(Message_FIND_NODE, []byte(id), 0) + respMsg, err := pm.m.SendRequest(ctx, p, pmes) + if err != nil { + return nil, err + } + peers := PBPeersToPeerInfos(respMsg.GetCloserPeers()) + return peers, nil +} + +// PutProvider is deprecated; please use [ProtocolMessenger.PutProviderAddrs]. +func (pm *ProtocolMessenger) PutProvider(ctx context.Context, p peer.ID, key multihash.Multihash, h host.Host) error { + return pm.PutProviderAddrs(ctx, p, key, peer.AddrInfo{ + ID: h.ID(), + Addrs: h.Addrs(), + }) +} + +// PutProviderAddrs asks a peer to store that we are a provider for the given key. +func (pm *ProtocolMessenger) PutProviderAddrs(ctx context.Context, p peer.ID, key multihash.Multihash, self peer.AddrInfo) (err error) { + ctx, span := internal.StartSpan(ctx, "ProtocolMessenger.PutProvider") + defer span.End() + if span.IsRecording() { + span.SetAttributes(attribute.Stringer("to", p), attribute.Stringer("key", key)) + defer func() { + if err != nil { + span.SetStatus(codes.Error, err.Error()) + } + }() + } + + // TODO: We may want to limit the type of addresses in our provider records + // For example, in a WAN-only DHT prohibit sharing non-WAN addresses (e.g. 192.168.0.100) + if len(self.Addrs) < 1 { + return fmt.Errorf("no known addresses for self, cannot put provider") + } + + pmes := NewMessage(Message_ADD_PROVIDER, key, 0) + pmes.ProviderPeers = RawPeerInfosToPBPeers([]peer.AddrInfo{self}) + + return pm.m.SendMessage(ctx, p, pmes) +} + +// GetProviders asks a peer for the providers it knows of for a given key. Also returns the K closest peers to the key +// as described in GetClosestPeers.
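+// A typical use is a single step of a provider walk (a sketch; pm, p and mh +// are hypothetical values in scope): +// +//	provs, closer, err := pm.GetProviders(ctx, p, mh) +//	if err == nil && len(provs) == 0 { +//		// this peer knows no providers; continue the walk with the closer peers +//	}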
+func (pm *ProtocolMessenger) GetProviders(ctx context.Context, p peer.ID, key multihash.Multihash) (provs []*peer.AddrInfo, closerPeers []*peer.AddrInfo, err error) { + ctx, span := internal.StartSpan(ctx, "ProtocolMessenger.GetProviders") + defer span.End() + if span.IsRecording() { + span.SetAttributes(attribute.Stringer("to", p), attribute.Stringer("key", key)) + defer func() { + if err != nil { + span.SetStatus(codes.Error, err.Error()) + } else { + provsStr := make([]string, len(provs)) + for i, v := range provs { + provsStr[i] = v.String() + } + closerPeersStr := make([]string, len(closerPeers)) + for i, v := range closerPeers { + closerPeersStr[i] = v.String() + } + span.SetAttributes(attribute.StringSlice("provs", provsStr), attribute.StringSlice("closestPeers", closerPeersStr)) + } + }() + } + + pmes := NewMessage(Message_GET_PROVIDERS, key, 0) + respMsg, err := pm.m.SendRequest(ctx, p, pmes) + if err != nil { + return nil, nil, err + } + provs = PBPeersToPeerInfos(respMsg.GetProviderPeers()) + closerPeers = PBPeersToPeerInfos(respMsg.GetCloserPeers()) + return provs, closerPeers, nil +} + +// Ping sends a ping message to the passed peer and waits for a response. +func (pm *ProtocolMessenger) Ping(ctx context.Context, p peer.ID) (err error) { + ctx, span := internal.StartSpan(ctx, "ProtocolMessenger.Ping") + defer span.End() + if span.IsRecording() { + span.SetAttributes(attribute.Stringer("to", p)) + defer func() { + if err != nil { + span.SetStatus(codes.Error, err.Error()) + } + }() + } + + req := NewMessage(Message_PING, nil, 0) + resp, err := pm.m.SendRequest(ctx, p, req) + if err != nil { + return fmt.Errorf("sending request: %w", err) + } + if resp.Type != Message_PING { + return fmt.Errorf("got unexpected response type: %v", resp.Type) + } + return nil +} diff --git a/go-libp2p-kad-dht/protocol.go b/go-libp2p-kad-dht/protocol.go new file mode 100644 index 0000000..5e1334d --- /dev/null +++ b/go-libp2p-kad-dht/protocol.go @@ -0,0 +1,12 @@ +package dht + +import ( + "github.com/libp2p/go-libp2p/core/protocol" +) + +var ( + // ProtocolDHT is the default DHT protocol. + ProtocolDHT protocol.ID = "/ipfs/kad/1.0.0" + // DefaultProtocols spoken by the DHT.
+ DefaultProtocols = []protocol.ID{ProtocolDHT} +) diff --git a/go-libp2p-kad-dht/providers/provider_set.go b/go-libp2p-kad-dht/providers/provider_set.go new file mode 100644 index 0000000..d1745ef --- /dev/null +++ b/go-libp2p-kad-dht/providers/provider_set.go @@ -0,0 +1,34 @@ +package providers + +import ( + "time" + + "github.com/libp2p/go-libp2p/core/peer" +) + +// A providerSet has the list of providers and the time that they were added. +// It is used as an intermediary data struct between what is stored in the datastore +// and the list of providers that get passed to the consumer of a .GetProviders call +type providerSet struct { + providers []peer.ID + set map[peer.ID]time.Time +} + +func newProviderSet() *providerSet { + return &providerSet{ + set: make(map[peer.ID]time.Time), + } +} + +func (ps *providerSet) Add(p peer.ID) { + ps.setVal(p, time.Now()) +} + +func (ps *providerSet) setVal(p peer.ID, t time.Time) { + _, found := ps.set[p] + if !found { + ps.providers = append(ps.providers, p) + } + + ps.set[p] = t +} diff --git a/go-libp2p-kad-dht/providers/providers_manager.go b/go-libp2p-kad-dht/providers/providers_manager.go new file mode 100644 index 0000000..1400dc7 --- /dev/null +++ b/go-libp2p-kad-dht/providers/providers_manager.go @@ -0,0 +1,412 @@ +package providers + +import ( + "context" + "encoding/binary" + "fmt" + "io" + "strings" + "sync" + "time" + + lru "github.com/hashicorp/golang-lru/simplelru" + ds "github.com/ipfs/go-datastore" + "github.com/ipfs/go-datastore/autobatch" + dsq "github.com/ipfs/go-datastore/query" + logging "github.com/ipfs/go-log/v2" + "github.com/libp2p/go-libp2p-kad-dht/internal" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/peerstore" + peerstoreImpl "github.com/libp2p/go-libp2p/p2p/host/peerstore" + "github.com/multiformats/go-base32" +) + +const ( + // ProvidersKeyPrefix is the prefix/namespace for ALL provider record + // keys stored in the data store. + ProvidersKeyPrefix = "/providers/" + + // ProviderAddrTTL is the TTL to keep the multiaddrs of provider + // peers around. Those addresses are returned alongside providers. After + // the TTL expires, the returned records will require an extra lookup to + // find the multiaddrs associated with the returned peer IDs. + ProviderAddrTTL = 24 * time.Hour +) + +// ProvideValidity is the default time that a Provider Record should last on the DHT. +// This value is also known as Provider Record Expiration Interval. +var ProvideValidity = time.Hour * 48 +var defaultCleanupInterval = time.Hour +var lruCacheSize = 256 +var batchBufferSize = 256 +var log = logging.Logger("providers") + +// ProviderStore represents a store that associates peers and their addresses to keys. +type ProviderStore interface { + AddProvider(ctx context.Context, key []byte, prov peer.AddrInfo) error + GetProviders(ctx context.Context, key []byte) ([]peer.AddrInfo, error) + io.Closer +} + +// ProviderManager adds and pulls providers out of the datastore, +// caching them in between +type ProviderManager struct { + self peer.ID + // all non-channel fields are meant to be accessed only within + // the run method + cache lru.LRUCache + pstore peerstore.Peerstore + dstore *autobatch.Datastore + + newprovs chan *addProv + getprovs chan *getProv + + cleanupInterval time.Duration + + ctx context.Context + cancel context.CancelFunc + wg sync.WaitGroup +} + +var _ ProviderStore = (*ProviderManager)(nil) + +// Option is a function that sets a provider manager option.
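+// Options are applied in order by NewProviderManager; a failing option aborts +// construction. For example (a sketch; self, pstore and dstore are hypothetical +// values in scope): +// +//	pm, err := NewProviderManager(self, pstore, dstore, +//		CleanupInterval(30*time.Minute))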
+type Option func(*ProviderManager) error + +func (pm *ProviderManager) applyOptions(opts ...Option) error { + for i, opt := range opts { + if err := opt(pm); err != nil { + return fmt.Errorf("provider manager option %d failed: %w", i, err) + } + } + return nil +} + +// CleanupInterval sets the time between GC runs. +// Defaults to 1h. +func CleanupInterval(d time.Duration) Option { + return func(pm *ProviderManager) error { + pm.cleanupInterval = d + return nil + } +} + +// Cache sets the LRU cache implementation. +// Defaults to a simple LRU cache. +func Cache(c lru.LRUCache) Option { + return func(pm *ProviderManager) error { + pm.cache = c + return nil + } +} + +type addProv struct { + ctx context.Context + key []byte + val peer.ID +} + +type getProv struct { + ctx context.Context + key []byte + resp chan []peer.ID +} + +// NewProviderManager creates and starts a new ProviderManager. +func NewProviderManager(local peer.ID, ps peerstore.Peerstore, dstore ds.Batching, opts ...Option) (*ProviderManager, error) { + pm := new(ProviderManager) + pm.self = local + pm.getprovs = make(chan *getProv) + pm.newprovs = make(chan *addProv) + pm.pstore = ps + pm.dstore = autobatch.NewAutoBatching(dstore, batchBufferSize) + cache, err := lru.NewLRU(lruCacheSize, nil) + if err != nil { + return nil, err + } + pm.cache = cache + pm.cleanupInterval = defaultCleanupInterval + if err := pm.applyOptions(opts...); err != nil { + return nil, err + } + pm.ctx, pm.cancel = context.WithCancel(context.Background()) + pm.run() + return pm, nil +} + +func (pm *ProviderManager) run() { + pm.wg.Add(1) + go func() { + defer pm.wg.Done() + + var gcQuery dsq.Results + gcTimer := time.NewTimer(pm.cleanupInterval) + + defer func() { + gcTimer.Stop() + if gcQuery != nil { + // don't really care if this fails. + _ = gcQuery.Close() + } + if err := pm.dstore.Flush(context.Background()); err != nil { + log.Error("failed to flush datastore: ", err) + } + }() + + var gcQueryRes <-chan dsq.Result + var gcSkip map[string]struct{} + var gcTime time.Time + for { + select { + case np := <-pm.newprovs: + err := pm.addProv(np.ctx, np.key, np.val) + if err != nil { + log.Error("error adding new providers: ", err) + continue + } + if gcSkip != nil { + // we have a GC running; tell it to skip this provider, + // as we've updated it since the GC started. + gcSkip[mkProvKeyFor(np.key, np.val)] = struct{}{} + } + case gp := <-pm.getprovs: + provs, err := pm.getProvidersForKey(gp.ctx, gp.key) + if err != nil && err != ds.ErrNotFound { + log.Error("error reading providers: ", err) + } + + // set the cap so the user can't append to this. + gp.resp <- provs[0:len(provs):len(provs)] + case res, ok := <-gcQueryRes: + if !ok { + if err := gcQuery.Close(); err != nil { + log.Error("failed to close provider GC query: ", err) + } + gcTimer.Reset(pm.cleanupInterval) + + // cleanup GC round + gcQueryRes = nil + gcSkip = nil + gcQuery = nil + continue + } + if res.Error != nil { + log.Error("got error from GC query: ", res.Error) + continue + } + if _, ok := gcSkip[res.Key]; ok { + // We've updated this record since starting the + // GC round, skip it.
+ continue + } + + // check expiration time + t, err := readTimeValue(res.Value) + switch { + case err != nil: + // couldn't parse the time + log.Error("parsing providers record from disk: ", err) + fallthrough + case gcTime.Sub(t) > ProvideValidity: + // or expired + err = pm.dstore.Delete(pm.ctx, ds.RawKey(res.Key)) + if err != nil && err != ds.ErrNotFound { + log.Error("failed to remove provider record from disk: ", err) + } + } + + case gcTime = <-gcTimer.C: + // You know the wonderful thing about caches? You can + // drop them. + // + // Much faster than GCing. + pm.cache.Purge() + + // Now, kick off a GC of the datastore. + q, err := pm.dstore.Query(pm.ctx, dsq.Query{ + Prefix: ProvidersKeyPrefix, + }) + if err != nil { + log.Error("provider record GC query failed: ", err) + continue + } + gcQuery = q + gcQueryRes = q.Next() + gcSkip = make(map[string]struct{}) + case <-pm.ctx.Done(): + return + } + } + }() +} + +func (pm *ProviderManager) Close() error { + pm.cancel() + pm.wg.Wait() + return nil +} + +// AddProvider adds a provider +func (pm *ProviderManager) AddProvider(ctx context.Context, k []byte, provInfo peer.AddrInfo) error { + ctx, span := internal.StartSpan(ctx, "ProviderManager.AddProvider") + defer span.End() + + if provInfo.ID != pm.self { // don't add own addrs. + pm.pstore.AddAddrs(provInfo.ID, provInfo.Addrs, ProviderAddrTTL) + } + prov := &addProv{ + ctx: ctx, + key: k, + val: provInfo.ID, + } + select { + case pm.newprovs <- prov: + return nil + case <-ctx.Done(): + return ctx.Err() + } +} + +// addProv updates the cache if needed +func (pm *ProviderManager) addProv(ctx context.Context, k []byte, p peer.ID) error { + now := time.Now() + if provs, ok := pm.cache.Get(string(k)); ok { + provs.(*providerSet).setVal(p, now) + } // else not cached, just write through + + return writeProviderEntry(ctx, pm.dstore, k, p, now) +} + +// writeProviderEntry writes the provider into the datastore +func writeProviderEntry(ctx context.Context, dstore ds.Datastore, k []byte, p peer.ID, t time.Time) error { + dsk := mkProvKeyFor(k, p) + + buf := make([]byte, 16) + n := binary.PutVarint(buf, t.UnixNano()) + + return dstore.Put(ctx, ds.NewKey(dsk), buf[:n]) +} + +func mkProvKeyFor(k []byte, p peer.ID) string { + return mkProvKey(k) + "/" + base32.RawStdEncoding.EncodeToString([]byte(p)) +} + +func mkProvKey(k []byte) string { + return ProvidersKeyPrefix + base32.RawStdEncoding.EncodeToString(k) +} + +// GetProviders returns the set of providers for the given key. +// This method _does not_ copy the set. Do not modify it. 
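+// For example (a sketch; k is a hypothetical key previously announced with +// AddProvider): +// +//	provs, err := pm.GetProviders(ctx, k) +//	if err == nil { +//		for _, ai := range provs { +//			// ai.Addrs comes from the peerstore while ProviderAddrTTL lasts +//			fmt.Println(ai.ID, ai.Addrs) +//		} +//	}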
+func (pm *ProviderManager) GetProviders(ctx context.Context, k []byte) ([]peer.AddrInfo, error) { + ctx, span := internal.StartSpan(ctx, "ProviderManager.GetProviders") + defer span.End() + + gp := &getProv{ + ctx: ctx, + key: k, + resp: make(chan []peer.ID, 1), // buffered to prevent sender from blocking + } + select { + case <-ctx.Done(): + return nil, ctx.Err() + case pm.getprovs <- gp: + } + select { + case <-ctx.Done(): + return nil, ctx.Err() + case peers := <-gp.resp: + return peerstoreImpl.PeerInfos(pm.pstore, peers), nil + } +} + +func (pm *ProviderManager) getProvidersForKey(ctx context.Context, k []byte) ([]peer.ID, error) { + pset, err := pm.getProviderSetForKey(ctx, k) + if err != nil { + return nil, err + } + return pset.providers, nil +} + +// returns the providerSet if it already exists in the cache, otherwise loads it from the datastore +func (pm *ProviderManager) getProviderSetForKey(ctx context.Context, k []byte) (*providerSet, error) { + cached, ok := pm.cache.Get(string(k)) + if ok { + return cached.(*providerSet), nil + } + + pset, err := loadProviderSet(ctx, pm.dstore, k) + if err != nil { + return nil, err + } + + if len(pset.providers) > 0 { + pm.cache.Add(string(k), pset) + } + + return pset, nil +} + +// loads the providerSet out of the datastore +func loadProviderSet(ctx context.Context, dstore ds.Datastore, k []byte) (*providerSet, error) { + res, err := dstore.Query(ctx, dsq.Query{Prefix: mkProvKey(k)}) + if err != nil { + return nil, err + } + defer res.Close() + + now := time.Now() + out := newProviderSet() + for { + e, ok := res.NextSync() + if !ok { + break + } + if e.Error != nil { + log.Error("got an error querying provider records: ", e.Error) + continue + } + + // check expiration time + t, err := readTimeValue(e.Value) + switch { + case err != nil: + // couldn't parse the time + log.Error("parsing providers record from disk: ", err) + fallthrough + case now.Sub(t) > ProvideValidity: + // or just expired + err = dstore.Delete(ctx, ds.RawKey(e.Key)) + if err != nil && err != ds.ErrNotFound { + log.Error("failed to remove provider record from disk: ", err) + } + continue + } + + lix := strings.LastIndex(e.Key, "/") + + decstr, err := base32.RawStdEncoding.DecodeString(e.Key[lix+1:]) + if err != nil { + log.Error("base32 decoding error: ", err) + err = dstore.Delete(ctx, ds.RawKey(e.Key)) + if err != nil && err != ds.ErrNotFound { + log.Error("failed to remove provider record from disk: ", err) + } + continue + } + + pid := peer.ID(decstr) + + out.setVal(pid, t) + } + + return out, nil +} + +func readTimeValue(data []byte) (time.Time, error) { + nsec, n := binary.Varint(data) + if n <= 0 { + return time.Time{}, fmt.Errorf("failed to parse time") + } + + return time.Unix(0, nsec), nil +} diff --git a/go-libp2p-kad-dht/providers/providers_manager_test.go b/go-libp2p-kad-dht/providers/providers_manager_test.go new file mode 100644 index 0000000..e830929 --- /dev/null +++ b/go-libp2p-kad-dht/providers/providers_manager_test.go @@ -0,0 +1,366 @@ +package providers + +import ( + "context" + "fmt" + "io" + "os" + "testing" + "time" + + "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/p2p/host/peerstore/pstoremem" + + mh "github.com/multiformats/go-multihash" + + u "github.com/ipfs/boxo/util" + ds "github.com/ipfs/go-datastore" + dsq "github.com/ipfs/go-datastore/query" + dssync "github.com/ipfs/go-datastore/sync" + // + // used by TestLargeProvidersSet: do not remove + // lds "github.com/ipfs/go-ds-leveldb" +) + +func TestProviderManager(t *testing.T) { + ctx, cancel := 
context.WithCancel(context.Background()) + defer cancel() + + mid := peer.ID("testing") + ps, err := pstoremem.NewPeerstore() + if err != nil { + t.Fatal(err) + } + p, err := NewProviderManager(mid, ps, dssync.MutexWrap(ds.NewMapDatastore())) + if err != nil { + t.Fatal(err) + } + a := u.Hash([]byte("test")) + p.AddProvider(ctx, a, peer.AddrInfo{ID: peer.ID("testingprovider")}) + + // Not cached + // TODO verify that cache is empty + resp, _ := p.GetProviders(ctx, a) + if len(resp) != 1 { + t.Fatal("Could not retrieve provider.") + } + + // Cached + // TODO verify that cache is populated + resp, _ = p.GetProviders(ctx, a) + if len(resp) != 1 { + t.Fatal("Could not retrieve provider.") + } + + p.AddProvider(ctx, a, peer.AddrInfo{ID: peer.ID("testingprovider2")}) + p.AddProvider(ctx, a, peer.AddrInfo{ID: peer.ID("testingprovider3")}) + // TODO verify that cache is already up to date + resp, _ = p.GetProviders(ctx, a) + if len(resp) != 3 { + t.Fatalf("Should have got 3 providers, got %d", len(resp)) + } + + p.Close() +} + +func TestProvidersDatastore(t *testing.T) { + old := lruCacheSize + lruCacheSize = 10 + defer func() { lruCacheSize = old }() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + mid := peer.ID("testing") + ps, err := pstoremem.NewPeerstore() + if err != nil { + t.Fatal(err) + } + + p, err := NewProviderManager(mid, ps, dssync.MutexWrap(ds.NewMapDatastore())) + if err != nil { + t.Fatal(err) + } + defer p.Close() + + friend := peer.ID("friend") + var mhs []mh.Multihash + for i := 0; i < 100; i++ { + h := u.Hash([]byte(fmt.Sprint(i))) + mhs = append(mhs, h) + p.AddProvider(ctx, h, peer.AddrInfo{ID: friend}) + } + + for _, c := range mhs { + resp, _ := p.GetProviders(ctx, c) + if len(resp) != 1 { + t.Fatal("Could not retrieve provider.") + } + if resp[0].ID != friend { + t.Fatal("expected provider to be 'friend'") + } + } +} + +func TestProvidersSerialization(t *testing.T) { + dstore := dssync.MutexWrap(ds.NewMapDatastore()) + + k := u.Hash([]byte("my key!")) + p1 := peer.ID("peer one") + p2 := peer.ID("peer two") + pt1 := time.Now() + pt2 := pt1.Add(time.Hour) + + err := writeProviderEntry(context.Background(), dstore, k, p1, pt1) + if err != nil { + t.Fatal(err) + } + + err = writeProviderEntry(context.Background(), dstore, k, p2, pt2) + if err != nil { + t.Fatal(err) + } + + pset, err := loadProviderSet(context.Background(), dstore, k) + if err != nil { + t.Fatal(err) + } + + lt1, ok := pset.set[p1] + if !ok { + t.Fatal("failed to load set correctly") + } + + if !pt1.Equal(lt1) { + t.Fatalf("time wasn't serialized correctly, %v != %v", pt1, lt1) + } + + lt2, ok := pset.set[p2] + if !ok { + t.Fatal("failed to load set correctly") + } + + if !pt2.Equal(lt2) { + t.Fatalf("time wasn't serialized correctly, %v != %v", pt2, lt2) + } +} + +func TestProvidesExpire(t *testing.T) { + t.Skip("This test is flaky, see https://github.com/libp2p/go-libp2p-kad-dht/issues/725.") + + pval := ProvideValidity + cleanup := defaultCleanupInterval + ProvideValidity = time.Second / 2 + defaultCleanupInterval = time.Second / 2 + defer func() { + ProvideValidity = pval + defaultCleanupInterval = cleanup + }() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ds := dssync.MutexWrap(ds.NewMapDatastore()) + mid := peer.ID("testing") + ps, err := pstoremem.NewPeerstore() + if err != nil { + t.Fatal(err) + } + p, err := NewProviderManager(mid, ps, ds) + if err != nil { + t.Fatal(err) + } + + peers := []peer.ID{"a", "b"} + var mhs []mh.Multihash
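+ // build ten keys up front; providers for the first five are added earlier below, so they expire first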
+ for i := 0; i < 10; i++ { + h := u.Hash([]byte(fmt.Sprint(i))) + mhs = append(mhs, h) + } + + for _, h := range mhs[:5] { + p.AddProvider(ctx, h, peer.AddrInfo{ID: peers[0]}) + p.AddProvider(ctx, h, peer.AddrInfo{ID: peers[1]}) + } + + time.Sleep(time.Second / 4) + + for _, h := range mhs[5:] { + p.AddProvider(ctx, h, peer.AddrInfo{ID: peers[0]}) + p.AddProvider(ctx, h, peer.AddrInfo{ID: peers[1]}) + } + + for _, h := range mhs { + out, _ := p.GetProviders(ctx, h) + if len(out) != 2 { + t.Fatal("expected providers to still be there") + } + } + + time.Sleep(3 * time.Second / 8) + + for _, h := range mhs[:5] { + out, _ := p.GetProviders(ctx, h) + if len(out) > 0 { + t.Fatal("expected providers to be cleaned up, got: ", out) + } + } + + for _, h := range mhs[5:] { + out, _ := p.GetProviders(ctx, h) + if len(out) != 2 { + t.Fatal("expected providers to still be there") + } + } + + time.Sleep(time.Second / 2) + + // Stop to prevent data races + p.Close() + + if p.cache.Len() != 0 { + t.Fatal("providers map not cleaned up") + } + + res, err := ds.Query(context.Background(), dsq.Query{Prefix: ProvidersKeyPrefix}) + if err != nil { + t.Fatal(err) + } + rest, err := res.Rest() + if err != nil { + t.Fatal(err) + } + if len(rest) > 0 { + t.Fatal("expected everything to be cleaned out of the datastore") + } +} + +var _ = io.NopCloser +var _ = os.DevNull + +// TestLargeProvidersSet can be used for profiling. +// The datastore can be switched to LevelDB by uncommenting the section below and the import above +func TestLargeProvidersSet(t *testing.T) { + t.Skip("This can be used for profiling. Skipping it for now to avoid incurring extra CI time") + old := lruCacheSize + lruCacheSize = 10 + defer func() { lruCacheSize = old }() + + dstore := ds.NewMapDatastore() + + // dirn, err := os.MkdirTemp("", "provtest") + // if err != nil { + // t.Fatal(err) + // } + // + // opts := &lds.Options{ + // NoSync: true, + // Compression: 1, + // } + // lds, err := lds.NewDatastore(dirn, opts) + // if err != nil { + // t.Fatal(err) + // } + // dstore = lds + // + // defer func() { + // os.RemoveAll(dirn) + // }() + + ctx := context.Background() + var peers []peer.ID + for i := 0; i < 3000; i++ { + peers = append(peers, peer.ID(fmt.Sprint(i))) + } + + mid := peer.ID("myself") + ps, err := pstoremem.NewPeerstore() + if err != nil { + t.Fatal(err) + } + + p, err := NewProviderManager(mid, ps, dstore) + if err != nil { + t.Fatal(err) + } + defer p.Close() + + var mhs []mh.Multihash + for i := 0; i < 1000; i++ { + h := u.Hash([]byte(fmt.Sprint(i))) + mhs = append(mhs, h) + for _, pid := range peers { + p.AddProvider(ctx, h, peer.AddrInfo{ID: pid}) + } + } + + for i := 0; i < 5; i++ { + start := time.Now() + for _, h := range mhs { + _, _ = p.GetProviders(ctx, h) + } + elapsed := time.Since(start) + fmt.Printf("query %f ms\n", elapsed.Seconds()*1000) + } +} + +func TestUponCacheMissProvidersAreReadFromDatastore(t *testing.T) { + old := lruCacheSize + lruCacheSize = 1 + defer func() { lruCacheSize = old }() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + p1, p2 := peer.ID("a"), peer.ID("b") + h1 := u.Hash([]byte("1")) + h2 := u.Hash([]byte("2")) + ps, err := pstoremem.NewPeerstore() + if err != nil { + t.Fatal(err) + } + + pm, err := NewProviderManager(p1, ps, dssync.MutexWrap(ds.NewMapDatastore())) + if err != nil { + t.Fatal(err) + } + + // add provider + pm.AddProvider(ctx, h1, peer.AddrInfo{ID: p1}) + // make the cached provider for h1 go to datastore + pm.AddProvider(ctx, h2, peer.AddrInfo{ID: p1}) + // now the
just-offloaded record should be brought back and joined with p2 + pm.AddProvider(ctx, h1, peer.AddrInfo{ID: p2}) + + h1Provs, _ := pm.GetProviders(ctx, h1) + if len(h1Provs) != 2 { + t.Fatalf("expected h1 to be provided by 2 peers, is by %d", len(h1Provs)) + } +} + +func TestWriteUpdatesCache(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + p1, p2 := peer.ID("a"), peer.ID("b") + h1 := u.Hash([]byte("1")) + ps, err := pstoremem.NewPeerstore() + if err != nil { + t.Fatal(err) + } + + pm, err := NewProviderManager(p1, ps, dssync.MutexWrap(ds.NewMapDatastore())) + if err != nil { + t.Fatal(err) + } + + // add provider + pm.AddProvider(ctx, h1, peer.AddrInfo{ID: p1}) + // force into the cache + pm.GetProviders(ctx, h1) + // add a second provider + pm.AddProvider(ctx, h1, peer.AddrInfo{ID: p2}) + + c1Provs, _ := pm.GetProviders(ctx, h1) + if len(c1Provs) != 2 { + t.Fatalf("expected h1 to be provided by 2 peers, is by %d", len(c1Provs)) + } +} diff --git a/go-libp2p-kad-dht/qpeerset/qpeerset.go b/go-libp2p-kad-dht/qpeerset/qpeerset.go new file mode 100644 index 0000000..d22a074 --- /dev/null +++ b/go-libp2p-kad-dht/qpeerset/qpeerset.go @@ -0,0 +1,159 @@ +package qpeerset + +import ( + "math/big" + "sort" + + "github.com/libp2p/go-libp2p/core/peer" + ks "github.com/whyrusleeping/go-keyspace" +) + +// PeerState describes the state of a peer ID during the lifecycle of an individual lookup. +type PeerState int + +const ( + // PeerHeard is applied to peers which have not been queried yet. + PeerHeard PeerState = iota + // PeerWaiting is applied to peers that are currently being queried. + PeerWaiting + // PeerQueried is applied to peers that have been queried and a response was retrieved successfully. + PeerQueried + // PeerUnreachable is applied to peers that have been queried and a response was not retrieved successfully. + PeerUnreachable +) + +// QueryPeerset maintains the state of a Kademlia asynchronous lookup. +// The lookup state is a set of peers, each labeled with a peer state. +type QueryPeerset struct { + // the key being searched for + key ks.Key + + // all known peers + all []queryPeerState + + // sorted is true if all is currently in sorted order + sorted bool +} + +type queryPeerState struct { + id peer.ID + distance *big.Int + state PeerState + referredBy peer.ID +} + +type sortedQueryPeerset QueryPeerset + +func (sqp *sortedQueryPeerset) Len() int { + return len(sqp.all) +} + +func (sqp *sortedQueryPeerset) Swap(i, j int) { + sqp.all[i], sqp.all[j] = sqp.all[j], sqp.all[i] +} + +func (sqp *sortedQueryPeerset) Less(i, j int) bool { + di, dj := sqp.all[i].distance, sqp.all[j].distance + return di.Cmp(dj) == -1 +} + +// NewQueryPeerset creates a new empty set of peers. +// key is the target key of the lookup that this peer set is for. +func NewQueryPeerset(key string) *QueryPeerset { + return &QueryPeerset{ + key: ks.XORKeySpace.Key([]byte(key)), + all: []queryPeerState{}, + sorted: false, + } +} + +func (qp *QueryPeerset) find(p peer.ID) int { + for i := range qp.all { + if qp.all[i].id == p { + return i + } + } + return -1 +} + +func (qp *QueryPeerset) distanceToKey(p peer.ID) *big.Int { + return ks.XORKeySpace.Key([]byte(p)).Distance(qp.key) +} + +// TryAdd adds the peer p to the peer set. +// If the peer is already present, no action is taken. +// Otherwise, the peer is added with state set to PeerHeard. +// TryAdd returns true iff the peer was not already present.
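+// For example (a sketch): +// +//	if qp.TryAdd(p, referrer) { +//		// p was new; it now sits in PeerHeard, ready to be queried +//	}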
+func (qp *QueryPeerset) TryAdd(p, referredBy peer.ID) bool { + if qp.find(p) >= 0 { + return false + } + qp.all = append(qp.all, + queryPeerState{id: p, distance: qp.distanceToKey(p), state: PeerHeard, referredBy: referredBy}) + qp.sorted = false + return true +} + +func (qp *QueryPeerset) sort() { + if qp.sorted { + return + } + sort.Sort((*sortedQueryPeerset)(qp)) + qp.sorted = true +} + +// SetState sets the state of peer p to s. +// If p is not in the peerset, SetState panics. +func (qp *QueryPeerset) SetState(p peer.ID, s PeerState) { + qp.all[qp.find(p)].state = s +} + +// GetState returns the state of peer p. +// If p is not in the peerset, GetState panics. +func (qp *QueryPeerset) GetState(p peer.ID) PeerState { + return qp.all[qp.find(p)].state +} + +// GetReferrer returns the peer that referred us to the peer p. +// If p is not in the peerset, GetReferrer panics. +func (qp *QueryPeerset) GetReferrer(p peer.ID) peer.ID { + return qp.all[qp.find(p)].referredBy +} + +// GetClosestNInStates returns the n peers closest to the key that are in one of the given states. +// It returns fewer than n peers if fewer peers meet the condition. +// The returned peers are sorted in ascending order by their distance to the key. +func (qp *QueryPeerset) GetClosestNInStates(n int, states ...PeerState) (result []peer.ID) { + qp.sort() + m := make(map[PeerState]struct{}, len(states)) + for i := range states { + m[states[i]] = struct{}{} + } + + for _, p := range qp.all { + if _, ok := m[p.state]; ok { + result = append(result, p.id) + } + } + if len(result) >= n { + return result[:n] + } + return result +} + +// GetClosestInStates returns the peers that are in one of the given states. +// The returned peers are sorted in ascending order by their distance to the key. +func (qp *QueryPeerset) GetClosestInStates(states ...PeerState) (result []peer.ID) { + return qp.GetClosestNInStates(len(qp.all), states...) +} + +// NumHeard returns the number of peers in state PeerHeard. +func (qp *QueryPeerset) NumHeard() int { + return len(qp.GetClosestInStates(PeerHeard)) +} + +// NumWaiting returns the number of peers in state PeerWaiting.
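+// Note that both counters filter the lazily sorted set, so a call costs an +// O(n log n) sort when the set has changed since the last sort, plus an O(n) scan.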
+func (qp *QueryPeerset) NumWaiting() int { + return len(qp.GetClosestInStates(PeerWaiting)) +} diff --git a/go-libp2p-kad-dht/qpeerset/qpeerset_test.go b/go-libp2p-kad-dht/qpeerset/qpeerset_test.go new file mode 100644 index 0000000..5ce4962 --- /dev/null +++ b/go-libp2p-kad-dht/qpeerset/qpeerset_test.go @@ -0,0 +1,86 @@ +package qpeerset + +import ( + "testing" + + "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/test" + + kb "github.com/libp2p/go-libp2p-kbucket" + + "github.com/stretchr/testify/require" +) + +func TestQPeerSet(t *testing.T) { + key := "test" + qp := NewQueryPeerset(key) + + // -----------------Ordering between peers for the Test ----- + // KEY < peer3 < peer1 < peer4 < peer2 + // ---------------------------------------------------------- + peer2 := test.RandPeerIDFatal(t) + var peer4 peer.ID + for { + peer4 = test.RandPeerIDFatal(t) + if kb.Closer(peer4, peer2, key) { + break + } + } + + var peer1 peer.ID + for { + peer1 = test.RandPeerIDFatal(t) + if kb.Closer(peer1, peer4, key) { + break + } + } + + var peer3 peer.ID + for { + peer3 = test.RandPeerIDFatal(t) + if kb.Closer(peer3, peer1, key) { + break + } + } + + oracle := test.RandPeerIDFatal(t) + + // find fails + require.Equal(t, -1, qp.find(peer2)) + + // add peer2, assert state & then another add fails + require.True(t, qp.TryAdd(peer2, oracle)) + require.Equal(t, PeerHeard, qp.GetState(peer2)) + require.False(t, qp.TryAdd(peer2, oracle)) + require.Equal(t, 0, qp.NumWaiting()) + + // add peer4 + require.True(t, qp.TryAdd(peer4, oracle)) + cl := qp.GetClosestNInStates(2, PeerHeard, PeerWaiting, PeerQueried) + require.Equal(t, []peer.ID{peer4, peer2}, cl) + cl = qp.GetClosestNInStates(3, PeerHeard, PeerWaiting, PeerQueried) + require.Equal(t, []peer.ID{peer4, peer2}, cl) + cl = qp.GetClosestNInStates(1, PeerHeard, PeerWaiting, PeerQueried) + require.Equal(t, []peer.ID{peer4}, cl) + + // mark as unreachable & try to get it + qp.SetState(peer4, PeerUnreachable) + cl = qp.GetClosestNInStates(1, PeerHeard, PeerWaiting, PeerQueried) + require.Equal(t, []peer.ID{peer2}, cl) + + // add peer1 + require.True(t, qp.TryAdd(peer1, oracle)) + cl = qp.GetClosestNInStates(1, PeerHeard, PeerWaiting, PeerQueried) + require.Equal(t, []peer.ID{peer1}, cl) + cl = qp.GetClosestNInStates(2, PeerHeard, PeerWaiting, PeerQueried) + require.Equal(t, []peer.ID{peer1, peer2}, cl) + + // mark as waiting and assert + qp.SetState(peer2, PeerWaiting) + require.Equal(t, []peer.ID{peer2}, qp.GetClosestInStates(PeerWaiting)) + + require.Equal(t, []peer.ID{peer1}, qp.GetClosestInStates(PeerHeard)) + require.True(t, qp.TryAdd(peer3, oracle)) + require.Equal(t, []peer.ID{peer3, peer1}, qp.GetClosestInStates(PeerHeard)) + require.Equal(t, 2, qp.NumHeard()) +} diff --git a/go-libp2p-kad-dht/query.go b/go-libp2p-kad-dht/query.go new file mode 100644 index 0000000..7c01a2a --- /dev/null +++ b/go-libp2p-kad-dht/query.go @@ -0,0 +1,556 @@ +package dht + +import ( + "context" + "errors" + "fmt" + "math" + "sync" + "time" + + "github.com/libp2p/go-libp2p/core/network" + "github.com/libp2p/go-libp2p/core/peer" + pstore "github.com/libp2p/go-libp2p/core/peerstore" + "github.com/libp2p/go-libp2p/core/routing" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" + + "github.com/google/uuid" + "github.com/libp2p/go-libp2p-kad-dht/internal" + "github.com/libp2p/go-libp2p-kad-dht/qpeerset" + kb "github.com/libp2p/go-libp2p-kbucket" +) + +// ErrNoPeersQueried is returned when we failed to connect to any peers.
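+// Callers can detect this condition with errors.Is (a sketch): +// +//	if errors.Is(err, ErrNoPeersQueried) { +//		// the lookup could not reach any peer +//	}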
+var ErrNoPeersQueried = errors.New("failed to query any peers") + +type queryFn func(context.Context, peer.ID) ([]*peer.AddrInfo, error) +type stopFn func(*qpeerset.QueryPeerset) bool + +// query represents a single DHT query. +type query struct { + // unique identifier for the lookup instance + id uuid.UUID + + // target key for the lookup + key string + + // the query context. + ctx context.Context + + dht *IpfsDHT + + // seedPeers is the set of peers that seed the query + seedPeers []peer.ID + + // peerTimes contains the duration of each successful query to a peer + peerTimes map[peer.ID]time.Duration + + // queryPeers is the set of peers known by this query and their respective states. + queryPeers *qpeerset.QueryPeerset + + // terminated is set when the first worker thread encounters the termination condition. + // Its role is to make sure that once termination is determined, it is sticky. + terminated bool + + // waitGroup ensures lookup does not end until all query goroutines complete. + waitGroup sync.WaitGroup + + // the function that will be used to query a single peer. + queryFn queryFn + + // stopFn is used to determine if we should stop the WHOLE disjoint query. + stopFn stopFn +} + +type lookupWithFollowupResult struct { + peers []peer.ID // the top K not unreachable peers at the end of the query + state []qpeerset.PeerState // the peer states at the end of the query of the peers slice (not closest) + closest []peer.ID // the top K peers at the end of the query + + // indicates that neither the lookup nor the followup has been prematurely terminated by an external condition such + // as context cancellation or the stop function being called. + completed bool +} + +// runLookupWithFollowup executes the lookup on the target using the given query function and stopping when either the +// context is cancelled or the stop function returns true. Note: if the stop function is not sticky, i.e. it does not +// return true every time after the first time it returns true, it is not guaranteed to cause a stop to occur just +// because it momentarily returns true. +// +// After the lookup is complete the query function is run (unless stopped) against all of the top K peers from the +// lookup that have not already been successfully queried. +func (dht *IpfsDHT) runLookupWithFollowup(ctx context.Context, target string, queryFn queryFn, stopFn stopFn) (*lookupWithFollowupResult, error) { + ctx, span := internal.StartSpan(ctx, "IpfsDHT.RunLookupWithFollowup", trace.WithAttributes(internal.KeyAsAttribute("Target", target))) + defer span.End() + + // run the query + lookupRes, qps, err := dht.runQuery(ctx, target, queryFn, stopFn) + if err != nil { + return nil, err + } + + // query all of the top K peers we've either Heard about or have outstanding queries we're Waiting on. + // This ensures that all of the top K results have been queried which adds to resiliency against churn for query + // functions that carry state (e.g. FindProviders and GetValue) as well as establish connections that are needed + // by stateless query functions (e.g. 
GetClosestPeers and therefore Provide and PutValue) + queryPeers := make([]peer.ID, 0, len(lookupRes.peers)) + for i, p := range lookupRes.peers { + if state := lookupRes.state[i]; state == qpeerset.PeerHeard || state == qpeerset.PeerWaiting { + queryPeers = append(queryPeers, p) + } + } + + if len(queryPeers) == 0 { + return lookupRes, nil + } + + // return if the lookup has been externally stopped + if ctx.Err() != nil || stopFn(qps) { + lookupRes.completed = false + return lookupRes, nil + } + + doneCh := make(chan struct{}, len(queryPeers)) + followUpCtx, cancelFollowUp := context.WithCancel(ctx) + defer cancelFollowUp() + for _, p := range queryPeers { + qp := p + go func() { + _, _ = queryFn(followUpCtx, qp) + doneCh <- struct{}{} + }() + } + + // wait for all queries to complete before returning, aborting ongoing queries if we've been externally stopped + followupsCompleted := 0 +processFollowUp: + for i := 0; i < len(queryPeers); i++ { + select { + case <-doneCh: + followupsCompleted++ + if stopFn(qps) { + cancelFollowUp() + if i < len(queryPeers)-1 { + lookupRes.completed = false + } + break processFollowUp + } + case <-ctx.Done(): + lookupRes.completed = false + cancelFollowUp() + break processFollowUp + } + } + + if !lookupRes.completed { + for i := followupsCompleted; i < len(queryPeers); i++ { + <-doneCh + } + } + + return lookupRes, nil +} + +func (dht *IpfsDHT) runQuery(ctx context.Context, target string, queryFn queryFn, stopFn stopFn) (*lookupWithFollowupResult, *qpeerset.QueryPeerset, error) { + ctx, span := internal.StartSpan(ctx, "IpfsDHT.RunQuery") + defer span.End() + + // pick the K closest peers to the key in our Routing table. + targetKadID := kb.ConvertKey(target) + seedPeers := dht.routingTable.NearestPeers(targetKadID, dht.bucketSize) + if len(seedPeers) == 0 { + routing.PublishQueryEvent(ctx, &routing.QueryEvent{ + Type: routing.QueryError, + Extra: kb.ErrLookupFailure.Error(), + }) + return nil, nil, kb.ErrLookupFailure + } + + q := &query{ + id: uuid.New(), + key: target, + ctx: ctx, + dht: dht, + queryPeers: qpeerset.NewQueryPeerset(target), + seedPeers: seedPeers, + peerTimes: make(map[peer.ID]time.Duration), + terminated: false, + queryFn: queryFn, + stopFn: stopFn, + } + + // run the query + q.run() + + if ctx.Err() == nil { + q.recordValuablePeers() + } + + res := q.constructLookupResult(targetKadID) + return res, q.queryPeers, nil +} + +func (q *query) recordPeerIsValuable(p peer.ID) { + if !q.dht.routingTable.UpdateLastUsefulAt(p, time.Now()) { + // not in routing table + return + } +} + +func (q *query) recordValuablePeers() { + // Valuable peers algorithm: + // Label the seed peer that responded to a query in the shortest amount of time as the "most valuable peer" (MVP) + // Each seed peer that responded to a query within some range (i.e. 
2x) of the MVP's time is a valuable peer + // Mark the MVP and all the other valuable peers as valuable + mvpDuration := time.Duration(math.MaxInt64) + for _, p := range q.seedPeers { + if queryTime, ok := q.peerTimes[p]; ok && queryTime < mvpDuration { + mvpDuration = queryTime + } + } + + for _, p := range q.seedPeers { + if queryTime, ok := q.peerTimes[p]; ok && queryTime < mvpDuration*2 { + q.recordPeerIsValuable(p) + } + } +} + +// constructLookupResult takes the query information and uses it to construct the lookup result +func (q *query) constructLookupResult(target kb.ID) *lookupWithFollowupResult { + // determine if the query terminated early + completed := true + + // Lookup and starvation are both valid ways for a lookup to complete. (Starvation does not imply failure.) + // Lookup termination (as defined in isLookupTermination) is not possible in small networks. + // Starvation is a successful query termination in small networks. + if !(q.isLookupTermination() || q.isStarvationTermination()) { + completed = false + } + + // extract the top K not unreachable peers + var peers []peer.ID + peerState := make(map[peer.ID]qpeerset.PeerState) + qp := q.queryPeers.GetClosestNInStates(q.dht.bucketSize, qpeerset.PeerHeard, qpeerset.PeerWaiting, qpeerset.PeerQueried) + for _, p := range qp { + state := q.queryPeers.GetState(p) + peerState[p] = state + peers = append(peers, p) + } + + // get the top K overall peers + sortedPeers := kb.SortClosestPeers(peers, target) + if len(sortedPeers) > q.dht.bucketSize { + sortedPeers = sortedPeers[:q.dht.bucketSize] + } + + closest := q.queryPeers.GetClosestNInStates(q.dht.bucketSize, qpeerset.PeerHeard, qpeerset.PeerWaiting, qpeerset.PeerQueried, qpeerset.PeerUnreachable) + + // return the top K not unreachable peers as well as their states at the end of the query + res := &lookupWithFollowupResult{ + peers: sortedPeers, + state: make([]qpeerset.PeerState, len(sortedPeers)), + completed: completed, + closest: closest, + } + + for i, p := range sortedPeers { + res.state[i] = peerState[p] + } + + return res +} + +type queryUpdate struct { + cause peer.ID + queried []peer.ID + heard []peer.ID + unreachable []peer.ID + + queryDuration time.Duration +} + +func (q *query) run() { + ctx, span := internal.StartSpan(q.ctx, "IpfsDHT.Query.Run") + defer span.End() + + pathCtx, cancelPath := context.WithCancel(ctx) + defer cancelPath() + + alpha := q.dht.alpha + + ch := make(chan *queryUpdate, alpha) + ch <- &queryUpdate{cause: q.dht.self, heard: q.seedPeers} + + // return only once all outstanding queries have completed. + defer q.waitGroup.Wait() + for { + var cause peer.ID + select { + case update := <-ch: + q.updateState(pathCtx, update) + cause = update.cause + case <-pathCtx.Done(): + q.terminate(pathCtx, cancelPath, LookupCancelled) + } + + // calculate the maximum number of queries we could be spawning. + // Note: NumWaiting will be updated in spawnQuery + maxNumQueriesToSpawn := alpha - q.queryPeers.NumWaiting() + + // termination is triggered on end-of-lookup conditions or starvation of unused peers + // it also returns the peers we should query next for a maximum of `maxNumQueriesToSpawn` peers. 
+ ready, reason, qPeers := q.isReadyToTerminate(pathCtx, maxNumQueriesToSpawn) + if ready { + q.terminate(pathCtx, cancelPath, reason) + } + + if q.terminated { + return + } + + // try spawning the queries, if there are no available peers to query then we won't spawn them + for _, p := range qPeers { + q.spawnQuery(pathCtx, cause, p, ch) + } + } +} + +// spawnQuery starts one query, if an available heard peer is found +func (q *query) spawnQuery(ctx context.Context, cause peer.ID, queryPeer peer.ID, ch chan<- *queryUpdate) { + ctx, span := internal.StartSpan(ctx, "IpfsDHT.SpawnQuery", trace.WithAttributes( + attribute.String("Cause", cause.String()), + attribute.String("QueryPeer", queryPeer.String()), + )) + defer span.End() + + PublishLookupEvent(ctx, + NewLookupEvent( + q.dht.self, + q.id, + q.key, + NewLookupUpdateEvent( + cause, + q.queryPeers.GetReferrer(queryPeer), + nil, // heard + []peer.ID{queryPeer}, // waiting + nil, // queried + nil, // unreachable + ), + nil, + nil, + ), + ) + q.queryPeers.SetState(queryPeer, qpeerset.PeerWaiting) + q.waitGroup.Add(1) + go q.queryPeer(ctx, ch, queryPeer) +} + +func (q *query) isReadyToTerminate(ctx context.Context, nPeersToQuery int) (bool, LookupTerminationReason, []peer.ID) { + // give the application logic a chance to terminate + if q.stopFn(q.queryPeers) { + return true, LookupStopped, nil + } + if q.isStarvationTermination() { + return true, LookupStarvation, nil + } + if q.isLookupTermination() { + return true, LookupCompleted, nil + } + + // The peers we query next should be ones that we have only Heard about. + var peersToQuery []peer.ID + peers := q.queryPeers.GetClosestInStates(qpeerset.PeerHeard) + count := 0 + for _, p := range peers { + peersToQuery = append(peersToQuery, p) + count++ + if count == nPeersToQuery { + break + } + } + + return false, -1, peersToQuery +} + +// From the set of all nodes that are not unreachable, +// if the closest beta nodes are all queried, the lookup can terminate. +func (q *query) isLookupTermination() bool { + peers := q.queryPeers.GetClosestNInStates(q.dht.beta, qpeerset.PeerHeard, qpeerset.PeerWaiting, qpeerset.PeerQueried) + for _, p := range peers { + if q.queryPeers.GetState(p) != qpeerset.PeerQueried { + return false + } + } + return true +} + +func (q *query) isStarvationTermination() bool { + return q.queryPeers.NumHeard() == 0 && q.queryPeers.NumWaiting() == 0 +} + +func (q *query) terminate(ctx context.Context, cancel context.CancelFunc, reason LookupTerminationReason) { + ctx, span := internal.StartSpan(ctx, "IpfsDHT.Query.Terminate", trace.WithAttributes(attribute.Stringer("Reason", reason))) + defer span.End() + + if q.terminated { + return + } + + PublishLookupEvent(ctx, + NewLookupEvent( + q.dht.self, + q.id, + q.key, + nil, + nil, + NewLookupTerminateEvent(reason), + ), + ) + cancel() // abort outstanding queries + q.terminated = true +} + +// queryPeer queries a single peer and reports its findings on the channel. +// queryPeer does not access the query state in queryPeers! 
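That last comment captures the concurrency model of the whole lookup: worker goroutines never touch the shared peer-state table, they only report outcomes over a channel, and the single run loop applies the updates. A minimal, self-contained sketch of the same pattern, for illustration only (the `update` struct and `state` map are hypothetical, not part of this patch):

    package main

    import (
        "fmt"
        "sync"
    )

    // update plays the role of queryUpdate: workers only report, they
    // never mutate shared lookup state.
    type update struct {
        peer string
        ok   bool
    }

    func main() {
        peers := []string{"a", "b", "c"}
        ch := make(chan update, len(peers))

        var wg sync.WaitGroup
        for _, p := range peers {
            wg.Add(1)
            go func(p string) { // worker goroutine, like queryPeer
                defer wg.Done()
                ch <- update{peer: p, ok: p != "b"} // pretend "b" is unreachable
            }(p)
        }
        go func() { wg.Wait(); close(ch) }()

        // single owner loop, like run()/updateState: the only writer of state
        state := make(map[string]string)
        for u := range ch {
            if u.ok {
                state[u.peer] = "queried"
            } else {
                state[u.peer] = "unreachable"
            }
        }
        fmt.Println(state)
    }
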
+func (q *query) queryPeer(ctx context.Context, ch chan<- *queryUpdate, p peer.ID) {
+	defer q.waitGroup.Done()
+
+	ctx, span := internal.StartSpan(ctx, "IpfsDHT.QueryPeer")
+	defer span.End()
+
+	dialCtx, queryCtx := ctx, ctx
+
+	// dial the peer
+	if err := q.dht.dialPeer(dialCtx, p); err != nil {
+		// remove the peer if there was a dial failure, but not because of a context cancellation
+		if dialCtx.Err() == nil {
+			q.dht.peerStoppedDHT(p)
+		}
+		ch <- &queryUpdate{cause: p, unreachable: []peer.ID{p}}
+		return
+	}
+
+	startQuery := time.Now()
+	// send query RPC to the remote peer
+	newPeers, err := q.queryFn(queryCtx, p)
+	if err != nil {
+		if queryCtx.Err() == nil {
+			q.dht.peerStoppedDHT(p)
+		}
+		ch <- &queryUpdate{cause: p, unreachable: []peer.ID{p}}
+		return
+	}
+
+	queryDuration := time.Since(startQuery)
+
+	// query successful, try to add to RT
+	q.dht.validPeerFound(p)
+
+	// process new peers
+	saw := []peer.ID{}
+	for _, next := range newPeers {
+		if next.ID == q.dht.self { // don't add self.
+			logger.Debugf("PEERS CLOSER -- worker for: %v found self", p)
+			continue
+		}
+
+		// add any other known addresses for the candidate peer.
+		curInfo := q.dht.peerstore.PeerInfo(next.ID)
+		next.Addrs = append(next.Addrs, curInfo.Addrs...)
+
+		// add their addresses to the dialer's peerstore
+		//
+		// add the next peer to the query if it matches the query target, even if it would otherwise fail the query filter
+		// TODO: this behavior is really specific to how FindPeer works and not GetClosestPeers or any other function
+		isTarget := string(next.ID) == q.key
+		if isTarget || q.dht.queryPeerFilter(q.dht, *next) {
+			q.dht.maybeAddAddrs(next.ID, next.Addrs, pstore.TempAddrTTL)
+			saw = append(saw, next.ID)
+		}
+	}
+
+	ch <- &queryUpdate{cause: p, heard: saw, queried: []peer.ID{p}, queryDuration: queryDuration}
+}
+
+func (q *query) updateState(ctx context.Context, up *queryUpdate) {
+	if q.terminated {
+		panic("update should not be invoked after the logical lookup termination")
+	}
+	PublishLookupEvent(ctx,
+		NewLookupEvent(
+			q.dht.self,
+			q.id,
+			q.key,
+			nil,
+			NewLookupUpdateEvent(
+				up.cause,
+				up.cause,
+				up.heard,       // heard
+				nil,            // waiting
+				up.queried,     // queried
+				up.unreachable, // unreachable
+			),
+			nil,
+		),
+	)
+	for _, p := range up.heard {
+		if p == q.dht.self { // don't add self.
+			continue
+		}
+		q.queryPeers.TryAdd(p, up.cause)
+	}
+	for _, p := range up.queried {
+		if p == q.dht.self { // don't add self.
+			continue
+		}
+		if st := q.queryPeers.GetState(p); st == qpeerset.PeerWaiting {
+			q.queryPeers.SetState(p, qpeerset.PeerQueried)
+			q.peerTimes[p] = up.queryDuration
+		} else {
+			panic(fmt.Errorf("kademlia protocol error: tried to transition to the queried state from state %v", st))
+		}
+	}
+	for _, p := range up.unreachable {
+		if p == q.dht.self { // don't add self.
+			continue
+		}
+
+		if st := q.queryPeers.GetState(p); st == qpeerset.PeerWaiting {
+			q.queryPeers.SetState(p, qpeerset.PeerUnreachable)
+		} else {
+			panic(fmt.Errorf("kademlia protocol error: tried to transition to the unreachable state from state %v", st))
+		}
+	}
+}
+
+func (dht *IpfsDHT) dialPeer(ctx context.Context, p peer.ID) error {
+	ctx, span := internal.StartSpan(ctx, "IpfsDHT.DialPeer", trace.WithAttributes(attribute.String("PeerID", p.String())))
+	defer span.End()
+
+	// short-circuit if we're already connected.
+	if dht.host.Network().Connectedness(p) == network.Connected {
+		return nil
+	}
+
+	logger.Debug("not connected. 
dialing.") + routing.PublishQueryEvent(ctx, &routing.QueryEvent{ + Type: routing.DialingPeer, + ID: p, + }) + + pi := peer.AddrInfo{ID: p} + if err := dht.host.Connect(ctx, pi); err != nil { + logger.Debugf("error connecting: %s", err) + routing.PublishQueryEvent(ctx, &routing.QueryEvent{ + Type: routing.QueryError, + Extra: err.Error(), + ID: p, + }) + + return err + } + logger.Debugf("connected. dial success.") + return nil +} diff --git a/go-libp2p-kad-dht/query_test.go b/go-libp2p-kad-dht/query_test.go new file mode 100644 index 0000000..9166964 --- /dev/null +++ b/go-libp2p-kad-dht/query_test.go @@ -0,0 +1,118 @@ +package dht + +import ( + "context" + "fmt" + "testing" + "time" + + tu "github.com/libp2p/go-libp2p-testing/etc" + + "github.com/stretchr/testify/require" +) + +// TODO Debug test failures due to timing issue on windows +// Tests are timing dependent as can be seen in the 2 seconds timed context that we use in "tu.WaitFor". +// While the tests work fine on OSX and complete in under a second, +// they repeatedly fail to complete in the stipulated time on Windows. +// However, increasing the timeout makes them pass on Windows. + +func TestRTEvictionOnFailedQuery(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) + defer cancel() + + d1 := setupDHT(ctx, t, false) + d2 := setupDHT(ctx, t, false) + + for i := 0; i < 10; i++ { + connect(t, ctx, d1, d2) + for _, conn := range d1.host.Network().ConnsToPeer(d2.self) { + conn.Close() + } + } + + // peers should be in the RT because of fixLowPeers + require.NoError(t, tu.WaitFor(ctx, func() error { + if !checkRoutingTable(d1, d2) { + return fmt.Errorf("should have routes") + } + return nil + })) + + // close both hosts so query fails + require.NoError(t, d1.host.Close()) + require.NoError(t, d2.host.Close()) + // peers will still be in the RT because we have decoupled membership from connectivity + require.NoError(t, tu.WaitFor(ctx, func() error { + if !checkRoutingTable(d1, d2) { + return fmt.Errorf("should have routes") + } + return nil + })) + + // failed queries should remove the peers from the RT + _, err := d1.GetClosestPeers(ctx, "test") + require.NoError(t, err) + + _, err = d2.GetClosestPeers(ctx, "test") + require.NoError(t, err) + + require.NoError(t, tu.WaitFor(ctx, func() error { + if checkRoutingTable(d1, d2) { + return fmt.Errorf("should not have routes") + } + return nil + })) +} + +func TestRTAdditionOnSuccessfulQuery(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) + defer cancel() + + d1 := setupDHT(ctx, t, false) + d2 := setupDHT(ctx, t, false) + d3 := setupDHT(ctx, t, false) + + connect(t, ctx, d1, d2) + connect(t, ctx, d2, d3) + // validate RT states + + // d1 has d2 + require.NoError(t, tu.WaitFor(ctx, func() error { + if !checkRoutingTable(d1, d2) { + return fmt.Errorf("should have routes") + } + return nil + })) + // d2 has d3 + require.NoError(t, tu.WaitFor(ctx, func() error { + if !checkRoutingTable(d2, d3) { + return fmt.Errorf("should have routes") + } + return nil + })) + + // however, d1 does not know about d3 + require.NoError(t, tu.WaitFor(ctx, func() error { + if checkRoutingTable(d1, d3) { + return fmt.Errorf("should not have routes") + } + return nil + })) + + // but when d3 queries d2, d1 and d3 discover each other + _, err := d3.GetClosestPeers(ctx, "something") + require.NoError(t, err) + require.NoError(t, tu.WaitFor(ctx, func() error { + if !checkRoutingTable(d1, d3) { + return fmt.Errorf("should have routes") + } 
+ return nil + })) +} + +func checkRoutingTable(a, b *IpfsDHT) bool { + // loop until connection notification has been received. + // under high load, this may not happen as immediately as we would like. + return a.routingTable.Find(b.self) != "" && b.routingTable.Find(a.self) != "" +} diff --git a/go-libp2p-kad-dht/records.go b/go-libp2p-kad-dht/records.go new file mode 100644 index 0000000..271425d --- /dev/null +++ b/go-libp2p-kad-dht/records.go @@ -0,0 +1,138 @@ +package dht + +import ( + "context" + "fmt" + + "github.com/libp2p/go-libp2p-kad-dht/internal" + ci "github.com/libp2p/go-libp2p/core/crypto" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/routing" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" +) + +type pubkrs struct { + pubk ci.PubKey + err error +} + +// GetPublicKey gets the public key when given a Peer ID. It will extract from +// the Peer ID if inlined or ask the node it belongs to or ask the DHT. +func (dht *IpfsDHT) GetPublicKey(ctx context.Context, p peer.ID) (ci.PubKey, error) { + ctx, span := internal.StartSpan(ctx, "IpfsDHT.GetPublicKey", trace.WithAttributes(attribute.Stringer("PeerID", p))) + defer span.End() + + if !dht.enableValues { + return nil, routing.ErrNotSupported + } + + logger.Debugf("getPublicKey for: %s", p) + + // Check locally. Will also try to extract the public key from the peer + // ID itself if possible (if inlined). + pk := dht.peerstore.PubKey(p) + if pk != nil { + return pk, nil + } + + // Try getting the public key both directly from the node it identifies + // and from the DHT, in parallel + ctx, cancel := context.WithCancel(ctx) + defer cancel() + resp := make(chan pubkrs, 2) + go func() { + pubk, err := dht.getPublicKeyFromNode(ctx, p) + resp <- pubkrs{pubk, err} + }() + + // Note that the number of open connections is capped by the dial + // limiter, so there is a chance that getPublicKeyFromDHT(), which + // potentially opens a lot of connections, will block + // getPublicKeyFromNode() from getting a connection. + // Currently this doesn't seem to cause an issue so leaving as is + // for now. + go func() { + pubk, err := dht.getPublicKeyFromDHT(ctx, p) + resp <- pubkrs{pubk, err} + }() + + // Wait for one of the two go routines to return + // a public key (or for both to error out) + var err error + for i := 0; i < 2; i++ { + r := <-resp + if r.err == nil { + // Found the public key + err := dht.peerstore.AddPubKey(p, r.pubk) + if err != nil { + logger.Errorw("failed to add public key to peerstore", "peer", p) + } + return r.pubk, nil + } + err = r.err + } + + // Both go routines failed to find a public key + return nil, err +} + +func (dht *IpfsDHT) getPublicKeyFromDHT(ctx context.Context, p peer.ID) (ci.PubKey, error) { + // Only retrieve one value, because the public key is immutable + // so there's no need to retrieve multiple versions + pkkey := routing.KeyForPublicKey(p) + val, err := dht.GetValue(ctx, pkkey, Quorum(1)) + if err != nil { + return nil, err + } + + pubk, err := ci.UnmarshalPublicKey(val) + if err != nil { + logger.Errorf("Could not unmarshal public key retrieved from DHT for %v", p) + return nil, err + } + + // Note: No need to check that public key hash matches peer ID + // because this is done by GetValues() + logger.Debugf("Got public key for %s from DHT", p) + return pubk, nil +} + +func (dht *IpfsDHT) getPublicKeyFromNode(ctx context.Context, p peer.ID) (ci.PubKey, error) { + // check locally, just in case... 
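The GetPublicKey implementation above fans out to two sources and takes the first success, remembering the last error in case both fail. The same first-success-wins fan-in pattern in isolation, as a sketch (`race` and its toy fetchers are illustrative, not part of this patch):

    package main

    import (
        "context"
        "errors"
        "fmt"
    )

    type result struct {
        val string
        err error
    }

    // race runs both fetchers and returns the first success,
    // or the last error if both fail.
    func race(ctx context.Context, a, b func(context.Context) (string, error)) (string, error) {
        ctx, cancel := context.WithCancel(ctx)
        defer cancel() // stop the loser once we have a winner

        resp := make(chan result, 2) // buffered: the loser never blocks
        for _, f := range []func(context.Context) (string, error){a, b} {
            f := f
            go func() {
                v, err := f(ctx)
                resp <- result{v, err}
            }()
        }

        var err error
        for i := 0; i < 2; i++ {
            r := <-resp
            if r.err == nil {
                return r.val, nil
            }
            err = r.err
        }
        return "", err
    }

    func main() {
        v, err := race(context.Background(),
            func(context.Context) (string, error) { return "", errors.New("dht miss") },
            func(context.Context) (string, error) { return "pubkey-bytes", nil },
        )
        fmt.Println(v, err)
    }
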
+ pk := dht.peerstore.PubKey(p) + if pk != nil { + return pk, nil + } + + // Get the key from the node itself + pkkey := routing.KeyForPublicKey(p) + record, _, err := dht.protoMessenger.GetValue(ctx, p, pkkey) + if err != nil { + return nil, err + } + + // node doesn't have key :( + if record == nil { + return nil, fmt.Errorf("node %v not responding with its public key", p) + } + + pubk, err := ci.UnmarshalPublicKey(record.GetValue()) + if err != nil { + logger.Errorf("Could not unmarshal public key for %v", p) + return nil, err + } + + // Make sure the public key matches the peer ID + id, err := peer.IDFromPublicKey(pubk) + if err != nil { + logger.Errorf("Could not extract peer id from public key for %v", p) + return nil, err + } + if id != p { + return nil, fmt.Errorf("public key %v does not match peer %v", id, p) + } + + logger.Debugf("Got public key from node %v itself", p) + return pubk, nil +} diff --git a/go-libp2p-kad-dht/records_test.go b/go-libp2p-kad-dht/records_test.go new file mode 100644 index 0000000..659b005 --- /dev/null +++ b/go-libp2p-kad-dht/records_test.go @@ -0,0 +1,384 @@ +package dht + +import ( + "context" + "crypto/rand" + "fmt" + "testing" + "time" + + "github.com/libp2p/go-libp2p/core/test" + + u "github.com/ipfs/boxo/util" + record "github.com/libp2p/go-libp2p-record" + tnet "github.com/libp2p/go-libp2p-testing/net" + ci "github.com/libp2p/go-libp2p/core/crypto" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/routing" +) + +// Check that GetPublicKey() correctly extracts a public key +func TestPubkeyExtract(t *testing.T) { + t.Skip("public key extraction for ed25519 keys has been disabled. See https://github.com/libp2p/specs/issues/111") + ctx := context.Background() + dht := setupDHT(ctx, t, false) + defer dht.Close() + + _, pk, err := ci.GenerateEd25519Key(rand.Reader) + if err != nil { + t.Fatal(err) + } + + pid, err := peer.IDFromPublicKey(pk) + if err != nil { + t.Fatal(err) + } + + pkOut, err := dht.GetPublicKey(context.Background(), pid) + if err != nil { + t.Fatal(err) + } + + if !pkOut.Equals(pk) { + t.Fatal("got incorrect public key out") + } +} + +// Check that GetPublicKey() correctly retrieves a public key from the peerstore +func TestPubkeyPeerstore(t *testing.T) { + ctx := context.Background() + dht := setupDHT(ctx, t, false) + + identity := tnet.RandIdentityOrFatal(t) + err := dht.peerstore.AddPubKey(identity.ID(), identity.PublicKey()) + if err != nil { + t.Fatal(err) + } + + rpubk, err := dht.GetPublicKey(context.Background(), identity.ID()) + if err != nil { + t.Fatal(err) + } + + if !identity.PublicKey().Equals(rpubk) { + t.Fatal("got incorrect public key") + } +} + +// Check that GetPublicKey() correctly retrieves a public key directly +// from the node it identifies +func TestPubkeyDirectFromNode(t *testing.T) { + ctx := context.Background() + + dhtA := setupDHT(ctx, t, false) + dhtB := setupDHT(ctx, t, false) + + defer dhtA.Close() + defer dhtB.Close() + defer dhtA.host.Close() + defer dhtB.host.Close() + + connect(t, ctx, dhtA, dhtB) + + pubk, err := dhtA.GetPublicKey(context.Background(), dhtB.self) + if err != nil { + t.Fatal(err) + } + + id, err := peer.IDFromPublicKey(pubk) + if err != nil { + t.Fatal(err) + } + + if id != dhtB.self { + t.Fatal("got incorrect public key") + } +} + +// Check that GetPublicKey() correctly retrieves a public key +// from the DHT +func TestPubkeyFromDHT(t *testing.T) { + ctx := context.Background() + + dhtA := setupDHT(ctx, t, false) + dhtB := setupDHT(ctx, t, false) + + 
defer dhtA.Close() + defer dhtB.Close() + defer dhtA.host.Close() + defer dhtB.host.Close() + + connect(t, ctx, dhtA, dhtB) + + identity := tnet.RandIdentityOrFatal(t) + pubk := identity.PublicKey() + id := identity.ID() + pkkey := routing.KeyForPublicKey(id) + pkbytes, err := ci.MarshalPublicKey(pubk) + if err != nil { + t.Fatal(err) + } + + // Store public key on node B + err = dhtB.PutValue(ctx, pkkey, pkbytes) + if err != nil { + t.Fatal(err) + } + + // Retrieve public key on node A + rpubk, err := dhtA.GetPublicKey(ctx, id) + if err != nil { + t.Fatal(err) + } + + if !pubk.Equals(rpubk) { + t.Fatal("got incorrect public key") + } +} + +// Check that GetPublicKey() correctly returns an error when the +// public key is not available directly from the node or on the DHT +func TestPubkeyNotFound(t *testing.T) { + ctx := context.Background() + + dhtA := setupDHT(ctx, t, false) + dhtB := setupDHT(ctx, t, false) + + defer dhtA.Close() + defer dhtB.Close() + defer dhtA.host.Close() + defer dhtB.host.Close() + + connect(t, ctx, dhtA, dhtB) + + r := u.NewSeededRand(15) // generate deterministic keypair + _, pubk, err := ci.GenerateKeyPairWithReader(ci.RSA, 2048, r) + if err != nil { + t.Fatal(err) + } + id, err := peer.IDFromPublicKey(pubk) + if err != nil { + t.Fatal(err) + } + + // Attempt to retrieve public key on node A (should be not found) + _, err = dhtA.GetPublicKey(ctx, id) + if err == nil { + t.Fatal("Expected not found error") + } +} + +// Check that GetPublicKey() returns an error when +// the DHT returns the wrong key +func TestPubkeyBadKeyFromDHT(t *testing.T) { + ctx := context.Background() + + dhtA := setupDHT(ctx, t, false) + dhtB := setupDHT(ctx, t, false) + + defer dhtA.Close() + defer dhtB.Close() + defer dhtA.host.Close() + defer dhtB.host.Close() + + connect(t, ctx, dhtA, dhtB) + + _, pk, err := test.RandTestKeyPair(ci.RSA, 2048) + if err != nil { + t.Fatal(err) + } + id, err := peer.IDFromPublicKey(pk) + if err != nil { + t.Fatal(err) + } + pkkey := routing.KeyForPublicKey(id) + + peer2 := tnet.RandIdentityOrFatal(t) + if pk == peer2.PublicKey() { + t.Fatal("Public keys shouldn't match here") + } + wrongbytes, err := ci.MarshalPublicKey(peer2.PublicKey()) + if err != nil { + t.Fatal(err) + } + + // Store incorrect public key on node B + rec := record.MakePutRecord(pkkey, wrongbytes) + rec.TimeReceived = u.FormatRFC3339(time.Now()) + err = dhtB.putLocal(ctx, pkkey, rec) + if err != nil { + t.Fatal(err) + } + + // Retrieve public key from node A + _, err = dhtA.GetPublicKey(ctx, id) + if err == nil { + t.Fatal("Expected error because public key is incorrect") + } +} + +// Check that GetPublicKey() returns the correct value +// when the DHT returns the wrong key but the direct +// connection returns the correct key +func TestPubkeyBadKeyFromDHTGoodKeyDirect(t *testing.T) { + ctx := context.Background() + + dhtA := setupDHT(ctx, t, false) + dhtB := setupDHT(ctx, t, false) + + defer dhtA.Close() + defer dhtB.Close() + defer dhtA.host.Close() + defer dhtB.host.Close() + + connect(t, ctx, dhtA, dhtB) + + wrong := tnet.RandIdentityOrFatal(t) + pkkey := routing.KeyForPublicKey(dhtB.self) + + wrongbytes, err := ci.MarshalPublicKey(wrong.PublicKey()) + if err != nil { + t.Fatal(err) + } + + // Store incorrect public key on node B + rec := record.MakePutRecord(pkkey, wrongbytes) + rec.TimeReceived = u.FormatRFC3339(time.Now()) + err = dhtB.putLocal(ctx, pkkey, rec) + if err != nil { + t.Fatal(err) + } + + // Retrieve public key from node A + pubk, err := dhtA.GetPublicKey(ctx, 
dhtB.self) + if err != nil { + t.Fatal(err) + } + + id, err := peer.IDFromPublicKey(pubk) + if err != nil { + t.Fatal(err) + } + + // The incorrect public key retrieved from the DHT + // should be ignored in favour of the correct public + // key retrieved from the node directly + if id != dhtB.self { + t.Fatal("got incorrect public key") + } +} + +// Check that GetPublicKey() returns the correct value +// when both the DHT returns the correct key and the direct +// connection returns the correct key +func TestPubkeyGoodKeyFromDHTGoodKeyDirect(t *testing.T) { + ctx := context.Background() + + dhtA := setupDHT(ctx, t, false) + dhtB := setupDHT(ctx, t, false) + + defer dhtA.Close() + defer dhtB.Close() + defer dhtA.host.Close() + defer dhtB.host.Close() + + connect(t, ctx, dhtA, dhtB) + + pubk := dhtB.peerstore.PubKey(dhtB.self) + pkbytes, err := ci.MarshalPublicKey(pubk) + if err != nil { + t.Fatal(err) + } + + // Store public key on node B + pkkey := routing.KeyForPublicKey(dhtB.self) + err = dhtB.PutValue(ctx, pkkey, pkbytes) + if err != nil { + t.Fatal(err) + } + + // Retrieve public key on node A + rpubk, err := dhtA.GetPublicKey(ctx, dhtB.self) + if err != nil { + t.Fatal(err) + } + + if !pubk.Equals(rpubk) { + t.Fatal("got incorrect public key") + } +} + +func TestValuesDisabled(t *testing.T) { + for i := 0; i < 3; i++ { + enabledA := (i & 0x1) > 0 + enabledB := (i & 0x2) > 0 + t.Run(fmt.Sprintf("a=%v/b=%v", enabledA, enabledB), func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + var ( + optsA, optsB []Option + ) + optsA = append(optsA, ProtocolPrefix("/valuesMaybeDisabled")) + optsB = append(optsB, ProtocolPrefix("/valuesMaybeDisabled")) + + if !enabledA { + optsA = append(optsA, DisableValues()) + } + if !enabledB { + optsB = append(optsB, DisableValues()) + } + + dhtA := setupDHT(ctx, t, false, optsA...) + dhtB := setupDHT(ctx, t, false, optsB...) 
+ + defer dhtA.Close() + defer dhtB.Close() + defer dhtA.host.Close() + defer dhtB.host.Close() + + connect(t, ctx, dhtA, dhtB) + + pubk := dhtB.peerstore.PubKey(dhtB.self) + pkbytes, err := ci.MarshalPublicKey(pubk) + if err != nil { + t.Fatal(err) + } + + pkkey := routing.KeyForPublicKey(dhtB.self) + err = dhtB.PutValue(ctx, pkkey, pkbytes) + if enabledB { + if err != nil { + t.Fatal("put should have succeeded on node B", err) + } + } else { + if err != routing.ErrNotSupported { + t.Fatal("should not have put the value to node B", err) + } + _, err = dhtB.GetValue(ctx, pkkey) + if err != routing.ErrNotSupported { + t.Fatal("get should have failed on node B") + } + rec, _ := dhtB.getLocal(ctx, pkkey) + if rec != nil { + t.Fatal("node B should not have found the value locally") + } + } + + _, err = dhtA.GetValue(ctx, pkkey) + if enabledA { + if err != routing.ErrNotFound { + t.Fatal("node A should not have found the value") + } + } else { + if err != routing.ErrNotSupported { + t.Fatal("node A should not have found the value") + } + } + rec, _ := dhtA.getLocal(ctx, pkkey) + if rec != nil { + t.Fatal("node A should not have found the value locally") + } + }) + } +} diff --git a/go-libp2p-kad-dht/routing.go b/go-libp2p-kad-dht/routing.go new file mode 100644 index 0000000..b0778b9 --- /dev/null +++ b/go-libp2p-kad-dht/routing.go @@ -0,0 +1,694 @@ +package dht + +import ( + "bytes" + "context" + "errors" + "fmt" + "sync" + "time" + + "github.com/libp2p/go-libp2p/core/network" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/peerstore" + "github.com/libp2p/go-libp2p/core/routing" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" + + u "github.com/ipfs/boxo/util" + "github.com/ipfs/go-cid" + "github.com/libp2p/go-libp2p-kad-dht/internal" + internalConfig "github.com/libp2p/go-libp2p-kad-dht/internal/config" + "github.com/libp2p/go-libp2p-kad-dht/netsize" + "github.com/libp2p/go-libp2p-kad-dht/qpeerset" + kb "github.com/libp2p/go-libp2p-kbucket" + record "github.com/libp2p/go-libp2p-record" + "github.com/multiformats/go-multihash" +) + +// This file implements the Routing interface for the IpfsDHT struct. + +// Basic Put/Get + +// PutValue adds value corresponding to given Key. +// This is the top level "Store" operation of the DHT +func (dht *IpfsDHT) PutValue(ctx context.Context, key string, value []byte, opts ...routing.Option) (err error) { + ctx, end := tracer.PutValue(dhtName, ctx, key, value, opts...) + defer func() { end(err) }() + + if !dht.enableValues { + return routing.ErrNotSupported + } + + logger.Debugw("putting value", "key", internal.LoggableRecordKeyString(key)) + + // don't even allow local users to put bad values. + if err := dht.Validator.Validate(key, value); err != nil { + return err + } + + old, err := dht.getLocal(ctx, key) + if err != nil { + // Means something is wrong with the datastore. + return err + } + + // Check if we have an old value that's not the same as the new one. + if old != nil && !bytes.Equal(old.GetValue(), value) { + // Check to see if the new one is better. 
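The Select call that follows consults the configured go-libp2p-record Validator; index 0 means the new value is preferred. A toy Validator for illustration (`seqValidator` is hypothetical, not an API of this package; real validators such as the IPNS one compare embedded sequence numbers):

    package main

    import (
        "errors"
        "fmt"
    )

    // seqValidator is a toy record validator: the first byte of a value
    // acts as a sequence number, and higher wins.
    type seqValidator struct{}

    func (seqValidator) Validate(key string, value []byte) error {
        if len(value) == 0 {
            return errors.New("empty value")
        }
        return nil
    }

    // Select returns the index of the best value, matching the
    // record.Validator contract used by dht.Validator.Select below.
    func (seqValidator) Select(key string, values [][]byte) (int, error) {
        if len(values) == 0 {
            return 0, errors.New("no values")
        }
        best := 0
        for i, v := range values {
            if len(v) > 0 && v[0] > values[best][0] {
                best = i
            }
        }
        return best, nil
    }

    func main() {
        i, _ := seqValidator{}.Select("/toy/key", [][]byte{{2, 'a'}, {7, 'b'}})
        fmt.Println("preferred index:", i) // 1: the higher sequence byte wins
    }
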
+ i, err := dht.Validator.Select(key, [][]byte{value, old.GetValue()}) + if err != nil { + return err + } + if i != 0 { + return fmt.Errorf("can't replace a newer value with an older value") + } + } + + rec := record.MakePutRecord(key, value) + rec.TimeReceived = u.FormatRFC3339(time.Now()) + err = dht.putLocal(ctx, key, rec) + if err != nil { + return err + } + + peers, err := dht.GetClosestPeers(ctx, key) + if err != nil { + return err + } + + wg := sync.WaitGroup{} + for _, p := range peers { + wg.Add(1) + go func(p peer.ID) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + defer wg.Done() + routing.PublishQueryEvent(ctx, &routing.QueryEvent{ + Type: routing.Value, + ID: p, + }) + + err := dht.protoMessenger.PutValue(ctx, p, rec) + if err != nil { + logger.Debugf("failed putting value to peer: %s", err) + } + }(p) + } + wg.Wait() + + return nil +} + +// recvdVal stores a value and the peer from which we got the value. +type recvdVal struct { + Val []byte + From peer.ID +} + +// GetValue searches for the value corresponding to given Key. +func (dht *IpfsDHT) GetValue(ctx context.Context, key string, opts ...routing.Option) (result []byte, err error) { + ctx, end := tracer.GetValue(dhtName, ctx, key, opts...) + defer func() { end(result, err) }() + + if !dht.enableValues { + return nil, routing.ErrNotSupported + } + + // apply defaultQuorum if relevant + var cfg routing.Options + if err := cfg.Apply(opts...); err != nil { + return nil, err + } + opts = append(opts, Quorum(internalConfig.GetQuorum(&cfg))) + + responses, err := dht.SearchValue(ctx, key, opts...) + if err != nil { + return nil, err + } + var best []byte + + for r := range responses { + best = r + } + + if ctx.Err() != nil { + return best, ctx.Err() + } + + if best == nil { + return nil, routing.ErrNotFound + } + logger.Debugf("GetValue %v %x", internal.LoggableRecordKeyString(key), best) + return best, nil +} + +// SearchValue searches for the value corresponding to given Key and streams the results. +func (dht *IpfsDHT) SearchValue(ctx context.Context, key string, opts ...routing.Option) (ch <-chan []byte, err error) { + ctx, end := tracer.SearchValue(dhtName, ctx, key, opts...) 
+ defer func() { ch, err = end(ch, err) }() + + if !dht.enableValues { + return nil, routing.ErrNotSupported + } + + var cfg routing.Options + if err := cfg.Apply(opts...); err != nil { + return nil, err + } + + responsesNeeded := 0 + if !cfg.Offline { + responsesNeeded = internalConfig.GetQuorum(&cfg) + } + + stopCh := make(chan struct{}) + valCh, lookupRes := dht.getValues(ctx, key, stopCh) + + out := make(chan []byte) + go func() { + defer close(out) + best, peersWithBest, aborted := dht.searchValueQuorum(ctx, key, valCh, stopCh, out, responsesNeeded) + if best == nil || aborted { + return + } + + updatePeers := make([]peer.ID, 0, dht.bucketSize) + select { + case l := <-lookupRes: + if l == nil { + return + } + + for _, p := range l.peers { + if _, ok := peersWithBest[p]; !ok { + updatePeers = append(updatePeers, p) + } + } + case <-ctx.Done(): + return + } + + dht.updatePeerValues(dht.Context(), key, best, updatePeers) + }() + + return out, nil +} + +func (dht *IpfsDHT) searchValueQuorum(ctx context.Context, key string, valCh <-chan recvdVal, stopCh chan struct{}, + out chan<- []byte, nvals int) ([]byte, map[peer.ID]struct{}, bool) { + numResponses := 0 + return dht.processValues(ctx, key, valCh, + func(ctx context.Context, v recvdVal, better bool) bool { + numResponses++ + if better { + select { + case out <- v.Val: + case <-ctx.Done(): + return false + } + } + + if nvals > 0 && numResponses > nvals { + close(stopCh) + return true + } + return false + }) +} + +func (dht *IpfsDHT) processValues(ctx context.Context, key string, vals <-chan recvdVal, + newVal func(ctx context.Context, v recvdVal, better bool) bool) (best []byte, peersWithBest map[peer.ID]struct{}, aborted bool) { +loop: + for { + if aborted { + return + } + + select { + case v, ok := <-vals: + if !ok { + break loop + } + + // Select best value + if best != nil { + if bytes.Equal(best, v.Val) { + peersWithBest[v.From] = struct{}{} + aborted = newVal(ctx, v, false) + continue + } + sel, err := dht.Validator.Select(key, [][]byte{best, v.Val}) + if err != nil { + logger.Warnw("failed to select best value", "key", internal.LoggableRecordKeyString(key), "error", err) + continue + } + if sel != 1 { + aborted = newVal(ctx, v, false) + continue + } + } + peersWithBest = make(map[peer.ID]struct{}) + peersWithBest[v.From] = struct{}{} + best = v.Val + aborted = newVal(ctx, v, true) + case <-ctx.Done(): + return + } + } + + return +} + +func (dht *IpfsDHT) updatePeerValues(ctx context.Context, key string, val []byte, peers []peer.ID) { + fixupRec := record.MakePutRecord(key, val) + for _, p := range peers { + go func(p peer.ID) { + // TODO: Is this possible? 
+ if p == dht.self { + err := dht.putLocal(ctx, key, fixupRec) + if err != nil { + logger.Error("Error correcting local dht entry:", err) + } + return + } + ctx, cancel := context.WithTimeout(ctx, time.Second*30) + defer cancel() + err := dht.protoMessenger.PutValue(ctx, p, fixupRec) + if err != nil { + logger.Debug("Error correcting DHT entry: ", err) + } + }(p) + } +} + +func (dht *IpfsDHT) getValues(ctx context.Context, key string, stopQuery chan struct{}) (<-chan recvdVal, <-chan *lookupWithFollowupResult) { + valCh := make(chan recvdVal, 1) + lookupResCh := make(chan *lookupWithFollowupResult, 1) + + logger.Debugw("finding value", "key", internal.LoggableRecordKeyString(key)) + + if rec, err := dht.getLocal(ctx, key); rec != nil && err == nil { + select { + case valCh <- recvdVal{ + Val: rec.GetValue(), + From: dht.self, + }: + case <-ctx.Done(): + } + } + + go func() { + defer close(valCh) + defer close(lookupResCh) + lookupRes, err := dht.runLookupWithFollowup(ctx, key, + func(ctx context.Context, p peer.ID) ([]*peer.AddrInfo, error) { + // For DHT query command + routing.PublishQueryEvent(ctx, &routing.QueryEvent{ + Type: routing.SendingQuery, + ID: p, + }) + + rec, peers, err := dht.protoMessenger.GetValue(ctx, p, key) + if err != nil { + logger.Debugf("error getting closer peers: %s", err) + return nil, err + } + + // For DHT query command + routing.PublishQueryEvent(ctx, &routing.QueryEvent{ + Type: routing.PeerResponse, + ID: p, + Responses: peers, + }) + + if rec == nil { + return peers, nil + } + + val := rec.GetValue() + if val == nil { + logger.Debug("received a nil record value") + return peers, nil + } + if err := dht.Validator.Validate(key, val); err != nil { + // make sure record is valid + logger.Debugw("received invalid record (discarded)", "error", err) + return peers, nil + } + + // the record is present and valid, send it out for processing + select { + case valCh <- recvdVal{ + Val: val, + From: p, + }: + case <-ctx.Done(): + return nil, ctx.Err() + } + + return peers, nil + }, + func(*qpeerset.QueryPeerset) bool { + select { + case <-stopQuery: + return true + default: + return false + } + }, + ) + + if err != nil { + return + } + lookupResCh <- lookupRes + + if ctx.Err() == nil { + dht.refreshRTIfNoShortcut(kb.ConvertKey(key), lookupRes) + } + }() + + return valCh, lookupResCh +} + +func (dht *IpfsDHT) refreshRTIfNoShortcut(key kb.ID, lookupRes *lookupWithFollowupResult) { + if lookupRes.completed { + // refresh the cpl for this key as the query was successful + dht.routingTable.ResetCplRefreshedAtForID(key, time.Now()) + } +} + +// Provider abstraction for indirect stores. +// Some DHTs store values directly, while an indirect store stores pointers to +// locations of the value, similarly to Coral and Mainline DHT. 
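Concretely, callers drive this machinery through the routing.ContentRouting interface that the IpfsDHT implements. A usage sketch (the helper name `announce` is illustrative, not part of this patch):

    package example

    import (
        "context"
        "time"

        "github.com/ipfs/go-cid"
        "github.com/libp2p/go-libp2p/core/routing"
    )

    // announce publishes a provider record for c. The final argument
    // mirrors Provide's brdcst parameter: false stores the record
    // locally only.
    func announce(r routing.ContentRouting, c cid.Cid) error {
        // Bound the lookup plus the final puts; Provide reserves part of
        // the deadline for the puts, as classicProvide below shows.
        ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
        defer cancel()
        return r.Provide(ctx, c, true)
    }
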
+ +// Provide makes this node announce that it can provide a value for the given key +func (dht *IpfsDHT) Provide(ctx context.Context, key cid.Cid, brdcst bool) (err error) { + ctx, end := tracer.Provide(dhtName, ctx, key, brdcst) + defer func() { end(err) }() + + if !dht.enableProviders { + return routing.ErrNotSupported + } else if !key.Defined() { + return fmt.Errorf("invalid cid: undefined") + } + keyMH := key.Hash() + logger.Debugw("providing", "cid", key, "mh", internal.LoggableProviderRecordBytes(keyMH)) + + // add self locally + dht.providerStore.AddProvider(ctx, keyMH, peer.AddrInfo{ID: dht.self}) + if !brdcst { + return nil + } + + if dht.enableOptProv { + err := dht.optimisticProvide(ctx, keyMH) + if errors.Is(err, netsize.ErrNotEnoughData) { + logger.Debugln("not enough data for optimistic provide taking classic approach") + return dht.classicProvide(ctx, keyMH) + } + return err + } + return dht.classicProvide(ctx, keyMH) +} + +func (dht *IpfsDHT) classicProvide(ctx context.Context, keyMH multihash.Multihash) error { + closerCtx := ctx + if deadline, ok := ctx.Deadline(); ok { + now := time.Now() + timeout := deadline.Sub(now) + + if timeout < 0 { + // timed out + return context.DeadlineExceeded + } else if timeout < 10*time.Second { + // Reserve 10% for the final put. + deadline = deadline.Add(-timeout / 10) + } else { + // Otherwise, reserve a second (we'll already be + // connected so this should be fast). + deadline = deadline.Add(-time.Second) + } + var cancel context.CancelFunc + closerCtx, cancel = context.WithDeadline(ctx, deadline) + defer cancel() + } + + var exceededDeadline bool + peers, err := dht.GetClosestPeers(closerCtx, string(keyMH)) + switch err { + case context.DeadlineExceeded: + // If the _inner_ deadline has been exceeded but the _outer_ + // context is still fine, provide the value to the closest peers + // we managed to find, even if they're not the _actual_ closest peers. + if ctx.Err() != nil { + return ctx.Err() + } + exceededDeadline = true + case nil: + default: + return err + } + + wg := sync.WaitGroup{} + for _, p := range peers { + wg.Add(1) + go func(p peer.ID) { + defer wg.Done() + logger.Debugf("putProvider(%s, %s)", internal.LoggableProviderRecordBytes(keyMH), p) + err := dht.protoMessenger.PutProviderAddrs(ctx, p, keyMH, peer.AddrInfo{ + ID: dht.self, + Addrs: dht.filterAddrs(dht.host.Addrs()), + }) + if err != nil { + logger.Debug(err) + } + }(p) + } + wg.Wait() + if exceededDeadline { + return context.DeadlineExceeded + } + return ctx.Err() +} + +// FindProviders searches until the context expires. +func (dht *IpfsDHT) FindProviders(ctx context.Context, c cid.Cid) ([]peer.AddrInfo, error) { + if !dht.enableProviders { + return nil, routing.ErrNotSupported + } else if !c.Defined() { + return nil, fmt.Errorf("invalid cid: undefined") + } + + var providers []peer.AddrInfo + for p := range dht.FindProvidersAsync(ctx, c, dht.bucketSize) { + providers = append(providers, p) + } + return providers, nil +} + +// FindProvidersAsync is the same thing as FindProviders, but returns a channel. +// Peers will be returned on the channel as soon as they are found, even before +// the search query completes. If count is zero then the query will run until it +// completes. Note: not reading from the returned channel may block the query +// from progressing. 
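Given that caveat, a caller that wants a single provider should cancel the lookup and keep draining until the channel closes, so the query goroutine is never stuck on a send. A sketch (`firstProvider` is an illustrative helper):

    package example

    import (
        "context"

        "github.com/ipfs/go-cid"
        "github.com/libp2p/go-libp2p/core/peer"
        "github.com/libp2p/go-libp2p/core/routing"
    )

    // firstProvider returns the first provider found for c, then shuts
    // the lookup down cleanly.
    func firstProvider(ctx context.Context, r routing.ContentRouting, c cid.Cid) (peer.AddrInfo, bool) {
        ctx, cancel := context.WithCancel(ctx)
        defer cancel()

        ch := r.FindProvidersAsync(ctx, c, 0) // count == 0: search until the query completes
        for p := range ch {
            cancel() // stop the underlying query...
            for range ch {
                // ...and drain it so its sends never block
            }
            return p, true
        }
        return peer.AddrInfo{}, false
    }
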
+func (dht *IpfsDHT) FindProvidersAsync(ctx context.Context, key cid.Cid, count int) (ch <-chan peer.AddrInfo) {
+	ctx, end := tracer.FindProvidersAsync(dhtName, ctx, key, count)
+	defer func() { ch = end(ch, nil) }()
+
+	if !dht.enableProviders || !key.Defined() {
+		peerOut := make(chan peer.AddrInfo)
+		close(peerOut)
+		return peerOut
+	}
+
+	peerOut := make(chan peer.AddrInfo)
+
+	keyMH := key.Hash()
+
+	logger.Debugw("finding providers", "cid", key, "mh", internal.LoggableProviderRecordBytes(keyMH))
+	go dht.findProvidersAsyncRoutine(ctx, keyMH, count, peerOut)
+	return peerOut
+}
+
+func (dht *IpfsDHT) findProvidersAsyncRoutine(ctx context.Context, key multihash.Multihash, count int, peerOut chan peer.AddrInfo) {
+	// use a span here because, unlike tracer.FindProvidersAsync, we know who told us about each provider, and that is interesting to log.
+	ctx, span := internal.StartSpan(ctx, "IpfsDHT.FindProvidersAsyncRoutine")
+	defer span.End()
+
+	defer close(peerOut)
+
+	findAll := count == 0
+
+	ps := make(map[peer.ID]peer.AddrInfo)
+	psLock := &sync.Mutex{}
+	psTryAdd := func(p peer.AddrInfo) bool {
+		psLock.Lock()
+		defer psLock.Unlock()
+		pi, ok := ps[p.ID]
+		if (!ok || ((len(pi.Addrs) == 0) && len(p.Addrs) > 0)) && (len(ps) < count || findAll) {
+			ps[p.ID] = p
+			return true
+		}
+		return false
+	}
+	psSize := func() int {
+		psLock.Lock()
+		defer psLock.Unlock()
+		return len(ps)
+	}
+
+	provs, err := dht.providerStore.GetProviders(ctx, key)
+	if err != nil {
+		return
+	}
+	for _, p := range provs {
+		// NOTE: Assuming that this list of peers is unique
+		if psTryAdd(p) {
+			select {
+			case peerOut <- p:
+				span.AddEvent("found provider", trace.WithAttributes(
+					attribute.Stringer("peer", p.ID),
+					attribute.Stringer("from", dht.self),
+				))
+			case <-ctx.Done():
+				return
+			}
+		}
+
+		// If we have enough peers locally, don't bother with remote RPC
+		// TODO: is this a DOS vector? 
+		if !findAll && len(ps) >= count {
+			return
+		}
+	}
+
+	lookupRes, err := dht.runLookupWithFollowup(ctx, string(key),
+		func(ctx context.Context, p peer.ID) ([]*peer.AddrInfo, error) {
+
+			// For DHT query command
+			routing.PublishQueryEvent(ctx, &routing.QueryEvent{
+				Type: routing.SendingQuery,
+				ID:   p,
+			})
+
+			provs, closest, err := dht.protoMessenger.GetProviders(ctx, p, key)
+			if err != nil {
+				return nil, err
+			}
+
+			logger.Debugf("%d provider entries", len(provs))
+
+			// Add unique providers from request, up to 'count'
+			for _, prov := range provs {
+				dht.maybeAddAddrs(prov.ID, prov.Addrs, peerstore.TempAddrTTL)
+				logger.Debugf("got provider: %s", prov)
+				if psTryAdd(*prov) {
+					logger.Debugf("using provider: %s", prov)
+					select {
+					case peerOut <- *prov:
+						span.AddEvent("found provider", trace.WithAttributes(
+							attribute.Stringer("peer", prov.ID),
+							attribute.Stringer("from", p),
+						))
+					case <-ctx.Done():
+						logger.Debug("context timed out sending more providers")
+						return nil, ctx.Err()
+					}
+				}
+				if !findAll && psSize() >= count {
+					logger.Debugf("got enough providers (%d/%d)", psSize(), count)
+					return nil, nil
+				}
+			}
+
+			// Give closer peers back to the query to be queried
+			logger.Debugf("got closer peers: %d %s", len(closest), closest)
+
+			routing.PublishQueryEvent(ctx, &routing.QueryEvent{
+				Type:      routing.PeerResponse,
+				ID:        p,
+				Responses: closest,
+			})
+
+			return closest, nil
+		},
+		func(*qpeerset.QueryPeerset) bool {
+			return !findAll && psSize() >= count
+		},
+	)
+
+	if err == nil && ctx.Err() == nil {
+		dht.refreshRTIfNoShortcut(kb.ConvertKey(string(key)), lookupRes)
+	}
+}
+
+// FindPeer searches for a peer with given ID.
+func (dht *IpfsDHT) FindPeer(ctx context.Context, id peer.ID) (pi peer.AddrInfo, err error) {
+	ctx, end := tracer.FindPeer(dhtName, ctx, id)
+	defer func() { end(pi, err) }()
+
+	if err := id.Validate(); err != nil {
+		return peer.AddrInfo{}, err
+	}
+
+	logger.Debugw("finding peer", "peer", id)
+
+	// Check if we're already connected to them
+	if pi := dht.FindLocal(ctx, id); pi.ID != "" {
+		return pi, nil
+	}
+
+	lookupRes, err := dht.runLookupWithFollowup(ctx, string(id),
+		func(ctx context.Context, p peer.ID) ([]*peer.AddrInfo, error) {
+			// For DHT query command
+			routing.PublishQueryEvent(ctx, &routing.QueryEvent{
+				Type: routing.SendingQuery,
+				ID:   p,
+			})
+
+			peers, err := dht.protoMessenger.GetClosestPeers(ctx, p, id)
+			if err != nil {
+				logger.Debugf("error getting closer peers: %s", err)
+				return nil, err
+			}
+
+			// For DHT query command
+			routing.PublishQueryEvent(ctx, &routing.QueryEvent{
+				Type:      routing.PeerResponse,
+				ID:        p,
+				Responses: peers,
+			})
+
+			return peers, err
+		},
+		func(*qpeerset.QueryPeerset) bool {
+			return dht.host.Network().Connectedness(id) == network.Connected
+		},
+	)
+
+	if err != nil {
+		return peer.AddrInfo{}, err
+	}
+
+	dialedPeerDuringQuery := false
+	for i, p := range lookupRes.peers {
+		if p == id {
+			// Note: we consider PeerUnreachable to be a valid state because the peer may not support the DHT protocol
+			// and therefore the peer would fail the query. The fact that a peer that is returned can be a non-DHT
+			// server peer and is not identified as such is a bug.
+			dialedPeerDuringQuery = (lookupRes.state[i] == qpeerset.PeerQueried || lookupRes.state[i] == qpeerset.PeerUnreachable || lookupRes.state[i] == qpeerset.PeerWaiting)
+			break
+		}
+	}
+
+	// Return peer information if we tried to dial the peer during the query or we are (or recently were) connected
+	// to the peer. 
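Callers see all of this through the routing.PeerRouting contract: either address info worth dialing comes back, or routing.ErrNotFound. A usage sketch (`findAndConnect` is an illustrative helper):

    package example

    import (
        "context"

        "github.com/libp2p/go-libp2p/core/host"
        "github.com/libp2p/go-libp2p/core/peer"
        "github.com/libp2p/go-libp2p/core/routing"
    )

    // findAndConnect resolves a peer through any routing.PeerRouting
    // implementation (the IpfsDHT is one) and dials the returned addresses.
    func findAndConnect(ctx context.Context, h host.Host, r routing.PeerRouting, id peer.ID) error {
        ai, err := r.FindPeer(ctx, id)
        if err != nil {
            return err // routing.ErrNotFound when the lookup finds nothing dialable
        }
        return h.Connect(ctx, ai)
    }
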
+ connectedness := dht.host.Network().Connectedness(id) + if dialedPeerDuringQuery || connectedness == network.Connected || connectedness == network.CanConnect { + return dht.peerstore.PeerInfo(id), nil + } + + return peer.AddrInfo{}, routing.ErrNotFound +} diff --git a/go-libp2p-kad-dht/routing_options.go b/go-libp2p-kad-dht/routing_options.go new file mode 100644 index 0000000..bccc6d6 --- /dev/null +++ b/go-libp2p-kad-dht/routing_options.go @@ -0,0 +1,21 @@ +package dht + +import ( + internalConfig "github.com/libp2p/go-libp2p-kad-dht/internal/config" + "github.com/libp2p/go-libp2p/core/routing" +) + +// Quorum is a DHT option that tells the DHT how many peers it needs to get +// values from before returning the best one. Zero means the DHT query +// should complete instead of returning early. +// +// Default: 0 +func Quorum(n int) routing.Option { + return func(opts *routing.Options) error { + if opts.Other == nil { + opts.Other = make(map[interface{}]interface{}, 1) + } + opts.Other[internalConfig.QuorumOptionKey{}] = n + return nil + } +} diff --git a/go-libp2p-kad-dht/rt_diversity_filter.go b/go-libp2p-kad-dht/rt_diversity_filter.go new file mode 100644 index 0000000..45aedd2 --- /dev/null +++ b/go-libp2p-kad-dht/rt_diversity_filter.go @@ -0,0 +1,103 @@ +package dht + +import ( + "sync" + + "github.com/libp2p/go-libp2p/core/host" + "github.com/libp2p/go-libp2p/core/peer" + + "github.com/libp2p/go-libp2p-kbucket/peerdiversity" + + ma "github.com/multiformats/go-multiaddr" +) + +var _ peerdiversity.PeerIPGroupFilter = (*rtPeerIPGroupFilter)(nil) + +type rtPeerIPGroupFilter struct { + mu sync.RWMutex + h host.Host + + maxPerCpl int + maxForTable int + + cplIpGroupCount map[int]map[peerdiversity.PeerIPGroupKey]int + tableIpGroupCount map[peerdiversity.PeerIPGroupKey]int +} + +// NewRTPeerDiversityFilter constructs the `PeerIPGroupFilter` that will be used to configure +// the diversity filter for the Routing Table. +// Please see the docs for `peerdiversity.PeerIPGroupFilter` AND `peerdiversity.Filter` for more details. 
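The tests below construct exactly this wiring; in application code it looks roughly like the sketch here (the limits 2 and 3 are illustrative, and the import path is the upstream module path rather than this repository's vendored copy):

    package example

    import (
        "context"

        dht "github.com/libp2p/go-libp2p-kad-dht"
        "github.com/libp2p/go-libp2p/core/host"
    )

    // newDiverseDHT builds a DHT whose routing table admits at most 2
    // peers per IP group per cpl and at most 3 per IP group table-wide.
    func newDiverseDHT(ctx context.Context, h host.Host) (*dht.IpfsDHT, error) {
        filter := dht.NewRTPeerDiversityFilter(h, 2, 3)
        return dht.New(ctx, h, dht.RoutingTablePeerDiversityFilter(filter))
    }
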
+func NewRTPeerDiversityFilter(h host.Host, maxPerCpl, maxForTable int) *rtPeerIPGroupFilter {
+	return &rtPeerIPGroupFilter{
+		h: h,
+
+		maxPerCpl:   maxPerCpl,
+		maxForTable: maxForTable,
+
+		cplIpGroupCount:   make(map[int]map[peerdiversity.PeerIPGroupKey]int),
+		tableIpGroupCount: make(map[peerdiversity.PeerIPGroupKey]int),
+	}
+
+}
+
+func (r *rtPeerIPGroupFilter) Allow(g peerdiversity.PeerGroupInfo) bool {
+	r.mu.RLock()
+	defer r.mu.RUnlock()
+
+	key := g.IPGroupKey
+	cpl := g.Cpl
+
+	if r.tableIpGroupCount[key] >= r.maxForTable {
+
+		return false
+	}
+
+	c, ok := r.cplIpGroupCount[cpl]
+	allow := !ok || c[key] < r.maxPerCpl
+	return allow
+}
+
+func (r *rtPeerIPGroupFilter) Increment(g peerdiversity.PeerGroupInfo) {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+
+	key := g.IPGroupKey
+	cpl := g.Cpl
+
+	r.tableIpGroupCount[key] = r.tableIpGroupCount[key] + 1
+	if _, ok := r.cplIpGroupCount[cpl]; !ok {
+		r.cplIpGroupCount[cpl] = make(map[peerdiversity.PeerIPGroupKey]int)
+	}
+
+	r.cplIpGroupCount[cpl][key] = r.cplIpGroupCount[cpl][key] + 1
+}
+
+func (r *rtPeerIPGroupFilter) Decrement(g peerdiversity.PeerGroupInfo) {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+
+	key := g.IPGroupKey
+	cpl := g.Cpl
+
+	r.tableIpGroupCount[key] = r.tableIpGroupCount[key] - 1
+	if r.tableIpGroupCount[key] == 0 {
+		delete(r.tableIpGroupCount, key)
+	}
+
+	r.cplIpGroupCount[cpl][key] = r.cplIpGroupCount[cpl][key] - 1
+	if r.cplIpGroupCount[cpl][key] == 0 {
+		delete(r.cplIpGroupCount[cpl], key)
+	}
+	if len(r.cplIpGroupCount[cpl]) == 0 {
+		delete(r.cplIpGroupCount, cpl)
+	}
+}
+
+func (r *rtPeerIPGroupFilter) PeerAddresses(p peer.ID) []ma.Multiaddr {
+	cs := r.h.Network().ConnsToPeer(p)
+	addr := make([]ma.Multiaddr, 0, len(cs))
+	for _, c := range cs {
+		addr = append(addr, c.RemoteMultiaddr())
+	}
+	return addr
+}
diff --git a/go-libp2p-kad-dht/rt_diversity_filter_test.go b/go-libp2p-kad-dht/rt_diversity_filter_test.go
new file mode 100644
index 0000000..9faad63
--- /dev/null
+++ b/go-libp2p-kad-dht/rt_diversity_filter_test.go
@@ -0,0 +1,155 @@
+package dht
+
+import (
+	"context"
+	"testing"
+	"time"
+
+	kb "github.com/libp2p/go-libp2p-kbucket"
+	"github.com/libp2p/go-libp2p-kbucket/peerdiversity"
+	bhost "github.com/libp2p/go-libp2p/p2p/host/basic"
+	swarmt "github.com/libp2p/go-libp2p/p2p/net/swarm/testing"
+
+	"github.com/stretchr/testify/require"
+)
+
+func TestRTPeerDiversityFilter(t *testing.T) {
+	h, err := bhost.NewHost(swarmt.GenSwarm(t, swarmt.OptDisableReuseport), new(bhost.HostOpts))
+	require.NoError(t, err)
+	h.Start()
+	defer h.Close()
+	r := NewRTPeerDiversityFilter(h, 2, 3)
+
+	// table should only have 2 for each prefix per cpl
+	key := "key"
+	g := peerdiversity.PeerGroupInfo{Cpl: 1, IPGroupKey: peerdiversity.PeerIPGroupKey(key)}
+	require.True(t, r.Allow(g))
+	r.Increment(g)
+	require.True(t, r.Allow(g))
+	r.Increment(g)
+	require.False(t, r.Allow(g))
+
+	// table should ONLY have 3 for a Prefix
+	key = "random"
+	g2 := peerdiversity.PeerGroupInfo{Cpl: 2, IPGroupKey: peerdiversity.PeerIPGroupKey(key)}
+	require.True(t, r.Allow(g2))
+	r.Increment(g2)
+
+	g2.Cpl = 3
+	require.True(t, r.Allow(g2))
+	r.Increment(g2)
+
+	g2.Cpl = 4
+	require.True(t, r.Allow(g2))
+	r.Increment(g2)
+
+	require.False(t, r.Allow(g2))
+
+	// remove a peer with a prefix and it works
+	r.Decrement(g2)
+	require.True(t, r.Allow(g2))
+	r.Increment(g2)
+
+	// and then it doesn't work again
+	require.False(t, r.Allow(g2))
+}
+
+func TestRoutingTableEndToEndMaxPerCpl(t *testing.T) {
+	ctx := context.Background()
+	h, err := 
bhost.NewHost(swarmt.GenSwarm(t, swarmt.OptDisableReuseport), new(bhost.HostOpts)) + require.NoError(t, err) + h.Start() + defer h.Close() + r := NewRTPeerDiversityFilter(h, 1, 2) + + d, err := New( + ctx, + h, + testPrefix, + NamespacedValidator("v", blankValidator{}), + Mode(ModeServer), + DisableAutoRefresh(), + RoutingTablePeerDiversityFilter(r), + ) + require.NoError(t, err) + defer d.Close() + + var d2 *IpfsDHT + var d3 *IpfsDHT + + for { + d2 = setupDHT(ctx, t, false) + if kb.CommonPrefixLen(d.selfKey, kb.ConvertPeerID(d2.self)) == 1 { + break + } + } + + for { + d3 = setupDHT(ctx, t, false) + if kb.CommonPrefixLen(d.selfKey, kb.ConvertPeerID(d3.self)) == 1 { + break + } + } + + // d2 will be allowed in the Routing table but + // d3 will not be allowed. + connectNoSync(t, ctx, d, d2) + require.Eventually(t, func() bool { + return d.routingTable.Find(d2.self) != "" + }, 1*time.Second, 100*time.Millisecond) + + connectNoSync(t, ctx, d, d3) + time.Sleep(1 * time.Second) + require.Len(t, d.routingTable.ListPeers(), 1) + require.True(t, d.routingTable.Find(d3.self) == "") + + // it works after removing d2 + d.routingTable.RemovePeer(d2.self) + b, err := d.routingTable.TryAddPeer(d3.self, true, false) + require.NoError(t, err) + require.True(t, b) + require.Len(t, d.routingTable.ListPeers(), 1) + require.True(t, d.routingTable.Find(d3.self) != "") +} + +func TestRoutingTableEndToEndMaxPerTable(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + h, err := bhost.NewHost(swarmt.GenSwarm(t, swarmt.OptDisableReuseport), new(bhost.HostOpts)) + require.NoError(t, err) + h.Start() + defer h.Close() + r := NewRTPeerDiversityFilter(h, 100, 3) + + d, err := New( + ctx, + h, + testPrefix, + NamespacedValidator("v", blankValidator{}), + Mode(ModeServer), + DisableAutoRefresh(), + RoutingTablePeerDiversityFilter(r), + ) + require.NoError(t, err) + defer d.Close() + + // only 3 peers per prefix for the table. 
+	d2 := setupDHT(ctx, t, false, DisableAutoRefresh())
+	connect(t, ctx, d, d2)
+	waitForWellFormedTables(t, []*IpfsDHT{d}, 1, 1, 1*time.Second)
+
+	d3 := setupDHT(ctx, t, false, DisableAutoRefresh())
+	connect(t, ctx, d, d3)
+	waitForWellFormedTables(t, []*IpfsDHT{d}, 2, 2, 1*time.Second)
+
+	d4 := setupDHT(ctx, t, false, DisableAutoRefresh())
+	connect(t, ctx, d, d4)
+	waitForWellFormedTables(t, []*IpfsDHT{d}, 3, 3, 1*time.Second)
+
+	d5 := setupDHT(ctx, t, false, DisableAutoRefresh())
+	connectNoSync(t, ctx, d, d5)
+	time.Sleep(1 * time.Second)
+	require.Len(t, d.routingTable.ListPeers(), 3)
+	require.True(t, d.routingTable.Find(d5.self) == "")
+}
diff --git a/go-libp2p-kad-dht/rtrefresh/rt_refresh_manager.go b/go-libp2p-kad-dht/rtrefresh/rt_refresh_manager.go
new file mode 100644
index 0000000..c81e9e6
--- /dev/null
+++ b/go-libp2p-kad-dht/rtrefresh/rt_refresh_manager.go
@@ -0,0 +1,372 @@
+package rtrefresh
+
+import (
+	"context"
+	"fmt"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/hashicorp/go-multierror"
+	logging "github.com/ipfs/go-log/v2"
+	"github.com/libp2p/go-libp2p-kad-dht/internal"
+	kbucket "github.com/libp2p/go-libp2p-kbucket"
+	"github.com/libp2p/go-libp2p/core/host"
+	"github.com/libp2p/go-libp2p/core/peer"
+	"github.com/multiformats/go-base32"
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/codes"
+	"go.opentelemetry.io/otel/trace"
+)
+
+var logger = logging.Logger("dht/RtRefreshManager")
+
+const (
+	peerPingTimeout = 10 * time.Second
+)
+
+type triggerRefreshReq struct {
+	respCh          chan error
+	forceCplRefresh bool
+}
+
+type RtRefreshManager struct {
+	ctx      context.Context
+	cancel   context.CancelFunc
+	refcount sync.WaitGroup
+
+	// peerId of this DHT peer, i.e., the self peerId.
+	h         host.Host
+	dhtPeerId peer.ID
+	rt        *kbucket.RoutingTable
+
+	enableAutoRefresh   bool                                        // should we run periodic refreshes?
+	refreshKeyGenFnc    func(cpl uint) (string, error)              // generate the key for the query to refresh this cpl
+	refreshQueryFnc     func(ctx context.Context, key string) error // query to run for a refresh.
+	refreshPingFnc      func(ctx context.Context, p peer.ID) error  // request to check liveness of remote peer
+	refreshQueryTimeout time.Duration                               // timeout for one refresh query
+
+	// interval between two periodic refreshes.
+	// also, a cpl won't be refreshed if the time since it was last refreshed
+	// is below the interval, unless a "forced" refresh is done.
+	refreshInterval                    time.Duration
+	successfulOutboundQueryGracePeriod time.Duration
+
+	triggerRefresh chan *triggerRefreshReq // channel to write refresh requests to. 
+
+	refreshDoneCh chan struct{} // write to this channel after every refresh
+}
+
+func NewRtRefreshManager(h host.Host, rt *kbucket.RoutingTable, autoRefresh bool,
+	refreshKeyGenFnc func(cpl uint) (string, error),
+	refreshQueryFnc func(ctx context.Context, key string) error,
+	refreshPingFnc func(ctx context.Context, p peer.ID) error,
+	refreshQueryTimeout time.Duration,
+	refreshInterval time.Duration,
+	successfulOutboundQueryGracePeriod time.Duration,
+	refreshDoneCh chan struct{}) (*RtRefreshManager, error) {
+
+	ctx, cancel := context.WithCancel(context.Background())
+	return &RtRefreshManager{
+		ctx:       ctx,
+		cancel:    cancel,
+		h:         h,
+		dhtPeerId: h.ID(),
+		rt:        rt,
+
+		enableAutoRefresh: autoRefresh,
+		refreshKeyGenFnc:  refreshKeyGenFnc,
+		refreshQueryFnc:   refreshQueryFnc,
+		refreshPingFnc:    refreshPingFnc,
+
+		refreshQueryTimeout:                refreshQueryTimeout,
+		refreshInterval:                    refreshInterval,
+		successfulOutboundQueryGracePeriod: successfulOutboundQueryGracePeriod,
+
+		triggerRefresh: make(chan *triggerRefreshReq),
+		refreshDoneCh:  refreshDoneCh,
+	}, nil
+}
+
+func (r *RtRefreshManager) Start() {
+	r.refcount.Add(1)
+	go r.loop()
+}
+
+func (r *RtRefreshManager) Close() error {
+	r.cancel()
+	r.refcount.Wait()
+	return nil
+}
+
+// Refresh requests the refresh manager to refresh the Routing Table.
+// If the force parameter is set to true, all buckets will be refreshed irrespective of when they were last refreshed.
+//
+// The returned channel will block until the refresh finishes, then yield the
+// error and close. The channel is buffered and safe to ignore.
+func (r *RtRefreshManager) Refresh(force bool) <-chan error {
+	resp := make(chan error, 1)
+	r.refcount.Add(1)
+	go func() {
+		defer r.refcount.Done()
+		select {
+		case r.triggerRefresh <- &triggerRefreshReq{respCh: resp, forceCplRefresh: force}:
+		case <-r.ctx.Done():
+			resp <- r.ctx.Err()
+			close(resp)
+		}
+	}()
+
+	return resp
+}
+
+// RefreshNoWait requests the refresh manager to refresh the Routing Table.
+// However, it moves on without blocking if its request can't get through.
+func (r *RtRefreshManager) RefreshNoWait() {
+	select {
+	case r.triggerRefresh <- &triggerRefreshReq{}:
+	default:
+	}
+}
+
+// pingAndEvictPeers pings Routing Table peers that haven't been heard of/from
+// in the interval they should have been, and evicts them if they don't reply. 
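Per the Refresh contract above, both blocking and fire-and-forget call sites reduce to one-liners. A sketch (helper names illustrative, upstream import path assumed):

    package example

    import "github.com/libp2p/go-libp2p-kad-dht/rtrefresh"

    // refreshAndWait forces a full refresh and blocks for the result; the
    // returned channel yields one error (possibly nil) and is then closed.
    func refreshAndWait(m *rtrefresh.RtRefreshManager) error {
        return <-m.Refresh(true)
    }

    // refreshEventually fires a refresh without waiting; the buffered
    // result channel makes dropping it safe.
    func refreshEventually(m *rtrefresh.RtRefreshManager) {
        m.Refresh(false)
    }
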
+func (r *RtRefreshManager) pingAndEvictPeers(ctx context.Context) { + ctx, span := internal.StartSpan(ctx, "RefreshManager.PingAndEvictPeers") + defer span.End() + + var peersChecked int + var alive int64 + var wg sync.WaitGroup + peers := r.rt.GetPeerInfos() + for _, ps := range peers { + if time.Since(ps.LastSuccessfulOutboundQueryAt) <= r.successfulOutboundQueryGracePeriod { + continue + } + + peersChecked++ + wg.Add(1) + go func(ps kbucket.PeerInfo) { + defer wg.Done() + + livelinessCtx, cancel := context.WithTimeout(ctx, peerPingTimeout) + defer cancel() + peerIdStr := ps.Id.String() + livelinessCtx, span := internal.StartSpan(livelinessCtx, "RefreshManager.PingAndEvictPeers.worker", trace.WithAttributes(attribute.String("peer", peerIdStr))) + defer span.End() + + if err := r.h.Connect(livelinessCtx, peer.AddrInfo{ID: ps.Id}); err != nil { + logger.Debugw("evicting peer after failed connection", "peer", peerIdStr, "error", err) + span.RecordError(err) + r.rt.RemovePeer(ps.Id) + return + } + + if err := r.refreshPingFnc(livelinessCtx, ps.Id); err != nil { + logger.Debugw("evicting peer after failed ping", "peer", peerIdStr, "error", err) + span.RecordError(err) + r.rt.RemovePeer(ps.Id) + return + } + + atomic.AddInt64(&alive, 1) + }(ps) + } + wg.Wait() + + span.SetAttributes(attribute.Int("NumPeersChecked", peersChecked), attribute.Int("NumPeersSkipped", len(peers)-peersChecked), attribute.Int64("NumPeersAlive", alive)) +} + +func (r *RtRefreshManager) loop() { + defer r.refcount.Done() + + var refreshTickrCh <-chan time.Time + if r.enableAutoRefresh { + err := r.doRefresh(r.ctx, true) + if err != nil { + logger.Warn("failed when refreshing routing table", err) + } + t := time.NewTicker(r.refreshInterval) + defer t.Stop() + refreshTickrCh = t.C + } + + for { + var waiting []chan<- error + var forced bool + select { + case <-refreshTickrCh: + case triggerRefreshReq := <-r.triggerRefresh: + if triggerRefreshReq.respCh != nil { + waiting = append(waiting, triggerRefreshReq.respCh) + } + forced = forced || triggerRefreshReq.forceCplRefresh + case <-r.ctx.Done(): + return + } + + // Batch multiple refresh requests if they're all waiting at the same time. 
+	OuterLoop:
+		for {
+			select {
+			case triggerRefreshReq := <-r.triggerRefresh:
+				if triggerRefreshReq.respCh != nil {
+					waiting = append(waiting, triggerRefreshReq.respCh)
+				}
+				forced = forced || triggerRefreshReq.forceCplRefresh
+			default:
+				break OuterLoop
+			}
+		}
+
+		ctx, span := internal.StartSpan(r.ctx, "RefreshManager.Refresh")
+
+		r.pingAndEvictPeers(ctx)
+
+		// Query for self and refresh the required buckets
+		err := r.doRefresh(ctx, forced)
+		for _, w := range waiting {
+			w <- err
+			close(w)
+		}
+		if err != nil {
+			logger.Warnw("failed when refreshing routing table", "error", err)
+		}
+
+		span.End()
+	}
+}
+
+func (r *RtRefreshManager) doRefresh(ctx context.Context, forceRefresh bool) error {
+	ctx, span := internal.StartSpan(ctx, "RefreshManager.doRefresh")
+	defer span.End()
+
+	var merr error
+
+	if err := r.queryForSelf(ctx); err != nil {
+		merr = multierror.Append(merr, err)
+	}
+
+	refreshCpls := r.rt.GetTrackedCplsForRefresh()
+
+	rfnc := func(cpl uint) (err error) {
+		if forceRefresh {
+			err = r.refreshCpl(ctx, cpl)
+		} else {
+			err = r.refreshCplIfEligible(ctx, cpl, refreshCpls[cpl])
+		}
+		return
+	}
+
+	for c := range refreshCpls {
+		cpl := uint(c)
+		if err := rfnc(cpl); err != nil {
+			merr = multierror.Append(merr, err)
+		} else {
+			// If we see a gap at a Cpl in the Routing table, we ONLY refresh up to the maximum cpl we
+			// have in the Routing Table OR 2 * (Cpl + 1) of the gap, whichever is smaller.
+			// This is to prevent refreshes for Cpls that have no peers in the network but happen to be before a very high max Cpl
+			// for which we do have peers in the network.
+			// The bound of 2 * (Cpl + 1) can be proved, and a proof would have been written here if the programmer
+			// had paid more attention in the Math classes at university.
+			// So, please be patient and a doc explaining it will be published soon.
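+			// (Illustrative example, assuming the tracked cpls are 0..10: with a
+			// gap at cpl 2 we refresh cpls 3..6, since min(2*(2+1), 10) == 6, and
+			// then stop, leaving cpls 7..10 alone. The unit test added below
+			// exercises exactly this case.)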
+			if r.rt.NPeersForCpl(cpl) == 0 {
+				lastCpl := min(2*(c+1), len(refreshCpls)-1)
+				for i := c + 1; i < lastCpl+1; i++ {
+					if err := rfnc(uint(i)); err != nil {
+						merr = multierror.Append(merr, err)
+					}
+				}
+				return merr
+			}
+		}
+	}
+
+	select {
+	case r.refreshDoneCh <- struct{}{}:
+	case <-ctx.Done():
+		return ctx.Err()
+	}
+
+	return merr
+}
+
+func min(a int, b int) int {
+	if a <= b {
+		return a
+	}
+
+	return b
+}
+
+func (r *RtRefreshManager) refreshCplIfEligible(ctx context.Context, cpl uint, lastRefreshedAt time.Time) error {
+	if time.Since(lastRefreshedAt) <= r.refreshInterval {
+		logger.Debugf("not running refresh for cpl %d as time since last refresh not above interval", cpl)
+		return nil
+	}
+
+	return r.refreshCpl(ctx, cpl)
+}
+
+func (r *RtRefreshManager) refreshCpl(ctx context.Context, cpl uint) error {
+	ctx, span := internal.StartSpan(ctx, "RefreshManager.refreshCpl", trace.WithAttributes(attribute.Int("cpl", int(cpl))))
+	defer span.End()
+
+	// gen a key for the query to refresh the cpl
+	key, err := r.refreshKeyGenFnc(cpl)
+	if err != nil {
+		span.SetStatus(codes.Error, err.Error())
+		return fmt.Errorf("failed to generate query key for cpl=%d, err=%s", cpl, err)
+	}
+
+	logger.Infof("starting refresh of cpl %d with key %s (routing table size was %d)",
+		cpl, loggableRawKeyString(key), r.rt.Size())
+
+	if err := r.runRefreshDHTQuery(ctx, key); err != nil {
+		span.SetStatus(codes.Error, err.Error())
+		return fmt.Errorf("failed to refresh cpl=%d, err=%s", cpl, err)
+	}
+
+	sz := r.rt.Size()
+	logger.Infof("finished refreshing cpl %d, routing table size is now %d", cpl, sz)
+	span.SetAttributes(attribute.Int("NewSize", sz))
+	return nil
+}
+
+func (r *RtRefreshManager) queryForSelf(ctx context.Context) error {
+	ctx, span := internal.StartSpan(ctx, "RefreshManager.queryForSelf")
+	defer span.End()
+
+	if err := r.runRefreshDHTQuery(ctx, string(r.dhtPeerId)); err != nil {
+		span.SetStatus(codes.Error, err.Error())
+		return fmt.Errorf("failed to query for self, err=%s", err)
+	}
+	return nil
+}
+
+func (r *RtRefreshManager) runRefreshDHTQuery(ctx context.Context, key string) error {
+	queryCtx, cancel := context.WithTimeout(ctx, r.refreshQueryTimeout)
+	defer cancel()
+
+	err := r.refreshQueryFnc(queryCtx, key)
+
+	if err == nil || (err == context.DeadlineExceeded && queryCtx.Err() == context.DeadlineExceeded) {
+		return nil
+	}
+
+	return err
+}
+
+type loggableRawKeyString string
+
+func (lk loggableRawKeyString) String() string {
+	k := string(lk)
+
+	if len(k) == 0 {
+		return k
+	}
+
+	encStr := base32.RawStdEncoding.EncodeToString([]byte(k))
+
+	return encStr
+}
diff --git a/go-libp2p-kad-dht/rtrefresh/rt_refresh_manager_test.go b/go-libp2p-kad-dht/rtrefresh/rt_refresh_manager_test.go
new file mode 100644
index 0000000..994e0d1
--- /dev/null
+++ b/go-libp2p-kad-dht/rtrefresh/rt_refresh_manager_test.go
@@ -0,0 +1,102 @@
+package rtrefresh
+
+import (
+	"context"
+	"strconv"
+	"testing"
+	"time"
+
+	"github.com/libp2p/go-libp2p/core/test"
+
+	kb "github.com/libp2p/go-libp2p-kbucket"
+	pstore "github.com/libp2p/go-libp2p/p2p/host/peerstore"
+
+	"github.com/stretchr/testify/require"
+)
+
+func TestSkipRefreshOnGapCpls(t *testing.T) {
+	t.Skip("This test is flaky, see https://github.com/libp2p/go-libp2p-kad-dht/issues/722.")
+
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	local := test.RandPeerIDFatal(t)
+
+	// adds a peer for a cpl.
+	// The "ignoreCpl" is the cpl for which we assume we have no peers in the network.
+	// So, if the query function gets a "key" which is the stringified version of the "ignoreCpl",
+	// we return without adding any peers for it to the Routing Table.
+	qFuncWithIgnore := func(rt *kb.RoutingTable, ignoreCpl uint) func(c context.Context, key string) error {
+		return func(c context.Context, key string) error {
+			if key == string(local) {
+				return nil
+			}
+
+			u, err := strconv.ParseInt(key, 10, 64)
+			require.NoError(t, err)
+
+			if uint(u) == ignoreCpl {
+				return nil
+			}
+
+			p, err := rt.GenRandPeerID(uint(u))
+			require.NoError(t, err)
+			b, err := rt.TryAddPeer(p, true, false)
+			require.NoError(t, err)
+			require.True(t, b)
+			return nil
+		}
+	}
+
+	// We use the cpl as the key for the query. So, the cpl -> key transformation function
+	// basically just converts the uint cpl to a string key using the strconv lib.
+	kfnc := func(cpl uint) (string, error) {
+		return strconv.FormatInt(int64(cpl), 10), nil
+	}
+
+	// when 2*gapcpl < maxCpl
+	// gap is 2 and max is 10
+	rt, err := kb.NewRoutingTable(2, kb.ConvertPeerID(local), time.Hour, pstore.NewMetrics(), 100*time.Hour, nil)
+	require.NoError(t, err)
+	r := &RtRefreshManager{ctx: ctx, rt: rt, refreshKeyGenFnc: kfnc, dhtPeerId: local}
+	icpl := uint(2)
+	lastCpl := 2 * (icpl + 1)
+	p, err := rt.GenRandPeerID(10)
+	require.NoError(t, err)
+	b, _ := rt.TryAddPeer(p, true, false)
+	require.True(t, b)
+	r.refreshQueryFnc = qFuncWithIgnore(rt, icpl)
+	require.NoError(t, r.doRefresh(ctx, true))
+
+	for i := uint(0); i < lastCpl+1; i++ {
+		if i == icpl {
+			require.Equal(t, 0, rt.NPeersForCpl(i))
+			continue
+		}
+		require.Equal(t, 1, rt.NPeersForCpl(uint(i)))
+	}
+	for i := lastCpl + 1; i < 10; i++ {
+		require.Equal(t, 0, rt.NPeersForCpl(i))
+	}
+
+	// when 2 * (gapcpl + 1) > maxCpl
+	rt, err = kb.NewRoutingTable(2, kb.ConvertPeerID(local), time.Hour, pstore.NewMetrics(), 100*time.Hour, nil)
+	require.NoError(t, err)
+	r = &RtRefreshManager{ctx: ctx, rt: rt, refreshKeyGenFnc: kfnc, dhtPeerId: local}
+	icpl = uint(6)
+	p, err = rt.GenRandPeerID(10)
+	require.NoError(t, err)
+	b, _ = rt.TryAddPeer(p, true, false)
+	require.True(t, b)
+	r.refreshQueryFnc = qFuncWithIgnore(rt, icpl)
+	require.NoError(t, r.doRefresh(ctx, true))
+
+	for i := uint(0); i < 10; i++ {
+		if i == icpl {
+			require.Equal(t, 0, rt.NPeersForCpl(i))
+			continue
+		}
+
+		require.Equal(t, 1, rt.NPeersForCpl(uint(i)))
+	}
+	require.Equal(t, 2, rt.NPeersForCpl(10))
+}
diff --git a/go-libp2p-kad-dht/subscriber_notifee.go b/go-libp2p-kad-dht/subscriber_notifee.go
new file mode 100644
index 0000000..c1eb693
--- /dev/null
+++ b/go-libp2p-kad-dht/subscriber_notifee.go
@@ -0,0 +1,141 @@
+package dht
+
+import (
+	"fmt"
+
+	"github.com/libp2p/go-libp2p/core/event"
+	"github.com/libp2p/go-libp2p/core/network"
+	"github.com/libp2p/go-libp2p/core/peer"
+	"github.com/libp2p/go-libp2p/p2p/host/eventbus"
+)
+
+func (dht *IpfsDHT) startNetworkSubscriber() error {
+	bufSize := eventbus.BufSize(256)
+
+	evts := []interface{}{
+		// register for event bus notifications of when peers successfully complete identification in order to update
+		// the routing table
+		new(event.EvtPeerIdentificationCompleted),
+
+		// register for event bus protocol ID changes in order to update the routing table
+		new(event.EvtPeerProtocolsUpdated),
+
+		// register for event bus notifications for when our local address/addresses change so we can
+		// advertise those to the network
+		new(event.EvtLocalAddressesUpdated),
+
+		// we want to know when we are disconnecting from other peers.
+		new(event.EvtPeerConnectednessChanged),
+	}
+
+	// register for event bus local routability changes in order to trigger switching between client and server modes
+	// only register for events if the DHT is operating in ModeAuto
+	if dht.auto == ModeAuto || dht.auto == ModeAutoServer {
+		evts = append(evts, new(event.EvtLocalReachabilityChanged))
+	}
+
+	subs, err := dht.host.EventBus().Subscribe(evts, bufSize)
+	if err != nil {
+		return fmt.Errorf("dht could not subscribe to eventbus events: %w", err)
+	}
+
+	dht.wg.Add(1)
+	go func() {
+		defer dht.wg.Done()
+		defer subs.Close()
+
+		for {
+			select {
+			case e, more := <-subs.Out():
+				if !more {
+					return
+				}
+
+				switch evt := e.(type) {
+				case event.EvtLocalAddressesUpdated:
+					// when our address changes, we should proactively tell our closest peers about it so
+					// we become discoverable quickly. The Identify protocol will push a signed peer record
+					// with our new address to all peers we are connected to. However, we might not necessarily be connected
+					// to our closest peers & so in the true spirit of Zen, searching for ourselves in the network really is the best way
+					// to forge connections with those that matter.
+					if dht.autoRefresh || dht.testAddressUpdateProcessing {
+						dht.rtRefreshManager.RefreshNoWait()
+					}
+				case event.EvtPeerProtocolsUpdated:
+					handlePeerChangeEvent(dht, evt.Peer)
+				case event.EvtPeerIdentificationCompleted:
+					handlePeerChangeEvent(dht, evt.Peer)
+				case event.EvtPeerConnectednessChanged:
+					if evt.Connectedness != network.Connected {
+						dht.msgSender.OnDisconnect(dht.ctx, evt.Peer)
+					}
+				case event.EvtLocalReachabilityChanged:
+					if dht.auto == ModeAuto || dht.auto == ModeAutoServer {
+						handleLocalReachabilityChangedEvent(dht, evt)
+					} else {
+						// something has gone really wrong if we get an event we did not subscribe to
+						logger.Errorf("received LocalReachabilityChanged event that was not subscribed to")
+					}
+				default:
+					// something has gone really wrong if we get an event for another type
+					logger.Errorf("got wrong type from subscription: %T", e)
+				}
+			case <-dht.ctx.Done():
+				return
+			}
+		}
+	}()
+
+	return nil
+}
+
+func handlePeerChangeEvent(dht *IpfsDHT, p peer.ID) {
+	valid, err := dht.validRTPeer(p)
+	if err != nil {
+		logger.Errorf("could not check peerstore for protocol support: err: %s", err)
+		return
+	} else if valid {
+		dht.peerFound(p)
+	} else {
+		dht.peerStoppedDHT(p)
+	}
+}
+
+func handleLocalReachabilityChangedEvent(dht *IpfsDHT, e event.EvtLocalReachabilityChanged) {
+	var target mode
+
+	switch e.Reachability {
+	case network.ReachabilityPrivate:
+		target = modeClient
+	case network.ReachabilityUnknown:
+		if dht.auto == ModeAutoServer {
+			target = modeServer
+		} else {
+			target = modeClient
+		}
+	case network.ReachabilityPublic:
+		target = modeServer
+	}
+
+	logger.Infof("processed event %T; performing dht mode switch", e)
+
+	err := dht.setMode(target)
+	// NOTE: the mode will be printed out as a decimal.
+	if err == nil {
+		logger.Infow("switched DHT mode successfully", "mode", target)
+	} else {
+		logger.Errorw("switching DHT mode failed", "mode", target, "error", err)
+	}
+}
+
+// validRTPeer returns true if the peer supports the DHT protocol and false otherwise. Supporting the DHT protocol means
+// supporting the primary protocols; we do not want to add peers that are speaking obsolete secondary protocols to our
+// routing table
+func (dht *IpfsDHT) validRTPeer(p peer.ID) (bool, error) {
+	b, err := dht.peerstore.FirstSupportedProtocol(p, dht.protocols...)
+ if len(b) == 0 || err != nil { + return false, err + } + + return dht.routingTablePeerFilter == nil || dht.routingTablePeerFilter(dht, p), nil +} diff --git a/go-libp2p-kad-dht/version.json b/go-libp2p-kad-dht/version.json new file mode 100644 index 0000000..86718ea --- /dev/null +++ b/go-libp2p-kad-dht/version.json @@ -0,0 +1,3 @@ +{ + "version": "v0.25.2" +} diff --git a/go-libp2p/core/canonicallog/canonicallog_test.go b/go-libp2p/core/canonicallog/canonicallog_test.go index fc03a6a..53cb27c 100644 --- a/go-libp2p/core/canonicallog/canonicallog_test.go +++ b/go-libp2p/core/canonicallog/canonicallog_test.go @@ -17,10 +17,11 @@ func TestLogs(t *testing.T) { t.Fatal(err) } - LogMisbehavingPeer(test.RandPeerIDFatal(t), multiaddr.StringCast("/ip4/1.2.3.4"), "somecomponent", fmt.Errorf("something"), "hi") + m, _ := multiaddr.StringCast("/ip4/1.2.3.4") + LogMisbehavingPeer(test.RandPeerIDFatal(t), m, "somecomponent", fmt.Errorf("something"), "hi") netAddr := &net.TCPAddr{IP: net.ParseIP("127.0.0.1"), Port: 80} LogMisbehavingPeerNetAddr(test.RandPeerIDFatal(t), netAddr, "somecomponent", fmt.Errorf("something"), "hello \"world\"") - LogPeerStatus(1, test.RandPeerIDFatal(t), multiaddr.StringCast("/ip4/1.2.3.4"), "extra", "info") + LogPeerStatus(1, test.RandPeerIDFatal(t), m, "extra", "info") } diff --git a/go-libp2p/core/peer/addrinfo.go b/go-libp2p/core/peer/addrinfo.go index de7dd4d..de5588b 100644 --- a/go-libp2p/core/peer/addrinfo.go +++ b/go-libp2p/core/peer/addrinfo.go @@ -25,8 +25,8 @@ var ErrInvalidAddr = fmt.Errorf("invalid p2p multiaddr") func AddrInfosFromP2pAddrs(maddrs ...ma.Multiaddr) ([]AddrInfo, error) { m := make(map[ID][]ma.Multiaddr) for _, maddr := range maddrs { - transport, id := SplitAddr(maddr) - if id == "" { + transport, id, err := SplitAddr(maddr) + if id == "" || err != nil { return nil, ErrInvalidAddr } if transport == nil { @@ -48,17 +48,21 @@ func AddrInfosFromP2pAddrs(maddrs ...ma.Multiaddr) ([]AddrInfo, error) { // // * Returns a nil transport if the address only contains a /p2p part. // * Returns an empty peer ID if the address doesn't contain a /p2p part. -func SplitAddr(m ma.Multiaddr) (transport ma.Multiaddr, id ID) { +func SplitAddr(m ma.Multiaddr) (transport ma.Multiaddr, id ID, err error) { if m == nil { - return nil, "" + return nil, "", nil + } + + transport, p2ppart, err := ma.SplitLast(m) + if err != nil { + return nil, "", err } - transport, p2ppart := ma.SplitLast(m) if p2ppart == nil || p2ppart.Protocol().Code != ma.P_P2P { - return m, "" + return m, "", nil } id = ID(p2ppart.RawValue()) // already validated by the multiaddr library. - return transport, id + return transport, id, nil } // AddrInfoFromString builds an AddrInfo from the string representation of a Multiaddr @@ -73,8 +77,8 @@ func AddrInfoFromString(s string) (*AddrInfo, error) { // AddrInfoFromP2pAddr converts a Multiaddr to an AddrInfo. 
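+// (Illustrative example, not part of the original doc: "/ip4/127.0.0.1/tcp/1234/p2p/<id>"
+// yields an AddrInfo with ID <id> and Addrs ["/ip4/127.0.0.1/tcp/1234"]; with the
+// error-returning SplitAddr, a malformed multiaddr now surfaces ErrInvalidAddr
+// instead of panicking.)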
func AddrInfoFromP2pAddr(m ma.Multiaddr) (*AddrInfo, error) { - transport, id := SplitAddr(m) - if id == "" { + transport, id, err := SplitAddr(m) + if id == "" || err != nil { return nil, ErrInvalidAddr } info := &AddrInfo{ID: id} diff --git a/go-libp2p/core/peer/addrinfo_test.go b/go-libp2p/core/peer/addrinfo_test.go index 3fd1556..408390e 100644 --- a/go-libp2p/core/peer/addrinfo_test.go +++ b/go-libp2p/core/peer/addrinfo_test.go @@ -19,13 +19,13 @@ func init() { if err != nil { panic(err) } - maddrPeer = ma.StringCast("/p2p/" + testID.String()) - maddrTpt = ma.StringCast("/ip4/127.0.0.1/tcp/1234") + maddrPeer, _ = ma.StringCast("/p2p/" + testID.String()) + maddrTpt, _ = ma.StringCast("/ip4/127.0.0.1/tcp/1234") maddrFull = maddrTpt.Encapsulate(maddrPeer) } func TestSplitAddr(t *testing.T) { - tpt, id := SplitAddr(maddrFull) + tpt, id, _ := SplitAddr(maddrFull) if !tpt.Equal(maddrTpt) { t.Fatal("expected transport") } @@ -33,7 +33,7 @@ func TestSplitAddr(t *testing.T) { t.Fatalf("%s != %s", id, testID) } - tpt, id = SplitAddr(maddrPeer) + tpt, id, _ = SplitAddr(maddrPeer) if tpt != nil { t.Fatal("expected no transport") } @@ -41,7 +41,7 @@ func TestSplitAddr(t *testing.T) { t.Fatalf("%s != %s", id, testID) } - tpt, id = SplitAddr(maddrTpt) + tpt, id, _ = SplitAddr(maddrTpt) if !tpt.Equal(maddrTpt) { t.Fatal("expected a transport") } @@ -91,22 +91,30 @@ func TestAddrInfosFromP2pAddrs(t *testing.T) { t.Fatal("expected nil multiaddr to fail") } + m1, _ := ma.StringCast("/ip4/128.199.219.111/tcp/4001/ipfs/QmSoLV4Bbm51jM9C4gDYZQ9Cy3U6aXMJDAbzgu2fzaDs64") + m2, _ := ma.StringCast("/ip4/104.236.76.40/tcp/4001/ipfs/QmSoLV4Bbm51jM9C4gDYZQ9Cy3U6aXMJDAbzgu2fzaDs64") + m3, _ := ma.StringCast("/ipfs/QmSoLer265NRgSp2LA3dPaeykiS1J6DifTC88f5uVQKNAd") + m4, _ := ma.StringCast("/ip4/178.62.158.247/tcp/4001/ipfs/QmSoLer265NRgSp2LA3dPaeykiS1J6DifTC88f5uVQKNAd") + m5, _ := ma.StringCast("/ipfs/QmSoLPppuBtQSGwKDZT2M73ULpjvfd3aZ6ha4oFGL1KrGM") addrs := []ma.Multiaddr{ - ma.StringCast("/ip4/128.199.219.111/tcp/4001/ipfs/QmSoLV4Bbm51jM9C4gDYZQ9Cy3U6aXMJDAbzgu2fzaDs64"), - ma.StringCast("/ip4/104.236.76.40/tcp/4001/ipfs/QmSoLV4Bbm51jM9C4gDYZQ9Cy3U6aXMJDAbzgu2fzaDs64"), + m1, + m2, - ma.StringCast("/ipfs/QmSoLer265NRgSp2LA3dPaeykiS1J6DifTC88f5uVQKNAd"), - ma.StringCast("/ip4/178.62.158.247/tcp/4001/ipfs/QmSoLer265NRgSp2LA3dPaeykiS1J6DifTC88f5uVQKNAd"), + m3, + m4, - ma.StringCast("/ipfs/QmSoLPppuBtQSGwKDZT2M73ULpjvfd3aZ6ha4oFGL1KrGM"), + m5, } + p1, _ := ma.StringCast("/ip4/128.199.219.111/tcp/4001") + p2, _ := ma.StringCast("/ip4/104.236.76.40/tcp/4001") + p3, _ := ma.StringCast("/ip4/178.62.158.247/tcp/4001") expected := map[string][]ma.Multiaddr{ "QmSoLV4Bbm51jM9C4gDYZQ9Cy3U6aXMJDAbzgu2fzaDs64": { - ma.StringCast("/ip4/128.199.219.111/tcp/4001"), - ma.StringCast("/ip4/104.236.76.40/tcp/4001"), + p1, + p2, }, "QmSoLer265NRgSp2LA3dPaeykiS1J6DifTC88f5uVQKNAd": { - ma.StringCast("/ip4/178.62.158.247/tcp/4001"), + p3, }, "QmSoLPppuBtQSGwKDZT2M73ULpjvfd3aZ6ha4oFGL1KrGM": nil, } diff --git a/go-libp2p/core/peer/record.go b/go-libp2p/core/peer/record.go index 49d89e1..4d87421 100644 --- a/go-libp2p/core/peer/record.go +++ b/go-libp2p/core/peer/record.go @@ -137,13 +137,13 @@ var ( func TimestampSeq() uint64 { now := uint64(time.Now().UnixNano()) lastTimestampMu.Lock() - defer lastTimestampMu.Unlock() // Not all clocks are strictly increasing, but we need these sequence numbers to be strictly // increasing. 
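+	// (For example, if two calls land within the same nanosecond, the second one
+	// returns lastTimestamp+1 rather than a duplicate sequence number.)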
if now <= lastTimestamp { now = lastTimestamp + 1 } lastTimestamp = now + lastTimestampMu.Unlock() return now } diff --git a/go-libp2p/go.mod b/go-libp2p/go.mod index bff7698..1d949b0 100644 --- a/go-libp2p/go.mod +++ b/go-libp2p/go.mod @@ -4,8 +4,13 @@ go 1.21 retract v0.26.1 // Tag was applied incorrectly due to a bug in the release workflow. +replace github.com/multiformats/go-multiaddr => ../go-multiaddr + +replace github.com/multiformats/go-multiaddr-dns => ../go-multiaddr-dns + require ( github.com/benbjohnson/clock v1.3.5 + github.com/cloudflare/circl v1.3.9 github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 github.com/flynn/noise v1.1.0 diff --git a/go-libp2p/go.sum b/go-libp2p/go.sum index 5455f12..2bf30e0 100644 --- a/go-libp2p/go.sum +++ b/go-libp2p/go.sum @@ -29,6 +29,8 @@ github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cloudflare/circl v1.3.9 h1:QFrlgFYf2Qpi8bSpVPK1HBvWpx16v/1TZivyo7pGuBE= +github.com/cloudflare/circl v1.3.9/go.mod h1:PDRU+oXvdD7KCtgKxW95M5Z8BpSCJXQORiZFnBQS5QU= github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM= github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw= @@ -103,6 +105,7 @@ github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Z github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= @@ -134,6 +137,7 @@ github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpO github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc= github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/ipfs/go-cid v0.0.7/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I= github.com/ipfs/go-cid v0.4.1 h1:A/T3qGvxi4kpKWWcPC/PgbvDA2bjVLO7n4UeVwnbs/s= github.com/ipfs/go-cid v0.4.1/go.mod h1:uQHwDeX4c6CtyrFwdqyhpNcxVewur1M7l7fNU7LKwZk= github.com/ipfs/go-datastore v0.5.0/go.mod h1:9zhEApYMTl17C8YDp7JmU7sQZi2/wqiYh73hakZ90Bk= @@ -164,6 +168,10 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU= github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= +github.com/klauspost/cpuid/v2 v2.0.4/go.mod 
h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= +github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= +github.com/klauspost/cpuid/v2 v2.2.3/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= +github.com/klauspost/cpuid/v2 v2.2.6/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= github.com/klauspost/cpuid/v2 v2.2.7 h1:ZWSB3igEs+d0qvnxR/ZBzXVmxkgt8DdzP6m9pfuVLDM= github.com/klauspost/cpuid/v2 v2.2.7/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= github.com/koron/go-ssdp v0.0.4 h1:1IDwrghSKYM7yLf7XCzbByg2sJ/JcNOZRXS2jczTwz0= @@ -208,7 +216,6 @@ github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWE github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4= -github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4= github.com/miekg/dns v1.1.58 h1:ca2Hdkz+cDg/7eNF6V56jjzuZ4aCAE+DbVkILdQWG/4= github.com/miekg/dns v1.1.58/go.mod h1:Ypv+3b/KadlvW9vJfXOTf300O4UqaHFzFCuHz+rPkBY= @@ -220,37 +227,37 @@ github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc h1:PTfri+PuQmWDqERdn github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc/go.mod h1:cGKTAVKx4SxOuR/czcZ/E2RSJ3sfHs8FpHhQ5CWMf9s= github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ= github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= +github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM= github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM= github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= +github.com/mr-tron/base58 v1.1.0/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= +github.com/mr-tron/base58 v1.1.3/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o= github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= +github.com/multiformats/go-base32 v0.0.3/go.mod h1:pLiuGC8y0QR3Ue4Zug5UzK9LjgbkL8NSQj0zQ5Nz/AA= github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE= github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI= +github.com/multiformats/go-base36 v0.1.0/go.mod h1:kFGE83c6s80PklsHO9sRn2NCoffoRdUUOENyW/Vv6sM= github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0= github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4= -github.com/multiformats/go-multiaddr v0.1.1/go.mod h1:aMKBKNEYmzmDmxfX88/vz+J5IU55txyt0p4aiWVohjo= 
-github.com/multiformats/go-multiaddr v0.2.0/go.mod h1:0nO36NvPpyV4QzvTLi/lafl2y95ncPj0vFwVF6k6wJ4= -github.com/multiformats/go-multiaddr v0.12.4 h1:rrKqpY9h+n80EwhhC/kkcunCZZ7URIF8yN1WEUt2Hvc= -github.com/multiformats/go-multiaddr v0.12.4/go.mod h1:sBXrNzucqkFJhvKOiwwLyqamGa/P5EIXNPLovyhQCII= -github.com/multiformats/go-multiaddr-dns v0.3.1 h1:QgQgR+LQVt3NPTjbrLLpsaT2ufAA2y0Mkk+QRVJbW3A= -github.com/multiformats/go-multiaddr-dns v0.3.1/go.mod h1:G/245BRQ6FJGmryJCrOuTdB37AMA5AMOVuO6NY3JwTk= github.com/multiformats/go-multiaddr-fmt v0.1.0 h1:WLEFClPycPkp4fnIzoFoV9FVd49/eQsuaL3/CWe167E= github.com/multiformats/go-multiaddr-fmt v0.1.0/go.mod h1:hGtDIW4PU4BqJ50gW2quDuPVjyWNZxToGUh/HwTZYJo= +github.com/multiformats/go-multibase v0.0.3/go.mod h1:5+1R4eQrT3PkYZ24C3W2Ue2tPwIdYQD509ZjSb5y9Oc= github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g= github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk= github.com/multiformats/go-multicodec v0.9.0 h1:pb/dlPnzee/Sxv/j4PmkDRxCOi3hXTz3IbPKOXWJkmg= github.com/multiformats/go-multicodec v0.9.0/go.mod h1:L3QTQvMIaVBkXOXXtVmYE+LI16i14xuaojr/H7Ai54k= -github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= +github.com/multiformats/go-multihash v0.0.13/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U= github.com/multiformats/go-multihash v0.2.3/go.mod h1:dXgKXCXjBzdscBLk9JkjINiEsCKRVch90MdaGiKsvSM= github.com/multiformats/go-multistream v0.5.0 h1:5htLSLl7lvJk3xx3qT/8Zm9J4K8vEOf/QGkvOGQAyiE= github.com/multiformats/go-multistream v0.5.0/go.mod h1:n6tMZiwiP2wUsR8DgfDWw1dydlEqV3l6N3/GBsX6ILA= -github.com/multiformats/go-varint v0.0.1/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= +github.com/multiformats/go-varint v0.0.5/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= +github.com/multiformats/go-varint v0.0.6/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8= github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU= github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= @@ -444,6 +451,8 @@ golang.org/x/crypto v0.0.0-20200602180216-279210d13fed/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE= golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= @@ -453,6 +462,7 @@ golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOM golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI= golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod 
h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20230725012225-302865e7556b/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc= golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 h1:vr/HnozRka3pE4EsMEg1lgkXJkTFJCVUX+S/ZT6wYzM= golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842/go.mod h1:XtvwrStGgqGPLc4cjQfWqZHG1YFdYs6swckp8vpsjnc= golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -466,7 +476,9 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.11.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -486,7 +498,9 @@ golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210423184538-5f58ad60dda6/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= @@ -536,8 +550,10 @@ golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -552,6 +568,7 @@ golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term 
v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= @@ -565,6 +582,7 @@ golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3 golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= @@ -592,6 +610,7 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.21.0 h1:qc0xYgIbsSDt9EyWz05J5wfa7LOVW0YTLOXrqdLAWIw= golang.org/x/tools v0.21.0/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= @@ -634,6 +653,7 @@ gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= @@ -641,6 +661,7 @@ honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +lukechampine.com/blake3 v1.1.6/go.mod h1:tkKEOtDkNtklkXtLNEOGNq5tcV90tJiA1vAA12R78LA= lukechampine.com/blake3 v1.2.1 h1:YuqqRuaqsGV71BV/nm9xlI0MKUv4QC54jQnBChWbGnI= lukechampine.com/blake3 v1.2.1/go.mod h1:0OFRp7fBtAylGVCO40o87sbupkyIGgbpv1+M1k1LM6k= sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck= diff --git a/go-libp2p/libp2p_test.go b/go-libp2p/libp2p_test.go index 8a9a8ed..c5d5a50 100644 --- a/go-libp2p/libp2p_test.go +++ b/go-libp2p/libp2p_test.go @@ -163,6 +163,10 @@ func 
TestChainOptions(t *testing.T) { } } } +func tStringCast(s string) ma.Multiaddr { + st, _ := ma.StringCast(s) + return st +} func TestTransportConstructorTCP(t *testing.T) { h, err := New( @@ -171,8 +175,8 @@ func TestTransportConstructorTCP(t *testing.T) { ) require.NoError(t, err) defer h.Close() - require.NoError(t, h.Network().Listen(ma.StringCast("/ip4/127.0.0.1/tcp/0"))) - err = h.Network().Listen(ma.StringCast("/ip4/127.0.0.1/udp/0/quic-v1")) + require.NoError(t, h.Network().Listen(tStringCast("/ip4/127.0.0.1/tcp/0"))) + err = h.Network().Listen(tStringCast("/ip4/127.0.0.1/udp/0/quic-v1")) require.Error(t, err) require.Contains(t, err.Error(), swarm.ErrNoTransport.Error()) } @@ -184,8 +188,8 @@ func TestTransportConstructorQUIC(t *testing.T) { ) require.NoError(t, err) defer h.Close() - require.NoError(t, h.Network().Listen(ma.StringCast("/ip4/127.0.0.1/udp/0/quic-v1"))) - err = h.Network().Listen(ma.StringCast("/ip4/127.0.0.1/tcp/0")) + require.NoError(t, h.Network().Listen(tStringCast("/ip4/127.0.0.1/udp/0/quic-v1"))) + err = h.Network().Listen(tStringCast("/ip4/127.0.0.1/tcp/0")) require.Error(t, err) require.Contains(t, err.Error(), swarm.ErrNoTransport.Error()) } @@ -290,8 +294,8 @@ func TestTransportConstructorWebTransport(t *testing.T) { ) require.NoError(t, err) defer h.Close() - require.NoError(t, h.Network().Listen(ma.StringCast("/ip4/127.0.0.1/udp/0/quic-v1/webtransport"))) - err = h.Network().Listen(ma.StringCast("/ip4/127.0.0.1/udp/0/quic-v1/")) + require.NoError(t, h.Network().Listen(tStringCast("/ip4/127.0.0.1/udp/0/quic-v1/webtransport"))) + err = h.Network().Listen(tStringCast("/ip4/127.0.0.1/udp/0/quic-v1/")) require.Error(t, err) require.Contains(t, err.Error(), swarm.ErrNoTransport.Error()) } @@ -311,12 +315,12 @@ func TestTransportCustomAddressWebTransport(t *testing.T) { ) require.NoError(t, err) defer h.Close() - require.NoError(t, h.Network().Listen(ma.StringCast("/ip4/127.0.0.1/udp/0/quic-v1/webtransport"))) + require.NoError(t, h.Network().Listen(tStringCast("/ip4/127.0.0.1/udp/0/quic-v1/webtransport"))) addrs := h.Addrs() require.Len(t, addrs, 1) require.NotEqual(t, addrs[0], customAddr) - restOfAddr, lastComp := ma.SplitLast(addrs[0]) - restOfAddr, secondToLastComp := ma.SplitLast(restOfAddr) + restOfAddr, lastComp, _ := ma.SplitLast(addrs[0]) + restOfAddr, secondToLastComp, _ := ma.SplitLast(restOfAddr) require.Equal(t, ma.P_CERTHASH, lastComp.Protocol().Code) require.Equal(t, ma.P_CERTHASH, secondToLastComp.Protocol().Code) require.True(t, restOfAddr.Equal(customAddr)) @@ -343,7 +347,7 @@ func TestTransportCustomAddressWebTransportDoesNotStall(t *testing.T) { defer h.Close() addrs := h.Addrs() require.Len(t, addrs, 1) - _, lastComp := ma.SplitLast(addrs[0]) + _, lastComp, _ := ma.SplitLast(addrs[0]) require.NotEqual(t, ma.P_CERTHASH, lastComp.Protocol().Code) // We did not add the certhash to the multiaddr require.Equal(t, addrs[0], customAddr) @@ -447,7 +451,7 @@ func TestDialCircuitAddrWithWrappedResourceManager(t *testing.T) { h.Peerstore().AddAddrs(relay.ID(), relay.Addrs(), 10*time.Minute) h.Peerstore().AddAddr(peerBehindRelay.ID(), - ma.StringCast( + tStringCast( fmt.Sprintf("/p2p/%s/p2p-circuit", relay.ID()), ), peerstore.TempAddrTTL, diff --git a/go-libp2p/p2p/discovery/backoff/backoffcache.go b/go-libp2p/p2p/discovery/backoff/backoffcache.go index c8f1180..bbfffc7 100644 --- a/go-libp2p/p2p/discovery/backoff/backoffcache.go +++ b/go-libp2p/p2p/discovery/backoff/backoffcache.go @@ -143,7 +143,6 @@ func (d *BackoffDiscovery) FindPeers(ctx 
context.Context, ns string, opts ...dis } c.mux.Lock() - defer c.mux.Unlock() timeExpired := d.clock.Now().After(c.nextDiscover) @@ -165,6 +164,7 @@ func (d *BackoffDiscovery) FindPeers(ctx context.Context, ns string, opts ...dis } } close(pch) + c.mux.Unlock() return pch, nil } @@ -172,6 +172,7 @@ func (d *BackoffDiscovery) FindPeers(ctx context.Context, ns string, opts ...dis if !c.ongoing { pch, err := d.disc.FindPeers(ctx, ns, opts...) if err != nil { + c.mux.Unlock() return nil, err } @@ -189,12 +190,12 @@ func (d *BackoffDiscovery) FindPeers(ctx context.Context, ns string, opts ...dis c.sendingChs[evtCh] = options.Limit go findPeerReceiver(ctx, pch, evtCh, rcvPeers) - + c.mux.Unlock() return pch, nil } func findPeerDispatcher(ctx context.Context, c *backoffCache, pch <-chan peer.AddrInfo) { - defer func() { + cleanup := func() { c.mux.Lock() // If the peer addresses have changed reset the backoff @@ -212,12 +213,13 @@ func findPeerDispatcher(ctx context.Context, c *backoffCache, pch <-chan peer.Ad } c.sendingChs = make(map[chan peer.AddrInfo]int) c.mux.Unlock() - }() + } for { select { case ai, ok := <-pch: if !ok { + cleanup() return } c.mux.Lock() @@ -246,14 +248,13 @@ func findPeerDispatcher(ctx context.Context, c *backoffCache, pch <-chan peer.Ad c.mux.Unlock() case <-ctx.Done(): + cleanup() return } } } func findPeerReceiver(ctx context.Context, pch, evtCh chan peer.AddrInfo, rcvPeers []peer.AddrInfo) { - defer close(pch) - for { select { case ai, ok := <-evtCh: @@ -279,12 +280,15 @@ func findPeerReceiver(ctx context.Context, pch, evtCh chan peer.AddrInfo, rcvPee select { case pch <- p: case <-ctx.Done(): + close(pch) return } } + close(pch) return } case <-ctx.Done(): + close(pch) return } } diff --git a/go-libp2p/p2p/discovery/backoff/backoffconnector.go b/go-libp2p/p2p/discovery/backoff/backoffconnector.go index 504cdbb..6cc7aab 100644 --- a/go-libp2p/p2p/discovery/backoff/backoffconnector.go +++ b/go-libp2p/p2p/discovery/backoff/backoffconnector.go @@ -77,13 +77,12 @@ func (c *BackoffConnector) Connect(ctx context.Context, peerCh <-chan peer.AddrI go func(pi peer.AddrInfo) { ctx, cancel := context.WithTimeout(ctx, c.connTryDur) - defer cancel() err := c.host.Connect(ctx, pi) if err != nil { log.Debugf("Error connecting to pubsub peer %s: %s", pi.ID, err.Error()) - return } + cancel() }(pi) case <-ctx.Done(): diff --git a/go-libp2p/p2p/discovery/mdns/mdns.go b/go-libp2p/p2p/discovery/mdns/mdns.go index 637fcdc..a57c767 100644 --- a/go-libp2p/p2p/discovery/mdns/mdns.go +++ b/go-libp2p/p2p/discovery/mdns/mdns.go @@ -86,7 +86,10 @@ func (s *mdnsService) Close() error { func (s *mdnsService) getIPs(addrs []ma.Multiaddr) ([]string, error) { var ip4, ip6 string for _, addr := range addrs { - first, _ := ma.SplitFirst(addr) + first, _, err := ma.SplitFirst(addr) + if err != nil { + return nil, err + } if first == nil { continue } @@ -154,7 +157,6 @@ func (s *mdnsService) startResolver(ctx context.Context) { s.resolverWG.Add(2) entryChan := make(chan *zeroconf.ServiceEntry, 1000) go func() { - defer s.resolverWG.Done() for entry := range entryChan { // We only care about the TXT records. // Ignore A, AAAA and PTR. 
@@ -183,12 +185,13 @@ func (s *mdnsService) startResolver(ctx context.Context) { go s.notifee.HandlePeerFound(info) } } + s.resolverWG.Done() }() go func() { - defer s.resolverWG.Done() if err := zeroconf.Browse(ctx, s.serviceName, mdnsDomain, entryChan); err != nil { log.Debugf("zeroconf browsing failed: %s", err) } + s.resolverWG.Done() }() } diff --git a/go-libp2p/p2p/discovery/routing/routing.go b/go-libp2p/p2p/discovery/routing/routing.go index 6fee750..8198630 100644 --- a/go-libp2p/p2p/discovery/routing/routing.go +++ b/go-libp2p/p2p/discovery/routing/routing.go @@ -45,13 +45,14 @@ func (d *RoutingDiscovery) Advertise(ctx context.Context, ns string, opts ...dis // closest peers to the key/CID before it goes on to provide the record to them. // Not setting a timeout here will make the DHT wander forever. pctx, cancel := context.WithTimeout(ctx, 60*time.Second) - defer cancel() err = d.Provide(pctx, cid, true) if err != nil { + cancel() return 0, err } + cancel() return ttl, nil } diff --git a/go-libp2p/p2p/host/autonat/autonat.go b/go-libp2p/p2p/host/autonat/autonat.go index 479f31e..1ff92d8 100644 --- a/go-libp2p/p2p/host/autonat/autonat.go +++ b/go-libp2p/p2p/host/autonat/autonat.go @@ -156,17 +156,13 @@ func ipInList(candidate ma.Multiaddr, list []ma.Multiaddr) bool { } func (as *AmbientAutoNAT) background() { - defer close(as.backgroundRunning) // wait a bit for the node to come online and establish some connections // before starting autodetection delay := as.config.bootDelay subChan := as.subscriber.Out() - defer as.subscriber.Close() - defer as.emitReachabilityChanged.Close() timer := time.NewTimer(delay) - defer timer.Stop() timerRunning := true retryProbe := false for { @@ -174,7 +170,8 @@ func (as *AmbientAutoNAT) background() { // new inbound connection. case conn := <-as.inboundConn: localAddrs := as.host.Addrs() - if manet.IsPublicAddr(conn.RemoteMultiaddr()) && + is, err := manet.IsPublicAddr(conn.RemoteMultiaddr()) + if is && err == nil && !ipInList(conn.RemoteMultiaddr(), localAddrs) { as.lastInbound = time.Now() } @@ -201,6 +198,10 @@ func (as *AmbientAutoNAT) background() { // probe finished. 
case err, ok := <-as.dialResponses: if !ok { + close(as.backgroundRunning) + as.subscriber.Close() + as.emitReachabilityChanged.Close() + timer.Stop() return } if IsDialRefused(err) { @@ -214,6 +215,10 @@ func (as *AmbientAutoNAT) background() { timerRunning = false retryProbe = false case <-as.ctx.Done(): + close(as.backgroundRunning) + as.subscriber.Close() + as.emitReachabilityChanged.Close() + timer.Stop() return } @@ -381,7 +386,6 @@ func (as *AmbientAutoNAT) tryProbe(p peer.ID) bool { func (as *AmbientAutoNAT) probe(pi *peer.AddrInfo) { cli := NewAutoNATClient(as.host, as.config.addressFunc, as.metricsTracer) ctx, cancel := context.WithTimeout(as.ctx, as.config.requestTimeout) - defer cancel() err := cli.DialBack(ctx, pi.ID) log.Debugf("Dialback through peer %s completed: err: %s", pi.ID, err) @@ -389,6 +393,7 @@ func (as *AmbientAutoNAT) probe(pi *peer.AddrInfo) { select { case as.dialResponses <- err: case <-as.ctx.Done(): + cancel() return } } diff --git a/go-libp2p/p2p/host/autonat/client.go b/go-libp2p/p2p/host/autonat/client.go index fa0e03b..d0122d8 100644 --- a/go-libp2p/p2p/host/autonat/client.go +++ b/go-libp2p/p2p/host/autonat/client.go @@ -51,12 +51,8 @@ func (c *client) DialBack(ctx context.Context, p peer.ID) error { s.Reset() return err } - defer s.Scope().ReleaseMemory(maxMsgSize) s.SetDeadline(time.Now().Add(streamTimeout)) - // Might as well just reset the stream. Once we get to this point, we - // don't care about being nice. - defer s.Close() r := pbio.NewDelimitedReader(s, maxMsgSize) w := pbio.NewDelimitedWriter(s) @@ -64,16 +60,22 @@ func (c *client) DialBack(ctx context.Context, p peer.ID) error { req := newDialMessage(peer.AddrInfo{ID: c.h.ID(), Addrs: c.addrFunc()}) if err := w.WriteMsg(req); err != nil { s.Reset() + s.Scope().ReleaseMemory(maxMsgSize) + s.Close() return err } var res pb.Message if err := r.ReadMsg(&res); err != nil { s.Reset() + s.Scope().ReleaseMemory(maxMsgSize) + s.Close() return err } if res.GetType() != pb.Message_DIAL_RESPONSE { s.Reset() + s.Scope().ReleaseMemory(maxMsgSize) + s.Close() return fmt.Errorf("unexpected response: %s", res.GetType().String()) } @@ -81,6 +83,8 @@ func (c *client) DialBack(ctx context.Context, p peer.ID) error { if c.mt != nil { c.mt.ReceivedDialResponse(status) } + s.Scope().ReleaseMemory(maxMsgSize) + s.Close() switch status { case pb.Message_OK: return nil diff --git a/go-libp2p/p2p/host/autonat/dialpolicy.go b/go-libp2p/p2p/host/autonat/dialpolicy.go index 9615229..8372227 100644 --- a/go-libp2p/p2p/host/autonat/dialpolicy.go +++ b/go-libp2p/p2p/host/autonat/dialpolicy.go @@ -30,7 +30,7 @@ func (d *dialPolicy) skipDial(addr ma.Multiaddr) bool { } // skip private network (unroutable) addresses - if !manet.IsPublicAddr(addr) { + if is, err := manet.IsPublicAddr(addr); !is && err == nil { return true } candidateIP, err := manet.ToIP(addr) @@ -60,7 +60,11 @@ func (d *dialPolicy) skipPeer(addrs []ma.Multiaddr) bool { localAddrs := d.host.Addrs() localHosts := make([]net.IP, 0) for _, lAddr := range localAddrs { - if _, err := lAddr.ValueForProtocol(ma.P_CIRCUIT); err != nil && manet.IsPublicAddr(lAddr) { + is, err := manet.IsPublicAddr(lAddr) + if err != nil { + continue + } + if _, err := lAddr.ValueForProtocol(ma.P_CIRCUIT); err != nil && is { lIP, err := manet.ToIP(lAddr) if err != nil { continue @@ -72,7 +76,11 @@ func (d *dialPolicy) skipPeer(addrs []ma.Multiaddr) bool { // if a public IP of the peer is one of ours: skip the peer. 
goodPublic := false for _, addr := range addrs { - if _, err := addr.ValueForProtocol(ma.P_CIRCUIT); err != nil && manet.IsPublicAddr(addr) { + is, err := manet.IsPublicAddr(addr) + if err != nil { + continue + } + if _, err := addr.ValueForProtocol(ma.P_CIRCUIT); err != nil && is { aIP, err := manet.ToIP(addr) if err != nil { continue diff --git a/go-libp2p/p2p/host/autonat/metrics.go b/go-libp2p/p2p/host/autonat/metrics.go index 4207d4e..be2d90d 100644 --- a/go-libp2p/p2p/host/autonat/metrics.go +++ b/go-libp2p/p2p/host/autonat/metrics.go @@ -138,23 +138,23 @@ func (mt *metricsTracer) ReachabilityStatusConfidence(confidence int) { func (mt *metricsTracer) ReceivedDialResponse(status pb.Message_ResponseStatus) { tags := metricshelper.GetStringSlice() - defer metricshelper.PutStringSlice(tags) *tags = append(*tags, getResponseStatus(status)) receivedDialResponseTotal.WithLabelValues(*tags...).Inc() + metricshelper.PutStringSlice(tags) } func (mt *metricsTracer) OutgoingDialResponse(status pb.Message_ResponseStatus) { tags := metricshelper.GetStringSlice() - defer metricshelper.PutStringSlice(tags) *tags = append(*tags, getResponseStatus(status)) outgoingDialResponseTotal.WithLabelValues(*tags...).Inc() + metricshelper.PutStringSlice(tags) } func (mt *metricsTracer) OutgoingDialRefused(reason string) { tags := metricshelper.GetStringSlice() - defer metricshelper.PutStringSlice(tags) *tags = append(*tags, reason) outgoingDialRefusedTotal.WithLabelValues(*tags...).Inc() + metricshelper.PutStringSlice(tags) } func (mt *metricsTracer) NextProbeTime(t time.Time) { diff --git a/go-libp2p/p2p/host/autonat/notify.go b/go-libp2p/p2p/host/autonat/notify.go index a57bf15..50e9be1 100644 --- a/go-libp2p/p2p/host/autonat/notify.go +++ b/go-libp2p/p2p/host/autonat/notify.go @@ -17,8 +17,7 @@ func (as *AmbientAutoNAT) ListenClose(net network.Network, a ma.Multiaddr) {} // Connected is part of the network.Notifiee interface func (as *AmbientAutoNAT) Connected(net network.Network, c network.Conn) { - if c.Stat().Direction == network.DirInbound && - manet.IsPublicAddr(c.RemoteMultiaddr()) { + if is, err := manet.IsPublicAddr(c.RemoteMultiaddr()); c.Stat().Direction == network.DirInbound && is && err == nil { select { case as.inboundConn <- c: default: diff --git a/go-libp2p/p2p/host/autonat/svc.go b/go-libp2p/p2p/host/autonat/svc.go index d293550..1601ba3 100644 --- a/go-libp2p/p2p/host/autonat/svc.go +++ b/go-libp2p/p2p/host/autonat/svc.go @@ -62,10 +62,8 @@ func (as *autoNATService) handleStream(s network.Stream) { s.Reset() return } - defer s.Scope().ReleaseMemory(maxMsgSize) s.SetDeadline(time.Now().Add(streamTimeout)) - defer s.Close() pid := s.Conn().RemotePeer() log.Debugf("New stream from %s", pid) @@ -80,6 +78,8 @@ func (as *autoNATService) handleStream(s network.Stream) { if err != nil { log.Debugf("Error reading message from %s: %s", pid, err.Error()) s.Reset() + s.Scope().ReleaseMemory(maxMsgSize) + s.Close() return } @@ -87,6 +87,8 @@ func (as *autoNATService) handleStream(s network.Stream) { if t != pb.Message_DIAL { log.Debugf("Unexpected message from %s: %s (%d)", pid, t.String(), t) s.Reset() + s.Scope().ReleaseMemory(maxMsgSize) + s.Close() return } @@ -98,11 +100,15 @@ func (as *autoNATService) handleStream(s network.Stream) { if err != nil { log.Debugf("Error writing response to %s: %s", pid, err.Error()) s.Reset() + s.Scope().ReleaseMemory(maxMsgSize) + s.Close() return } if as.config.metricsTracer != nil { as.config.metricsTracer.OutgoingDialResponse(res.GetDialResponse().GetStatus()) } 
+ s.Scope().ReleaseMemory(maxMsgSize) + s.Close() } func (as *autoNATService) handleDial(p peer.ID, obsaddr ma.Multiaddr, mpi *pb.Message_PeerInfo) *pb.Message_DialResponse { @@ -137,7 +143,10 @@ func (as *autoNATService) handleDial(p peer.ID, obsaddr ma.Multiaddr, mpi *pb.Me } // Determine the peer's IP address. - hostIP, _ := ma.SplitFirst(obsaddr) + hostIP, _, err := ma.SplitFirst(obsaddr) + if err != nil { + return newDialResponseError(pb.Message_E_INTERNAL_ERROR, err.Error()) + } switch hostIP.Protocol().Code { case ma.P_IP4, ma.P_IP6: default: @@ -160,7 +169,7 @@ func (as *autoNATService) handleDial(p peer.ID, obsaddr ma.Multiaddr, mpi *pb.Me // For security reasons, we _only_ dial the observed IP address. // Replace other IP addresses with the observed one so we can still try the // requested ports/transports. - if ip, rest := ma.SplitFirst(addr); !ip.Equal(hostIP) { + if ip, rest, _ := ma.SplitFirst(addr); !ip.Equal(hostIP) { // Make sure it's an IP address switch ip.Protocol().Code { case ma.P_IP4, ma.P_IP6: @@ -221,36 +230,36 @@ func (as *autoNATService) doDial(pi peer.AddrInfo) *pb.Message_DialResponse { as.mx.Unlock() ctx, cancel := context.WithTimeout(context.Background(), as.config.dialTimeout) - defer cancel() as.config.dialer.Peerstore().ClearAddrs(pi.ID) as.config.dialer.Peerstore().AddAddrs(pi.ID, pi.Addrs, peerstore.TempAddrTTL) - defer func() { - as.config.dialer.Peerstore().ClearAddrs(pi.ID) - as.config.dialer.Peerstore().RemovePeer(pi.ID) - }() - conn, err := as.config.dialer.DialPeer(ctx, pi.ID) if err != nil { log.Debugf("error dialing %s: %s", pi.ID, err.Error()) // wait for the context to timeout to avoid leaking timing information // this renders the service ineffective as a port scanner <-ctx.Done() + cancel() + as.config.dialer.Peerstore().ClearAddrs(pi.ID) + as.config.dialer.Peerstore().RemovePeer(pi.ID) return newDialResponseError(pb.Message_E_DIAL_ERROR, "dial failed") } ra := conn.RemoteMultiaddr() as.config.dialer.ClosePeer(pi.ID) + cancel() + as.config.dialer.Peerstore().ClearAddrs(pi.ID) + as.config.dialer.Peerstore().RemovePeer(pi.ID) return newDialResponseOK(ra) } // Enable the autoNAT service if it is not running. func (as *autoNATService) Enable() { as.instanceLock.Lock() - defer as.instanceLock.Unlock() if as.instance != nil { + as.instanceLock.Unlock() return } ctx, cancel := context.WithCancel(context.Background()) @@ -259,18 +268,19 @@ func (as *autoNATService) Enable() { as.config.host.SetStreamHandler(AutoNATProto, as.handleStream) go as.background(ctx) + as.instanceLock.Unlock() } // Disable the autoNAT service if it is running. 
func (as *autoNATService) Disable() { as.instanceLock.Lock() - defer as.instanceLock.Unlock() if as.instance != nil { as.config.host.RemoveStreamHandler(AutoNATProto) as.instance() as.instance = nil <-as.backgroundRunning } + as.instanceLock.Unlock() } func (as *autoNATService) Close() error { @@ -279,10 +289,7 @@ func (as *autoNATService) Close() error { } func (as *autoNATService) background(ctx context.Context) { - defer close(as.backgroundRunning) - timer := time.NewTimer(as.config.throttleResetPeriod) - defer timer.Stop() for { select { @@ -294,6 +301,8 @@ func (as *autoNATService) background(ctx context.Context) { jitter := rand.Float32() * float32(as.config.throttleResetJitter) timer.Reset(as.config.throttleResetPeriod + time.Duration(int64(jitter))) case <-ctx.Done(): + close(as.backgroundRunning) + timer.Stop() return } } diff --git a/go-libp2p/p2p/host/autorelay/addrsplosion.go b/go-libp2p/p2p/host/autorelay/addrsplosion.go index 710dab1..f9a2c98 100644 --- a/go-libp2p/p2p/host/autorelay/addrsplosion.go +++ b/go-libp2p/p2p/host/autorelay/addrsplosion.go @@ -17,13 +17,13 @@ func cleanupAddressSet(addrs []ma.Multiaddr) []ma.Multiaddr { continue } - if manet.IsPublicAddr(a) || isDNSAddr(a) { + if is, err := manet.IsPublicAddr(a); (is && err == nil) || isDNSAddr(a) { public = append(public, a) continue } // discard unroutable addrs - if manet.IsPrivateAddr(a) { + if is, err := manet.IsPrivateAddr(a); is && err == nil { private = append(private, a) } } @@ -38,7 +38,10 @@ func cleanupAddressSet(addrs []ma.Multiaddr) []ma.Multiaddr { func isRelayAddr(a ma.Multiaddr) bool { isRelay := false - ma.ForEach(a, func(c ma.Component) bool { + ma.ForEach(a, func(c ma.Component, e error) bool { + if e != nil { + return false + } switch c.Protocol().Code { case ma.P_CIRCUIT: isRelay = true @@ -52,7 +55,7 @@ func isRelayAddr(a ma.Multiaddr) bool { } func isDNSAddr(a ma.Multiaddr) bool { - if first, _ := ma.SplitFirst(a); first != nil { + if first, _, err := ma.SplitFirst(a); err == nil && first != nil { switch first.Protocol().Code { case ma.P_DNS, ma.P_DNS4, ma.P_DNS6, ma.P_DNSADDR: return true @@ -84,7 +87,10 @@ func addrKeyAndPort(a ma.Multiaddr) (string, int) { port int ) - ma.ForEach(a, func(c ma.Component) bool { + ma.ForEach(a, func(c ma.Component, e error) bool { + if e != nil { + return false + } switch c.Protocol().Code { case ma.P_TCP, ma.P_UDP: port = int(binary.BigEndian.Uint16(c.RawValue())) diff --git a/go-libp2p/p2p/host/autorelay/addrsplosion_test.go b/go-libp2p/p2p/host/autorelay/addrsplosion_test.go index 1bbd910..b03823d 100644 --- a/go-libp2p/p2p/host/autorelay/addrsplosion_test.go +++ b/go-libp2p/p2p/host/autorelay/addrsplosion_test.go @@ -84,7 +84,8 @@ func TestCleanupAddrs(t *testing.T) { func makeAddrList(strs ...string) []ma.Multiaddr { result := make([]ma.Multiaddr, 0, len(strs)) for _, s := range strs { - result = append(result, ma.StringCast(s)) + m, _ := ma.StringCast(s) + result = append(result, m) } return result } diff --git a/go-libp2p/p2p/host/autorelay/autorelay.go b/go-libp2p/p2p/host/autorelay/autorelay.go index 5900798..f952fbf 100644 --- a/go-libp2p/p2p/host/autorelay/autorelay.go +++ b/go-libp2p/p2p/host/autorelay/autorelay.go @@ -59,8 +59,8 @@ func NewAutoRelay(bhost *basic.BasicHost, opts ...Option) (*AutoRelay, error) { func (r *AutoRelay) Start() { r.refCount.Add(1) go func() { - defer r.refCount.Done() r.background() + r.refCount.Done() }() } @@ -70,14 +70,15 @@ func (r *AutoRelay) background() { log.Debug("failed to subscribe to the 
EvtLocalReachabilityChanged") return } - defer subReachability.Close() for { select { case <-r.ctx.Done(): + subReachability.Close() return case ev, ok := <-subReachability.Out(): if !ok { + subReachability.Close() return } // TODO: push changed addresses @@ -109,12 +110,15 @@ func (r *AutoRelay) hostAddrs(addrs []ma.Multiaddr) []ma.Multiaddr { func (r *AutoRelay) relayAddrs(addrs []ma.Multiaddr) []ma.Multiaddr { r.mx.Lock() - defer r.mx.Unlock() if r.status != network.ReachabilityPrivate { + r.mx.Unlock() return addrs } - return r.relayFinder.relayAddrs(addrs) + + a := r.relayFinder.relayAddrs(addrs) + r.mx.Unlock() + return a } func (r *AutoRelay) Close() error { diff --git a/go-libp2p/p2p/host/autorelay/autorelay_test.go b/go-libp2p/p2p/host/autorelay/autorelay_test.go index 410f8cb..54b8af5 100644 --- a/go-libp2p/p2p/host/autorelay/autorelay_test.go +++ b/go-libp2p/p2p/host/autorelay/autorelay_test.go @@ -43,11 +43,11 @@ func numRelays(h host.Host) int { func usedRelays(h host.Host) []peer.ID { m := make(map[peer.ID]struct{}) for _, addr := range h.Addrs() { - addr, comp := ma.SplitLast(addr) + addr, comp, _ := ma.SplitLast(addr) if comp.Protocol().Code != ma.P_CIRCUIT { // not a relay addr continue } - _, comp = ma.SplitLast(addr) + _, comp, _ = ma.SplitLast(addr) if comp.Protocol().Code != ma.P_P2P { panic("expected p2p component") } @@ -96,7 +96,7 @@ func newRelay(t *testing.T) host.Host { saddr := addr.String() if strings.HasPrefix(saddr, "/ip4/127.0.0.1/") { addrNoIP := strings.TrimPrefix(saddr, "/ip4/127.0.0.1") - addrs[i] = ma.StringCast("/dns4/localhost" + addrNoIP) + addrs[i], _ = ma.StringCast("/dns4/localhost" + addrNoIP) } } return addrs @@ -206,7 +206,7 @@ func TestBackoff(t *testing.T) { saddr := addr.String() if strings.HasPrefix(saddr, "/ip4/127.0.0.1/") { addrNoIP := strings.TrimPrefix(saddr, "/ip4/127.0.0.1") - addrs[i] = ma.StringCast("/dns4/localhost" + addrNoIP) + addrs[i], _ = ma.StringCast("/dns4/localhost" + addrNoIP) } } return addrs diff --git a/go-libp2p/p2p/host/autorelay/metrics.go b/go-libp2p/p2p/host/autorelay/metrics.go index af4b53e..ba5ee57 100644 --- a/go-libp2p/p2p/host/autorelay/metrics.go +++ b/go-libp2p/p2p/host/autorelay/metrics.go @@ -193,7 +193,6 @@ func (mt *metricsTracer) ReservationOpened(cnt int) { func (mt *metricsTracer) ReservationRequestFinished(isRefresh bool, err error) { tags := metricshelper.GetStringSlice() - defer metricshelper.PutStringSlice(tags) if isRefresh { *tags = append(*tags, "refresh") @@ -206,6 +205,7 @@ func (mt *metricsTracer) ReservationRequestFinished(isRefresh bool, err error) { if !isRefresh && err == nil { reservationsOpenedTotal.Inc() } + metricshelper.PutStringSlice(tags) } func (mt *metricsTracer) RelayAddressUpdated() { @@ -218,27 +218,30 @@ func (mt *metricsTracer) RelayAddressCount(cnt int) { func (mt *metricsTracer) CandidateChecked(supportsCircuitV2 bool) { tags := metricshelper.GetStringSlice() - defer metricshelper.PutStringSlice(tags) + if supportsCircuitV2 { *tags = append(*tags, "yes") } else { *tags = append(*tags, "no") } candidatesCircuitV2SupportTotal.WithLabelValues(*tags...).Inc() + metricshelper.PutStringSlice(tags) } func (mt *metricsTracer) CandidateAdded(cnt int) { tags := metricshelper.GetStringSlice() - defer metricshelper.PutStringSlice(tags) + *tags = append(*tags, "added") candidatesTotal.WithLabelValues(*tags...).Add(float64(cnt)) + metricshelper.PutStringSlice(tags) } func (mt *metricsTracer) CandidateRemoved(cnt int) { tags := metricshelper.GetStringSlice() - defer 
metricshelper.PutStringSlice(tags) + *tags = append(*tags, "removed") candidatesTotal.WithLabelValues(*tags...).Add(float64(cnt)) + metricshelper.PutStringSlice(tags) } func (mt *metricsTracer) CandidateLoopState(state candidateLoopState) { @@ -247,7 +250,6 @@ func (mt *metricsTracer) CandidateLoopState(state candidateLoopState) { func (mt *metricsTracer) ScheduledWorkUpdated(scheduledWork *scheduledWorkTimes) { tags := metricshelper.GetStringSlice() - defer metricshelper.PutStringSlice(tags) *tags = append(*tags, "allowed peer source call") scheduledWorkTime.WithLabelValues(*tags...).Set(float64(scheduledWork.nextAllowedCallToPeerSource.Unix())) @@ -263,6 +265,8 @@ func (mt *metricsTracer) ScheduledWorkUpdated(scheduledWork *scheduledWorkTimes) *tags = append(*tags, "old candidate check") scheduledWorkTime.WithLabelValues(*tags...).Set(float64(scheduledWork.nextOldCandidateCheck.Unix())) + + metricshelper.PutStringSlice(tags) } func (mt *metricsTracer) DesiredReservations(cnt int) { diff --git a/go-libp2p/p2p/host/autorelay/options.go b/go-libp2p/p2p/host/autorelay/options.go index 26ba920..e30aedb 100644 --- a/go-libp2p/p2p/host/autorelay/options.go +++ b/go-libp2p/p2p/host/autorelay/options.go @@ -72,11 +72,11 @@ func WithStaticRelays(static []peer.AddrInfo) Option { numPeers = len(static) } c := make(chan peer.AddrInfo, numPeers) - defer close(c) for i := 0; i < numPeers; i++ { c <- static[i] } + close(c) return c })(c) WithMinCandidates(len(static))(c) diff --git a/go-libp2p/p2p/host/autorelay/relay_finder.go b/go-libp2p/p2p/host/autorelay/relay_finder.go index 13f8c63..ad7d815 100644 --- a/go-libp2p/p2p/host/autorelay/relay_finder.go +++ b/go-libp2p/p2p/host/autorelay/relay_finder.go @@ -119,14 +119,14 @@ func (rf *relayFinder) background(ctx context.Context) { peerSourceRateLimiter := make(chan struct{}, 1) rf.refCount.Add(1) go func() { - defer rf.refCount.Done() rf.findNodes(ctx, peerSourceRateLimiter) + rf.refCount.Done() }() rf.refCount.Add(1) go func() { - defer rf.refCount.Done() rf.handleNewCandidates(ctx) + rf.refCount.Done() }() subConnectedness, err := rf.host.EventBus().Subscribe(new(event.EvtPeerConnectednessChanged), eventbus.Name("autorelay (relay finder)")) @@ -134,11 +134,9 @@ func (rf *relayFinder) background(ctx context.Context) { log.Error("failed to subscribe to the EvtPeerConnectednessChanged") return } - defer subConnectedness.Close() now := rf.conf.clock.Now() bootDelayTimer := rf.conf.clock.InstantTimer(now.Add(rf.conf.bootDelay)) - defer bootDelayTimer.Stop() // This is the least frequent event. It's our fallback timer if we don't have any other work to do. 
leastFrequentInterval := rf.conf.minInterval @@ -162,12 +160,14 @@ func (rf *relayFinder) background(ctx context.Context) { } workTimer := rf.conf.clock.InstantTimer(rf.runScheduledWork(ctx, now, scheduledWork, peerSourceRateLimiter)) - defer workTimer.Stop() for { select { case ev, ok := <-subConnectedness.Out(): if !ok { + subConnectedness.Close() + bootDelayTimer.Stop() + workTimer.Stop() return } evt := ev.(event.EvtPeerConnectednessChanged) @@ -206,6 +206,9 @@ func (rf *relayFinder) background(ctx context.Context) { // Ignore the next time because we aren't scheduling any future work here _ = rf.runScheduledWork(ctx, rf.conf.clock.Now(), scheduledWork, peerSourceRateLimiter) case <-ctx.Done(): + subConnectedness.Close() + bootDelayTimer.Stop() + workTimer.Stop() return } } @@ -282,7 +285,6 @@ func (rf *relayFinder) clearOldCandidates(now time.Time) time.Time { var deleted bool rf.candidateMx.Lock() - defer rf.candidateMx.Unlock() for id, cand := range rf.candidates { expiry := cand.added.Add(rf.conf.maxCandidateAge) if expiry.After(now) { @@ -298,7 +300,7 @@ func (rf *relayFinder) clearOldCandidates(now time.Time) time.Time { if deleted { rf.notifyMaybeNeedNewCandidates() } - + rf.candidateMx.Unlock() return nextTime } @@ -308,7 +310,6 @@ func (rf *relayFinder) clearBackoff(now time.Time) time.Time { nextTime := now.Add(rf.conf.backoff) rf.candidateMx.Lock() - defer rf.candidateMx.Unlock() for id, t := range rf.backoff { expiry := t.Add(rf.conf.backoff) if expiry.After(now) { @@ -320,6 +321,7 @@ func (rf *relayFinder) clearBackoff(now time.Time) time.Time { delete(rf.backoff, id) } } + rf.candidateMx.Unlock() return nextTime } @@ -383,11 +385,11 @@ func (rf *relayFinder) findNodes(ctx context.Context, peerSourceRateLimiter <-ch rf.refCount.Add(1) wg.Add(1) go func() { - defer rf.refCount.Done() - defer wg.Done() if added := rf.handleNewNode(ctx, pi); added { rf.notifyNewCandidate() } + rf.refCount.Done() + wg.Done() }() case <-ctx.Done(): rf.metricsTracer.CandidateLoopState(stopped) @@ -430,13 +432,13 @@ func (rf *relayFinder) handleNewNode(ctx context.Context, pi peer.AddrInfo) (add } ctx, cancel := context.WithTimeout(ctx, 20*time.Second) - defer cancel() supportsV2, err := rf.tryNode(ctx, pi) if err != nil { log.Debugf("node %s not accepted as a candidate: %s", pi.ID, err) if err == errProtocolNotSupported { rf.metricsTracer.CandidateChecked(false) } + cancel() return false } rf.metricsTracer.CandidateChecked(true) @@ -444,6 +446,7 @@ func (rf *relayFinder) handleNewNode(ctx context.Context, pi peer.AddrInfo) (add rf.candidateMx.Lock() if len(rf.candidates) > rf.conf.maxCandidates { rf.candidateMx.Unlock() + cancel() return false } log.Debugw("node supports relay protocol", "peer", pi.ID, "supports circuit v2", supportsV2) @@ -453,6 +456,7 @@ func (rf *relayFinder) handleNewNode(ctx context.Context, pi peer.AddrInfo) (add supportsRelayV2: supportsV2, }) rf.candidateMx.Unlock() + cancel() return true } @@ -588,7 +592,6 @@ func (rf *relayFinder) connectToRelay(ctx context.Context, cand *candidate) (*ci id := cand.ai.ID ctx, cancel := context.WithTimeout(ctx, 10*time.Second) - defer cancel() var rsvp *circuitv2.Reservation @@ -598,6 +601,7 @@ func (rf *relayFinder) connectToRelay(ctx context.Context, cand *candidate) (*ci rf.candidateMx.Lock() rf.removeCandidate(cand.ai.ID) rf.candidateMx.Unlock() + cancel() return nil, fmt.Errorf("failed to connect: %w", err) } } @@ -615,6 +619,7 @@ func (rf *relayFinder) connectToRelay(ctx context.Context, cand *candidate) (*ci rf.candidateMx.Lock() 
rf.removeCandidate(id) rf.candidateMx.Unlock() + cancel() return rsvp, err } @@ -716,9 +721,9 @@ func (rf *relayFinder) selectCandidates() []*candidate { // through which we can be dialed. func (rf *relayFinder) relayAddrs(addrs []ma.Multiaddr) []ma.Multiaddr { rf.relayMx.Lock() - defer rf.relayMx.Unlock() if rf.cachedAddrs != nil && rf.conf.clock.Now().Before(rf.cachedAddrsExpiry) { + rf.relayMx.Unlock() return rf.cachedAddrs } @@ -726,7 +731,7 @@ func (rf *relayFinder) relayAddrs(addrs []ma.Multiaddr) []ma.Multiaddr { // only keep private addrs from the original addr set for _, addr := range addrs { - if manet.IsPrivateAddr(addr) { + if is, err := manet.IsPrivateAddr(addr); is && err == nil { raddrs = append(raddrs, addr) } } @@ -735,8 +740,11 @@ func (rf *relayFinder) relayAddrs(addrs []ma.Multiaddr) []ma.Multiaddr { relayAddrCnt := 0 for p := range rf.relays { addrs := cleanupAddressSet(rf.host.Peerstore().Addrs(p)) + circuit, err := ma.StringCast(fmt.Sprintf("/p2p/%s/p2p-circuit", p)) + if err != nil { + continue + } relayAddrCnt += len(addrs) - circuit := ma.StringCast(fmt.Sprintf("/p2p/%s/p2p-circuit", p)) for _, addr := range addrs { pub := addr.Encapsulate(circuit) raddrs = append(raddrs, pub) @@ -747,13 +755,14 @@ func (rf *relayFinder) relayAddrs(addrs []ma.Multiaddr) []ma.Multiaddr { rf.cachedAddrsExpiry = rf.conf.clock.Now().Add(30 * time.Second) rf.metricsTracer.RelayAddressCount(relayAddrCnt) + rf.relayMx.Unlock() return raddrs } func (rf *relayFinder) Start() error { rf.ctxCancelMx.Lock() - defer rf.ctxCancelMx.Unlock() if rf.ctxCancel != nil { + rf.ctxCancelMx.Unlock() return errAlreadyRunning } log.Debug("starting relay finder") @@ -764,15 +773,15 @@ func (rf *relayFinder) Start() error { rf.ctxCancel = cancel rf.refCount.Add(1) go func() { - defer rf.refCount.Done() rf.background(ctx) + rf.refCount.Done() }() + rf.ctxCancelMx.Unlock() return nil } func (rf *relayFinder) Stop() error { rf.ctxCancelMx.Lock() - defer rf.ctxCancelMx.Unlock() log.Debug("stopping relay finder") if rf.ctxCancel != nil { rf.ctxCancel() @@ -781,6 +790,7 @@ func (rf *relayFinder) Stop() error { rf.ctxCancel = nil rf.resetMetrics() + rf.ctxCancelMx.Unlock() return nil } diff --git a/go-libp2p/p2p/host/basic/basic_host.go b/go-libp2p/p2p/host/basic/basic_host.go index 8fc808e..6c5244c 100644 --- a/go-libp2p/p2p/host/basic/basic_host.go +++ b/go-libp2p/p2p/host/basic/basic_host.go @@ -327,7 +327,6 @@ func NewHost(n network.Network, opts *HostOpts) (*BasicHost, error) { func (h *BasicHost) updateLocalIpAddr() { h.addrMu.Lock() - defer h.addrMu.Unlock() h.filteredInterfaceAddrs = nil h.allInterfaceAddrs = nil @@ -367,6 +366,7 @@ func (h *BasicHost) updateLocalIpAddr() { // Then bail. There's nothing else we can do here. 
h.filteredInterfaceAddrs = append(h.filteredInterfaceAddrs, manet.IP4Loopback, manet.IP6Loopback) h.allInterfaceAddrs = h.filteredInterfaceAddrs + h.addrMu.Unlock() return } @@ -391,6 +391,7 @@ func (h *BasicHost) updateLocalIpAddr() { } } } + h.addrMu.Unlock() } // Start starts background tasks in the host @@ -508,7 +509,6 @@ func (h *BasicHost) makeSignedPeerRecord(addrs []ma.Multiaddr) (*record.Envelope } func (h *BasicHost) background() { - defer h.refCount.Done() var lastAddrs []ma.Multiaddr emitAddrChange := func(currentAddrs []ma.Multiaddr, lastAddrs []ma.Multiaddr) { @@ -548,7 +548,6 @@ func (h *BasicHost) background() { // periodically schedules an IdentifyPush to update our peers for changes // in our address set (if needed) ticker := time.NewTicker(addrChangeTickrInterval) - defer ticker.Stop() for { if len(h.network.ListenAddresses()) > 0 { @@ -564,6 +563,8 @@ func (h *BasicHost) background() { case <-ticker.C: case <-h.addrChangeChan: case <-h.ctx.Done(): + h.refCount.Done() + ticker.Stop() return } } @@ -831,7 +832,7 @@ func (h *BasicHost) NormalizeMultiaddr(addr ma.Multiaddr) ma.Multiaddr { if ok && n > 0 { out := addr for i := 0; i < n; i++ { - out, _ = ma.SplitLast(out) + out, _, _ = ma.SplitLast(out) } return out } @@ -885,7 +886,7 @@ func (h *BasicHost) AllAddrs() []ma.Multiaddr { } // Did the router give us a routable public addr? - if manet.IsPublicAddr(extMaddr) { + if is, err := manet.IsPublicAddr(extMaddr); is && err == nil { // well done continue } @@ -911,12 +912,12 @@ func (h *BasicHost) AllAddrs() []ma.Multiaddr { } // Drop the IP from the external maddr - _, extMaddrNoIP := ma.SplitFirst(extMaddr) + _, extMaddrNoIP, _ := ma.SplitFirst(extMaddr) for _, obsMaddr := range observed { // Extract a public observed addr. - ip, _ := ma.SplitFirst(obsMaddr) - if ip == nil || !manet.IsPublicAddr(ip) { + ip, _, _ := ma.SplitFirst(obsMaddr) + if is, err := manet.IsPublicAddr(ip); err != nil || !is || ip == nil { continue } @@ -937,7 +938,7 @@ func (h *BasicHost) AllAddrs() []ma.Multiaddr { return finalAddrs } -var wtComponent = ma.StringCast("/webtransport") +var wtComponent, _ = ma.StringCast("/webtransport") // inferWebtransportAddrsFromQuic infers more webtransport addresses from QUIC addresses. // This is useful when we discover our public QUIC address, but haven't discovered our public WebTransport addrs. @@ -952,7 +953,7 @@ func inferWebtransportAddrsFromQuic(in []ma.Multiaddr) []ma.Multiaddr { // Count the number of QUIC addrs, this will let us allocate just once at the beginning. 
quicAddrCount := 0 for _, addr := range in { - if _, lastComponent := ma.SplitLast(addr); lastComponent.Protocol().Code == ma.P_QUIC_V1 { + if _, lastComponent, err := ma.SplitLast(addr); err == nil && lastComponent.Protocol().Code == ma.P_QUIC_V1 { quicAddrCount++ } } @@ -964,14 +965,14 @@ func inferWebtransportAddrsFromQuic(in []ma.Multiaddr) []ma.Multiaddr { if isWebtransport { for i := 0; i < numCertHashes; i++ { // Remove certhashes - addr, _ = ma.SplitLast(addr) + addr, _, _ = ma.SplitLast(addr) } webtransportAddrs[string(addr.Bytes())] = struct{}{} // Remove webtransport component, now it's a multiaddr that ends in /quic-v1 - addr, _ = ma.SplitLast(addr) + addr, _, _ = ma.SplitLast(addr) } - if _, lastComponent := ma.SplitLast(addr); lastComponent.Protocol().Code == ma.P_QUIC_V1 { + if _, lastComponent, err := ma.SplitLast(addr); err == nil && lastComponent.Protocol().Code == ma.P_QUIC_V1 { bytes := addr.Bytes() if _, ok := quicOrWebtransportAddrs[string(bytes)]; ok { foundSameListeningAddr = true @@ -995,7 +996,7 @@ func inferWebtransportAddrsFromQuic(in []ma.Multiaddr) []ma.Multiaddr { for _, addr := range in { // Add all the original addresses out = append(out, addr) - if _, lastComponent := ma.SplitLast(addr); lastComponent.Protocol().Code == ma.P_QUIC_V1 { + if _, lastComponent, err := ma.SplitLast(addr); err == nil && lastComponent.Protocol().Code == ma.P_QUIC_V1 { // Convert quic to webtransport addr = addr.Encapsulate(wtComponent) if _, ok := webtransportAddrs[string(addr.Bytes())]; ok { @@ -1021,13 +1022,16 @@ func trimHostAddrList(addrs []ma.Multiaddr, maxSize int) []ma.Multiaddr { score := func(addr ma.Multiaddr) int { var res int - if manet.IsPublicAddr(addr) { + if is, err := manet.IsPublicAddr(addr); is && err == nil { res |= 1 << 12 } else if !manet.IsIPLoopback(addr) { res |= 1 << 11 } var protocolWeight int - ma.ForEach(addr, func(c ma.Component) bool { + ma.ForEach(addr, func(c ma.Component, e error) bool { + if e != nil { + return false + } switch c.Protocol().Code { case ma.P_QUIC_V1: protocolWeight = 5 @@ -1065,17 +1069,19 @@ func trimHostAddrList(addrs []ma.Multiaddr, maxSize int) []ma.Multiaddr { // SetAutoNat sets the autonat service for the host. func (h *BasicHost) SetAutoNat(a autonat.AutoNAT) { h.addrMu.Lock() - defer h.addrMu.Unlock() + if h.autoNat == nil { h.autoNat = a } + h.addrMu.Unlock() } // GetAutoNat returns the host's AutoNAT service, if AutoNAT is enabled. func (h *BasicHost) GetAutoNat() autonat.AutoNAT { h.addrMu.Lock() - defer h.addrMu.Unlock() - return h.autoNat + n := h.autoNat + h.addrMu.Unlock() + return n } // Close shuts down the Host's services (network, etc). 
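The basic_host.go hunks above code against the forked go-multiaddr used throughout this patch: StringCast returns (Multiaddr, error) rather than panicking, SplitFirst and SplitLast gain a trailing error, ForEach's callback receives (Component, error), and manet.IsPublicAddr/IsPrivateAddr return (bool, error). A small sketch of the calling convention follows; the signatures are taken from the call sites above, and it compiles only against this tree's fork, not upstream github.com/multiformats/go-multiaddr.

    // Assumes this repository's forked go-multiaddr with error-returning APIs.
    package main

    import (
        "fmt"

        ma "github.com/multiformats/go-multiaddr"
        manet "github.com/multiformats/go-multiaddr/net"
    )

    func describe(s string) {
        addr, err := ma.StringCast(s) // fork: returns an error instead of panicking
        if err != nil {
            fmt.Println("bad multiaddr:", err)
            return
        }

        // Malformed components surface through the third return value.
        first, rest, err := ma.SplitFirst(addr)
        if err != nil || first == nil {
            fmt.Println("no leading component:", err)
            return
        }
        fmt.Println("first:", first.Protocol().Name, "rest:", rest)

        // The patch treats an error as "not public" rather than panicking,
        // hence the (is && err == nil) tests above.
        if is, err := manet.IsPublicAddr(addr); is && err == nil {
            fmt.Println("public address")
        }
    }

    func main() {
        describe("/ip4/1.2.3.4/tcp/1234")
    }

The test diffs that follow adopt the same two-value StringCast, discarding the error where the literal is known to be well formed.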
diff --git a/go-libp2p/p2p/host/basic/basic_host_test.go b/go-libp2p/p2p/host/basic/basic_host_test.go index c4f0680..ec50f61 100644 --- a/go-libp2p/p2p/host/basic/basic_host_test.go +++ b/go-libp2p/p2p/host/basic/basic_host_test.go @@ -95,7 +95,8 @@ func TestSignedPeerRecordWithNoListenAddrs(t *testing.T) { require.Empty(t, h.Addrs(), "expected no listen addrs") // now add a listen addr - require.NoError(t, h.Network().Listen(ma.StringCast("/ip4/0.0.0.0/tcp/0"))) + m, _ := ma.StringCast("/ip4/0.0.0.0/tcp/0") + require.NoError(t, h.Network().Listen(m)) require.NotEmpty(t, h.Addrs(), "expected at least 1 listen addr") cab, ok := peerstore.GetCertifiedAddrBook(h.Peerstore()) @@ -168,7 +169,7 @@ func TestProtocolHandlerEvents(t *testing.T) { } func TestHostAddrsFactory(t *testing.T) { - maddr := ma.StringCast("/ip4/1.2.3.4/tcp/1234") + maddr, _ := ma.StringCast("/ip4/1.2.3.4/tcp/1234") addrsFactory := func(addrs []ma.Multiaddr) []ma.Multiaddr { return []ma.Multiaddr{maddr} } @@ -209,7 +210,8 @@ func TestLocalIPChangesWhenListenAddrChanges(t *testing.T) { h.addrMu.Unlock() // change listen addrs and verify local IP addr is not nil again - require.NoError(t, h.Network().Listen(ma.StringCast("/ip4/0.0.0.0/tcp/0"))) + m, _ := ma.StringCast("/ip4/0.0.0.0/tcp/0") + require.NoError(t, h.Network().Listen(m)) h.SignalAddressChange() time.Sleep(1 * time.Second) @@ -227,14 +229,15 @@ func TestAllAddrs(t *testing.T) { require.Nil(t, h.AllAddrs()) // listen on loopback - laddr := ma.StringCast("/ip4/127.0.0.1/tcp/0") + laddr, _ := ma.StringCast("/ip4/127.0.0.1/tcp/0") require.NoError(t, h.Network().Listen(laddr)) require.Len(t, h.AllAddrs(), 1) firstAddr := h.AllAddrs()[0] require.Equal(t, "/ip4/127.0.0.1", ma.Split(firstAddr)[0].String()) // listen on IPv4 0.0.0.0 - require.NoError(t, h.Network().Listen(ma.StringCast("/ip4/0.0.0.0/tcp/0"))) + m, _ := ma.StringCast("/ip4/0.0.0.0/tcp/0") + require.NoError(t, h.Network().Listen(m)) // should contain localhost and private local addr along with previous listen address require.Len(t, h.AllAddrs(), 3) // Should still contain the original addr. @@ -551,7 +554,8 @@ func TestProtoDowngrade(t *testing.T) { func TestAddrChangeImmediatelyIfAddressNonEmpty(t *testing.T) { ctx := context.Background() - taddrs := []ma.Multiaddr{ma.StringCast("/ip4/1.2.3.4/tcp/1234")} + m, _ := ma.StringCast("/ip4/1.2.3.4/tcp/1234") + taddrs := []ma.Multiaddr{m} starting := make(chan struct{}) h, err := NewHost(swarmt.GenSwarm(t), &HostOpts{AddrsFactory: func(addrs []ma.Multiaddr) []ma.Multiaddr { @@ -569,10 +573,11 @@ func TestAddrChangeImmediatelyIfAddressNonEmpty(t *testing.T) { defer sub.Close() h.Start() + n, _ := ma.StringCast("/ip4/1.2.3.4/tcp/1234") expected := event.EvtLocalAddressesUpdated{ Diffs: true, Current: []event.UpdatedAddress{ - {Action: event.Added, Address: ma.StringCast("/ip4/1.2.3.4/tcp/1234")}, + {Action: event.Added, Address: n}, }, Removed: []event.UpdatedAddress{}} @@ -617,11 +622,14 @@ func TestHostAddrChangeDetection(t *testing.T) { // This test uses the address factory to provide several // sets of listen addresses for the host. It advances through // the sets by changing the currentAddrSet index var below. 
+ m1, _ := ma.StringCast("/ip4/1.2.3.4/tcp/1234") + m2, _ := ma.StringCast("/ip4/2.3.4.5/tcp/1234") + m3, _ := ma.StringCast("/ip4/3.4.5.6/tcp/4321") addrSets := [][]ma.Multiaddr{ {}, - {ma.StringCast("/ip4/1.2.3.4/tcp/1234")}, - {ma.StringCast("/ip4/1.2.3.4/tcp/1234"), ma.StringCast("/ip4/2.3.4.5/tcp/1234")}, - {ma.StringCast("/ip4/2.3.4.5/tcp/1234"), ma.StringCast("/ip4/3.4.5.6/tcp/4321")}, + {m1}, + {m1, m2}, + {m2, m3}, } // The events we expect the host to emit when SignalAddressChange is called @@ -630,26 +638,26 @@ func TestHostAddrChangeDetection(t *testing.T) { { Diffs: true, Current: []event.UpdatedAddress{ - {Action: event.Added, Address: ma.StringCast("/ip4/1.2.3.4/tcp/1234")}, + {Action: event.Added, Address: m1}, }, Removed: []event.UpdatedAddress{}, }, { Diffs: true, Current: []event.UpdatedAddress{ - {Action: event.Maintained, Address: ma.StringCast("/ip4/1.2.3.4/tcp/1234")}, - {Action: event.Added, Address: ma.StringCast("/ip4/2.3.4.5/tcp/1234")}, + {Action: event.Maintained, Address: m1}, + {Action: event.Added, Address: m2}, }, Removed: []event.UpdatedAddress{}, }, { Diffs: true, Current: []event.UpdatedAddress{ - {Action: event.Added, Address: ma.StringCast("/ip4/3.4.5.6/tcp/4321")}, - {Action: event.Maintained, Address: ma.StringCast("/ip4/2.3.4.5/tcp/1234")}, + {Action: event.Added, Address: m3}, + {Action: event.Maintained, Address: m2}, }, Removed: []event.UpdatedAddress{ - {Action: event.Removed, Address: ma.StringCast("/ip4/1.2.3.4/tcp/1234")}, + {Action: event.Removed, Address: m1}, }, }, } @@ -821,8 +829,8 @@ func TestNormalizeMultiaddr(t *testing.T) { h1, err := NewHost(swarmt.GenSwarm(t), nil) require.NoError(t, err) defer h1.Close() - - require.Equal(t, "/ip4/1.2.3.4/udp/9999/quic-v1/webtransport", h1.NormalizeMultiaddr(ma.StringCast("/ip4/1.2.3.4/udp/9999/quic-v1/webtransport/certhash/uEgNmb28")).String()) + m, _ := ma.StringCast("/ip4/1.2.3.4/udp/9999/quic-v1/webtransport/certhash/uEgNmb28") + require.Equal(t, "/ip4/1.2.3.4/udp/9999/quic-v1/webtransport", h1.NormalizeMultiaddr(m).String()) } func TestInferWebtransportAddrsFromQuic(t *testing.T) { @@ -883,7 +891,8 @@ func TestInferWebtransportAddrsFromQuic(t *testing.T) { sort.StringSlice(tc.out).Sort() min := make([]ma.Multiaddr, 0, len(tc.in)) for _, addr := range tc.in { - min = append(min, ma.StringCast(addr)) + m1, _ := ma.StringCast(addr) + min = append(min, m1) } outMa := inferWebtransportAddrsFromQuic(min) outStr := make([]string, 0, len(outMa)) @@ -905,14 +914,14 @@ func TestTrimHostAddrList(t *testing.T) { out []ma.Multiaddr } - tcpPublic := ma.StringCast("/ip4/1.1.1.1/tcp/1") - quicPublic := ma.StringCast("/ip4/1.1.1.1/udp/1/quic-v1") + tcpPublic, _ := ma.StringCast("/ip4/1.1.1.1/tcp/1") + quicPublic, _ := ma.StringCast("/ip4/1.1.1.1/udp/1/quic-v1") - tcpPrivate := ma.StringCast("/ip4/192.168.1.1/tcp/1") - quicPrivate := ma.StringCast("/ip4/192.168.1.1/udp/1/quic-v1") + tcpPrivate, _ := ma.StringCast("/ip4/192.168.1.1/tcp/1") + quicPrivate, _ := ma.StringCast("/ip4/192.168.1.1/udp/1/quic-v1") - tcpLocal := ma.StringCast("/ip4/127.0.0.1/tcp/1") - quicLocal := ma.StringCast("/ip4/127.0.0.1/udp/1/quic-v1") + tcpLocal, _ := ma.StringCast("/ip4/127.0.0.1/tcp/1") + quicLocal, _ := ma.StringCast("/ip4/127.0.0.1/udp/1/quic-v1") testCases := []testCase{ { diff --git a/go-libp2p/p2p/host/basic/natmgr.go b/go-libp2p/p2p/host/basic/natmgr.go index bc55e4d..add0465 100644 --- a/go-libp2p/p2p/host/basic/natmgr.go +++ b/go-libp2p/p2p/host/basic/natmgr.go @@ -89,27 +89,24 @@ func (nmgr *natManager) Close() error 
{ func (nmgr *natManager) HasDiscoveredNAT() bool { nmgr.natMx.RLock() - defer nmgr.natMx.RUnlock() - return nmgr.nat != nil + h := nmgr.nat != nil + nmgr.natMx.RUnlock() + return h } func (nmgr *natManager) background(ctx context.Context) { - defer nmgr.refCount.Done() - - defer func() { - nmgr.natMx.Lock() - defer nmgr.natMx.Unlock() - - if nmgr.nat != nil { - nmgr.nat.Close() - } - }() - discoverCtx, cancel := context.WithTimeout(ctx, 10*time.Second) - defer cancel() + natInstance, err := discoverNAT(discoverCtx) if err != nil { log.Info("DiscoverNAT error:", err) + nmgr.refCount.Done() + nmgr.natMx.Lock() + if nmgr.nat != nil { + nmgr.nat.Close() + } + nmgr.natMx.Unlock() + cancel() return } @@ -121,7 +118,6 @@ func (nmgr *natManager) background(ctx context.Context) { // we need to sign up here to avoid missing some notifs // before the NAT has been found. nmgr.net.Notify((*nmgrNetNotifiee)(nmgr)) - defer nmgr.net.StopNotify((*nmgrNetNotifiee)(nmgr)) nmgr.doSync() // sync one first. for { @@ -129,6 +125,14 @@ func (nmgr *natManager) background(ctx context.Context) { case <-nmgr.syncFlag: nmgr.doSync() // sync when our listen addresses change. case <-ctx.Done(): + nmgr.refCount.Done() + nmgr.natMx.Lock() + if nmgr.nat != nil { + nmgr.nat.Close() + } + nmgr.natMx.Unlock() + cancel() + nmgr.net.StopNotify((*nmgrNetNotifiee)(nmgr)) return } } @@ -150,8 +154,8 @@ func (nmgr *natManager) doSync() { var newAddresses []entry for _, maddr := range nmgr.net.ListenAddresses() { // Strip the IP - maIP, rest := ma.SplitFirst(maddr) - if maIP == nil || rest == nil { + maIP, rest, err := ma.SplitFirst(maddr) + if maIP == nil || rest == nil || err != nil { continue } @@ -168,8 +172,8 @@ func (nmgr *natManager) doSync() { } // Extract the port/protocol - proto, _ := ma.SplitFirst(rest) - if proto == nil { + proto, _, err := ma.SplitFirst(rest) + if proto == nil || err != nil { continue } @@ -195,9 +199,6 @@ func (nmgr *natManager) doSync() { } } - var wg sync.WaitGroup - defer wg.Wait() - // Close old mappings for e, v := range nmgr.tracked { if !v { @@ -217,15 +218,15 @@ func (nmgr *natManager) doSync() { func (nmgr *natManager) GetMapping(addr ma.Multiaddr) ma.Multiaddr { nmgr.natMx.Lock() - defer nmgr.natMx.Unlock() if nmgr.nat == nil { // NAT not yet initialized + nmgr.natMx.Unlock() return nil } var found bool var proto int // ma.P_TCP or ma.P_UDP - transport, rest := ma.SplitFunc(addr, func(c ma.Component) bool { + transport, rest, err := ma.SplitFunc(addr, func(c ma.Component) bool { if found { return true } @@ -233,13 +234,20 @@ func (nmgr *natManager) GetMapping(addr ma.Multiaddr) ma.Multiaddr { found = proto == ma.P_TCP || proto == ma.P_UDP return false }) + if err != nil { + nmgr.natMx.Unlock() + return nil + } + if !manet.IsThinWaist(transport) { + nmgr.natMx.Unlock() return nil } naddr, err := manet.ToNetAddr(transport) if err != nil { log.Error("error parsing net multiaddr %q: %s", transport, err) + nmgr.natMx.Unlock() return nil } @@ -258,16 +266,19 @@ func (nmgr *natManager) GetMapping(addr ma.Multiaddr) ma.Multiaddr { port = naddr.Port protocol = "udp" default: + nmgr.natMx.Unlock() return nil } if !ip.IsGlobalUnicast() && !ip.IsUnspecified() { // We only map global unicast & unspecified addresses ports, not broadcast, multicast, etc. 
+ nmgr.natMx.Unlock() return nil } extAddr, ok := nmgr.nat.GetMapping(protocol, port) if !ok { + nmgr.natMx.Unlock() return nil } @@ -281,12 +292,14 @@ func (nmgr *natManager) GetMapping(addr ma.Multiaddr) ma.Multiaddr { mappedMaddr, err := manet.FromNetAddr(mappedAddr) if err != nil { log.Errorf("mapped addr can't be turned into a multiaddr %q: %s", mappedAddr, err) + nmgr.natMx.Unlock() return nil } extMaddr := mappedMaddr if rest != nil { extMaddr = ma.Join(extMaddr, rest) } + nmgr.natMx.Unlock() return extMaddr } diff --git a/go-libp2p/p2p/host/basic/natmgr_test.go b/go-libp2p/p2p/host/basic/natmgr_test.go index 8cbe861..a8e9a9a 100644 --- a/go-libp2p/p2p/host/basic/natmgr_test.go +++ b/go-libp2p/p2p/host/basic/natmgr_test.go @@ -42,23 +42,32 @@ func TestMapping(t *testing.T) { externalAddr := netip.AddrPortFrom(netip.AddrFrom4([4]byte{1, 2, 3, 4}), 4321) // pretend that we have a TCP mapping mockNAT.EXPECT().GetMapping("tcp", 1234).Return(externalAddr, true) - require.Equal(t, ma.StringCast("/ip4/1.2.3.4/tcp/4321"), m.GetMapping(ma.StringCast("/ip4/0.0.0.0/tcp/1234"))) + m1, _ := ma.StringCast("/ip4/1.2.3.4/tcp/4321") + m2, _ := ma.StringCast("/ip4/0.0.0.0/tcp/1234") + require.Equal(t, m1, m.GetMapping(m2)) // pretend that we have a QUIC mapping mockNAT.EXPECT().GetMapping("udp", 1234).Return(externalAddr, true) - require.Equal(t, ma.StringCast("/ip4/1.2.3.4/udp/4321/quic-v1"), m.GetMapping(ma.StringCast("/ip4/0.0.0.0/udp/1234/quic-v1"))) + m3, _ := ma.StringCast("/ip4/1.2.3.4/udp/4321/quic-v1") + m4, _ := ma.StringCast("/ip4/0.0.0.0/udp/1234/quic-v1") + require.Equal(t, m3, m.GetMapping(m4)) // pretend that there's no mapping mockNAT.EXPECT().GetMapping("tcp", 1234).Return(netip.AddrPort{}, false) - require.Nil(t, m.GetMapping(ma.StringCast("/ip4/0.0.0.0/tcp/1234"))) + m5, _ := ma.StringCast("/ip4/0.0.0.0/tcp/1234") + require.Nil(t, m.GetMapping(m5)) // make sure this works for WebSocket addresses as well mockNAT.EXPECT().GetMapping("tcp", 1234).Return(externalAddr, true) - require.Equal(t, ma.StringCast("/ip4/1.2.3.4/tcp/4321/ws"), m.GetMapping(ma.StringCast("/ip4/0.0.0.0/tcp/1234/ws"))) + m6, _ := ma.StringCast("/ip4/1.2.3.4/tcp/4321/ws") + m7, _ := ma.StringCast("/ip4/0.0.0.0/tcp/1234/ws") + require.Equal(t, m6, m.GetMapping(m7)) // make sure this works for WebTransport addresses as well mockNAT.EXPECT().GetMapping("udp", 1234).Return(externalAddr, true) - require.Equal(t, ma.StringCast("/ip4/1.2.3.4/udp/4321/quic-v1/webtransport"), m.GetMapping(ma.StringCast("/ip4/0.0.0.0/udp/1234/quic-v1/webtransport"))) + m8, _ := ma.StringCast("/ip4/1.2.3.4/udp/4321/quic-v1/webtransport") + m9, _ := ma.StringCast("/ip4/0.0.0.0/udp/1234/quic-v1/webtransport") + require.Equal(t, m8, m.GetMapping(m9)) } func TestAddAndRemoveListeners(t *testing.T) { @@ -77,7 +86,8 @@ func TestAddAndRemoveListeners(t *testing.T) { added := make(chan struct{}, 1) // add a TCP listener mockNAT.EXPECT().AddMapping(gomock.Any(), "tcp", 1234).Do(func(context.Context, string, int) { added <- struct{}{} }) - require.NoError(t, sw.Listen(ma.StringCast("/ip4/0.0.0.0/tcp/1234"))) + m1, _ := ma.StringCast("/ip4/0.0.0.0/tcp/1234") + require.NoError(t, sw.Listen(m1)) select { case <-added: case <-time.After(time.Second): @@ -86,7 +96,8 @@ func TestAddAndRemoveListeners(t *testing.T) { // add a QUIC listener mockNAT.EXPECT().AddMapping(gomock.Any(), "udp", 1234).Do(func(context.Context, string, int) { added <- struct{}{} }) - require.NoError(t, sw.Listen(ma.StringCast("/ip4/0.0.0.0/udp/1234/quic-v1"))) + m2, _ := 
ma.StringCast("/ip4/0.0.0.0/udp/1234/quic-v1") + require.NoError(t, sw.Listen(m2)) select { case <-added: case <-time.After(time.Second): @@ -95,7 +106,8 @@ func TestAddAndRemoveListeners(t *testing.T) { // remove the QUIC listener mockNAT.EXPECT().RemoveMapping(gomock.Any(), "udp", 1234).Do(func(context.Context, string, int) { added <- struct{}{} }) - sw.ListenClose(ma.StringCast("/ip4/0.0.0.0/udp/1234/quic-v1")) + m3, _ := ma.StringCast("/ip4/0.0.0.0/udp/1234/quic-v1") + sw.ListenClose(m3) select { case <-added: case <-time.After(time.Second): diff --git a/go-libp2p/p2p/host/eventbus/basic.go b/go-libp2p/p2p/host/eventbus/basic.go index 42365a7..2f5eb78 100644 --- a/go-libp2p/p2p/host/eventbus/basic.go +++ b/go-libp2p/p2p/host/eventbus/basic.go @@ -85,8 +85,8 @@ func (b *basicBus) withNode(typ reflect.Type, cb func(*node), async func(*node)) n.lk.Unlock() } else { go func() { - defer n.lk.Unlock() async(n) + n.lk.Unlock() }() } } @@ -305,12 +305,12 @@ func (b *basicBus) Emitter(evtType interface{}, opts ...event.EmitterOpt) (e eve // or subscribers for. func (b *basicBus) GetAllEventTypes() []reflect.Type { b.lk.RLock() - defer b.lk.RUnlock() types := make([]reflect.Type, 0, len(b.nodes)) for t := range b.nodes { types = append(types, t) } + b.lk.RUnlock() return types } diff --git a/go-libp2p/p2p/host/eventbus/basic_metrics.go b/go-libp2p/p2p/host/eventbus/basic_metrics.go index 8e7b1e8..92978a0 100644 --- a/go-libp2p/p2p/host/eventbus/basic_metrics.go +++ b/go-libp2p/p2p/host/eventbus/basic_metrics.go @@ -112,39 +112,38 @@ func NewMetricsTracer(opts ...MetricsTracerOption) MetricsTracer { func (m *metricsTracer) EventEmitted(typ reflect.Type) { tags := metricshelper.GetStringSlice() - defer metricshelper.PutStringSlice(tags) *tags = append(*tags, strings.TrimPrefix(typ.String(), "event.")) eventsEmitted.WithLabelValues(*tags...).Inc() + metricshelper.PutStringSlice(tags) } func (m *metricsTracer) AddSubscriber(typ reflect.Type) { tags := metricshelper.GetStringSlice() - defer metricshelper.PutStringSlice(tags) *tags = append(*tags, strings.TrimPrefix(typ.String(), "event.")) totalSubscribers.WithLabelValues(*tags...).Inc() + metricshelper.PutStringSlice(tags) } func (m *metricsTracer) RemoveSubscriber(typ reflect.Type) { tags := metricshelper.GetStringSlice() - defer metricshelper.PutStringSlice(tags) *tags = append(*tags, strings.TrimPrefix(typ.String(), "event.")) totalSubscribers.WithLabelValues(*tags...).Dec() + metricshelper.PutStringSlice(tags) } func (m *metricsTracer) SubscriberQueueLength(name string, n int) { tags := metricshelper.GetStringSlice() - defer metricshelper.PutStringSlice(tags) *tags = append(*tags, name) subscriberQueueLength.WithLabelValues(*tags...).Set(float64(n)) + metricshelper.PutStringSlice(tags) } func (m *metricsTracer) SubscriberQueueFull(name string, isFull bool) { tags := metricshelper.GetStringSlice() - defer metricshelper.PutStringSlice(tags) *tags = append(*tags, name) observer := subscriberQueueFull.WithLabelValues(*tags...) 
@@ -153,12 +152,13 @@ func (m *metricsTracer) SubscriberQueueFull(name string, isFull bool) { } else { observer.Set(0) } + metricshelper.PutStringSlice(tags) } func (m *metricsTracer) SubscriberEventQueued(name string) { tags := metricshelper.GetStringSlice() - defer metricshelper.PutStringSlice(tags) *tags = append(*tags, name) subscriberEventQueued.WithLabelValues(*tags...).Inc() + metricshelper.PutStringSlice(tags) } diff --git a/go-libp2p/p2p/host/peerstore/metrics.go b/go-libp2p/p2p/host/peerstore/metrics.go index e05fda7..2dcfbe9 100644 --- a/go-libp2p/p2p/host/peerstore/metrics.go +++ b/go-libp2p/p2p/host/peerstore/metrics.go @@ -47,8 +47,9 @@ func (m *metrics) RecordLatency(p peer.ID, next time.Duration) { // of all measurements of a peer's latency. func (m *metrics) LatencyEWMA(p peer.ID) time.Duration { m.mutex.RLock() - defer m.mutex.RUnlock() - return m.latmap[p] + l := m.latmap[p] + m.mutex.RUnlock() + return l } func (m *metrics) RemovePeer(p peer.ID) { diff --git a/go-libp2p/p2p/host/peerstore/pstoreds/addr_book.go b/go-libp2p/p2p/host/peerstore/pstoreds/addr_book.go index 2023de1..1d67d36 100644 --- a/go-libp2p/p2p/host/peerstore/pstoreds/addr_book.go +++ b/go-libp2p/p2p/host/peerstore/pstoreds/addr_book.go @@ -602,7 +602,11 @@ func cleanAddrs(addrs []ma.Multiaddr, pid peer.ID) []ma.Multiaddr { clean := make([]ma.Multiaddr, 0, len(addrs)) for _, addr := range addrs { // Remove suffix of /p2p/peer-id from address - addr, addrPid := peer.SplitAddr(addr) + addr, addrPid, err := peer.SplitAddr(addr) + if err != nil { + log.Warnw("Was passed a bad multiaddr", "peer", pid, "err", err) + continue + } if addr == nil { log.Warnw("Was passed a nil multiaddr", "peer", pid) continue diff --git a/go-libp2p/p2p/host/peerstore/pstoreds/addr_book_gc.go b/go-libp2p/p2p/host/peerstore/pstoreds/addr_book_gc.go index 2e35e7a..80929f6 100644 --- a/go-libp2p/p2p/host/peerstore/pstoreds/addr_book_gc.go +++ b/go-libp2p/p2p/host/peerstore/pstoreds/addr_book_gc.go @@ -92,24 +92,23 @@ func newAddressBookGc(ctx context.Context, ab *dsAddrBook) (*dsAddrBookGc, error // gc prunes expired addresses from the datastore at regular intervals. It should be spawned as a goroutine. func (gc *dsAddrBookGc) background() { - defer gc.ab.childrenDone.Done() select { case <-gc.ab.clock.After(gc.ab.opts.GCInitialDelay): case <-gc.ab.ctx.Done(): // yield if we have been cancelled/closed before the delay elapses. 
+ gc.ab.childrenDone.Done() return } purgeTimer := time.NewTicker(gc.ab.opts.GCPurgeInterval) - defer purgeTimer.Stop() var lookaheadCh <-chan time.Time + var lookaheadTimer *time.Ticker if gc.lookaheadEnabled { - lookaheadTimer := time.NewTicker(gc.ab.opts.GCLookaheadInterval) + lookaheadTimer = time.NewTicker(gc.ab.opts.GCLookaheadInterval) lookaheadCh = lookaheadTimer.C gc.populateLookahead() // do a lookahead now - defer lookaheadTimer.Stop() } for { @@ -122,6 +121,11 @@ func (gc *dsAddrBookGc) background() { gc.populateLookahead() case <-gc.ctx.Done(): + gc.ab.childrenDone.Done() + purgeTimer.Stop() + if lookaheadTimer != nil { + lookaheadTimer.Stop() + } return } } diff --git a/go-libp2p/p2p/host/peerstore/pstoremem/addr_book.go b/go-libp2p/p2p/host/peerstore/pstoremem/addr_book.go index 209937c..940c3f2 100644 --- a/go-libp2p/p2p/host/peerstore/pstoremem/addr_book.go +++ b/go-libp2p/p2p/host/peerstore/pstoremem/addr_book.go @@ -108,15 +108,15 @@ func WithClock(clock clock) AddrBookOption { // background periodically schedules a gc func (mab *memoryAddrBook) background(ctx context.Context) { - defer mab.refCount.Done() ticker := time.NewTicker(1 * time.Hour) - defer ticker.Stop() for { select { case <-ticker.C: mab.gc() case <-ctx.Done(): + mab.refCount.Done() + ticker.Stop() return } } @@ -203,9 +203,10 @@ func (mab *memoryAddrBook) ConsumePeerRecord(recordEnvelope *record.Envelope, tt // ensure seq is greater than, or equal to, the last received s := mab.segments.get(rec.PeerID) s.Lock() - defer s.Unlock() + lastState, found := s.signedPeerRecords[rec.PeerID] if found && lastState.Seq > rec.Seq { + s.Unlock() return false, nil } s.signedPeerRecords[rec.PeerID] = &peerRecordState{ @@ -213,15 +214,15 @@ func (mab *memoryAddrBook) ConsumePeerRecord(recordEnvelope *record.Envelope, tt Seq: rec.Seq, } mab.addAddrsUnlocked(s, rec.PeerID, rec.Addrs, ttl, true) + s.Unlock() return true, nil } func (mab *memoryAddrBook) addAddrs(p peer.ID, addrs []ma.Multiaddr, ttl time.Duration) { s := mab.segments.get(p) s.Lock() - defer s.Unlock() - mab.addAddrsUnlocked(s, p, addrs, ttl, false) + s.Unlock() } func (mab *memoryAddrBook) addAddrsUnlocked(s *addrSegment, p peer.ID, addrs []ma.Multiaddr, ttl time.Duration, signed bool) { @@ -239,7 +240,11 @@ func (mab *memoryAddrBook) addAddrsUnlocked(s *addrSegment, p peer.ID, addrs []m exp := mab.clock.Now().Add(ttl) for _, addr := range addrs { // Remove suffix of /p2p/peer-id from address - addr, addrPid := peer.SplitAddr(addr) + addr, addrPid, err := peer.SplitAddr(addr) + if err != nil { + log.Warnw("Was passed multiaddr with err", "peer", p, "err", err) + continue + } if addr == nil { log.Warnw("Was passed nil multiaddr", "peer", p) continue @@ -278,7 +283,6 @@ func (mab *memoryAddrBook) SetAddr(p peer.ID, addr ma.Multiaddr, ttl time.Durati func (mab *memoryAddrBook) SetAddrs(p peer.ID, addrs []ma.Multiaddr, ttl time.Duration) { s := mab.segments.get(p) s.Lock() - defer s.Unlock() amap, ok := s.addrs[p] if !ok { @@ -288,7 +292,11 @@ func (mab *memoryAddrBook) SetAddrs(p peer.ID, addrs []ma.Multiaddr, ttl time.Du exp := mab.clock.Now().Add(ttl) for _, addr := range addrs { - addr, addrPid := peer.SplitAddr(addr) + addr, addrPid, err := peer.SplitAddr(addr) + if err != nil { + log.Warnw("Was passed multiaddr with err", "peer", p, "err", err) + continue + } if addr == nil { log.Warnw("was passed nil multiaddr", "peer", p) continue @@ -308,6 +316,7 @@ func (mab *memoryAddrBook) SetAddrs(p peer.ID, addrs []ma.Multiaddr, ttl time.Du delete(amap, key) } } + 
s.Unlock() } // UpdateAddrs updates the addresses associated with the given peer that have @@ -315,10 +324,11 @@ func (mab *memoryAddrBook) SetAddrs(p peer.ID, addrs []ma.Multiaddr, ttl time.Du func (mab *memoryAddrBook) UpdateAddrs(p peer.ID, oldTTL time.Duration, newTTL time.Duration) { s := mab.segments.get(p) s.Lock() - defer s.Unlock() + exp := mab.clock.Now().Add(newTTL) amap, found := s.addrs[p] if !found { + s.Unlock() return } @@ -333,15 +343,17 @@ func (mab *memoryAddrBook) UpdateAddrs(p peer.ID, oldTTL time.Duration, newTTL t } } } + s.Unlock() } // Addrs returns all known (and valid) addresses for a given peer func (mab *memoryAddrBook) Addrs(p peer.ID) []ma.Multiaddr { s := mab.segments.get(p) s.RLock() - defer s.RUnlock() - return validAddrs(mab.clock.Now(), s.addrs[p]) + v := validAddrs(mab.clock.Now(), s.addrs[p]) + s.RUnlock() + return v } func validAddrs(now time.Time, amap map[string]*expiringAddr) []ma.Multiaddr { @@ -364,19 +376,21 @@ func validAddrs(now time.Time, amap map[string]*expiringAddr) []ma.Multiaddr { func (mab *memoryAddrBook) GetPeerRecord(p peer.ID) *record.Envelope { s := mab.segments.get(p) s.RLock() - defer s.RUnlock() // although the signed record gets garbage collected when all addrs inside it are expired, // we may be in between the expiration time and the GC interval // so, we check to see if we have any valid signed addrs before returning the record if len(validAddrs(mab.clock.Now(), s.addrs[p])) == 0 { + s.RUnlock() return nil } state := s.signedPeerRecords[p] if state == nil { + s.RUnlock() return nil } + s.RUnlock() return state.Envelope } @@ -384,10 +398,10 @@ func (mab *memoryAddrBook) GetPeerRecord(p peer.ID) *record.Envelope { func (mab *memoryAddrBook) ClearAddrs(p peer.ID) { s := mab.segments.get(p) s.Lock() - defer s.Unlock() delete(s.addrs, p) delete(s.signedPeerRecords, p) + s.Unlock() } // AddrStream returns a channel on which all new addresses discovered for a @@ -395,15 +409,15 @@ func (mab *memoryAddrBook) ClearAddrs(p peer.ID) { func (mab *memoryAddrBook) AddrStream(ctx context.Context, p peer.ID) <-chan ma.Multiaddr { s := mab.segments.get(p) s.RLock() - defer s.RUnlock() baseaddrslice := s.addrs[p] initial := make([]ma.Multiaddr, 0, len(baseaddrslice)) for _, a := range baseaddrslice { initial = append(initial, a.Addr) } - - return mab.subManager.AddrStream(ctx, p, initial) + c := mab.subManager.AddrStream(ctx, p, initial) + s.RUnlock() + return c } type addrSub struct { @@ -436,14 +450,15 @@ func NewAddrSubManager() *AddrSubManager { // from the manager. func (mgr *AddrSubManager) removeSub(p peer.ID, s *addrSub) { mgr.mu.Lock() - defer mgr.mu.Unlock() subs := mgr.subs[p] if len(subs) == 1 { if subs[0] != s { + mgr.mu.Unlock() return } delete(mgr.subs, p) + mgr.mu.Unlock() return } @@ -452,21 +467,23 @@ func (mgr *AddrSubManager) removeSub(p peer.ID, s *addrSub) { subs[i] = subs[len(subs)-1] subs[len(subs)-1] = nil mgr.subs[p] = subs[:len(subs)-1] + mgr.mu.Unlock() return } } + mgr.mu.Unlock() } // BroadcastAddr broadcasts a new address to all subscribed streams. 
func (mgr *AddrSubManager) BroadcastAddr(p peer.ID, addr ma.Multiaddr) { mgr.mu.RLock() - defer mgr.mu.RUnlock() if subs, ok := mgr.subs[p]; ok { for _, sub := range subs { sub.pubAddr(addr) } } + mgr.mu.RUnlock() } // AddrStream creates a new subscription for a given peer ID, pre-populating the @@ -482,8 +499,6 @@ func (mgr *AddrSubManager) AddrStream(ctx context.Context, p peer.ID, initial [] sort.Sort(addrList(initial)) go func(buffer []ma.Multiaddr) { - defer close(out) - sent := make(map[string]struct{}, len(buffer)) for _, a := range buffer { sent[string(a.Bytes())] = struct{}{} @@ -521,6 +536,7 @@ func (mgr *AddrSubManager) AddrStream(ctx context.Context, p peer.ID, initial [] } case <-ctx.Done(): mgr.removeSub(p, sub) + close(out) return } } diff --git a/go-libp2p/p2p/host/peerstore/pstoremem/keybook.go b/go-libp2p/p2p/host/peerstore/pstoremem/keybook.go index f995a08..b560892 100644 --- a/go-libp2p/p2p/host/peerstore/pstoremem/keybook.go +++ b/go-libp2p/p2p/host/peerstore/pstoremem/keybook.go @@ -69,8 +69,9 @@ func (mkb *memoryKeyBook) AddPubKey(p peer.ID, pk ic.PubKey) error { func (mkb *memoryKeyBook) PrivKey(p peer.ID) ic.PrivKey { mkb.RLock() - defer mkb.RUnlock() - return mkb.sks[p] + i := mkb.sks[p] + mkb.RUnlock() + return i } func (mkb *memoryKeyBook) AddPrivKey(p peer.ID, sk ic.PrivKey) error { diff --git a/go-libp2p/p2p/host/peerstore/pstoremem/metadata.go b/go-libp2p/p2p/host/peerstore/pstoremem/metadata.go index 305c741..b536ad8 100644 --- a/go-libp2p/p2p/host/peerstore/pstoremem/metadata.go +++ b/go-libp2p/p2p/host/peerstore/pstoremem/metadata.go @@ -23,27 +23,31 @@ func NewPeerMetadata() *memoryPeerMetadata { func (ps *memoryPeerMetadata) Put(p peer.ID, key string, val interface{}) error { ps.dslock.Lock() - defer ps.dslock.Unlock() + m, ok := ps.ds[p] if !ok { m = make(map[string]interface{}) ps.ds[p] = m } m[key] = val + ps.dslock.Unlock() return nil } func (ps *memoryPeerMetadata) Get(p peer.ID, key string) (interface{}, error) { ps.dslock.RLock() - defer ps.dslock.RUnlock() + m, ok := ps.ds[p] if !ok { + ps.dslock.RUnlock() return nil, pstore.ErrNotFound } val, ok := m[key] if !ok { + ps.dslock.RUnlock() return nil, pstore.ErrNotFound } + ps.dslock.RUnlock() return val, nil } diff --git a/go-libp2p/p2p/host/peerstore/pstoremem/protobook.go b/go-libp2p/p2p/host/peerstore/pstoremem/protobook.go index 51c4b02..b8440ab 100644 --- a/go-libp2p/p2p/host/peerstore/pstoremem/protobook.go +++ b/go-libp2p/p2p/host/peerstore/pstoremem/protobook.go @@ -76,15 +76,16 @@ func (pb *memoryProtoBook) internProtocol(proto protocol.ID) protocol.ID { // intern with the write lock pb.lk.Lock() - defer pb.lk.Unlock() // check again in case it got interned in between locks interned, ok = pb.interned[proto] if ok { + pb.lk.Unlock() return interned } pb.interned[proto] = proto + pb.lk.Unlock() return proto } @@ -109,7 +110,6 @@ func (pb *memoryProtoBook) SetProtocols(p peer.ID, protos ...protocol.ID) error func (pb *memoryProtoBook) AddProtocols(p peer.ID, protos ...protocol.ID) error { s := pb.segments.get(p) s.Lock() - defer s.Unlock() protomap, ok := s.protocols[p] if !ok { @@ -117,49 +117,51 @@ func (pb *memoryProtoBook) AddProtocols(p peer.ID, protos ...protocol.ID) error s.protocols[p] = protomap } if len(protomap)+len(protos) > pb.maxProtos { + s.Unlock() return errTooManyProtocols } for _, proto := range protos { protomap[pb.internProtocol(proto)] = struct{}{} } + s.Unlock() return nil } func (pb *memoryProtoBook) GetProtocols(p peer.ID) ([]protocol.ID, error) { s := 
pb.segments.get(p) s.RLock() - defer s.RUnlock() out := make([]protocol.ID, 0, len(s.protocols[p])) for k := range s.protocols[p] { out = append(out, k) } + s.RUnlock() return out, nil } func (pb *memoryProtoBook) RemoveProtocols(p peer.ID, protos ...protocol.ID) error { s := pb.segments.get(p) s.Lock() - defer s.Unlock() protomap, ok := s.protocols[p] if !ok { // nothing to remove. + s.Unlock() return nil } for _, proto := range protos { delete(protomap, pb.internProtocol(proto)) } + s.Unlock() return nil } func (pb *memoryProtoBook) SupportsProtocols(p peer.ID, protos ...protocol.ID) ([]protocol.ID, error) { s := pb.segments.get(p) s.RLock() - defer s.RUnlock() out := make([]protocol.ID, 0, len(protos)) for _, proto := range protos { @@ -168,19 +170,21 @@ func (pb *memoryProtoBook) SupportsProtocols(p peer.ID, protos ...protocol.ID) ( } } + s.RUnlock() return out, nil } func (pb *memoryProtoBook) FirstSupportedProtocol(p peer.ID, protos ...protocol.ID) (protocol.ID, error) { s := pb.segments.get(p) s.RLock() - defer s.RUnlock() for _, proto := range protos { if _, ok := s.protocols[p][proto]; ok { + s.RUnlock() return proto, nil } } + s.RUnlock() return "", nil } diff --git a/go-libp2p/p2p/host/peerstore/pstoremem/sorting_test.go b/go-libp2p/p2p/host/peerstore/pstoremem/sorting_test.go index 82c76ef..89b5325 100644 --- a/go-libp2p/p2p/host/peerstore/pstoremem/sorting_test.go +++ b/go-libp2p/p2p/host/peerstore/pstoremem/sorting_test.go @@ -9,10 +9,10 @@ import ( ) func TestAddressSorting(t *testing.T) { - u1 := ma.StringCast("/ip4/152.12.23.53/udp/1234/utp") - u2l := ma.StringCast("/ip4/127.0.0.1/udp/1234/utp") - local := ma.StringCast("/ip4/127.0.0.1/tcp/1234") - norm := ma.StringCast("/ip4/6.5.4.3/tcp/1234") + u1, _ := ma.StringCast("/ip4/152.12.23.53/udp/1234/utp") + u2l, _ := ma.StringCast("/ip4/127.0.0.1/udp/1234/utp") + local, _ := ma.StringCast("/ip4/127.0.0.1/tcp/1234") + norm, _ := ma.StringCast("/ip4/6.5.4.3/tcp/1234") l := addrList{local, u1, u2l, norm} sort.Sort(l) diff --git a/go-libp2p/p2p/host/resource-manager/allowlist.go b/go-libp2p/p2p/host/resource-manager/allowlist.go index d2bdb86..d2bb842 100644 --- a/go-libp2p/p2p/host/resource-manager/allowlist.go +++ b/go-libp2p/p2p/host/resource-manager/allowlist.go @@ -52,8 +52,13 @@ func toIPNet(ma multiaddr.Multiaddr) (*net.IPNet, peer.ID, error) { var allowedPeerStr string var allowedPeer peer.ID var isIPV4 bool + var err error - multiaddr.ForEach(ma, func(c multiaddr.Component) bool { + multiaddr.ForEach(ma, func(c multiaddr.Component, e error) bool { + if e != nil { + err = e + return false + } if c.Protocol().Code == multiaddr.P_IP4 || c.Protocol().Code == multiaddr.P_IP6 { isIPV4 = c.Protocol().Code == multiaddr.P_IP4 ipString = c.Value() @@ -67,6 +72,10 @@ func toIPNet(ma multiaddr.Multiaddr) (*net.IPNet, peer.ID, error) { return ipString == "" || mask == "" || allowedPeerStr == "" }) + if err != nil { + return nil, allowedPeer, err + } + if ipString == "" { return nil, allowedPeer, errors.New("missing ip address") } @@ -170,10 +179,10 @@ func (al *Allowlist) Allowed(ma multiaddr.Multiaddr) bool { return false } al.mu.RLock() - defer al.mu.RUnlock() for _, network := range al.allowedNetworks { if network.Contains(ip) { + al.mu.RUnlock() return true } } @@ -181,11 +190,12 @@ func (al *Allowlist) Allowed(ma multiaddr.Multiaddr) bool { for _, allowedNetworks := range al.allowedPeerByNetwork { for _, network := range allowedNetworks { if network.Contains(ip) { + al.mu.RUnlock() return true } } } - + al.mu.RUnlock() return 
false } @@ -195,11 +205,11 @@ func (al *Allowlist) AllowedPeerAndMultiaddr(peerID peer.ID, ma multiaddr.Multia return false } al.mu.RLock() - defer al.mu.RUnlock() for _, network := range al.allowedNetworks { if network.Contains(ip) { // We found a match that isn't constrained by a peerID + al.mu.RUnlock() return true } } @@ -207,10 +217,11 @@ func (al *Allowlist) AllowedPeerAndMultiaddr(peerID peer.ID, ma multiaddr.Multia if expectedNetworks, ok := al.allowedPeerByNetwork[peerID]; ok { for _, expectedNetwork := range expectedNetworks { if expectedNetwork.Contains(ip) { + al.mu.RUnlock() return true } } } - + al.mu.RUnlock() return false } diff --git a/go-libp2p/p2p/host/resource-manager/allowlist_test.go b/go-libp2p/p2p/host/resource-manager/allowlist_test.go index d665b63..6481d28 100644 --- a/go-libp2p/p2p/host/resource-manager/allowlist_test.go +++ b/go-libp2p/p2p/host/resource-manager/allowlist_test.go @@ -19,13 +19,16 @@ func ExampleWithAllowlistedMultiaddrs() { } limits := DefaultLimits.AutoScale() + m1, _ := multiaddr.StringCast("/ip4/1.2.3.4") + m2, _ := multiaddr.StringCast("/ip4/2.2.3.4/p2p/" + somePeer.String()) + m3, _ := multiaddr.StringCast("/ip4/1.2.3.0/ipcidr/24") rcmgr, err := NewResourceManager(NewFixedLimiter(limits), WithAllowlistedMultiaddrs([]multiaddr.Multiaddr{ // Any peer connecting from this IP address - multiaddr.StringCast("/ip4/1.2.3.4"), + m1, // Only the specified peer from this address - multiaddr.StringCast("/ip4/2.2.3.4/p2p/" + somePeer.String()), + m2, // Only peers from this 1.2.3.0/24 IP address range - multiaddr.StringCast("/ip4/1.2.3.0/ipcidr/24"), + m3, })) if err != nil { panic("Failed to start resource manager") @@ -37,7 +40,7 @@ func ExampleWithAllowlistedMultiaddrs() { func TestAllowedSimple(t *testing.T) { allowlist := newAllowlist() - ma := multiaddr.StringCast("/ip4/1.2.3.4/tcp/1234") + ma, _ := multiaddr.StringCast("/ip4/1.2.3.4/tcp/1234") err := allowlist.Add(ma) if err != nil { t.Fatalf("failed to add ip4: %s", err) @@ -62,8 +65,8 @@ func TestAllowedWithPeer(t *testing.T) { peerA := test.RandPeerIDFatal(t) peerB := test.RandPeerIDFatal(t) - multiaddrA := multiaddr.StringCast("/ip4/1.2.3.4/tcp/1234") - multiaddrB := multiaddr.StringCast("/ip4/2.2.3.4/tcp/1234") + multiaddrA, _ := multiaddr.StringCast("/ip4/1.2.3.4/tcp/1234") + multiaddrB, _ := multiaddr.StringCast("/ip4/2.2.3.4/tcp/1234") testcases := []testcase{ { @@ -183,7 +186,7 @@ func TestRemoved(t *testing.T) { allowedMA string } peerA := test.RandPeerIDFatal(t) - maA := multiaddr.StringCast("/ip4/1.2.3.4") + maA, _ := multiaddr.StringCast("/ip4/1.2.3.4") testCases := []testCase{ {name: "ip4", allowedMA: "/ip4/1.2.3.4"}, @@ -195,7 +198,7 @@ func TestRemoved(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { allowlist := newAllowlist() - ma := multiaddr.StringCast(tc.allowedMA) + ma, _ := multiaddr.StringCast(tc.allowedMA) err := allowlist.Add(ma) if err != nil { @@ -256,9 +259,9 @@ func BenchmarkAllowlistCheck(b *testing.B) { var ma multiaddr.Multiaddr if i%ratioOfSpecifiedPeers == 0 { - ma = multiaddr.StringCast(ipString + "/p2p/" + test.RandPeerIDFatal(b).String()) + ma, _ = multiaddr.StringCast(ipString + "/p2p/" + test.RandPeerIDFatal(b).String()) } else { - ma = multiaddr.StringCast(ipString) + ma, _ = multiaddr.StringCast(ipString) } if err != nil { b.Fatalf("Failed to generate multiaddr: %v", ipString) diff --git a/go-libp2p/p2p/host/resource-manager/conn_limiter.go b/go-libp2p/p2p/host/resource-manager/conn_limiter.go index 717249e..ce55656 
100644 --- a/go-libp2p/p2p/host/resource-manager/conn_limiter.go +++ b/go-libp2p/p2p/host/resource-manager/conn_limiter.go @@ -130,7 +130,6 @@ func newConnLimiter() *connLimiter { func (cl *connLimiter) addNetworkPrefixLimit(isIP6 bool, npLimit NetworkPrefixLimit) { cl.mu.Lock() - defer cl.mu.Unlock() if isIP6 { cl.networkPrefixLimitV6 = append(cl.networkPrefixLimitV6, npLimit) cl.networkPrefixLimitV6 = sortNetworkPrefixes(cl.networkPrefixLimitV6) @@ -138,12 +137,13 @@ func (cl *connLimiter) addNetworkPrefixLimit(isIP6 bool, npLimit NetworkPrefixLi cl.networkPrefixLimitV4 = append(cl.networkPrefixLimitV4, npLimit) cl.networkPrefixLimitV4 = sortNetworkPrefixes(cl.networkPrefixLimitV4) } + cl.mu.Unlock() } // addConn adds a connection for the given IP address. It returns true if the connection is allowed. func (cl *connLimiter) addConn(ip netip.Addr) bool { cl.mu.Lock() - defer cl.mu.Unlock() + networkPrefixLimits := cl.networkPrefixLimitV4 connsPerNetworkPrefix := cl.connsPerNetworkPrefixV4 limits := cl.connLimitPerSubnetV4 @@ -170,11 +170,13 @@ func (cl *connLimiter) addConn(ip netip.Addr) bool { for i, limit := range networkPrefixLimits { if limit.Network.Contains(ip) { if connsPerNetworkPrefix[i]+1 > limit.ConnCount { + cl.mu.Unlock() return false } connsPerNetworkPrefix[i]++ // Done. If we find a match in the network prefix limits, we use // that and don't use the general subnet limits. + cl.mu.Unlock() return true } } @@ -191,6 +193,7 @@ func (cl *connLimiter) addConn(ip netip.Addr) bool { for i, limit := range limits { prefix, err := ip.Prefix(limit.PrefixLength) if err != nil { + cl.mu.Unlock() return false } masked := prefix.String() @@ -202,6 +205,7 @@ func (cl *connLimiter) addConn(ip netip.Addr) bool { connsPerLimit[i][masked] = 0 } if counts+1 > limit.ConnCount { + cl.mu.Unlock() return false } } @@ -213,12 +217,12 @@ func (cl *connLimiter) addConn(ip netip.Addr) bool { connsPerLimit[i][masked]++ } + cl.mu.Unlock() return true } func (cl *connLimiter) rmConn(ip netip.Addr) { cl.mu.Lock() - defer cl.mu.Unlock() networkPrefixLimits := cl.networkPrefixLimitV4 connsPerNetworkPrefix := cl.connsPerNetworkPrefixV4 limits := cl.connLimitPerSubnetV4 @@ -247,10 +251,12 @@ func (cl *connLimiter) rmConn(ip netip.Addr) { count := connsPerNetworkPrefix[i] if count <= 0 { log.Errorf("unexpected conn count for ip %s. Was this not added with addConn first?", ip) + cl.mu.Unlock() return } connsPerNetworkPrefix[i]-- // Done. We updated the count in the defined network prefix limit. 
+ cl.mu.Unlock() return } } @@ -285,4 +291,5 @@ func (cl *connLimiter) rmConn(ip netip.Addr) { delete(connsPerLimit[i], masked) } } + cl.mu.Unlock() } diff --git a/go-libp2p/p2p/host/resource-manager/rcmgr.go b/go-libp2p/p2p/host/resource-manager/rcmgr.go index 2eb1b3a..f6206de 100644 --- a/go-libp2p/p2p/host/resource-manager/rcmgr.go +++ b/go-libp2p/p2p/host/resource-manager/rcmgr.go @@ -240,28 +240,30 @@ func (r *resourceManager) ViewTransient(f func(network.ResourceScope) error) err func (r *resourceManager) ViewService(srv string, f func(network.ServiceScope) error) error { s := r.getServiceScope(srv) - defer s.DecRef() + e := f(s) + s.DecRef() - return f(s) + return e } func (r *resourceManager) ViewProtocol(proto protocol.ID, f func(network.ProtocolScope) error) error { s := r.getProtocolScope(proto) - defer s.DecRef() + e := f(s) + s.DecRef() - return f(s) + return e } func (r *resourceManager) ViewPeer(p peer.ID, f func(network.PeerScope) error) error { s := r.getPeerScope(p) - defer s.DecRef() + e := f(s) + s.DecRef() - return f(s) + return e } func (r *resourceManager) getServiceScope(svc string) *serviceScope { r.mx.Lock() - defer r.mx.Unlock() s, ok := r.svc[svc] if !ok { @@ -270,12 +272,12 @@ func (r *resourceManager) getServiceScope(svc string) *serviceScope { } s.IncRef() + r.mx.Unlock() return s } func (r *resourceManager) getProtocolScope(proto protocol.ID) *protocolScope { r.mx.Lock() - defer r.mx.Unlock() s, ok := r.proto[proto] if !ok { @@ -284,22 +286,22 @@ func (r *resourceManager) getProtocolScope(proto protocol.ID) *protocolScope { } s.IncRef() + r.mx.Unlock() return s } func (r *resourceManager) setStickyProtocol(proto protocol.ID) { r.mx.Lock() - defer r.mx.Unlock() if r.stickyProto == nil { r.stickyProto = make(map[protocol.ID]struct{}) } r.stickyProto[proto] = struct{}{} + r.mx.Unlock() } func (r *resourceManager) getPeerScope(p peer.ID) *peerScope { r.mx.Lock() - defer r.mx.Unlock() s, ok := r.peer[p] if !ok { @@ -308,33 +310,36 @@ func (r *resourceManager) getPeerScope(p peer.ID) *peerScope { } s.IncRef() + r.mx.Unlock() return s } func (r *resourceManager) setStickyPeer(p peer.ID) { r.mx.Lock() - defer r.mx.Unlock() if r.stickyPeer == nil { r.stickyPeer = make(map[peer.ID]struct{}) } r.stickyPeer[p] = struct{}{} + r.mx.Unlock() } func (r *resourceManager) nextConnId() int64 { r.mx.Lock() - defer r.mx.Unlock() r.connId++ - return r.connId + id := r.connId + r.mx.Unlock() + return id } func (r *resourceManager) nextStreamId() int64 { r.mx.Lock() - defer r.mx.Unlock() r.streamId++ - return r.streamId + id := r.streamId + r.mx.Unlock() + return id } @@ -414,17 +417,16 @@ func (r *resourceManager) Close() error { } func (r *resourceManager) background() { - defer r.wg.Done() - // periodically garbage collects unused peer and protocol scopes ticker := time.NewTicker(time.Minute) - defer ticker.Stop() for { select { case <-ticker.C: r.gc() case <-r.cancelCtx.Done(): + ticker.Stop() + r.wg.Done() return } } @@ -432,7 +434,6 @@ func (r *resourceManager) background() { func (r *resourceManager) gc() { r.mx.Lock() - defer r.mx.Unlock() for proto, s := range r.proto { _, sticky := r.stickyProto[proto] @@ -482,6 +483,7 @@ func (r *resourceManager) gc() { } s.Unlock() } + r.mx.Unlock() } func newSystemScope(limit Limit, rcmgr *resourceManager, name string) *systemScope { @@ -633,11 +635,11 @@ func (s *serviceScope) Name() string { func (s *serviceScope) getPeerScope(p peer.ID) *resourceScope { s.Lock() - defer s.Unlock() ps, ok := s.peers[p] if ok { ps.IncRef() + s.Unlock() return ps } @@ -651,6 +653,7 @@ func (s *serviceScope) 
getPeerScope(p peer.ID) *resourceScope { s.peers[p] = ps ps.IncRef() + s.Unlock() return ps } @@ -660,11 +665,11 @@ func (s *protocolScope) Protocol() protocol.ID { func (s *protocolScope) getPeerScope(p peer.ID) *resourceScope { s.Lock() - defer s.Unlock() ps, ok := s.peers[p] if ok { ps.IncRef() + s.Unlock() return ps } @@ -678,6 +683,7 @@ func (s *protocolScope) getPeerScope(p peer.ID) *resourceScope { s.peers[p] = ps ps.IncRef() + s.Unlock() return ps } @@ -687,26 +693,30 @@ func (s *peerScope) Peer() peer.ID { func (s *connectionScope) PeerScope() network.PeerScope { s.Lock() - defer s.Unlock() // avoid nil is not nil footgun; go.... if s.peer == nil { + s.Unlock() return nil } - return s.peer + p := s.peer + s.Unlock() + return p } func (s *connectionScope) Done() { s.Lock() - defer s.Unlock() + if s.done { + s.Unlock() return } if s.ip.IsValid() { s.rcmgr.connLimiter.rmConn(s.ip) } s.resourceScope.doneUnlocked() + s.Unlock() } // transferAllowedToStandard transfers this connection scope from being part of @@ -731,15 +741,9 @@ func (s *connectionScope) transferAllowedToStandard() (err error) { } systemScope.IncRef() - // Undo this if we fail later - defer func() { - if err != nil { - systemScope.ReleaseForChild(stat) - systemScope.DecRef() - } - }() - if err := transientScope.ReserveForChild(stat); err != nil { + systemScope.ReleaseForChild(stat) + systemScope.DecRef() return err } transientScope.IncRef() @@ -754,9 +758,9 @@ func (s *connectionScope) SetPeer(p peer.ID) error { s.Lock() - defer s.Unlock() if s.peer != nil { + s.Unlock() return fmt.Errorf("connection scope already attached to a peer") } @@ -778,6 +782,7 @@ func (s *connectionScope) SetPeer(p peer.ID) error { // was _almost_ an allowlisted connection. if err := s.transferAllowedToStandard(); err != nil { // Failed to transfer this connection to the standard scopes + s.Unlock() return err } @@ -795,6 +800,7 @@ func (s *connectionScope) SetPeer(p peer.ID) error { s.peer.DecRef() s.peer = nil s.rcmgr.metrics.BlockPeer(p) + s.Unlock() return err } @@ -809,26 +815,29 @@ func (s *connectionScope) SetPeer(p peer.ID) error { s.resourceScope.edges = edges s.rcmgr.metrics.AllowPeer(p) + s.Unlock() return nil } func (s *streamScope) ProtocolScope() network.ProtocolScope { s.Lock() - defer s.Unlock() // avoid nil is not nil footgun; go.... if s.proto == nil { + s.Unlock() return nil } - return s.proto + sc := s.proto + s.Unlock() + return sc } func (s *streamScope) SetProtocol(proto protocol.ID) error { s.Lock() - defer s.Unlock() if s.proto != nil { + s.Unlock() return fmt.Errorf("stream scope already attached to a protocol") } @@ -840,6 +849,7 @@ func (s *streamScope) SetProtocol(proto protocol.ID) error { s.proto.DecRef() s.proto = nil s.rcmgr.metrics.BlockProtocol(proto) + s.Unlock() return err } @@ -851,6 +861,7 @@ func (s *streamScope) SetProtocol(proto protocol.ID) error { s.peerProtoScope.DecRef() s.peerProtoScope = nil s.rcmgr.metrics.BlockProtocolPeer(proto, s.peer.peer) + s.Unlock() return err } @@ -867,29 +878,33 @@ func (s *streamScope) SetProtocol(proto protocol.ID) error { s.resourceScope.edges = edges s.rcmgr.metrics.AllowProtocol(proto) + s.Unlock() return nil } func (s *streamScope) ServiceScope() network.ServiceScope { s.Lock() - defer s.Unlock() // avoid nil is not nil footgun; go....
if s.svc == nil { + s.Unlock() return nil } - return s.svc + svc := s.svc + s.Unlock() + return svc } func (s *streamScope) SetService(svc string) error { s.Lock() - defer s.Unlock() if s.svc != nil { + s.Unlock() return fmt.Errorf("stream scope already attached to a service") } if s.proto == nil { + s.Unlock() return fmt.Errorf("stream scope not attached to a protocol") } @@ -901,6 +916,7 @@ func (s *streamScope) SetService(svc string) error { s.svc.DecRef() s.svc = nil s.rcmgr.metrics.BlockService(svc) + s.Unlock() return err } @@ -913,6 +929,7 @@ func (s *streamScope) SetService(svc string) error { s.peerSvcScope.DecRef() s.peerSvcScope = nil s.rcmgr.metrics.BlockServicePeer(svc, s.peer.peer) + s.Unlock() return err } @@ -928,17 +945,20 @@ func (s *streamScope) SetService(svc string) error { s.resourceScope.edges = edges s.rcmgr.metrics.AllowService(svc) + s.Unlock() return nil } func (s *streamScope) PeerScope() network.PeerScope { s.Lock() - defer s.Unlock() // avoid nil is not nil footgun; go.... if s.peer == nil { + s.Unlock() return nil } - return s.peer + p := s.peer + s.Unlock() + return p } diff --git a/go-libp2p/p2p/host/resource-manager/rcmgr_test.go b/go-libp2p/p2p/host/resource-manager/rcmgr_test.go index bf79f7a..400477d 100644 --- a/go-libp2p/p2p/host/resource-manager/rcmgr_test.go +++ b/go-libp2p/p2p/host/resource-manager/rcmgr_test.go @@ -13,7 +13,7 @@ import ( "github.com/multiformats/go-multiaddr" ) -var dummyMA = multiaddr.StringCast("/ip4/1.2.3.4/tcp/1234") +var dummyMA, _ = multiaddr.StringCast("/ip4/1.2.3.4/tcp/1234") func TestResourceManager(t *testing.T) { peerA := peer.ID("A") @@ -1000,9 +1000,11 @@ func TestResourceManagerWithAllowlist(t *testing.T) { baseLimit.Apply(limits.allowlistedTransient) limits.allowlistedTransient = baseLimit + m1, _ := multiaddr.StringCast("/ip4/1.2.3.4") + m2, _ := multiaddr.StringCast("/ip4/4.3.2.1/p2p/" + peerA.String()) rcmgr, err := NewResourceManager(NewFixedLimiter(limits), WithAllowlistedMultiaddrs([]multiaddr.Multiaddr{ - multiaddr.StringCast("/ip4/1.2.3.4"), - multiaddr.StringCast("/ip4/4.3.2.1/p2p/" + peerA.String()), + m1, + m2, })) if err != nil { t.Fatal(err) } @@ -1014,14 +1016,17 @@ func TestResourceManagerWithAllowlist(t *testing.T) { t.Fatal("Expected to be able to get the allowlist") } + m3, _ := multiaddr.StringCast("/ip4/1.2.3.5") + m4, _ := multiaddr.StringCast("/ip4/1.2.3.4") + // A connection comes in from a non-allowlisted ip address - _, err = rcmgr.OpenConnection(network.DirInbound, true, multiaddr.StringCast("/ip4/1.2.3.5")) + _, err = rcmgr.OpenConnection(network.DirInbound, true, m3) if err == nil { t.Fatalf("Expected this to fail. err=%v", err) } // A connection comes in from an allowlisted ip address - connScope, err := rcmgr.OpenConnection(network.DirInbound, true, multiaddr.StringCast("/ip4/1.2.3.4")) + connScope, err := rcmgr.OpenConnection(network.DirInbound, true, m4) if err != nil { t.Fatal(err) } @@ -1031,8 +1036,11 @@ func TestResourceManagerWithAllowlist(t *testing.T) { t.Fatal(err) } + m5, _ := multiaddr.StringCast("/ip4/4.3.2.1") + m6, _ := multiaddr.StringCast("/ip4/4.3.2.1") + // A connection comes in that looks like it should be allowlisted, but then has the wrong peer id.
- connScope, err = rcmgr.OpenConnection(network.DirInbound, true, multiaddr.StringCast("/ip4/4.3.2.1")) + connScope, err = rcmgr.OpenConnection(network.DirInbound, true, m5) if err != nil { t.Fatal(err) } @@ -1043,7 +1051,7 @@ func TestResourceManagerWithAllowlist(t *testing.T) { } // A connection comes in that looks like it should be allowlisted, and it has the allowlisted peer id - connScope, err = rcmgr.OpenConnection(network.DirInbound, true, multiaddr.StringCast("/ip4/4.3.2.1")) + connScope, err = rcmgr.OpenConnection(network.DirInbound, true, m6) if err != nil { t.Fatal(err) } @@ -1060,9 +1068,12 @@ func TestAllowlistAndConnLimiterPlayNice(t *testing.T) { limits.allowlistedSystem.Conns = 8 limits.allowlistedSystem.ConnsInbound = 8 limits.allowlistedSystem.ConnsOutbound = 8 + m1, _ := multiaddr.StringCast("/ip4/1.2.3.0/ipcidr/24") + m2, _ := multiaddr.StringCast("/ip6/1:2:3::/ipcidr/58") + m3, _ := multiaddr.StringCast("/ip4/1.2.3.0/ipcidr/24") t.Run("IPv4", func(t *testing.T) { rcmgr, err := NewResourceManager(NewFixedLimiter(limits), WithAllowlistedMultiaddrs([]multiaddr.Multiaddr{ - multiaddr.StringCast("/ip4/1.2.3.0/ipcidr/24"), + m1, }), WithNetworkPrefixLimit([]NetworkPrefixLimit{}, []NetworkPrefixLimit{})) if err != nil { t.Fatal(err) @@ -1077,7 +1088,7 @@ func TestAllowlistAndConnLimiterPlayNice(t *testing.T) { }) t.Run("IPv6", func(t *testing.T) { rcmgr, err := NewResourceManager(NewFixedLimiter(limits), WithAllowlistedMultiaddrs([]multiaddr.Multiaddr{ - multiaddr.StringCast("/ip6/1:2:3::/ipcidr/58"), + m2, }), WithNetworkPrefixLimit([]NetworkPrefixLimit{}, []NetworkPrefixLimit{})) if err != nil { t.Fatal(err) @@ -1093,7 +1104,7 @@ func TestAllowlistAndConnLimiterPlayNice(t *testing.T) { t.Run("Does not override if you set a limit directly", func(t *testing.T) { rcmgr, err := NewResourceManager(NewFixedLimiter(limits), WithAllowlistedMultiaddrs([]multiaddr.Multiaddr{ - multiaddr.StringCast("/ip4/1.2.3.0/ipcidr/24"), + m3, }), WithNetworkPrefixLimit([]NetworkPrefixLimit{ {Network: netip.MustParsePrefix("1.2.3.0/24"), ConnCount: 1}, }, []NetworkPrefixLimit{})) diff --git a/go-libp2p/p2p/host/resource-manager/scope.go b/go-libp2p/p2p/host/resource-manager/scope.go index 8a6bc23..17003ec 100644 --- a/go-libp2p/p2p/host/resource-manager/scope.go +++ b/go-libp2p/p2p/host/resource-manager/scope.go @@ -338,9 +338,9 @@ func (s *resourceScope) wrapError(err error) error { func (s *resourceScope) ReserveMemory(size int, prio uint8) error { s.Lock() - defer s.Unlock() if s.done { + s.Unlock() return s.wrapError(network.ErrResourceScopeClosed) } @@ -348,17 +348,20 @@ func (s *resourceScope) ReserveMemory(size int, prio uint8) error { log.Debugw("blocked memory reservation", logValuesMemoryLimit(s.name, "", s.rc.stat(), err)...) 
s.trace.BlockReserveMemory(s.name, prio, int64(size), s.rc.memory) s.metrics.BlockMemory(size) + s.Unlock() return s.wrapError(err) } if err := s.reserveMemoryForEdges(size, prio); err != nil { s.rc.releaseMemory(int64(size)) s.metrics.BlockMemory(size) + s.Unlock() return s.wrapError(err) } s.trace.ReserveMemory(s.name, prio, int64(size), s.rc.memory) s.metrics.AllowMemory(size) + s.Unlock() return nil } diff --git a/go-libp2p/p2p/host/routed/routed_test.go b/go-libp2p/p2p/host/routed/routed_test.go index 66b8054..3898787 100644 --- a/go-libp2p/p2p/host/routed/routed_test.go +++ b/go-libp2p/p2p/host/routed/routed_test.go @@ -34,12 +34,13 @@ func TestRoutedHostConnectToObsoleteAddresses(t *testing.T) { require.NoError(t, err) defer h2.Close() + m, _ := ma.StringCast("/ip4/127.0.0.1/tcp/1234") // assemble the AddrInfo struct to use for the connection attempt pi := peer.AddrInfo{ ID: h2.ID(), // Use a wrong multi address for host 2, so that the initial connection attempt will fail // (we have obsolete, old multi address information) - Addrs: []ma.Multiaddr{ma.StringCast("/ip4/127.0.0.1/tcp/1234")}, + Addrs: []ma.Multiaddr{m}, } // Build mock routing module and replace the FindPeer function. @@ -70,12 +71,14 @@ func TestRoutedHostConnectFindPeerNoUsefulAddrs(t *testing.T) { require.NoError(t, err) defer h2.Close() + m1, _ := ma.StringCast("/ip4/127.0.0.1/tcp/1234") + // assemble the AddrInfo struct to use for the connection attempt pi := peer.AddrInfo{ ID: h2.ID(), // Use a wrong multi address for host 2, so that the initial connection attempt will fail // (we have obsolete, old multi address information) - Addrs: []ma.Multiaddr{ma.StringCast("/ip4/127.0.0.1/tcp/1234")}, + Addrs: []ma.Multiaddr{m1}, } // Build mock routing module and replace the FindPeer function. 
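The resource-manager and conn-limiter hunks above all apply the same locking discipline: take the mutex, unlock explicitly on every return path instead of deferring, and copy any shared field that outlives the critical section into a local before releasing the lock. A minimal sketch of that discipline, using a hypothetical counter type rather than code from this patch:

    import "sync"

    // counter is a hypothetical stand-in for resourceManager-style state
    // guarded by a mutex; it is not a type from this patch.
    type counter struct {
        mu sync.Mutex
        n  int64
    }

    // next increments and returns the new value with explicit unlocks.
    // The value is copied while the lock is still held: reading c.n after
    // mu.Unlock() could observe a concurrent increment and hand back a
    // duplicate id.
    func (c *counter) next() int64 {
        c.mu.Lock()
        c.n++
        n := c.n
        c.mu.Unlock()
        return n
    }

The trade-off is that every early return must release the lock by hand, which is why each error path in the hunks above gains its own Unlock call.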
diff --git a/go-libp2p/p2p/http/example_test.go b/go-libp2p/p2p/http/example_test.go index ca87fe4..6c563d1 100644 --- a/go-libp2p/p2p/http/example_test.go +++ b/go-libp2p/p2p/http/example_test.go @@ -14,10 +14,15 @@ import ( ma "github.com/multiformats/go-multiaddr" ) +func tStringCast(str string) ma.Multiaddr { + m, _ := ma.StringCast(str) + return m +} + func ExampleHost_withAStockGoHTTPClient() { server := libp2phttp.Host{ InsecureAllowHTTP: true, // For our example, we'll allow insecure HTTP - ListenAddrs: []ma.Multiaddr{ma.StringCast("/ip4/127.0.0.1/tcp/0/http")}, + ListenAddrs: []ma.Multiaddr{tStringCast("/ip4/127.0.0.1/tcp/0/http")}, } // A server with a simple echo protocol @@ -65,14 +70,14 @@ func ExampleHost_listenOnHTTPTransportAndStreams() { server := libp2phttp.Host{ InsecureAllowHTTP: true, // For our example, we'll allow insecure HTTP - ListenAddrs: []ma.Multiaddr{ma.StringCast("/ip4/127.0.0.1/tcp/0/http")}, + ListenAddrs: []ma.Multiaddr{tStringCast("/ip4/127.0.0.1/tcp/0/http")}, StreamHost: serverStreamHost, } go server.Serve() defer server.Close() for _, a := range server.Addrs() { - _, transport := ma.SplitLast(a) + _, transport, _ := ma.SplitLast(a) fmt.Printf("Server listening on transport: %s\n", transport) } // Output: Server listening on transport: /quic-v1 @@ -128,7 +133,7 @@ func ExampleHost_overLibp2pStreams() { func ExampleHost_Serve() { server := libp2phttp.Host{ InsecureAllowHTTP: true, // For our example, we'll allow insecure HTTP - ListenAddrs: []ma.Multiaddr{ma.StringCast("/ip4/127.0.0.1/tcp/50221/http")}, + ListenAddrs: []ma.Multiaddr{tStringCast("/ip4/127.0.0.1/tcp/50221/http")}, } go server.Serve() @@ -142,7 +147,7 @@ func ExampleHost_Serve() { func ExampleHost_SetHTTPHandler() { server := libp2phttp.Host{ InsecureAllowHTTP: true, // For our example, we'll allow insecure HTTP - ListenAddrs: []ma.Multiaddr{ma.StringCast("/ip4/127.0.0.1/tcp/0/http")}, + ListenAddrs: []ma.Multiaddr{tStringCast("/ip4/127.0.0.1/tcp/0/http")}, } server.SetHTTPHandler("/hello/1", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { @@ -176,7 +181,7 @@ func ExampleHost_SetHTTPHandler() { func ExampleHost_SetHTTPHandlerAtPath() { server := libp2phttp.Host{ InsecureAllowHTTP: true, // For our example, we'll allow insecure HTTP - ListenAddrs: []ma.Multiaddr{ma.StringCast("/ip4/127.0.0.1/tcp/0/http")}, + ListenAddrs: []ma.Multiaddr{tStringCast("/ip4/127.0.0.1/tcp/0/http")}, } server.SetHTTPHandlerAtPath("/hello/1", "/other-place/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { @@ -213,7 +218,7 @@ func ExampleHost_NamespacedClient() { // Create the server server := libp2phttp.Host{ InsecureAllowHTTP: true, // For our example, we'll allow insecure HTTP - ListenAddrs: []ma.Multiaddr{ma.StringCast("/ip4/127.0.0.1/tcp/0/http")}, + ListenAddrs: []ma.Multiaddr{tStringCast("/ip4/127.0.0.1/tcp/0/http")}, } server.SetHTTPHandlerAtPath("/hello/1", "/other-place/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { @@ -251,7 +256,7 @@ func ExampleHost_NamespaceRoundTripper() { // Create the server server := libp2phttp.Host{ InsecureAllowHTTP: true, // For our example, we'll allow insecure HTTP - ListenAddrs: []ma.Multiaddr{ma.StringCast("/ip4/127.0.0.1/tcp/0/http")}, + ListenAddrs: []ma.Multiaddr{tStringCast("/ip4/127.0.0.1/tcp/0/http")}, } server.SetHTTPHandler("/hello/1", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { @@ -295,7 +300,7 @@ func ExampleHost_NewConstrainedRoundTripper() { // Create the server server := libp2phttp.Host{ 
InsecureAllowHTTP: true, // For our example, we'll allow insecure HTTP - ListenAddrs: []ma.Multiaddr{ma.StringCast("/ip4/127.0.0.1/tcp/0/http")}, + ListenAddrs: []ma.Multiaddr{tStringCast("/ip4/127.0.0.1/tcp/0/http")}, } server.SetHTTPHandler("/hello/1", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { diff --git a/go-libp2p/p2p/http/libp2phttp.go b/go-libp2p/p2p/http/libp2phttp.go index 5ff025e..10ab9e1 100644 --- a/go-libp2p/p2p/http/libp2phttp.go +++ b/go-libp2p/p2p/http/libp2phttp.go @@ -249,13 +249,19 @@ func (h *Host) setupListeners(listenerErrCh chan error) error { var listenAddr ma.Multiaddr if parsedAddr.useHTTPS && parsedAddr.sni != "" && parsedAddr.sni != host { - listenAddr = ma.StringCast(fmt.Sprintf("/ip4/%s/tcp/%s/tls/sni/%s/http", host, port, parsedAddr.sni)) + listenAddr, err = ma.StringCast(fmt.Sprintf("/ip4/%s/tcp/%s/tls/sni/%s/http", host, port, parsedAddr.sni)) + if err != nil { + return err + } } else { scheme := "http" if parsedAddr.useHTTPS { scheme = "https" } - listenAddr = ma.StringCast(fmt.Sprintf("/ip4/%s/tcp/%s/%s", host, port, scheme)) + listenAddr, err = ma.StringCast(fmt.Sprintf("/ip4/%s/tcp/%s/%s", host, port, scheme)) + if err != nil { + return err + } } if parsedAddr.useHTTPS { @@ -720,7 +726,10 @@ type httpMultiaddr struct { func parseMultiaddr(addr ma.Multiaddr) httpMultiaddr { out := httpMultiaddr{} - ma.ForEach(addr, func(c ma.Component) bool { + ma.ForEach(addr, func(c ma.Component, e error) bool { + if e != nil { + return false + } switch c.Protocol().Code { case ma.P_IP4, ma.P_IP6, ma.P_DNS, ma.P_DNS4, ma.P_DNS6: out.host = c.Value() @@ -748,7 +757,7 @@ var tlsComponent, _ = ma.NewComponent("tls", "") // Returns a bool indicating if the input multiaddr has an http (or https) component. func normalizeHTTPMultiaddr(addr ma.Multiaddr) (ma.Multiaddr, bool) { isHTTPMultiaddr := false - beforeHTTPS, afterIncludingHTTPS := ma.SplitFunc(addr, func(c ma.Component) bool { + beforeHTTPS, afterIncludingHTTPS, err := ma.SplitFunc(addr, func(c ma.Component) bool { if c.Protocol().Code == ma.P_HTTP { isHTTPMultiaddr = true } @@ -760,12 +769,20 @@ func normalizeHTTPMultiaddr(addr ma.Multiaddr) (ma.Multiaddr, bool) { return false }) + if err != nil { + return addr, false + } + if afterIncludingHTTPS == nil { // No HTTPS component, just return the original return addr, isHTTPMultiaddr } - _, afterHTTPS := ma.SplitFirst(afterIncludingHTTPS) + _, afterHTTPS, err := ma.SplitFirst(afterIncludingHTTPS) + if err != nil { + return addr, false + } + if afterHTTPS == nil { return ma.Join(beforeHTTPS, tlsComponent, httpComponent), isHTTPMultiaddr } diff --git a/go-libp2p/p2p/http/libp2phttp_test.go b/go-libp2p/p2p/http/libp2phttp_test.go index a444c6e..e31278b 100644 --- a/go-libp2p/p2p/http/libp2phttp_test.go +++ b/go-libp2p/p2p/http/libp2phttp_test.go @@ -215,7 +215,7 @@ func TestRoundTrippers(t *testing.T) { httpHost := libp2phttp.Host{ InsecureAllowHTTP: true, StreamHost: serverHost, - ListenAddrs: []ma.Multiaddr{ma.StringCast("/ip4/127.0.0.1/tcp/0/http")}, + ListenAddrs: []ma.Multiaddr{tStringCast("/ip4/127.0.0.1/tcp/0/http")}, } httpHost.SetHTTPHandler("/hello", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { @@ -383,7 +383,7 @@ func TestPlainOldHTTPServer(t *testing.T) { name: "using libp2phttp", do: func(t *testing.T, request *http.Request) (*http.Response, error) { var clientHttpHost libp2phttp.Host - rt, err := clientHttpHost.NewConstrainedRoundTripper(peer.AddrInfo{Addrs: []ma.Multiaddr{ma.StringCast("/ip4/127.0.0.1/tcp/" + 
serverAddrParts[1] + "/http")}}) + rt, err := clientHttpHost.NewConstrainedRoundTripper(peer.AddrInfo{Addrs: []ma.Multiaddr{tStringCast("/ip4/127.0.0.1/tcp/" + serverAddrParts[1] + "/http")}}) require.NoError(t, err) client := &http.Client{Transport: rt} @@ -391,7 +391,7 @@ func TestPlainOldHTTPServer(t *testing.T) { }, getWellKnown: func(t *testing.T) (libp2phttp.PeerMeta, error) { var clientHttpHost libp2phttp.Host - rt, err := clientHttpHost.NewConstrainedRoundTripper(peer.AddrInfo{Addrs: []ma.Multiaddr{ma.StringCast("/ip4/127.0.0.1/tcp/" + serverAddrParts[1] + "/http")}}) + rt, err := clientHttpHost.NewConstrainedRoundTripper(peer.AddrInfo{Addrs: []ma.Multiaddr{tStringCast("/ip4/127.0.0.1/tcp/" + serverAddrParts[1] + "/http")}}) require.NoError(t, err) return rt.(libp2phttp.PeerMetadataGetter).GetPeerMetadata() }, @@ -450,7 +450,7 @@ func TestPlainOldHTTPServer(t *testing.T) { func TestHostZeroValue(t *testing.T) { server := libp2phttp.Host{ InsecureAllowHTTP: true, - ListenAddrs: []ma.Multiaddr{ma.StringCast("/ip4/127.0.0.1/tcp/0/http")}, + ListenAddrs: []ma.Multiaddr{tStringCast("/ip4/127.0.0.1/tcp/0/http")}, } server.SetHTTPHandler("/hello", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.Write([]byte("hello")) })) go func() { @@ -473,7 +473,7 @@ func TestHostZeroValue(t *testing.T) { func TestHTTPS(t *testing.T) { server := libp2phttp.Host{ TLSConfig: selfSignedTLSConfig(t), - ListenAddrs: []ma.Multiaddr{ma.StringCast("/ip4/127.0.0.1/tcp/0/https")}, + ListenAddrs: []ma.Multiaddr{tStringCast("/ip4/127.0.0.1/tcp/0/https")}, } server.SetHTTPHandler(httpping.PingProtocolID, httpping.Ping{}) go func() { @@ -535,7 +535,7 @@ func TestCustomServeMux(t *testing.T) { serveMux.Handle("/ping/", httpping.Ping{}) server := libp2phttp.Host{ - ListenAddrs: []ma.Multiaddr{ma.StringCast("/ip4/127.0.0.1/tcp/0/http")}, + ListenAddrs: []ma.Multiaddr{tStringCast("/ip4/127.0.0.1/tcp/0/http")}, ServeMux: serveMux, InsecureAllowHTTP: true, } @@ -595,7 +595,7 @@ func TestSetHandlerAtPath(t *testing.T) { nestedMx := http.NewServeMux() nestedMx.HandleFunc(tc.rest, hf) server := libp2phttp.Host{ - ListenAddrs: []ma.Multiaddr{ma.StringCast("/ip4/127.0.0.1/tcp/0/http")}, + ListenAddrs: []ma.Multiaddr{tStringCast("/ip4/127.0.0.1/tcp/0/http")}, InsecureAllowHTTP: true, } server.SetHTTPHandlerAtPath("test", tc.prefix, nestedMx) @@ -641,13 +641,13 @@ func TestServerLegacyWellKnownResource(t *testing.T) { t.Cleanup(func() { server.Close() }) addrPort, err := netip.ParseAddrPort(l.Addr().String()) require.NoError(t, err) - return ma.StringCast(fmt.Sprintf("/ip4/%s/tcp/%d/http", addrPort.Addr().String(), addrPort.Port())) + return tStringCast(fmt.Sprintf("/ip4/%s/tcp/%d/http", addrPort.Addr().String(), addrPort.Port())) } mkServerlibp2phttp := func(enableLegacyWellKnown bool) ma.Multiaddr { server := libp2phttp.Host{ EnableCompatibilityWithLegacyWellKnownEndpoint: enableLegacyWellKnown, - ListenAddrs: []ma.Multiaddr{ma.StringCast("/ip4/127.0.0.1/tcp/0/http")}, + ListenAddrs: []ma.Multiaddr{tStringCast("/ip4/127.0.0.1/tcp/0/http")}, InsecureAllowHTTP: true, } server.SetHTTPHandler(httpping.PingProtocolID, httpping.Ping{}) diff --git a/go-libp2p/p2p/metricshelper/conn.go b/go-libp2p/p2p/metricshelper/conn.go index b07016c..b207f58 100644 --- a/go-libp2p/p2p/metricshelper/conn.go +++ b/go-libp2p/p2p/metricshelper/conn.go @@ -13,9 +13,14 @@ func GetTransport(a ma.Multiaddr) string { return "other" } -func GetIPVersion(addr ma.Multiaddr) string { +func GetIPVersion(addr ma.Multiaddr) (string, error) { 
version := "unknown" - ma.ForEach(addr, func(c ma.Component) bool { + var err error + ma.ForEach(addr, func(c ma.Component, e error) bool { + if e != nil { + err = e + return false + } if c.Protocol().Code == ma.P_IP4 { version = "ip4" return false @@ -25,5 +30,5 @@ func GetIPVersion(addr ma.Multiaddr) string { } return true }) - return version + return version, err } diff --git a/go-libp2p/p2p/net/conngater/conngater_test.go b/go-libp2p/p2p/net/conngater/conngater_test.go index d60b5e4..7ddf45a 100644 --- a/go-libp2p/p2p/net/conngater/conngater_test.go +++ b/go-libp2p/p2p/net/conngater/conngater_test.go @@ -11,6 +11,11 @@ import ( ma "github.com/multiformats/go-multiaddr" ) +func tStringCast(str string) ma.Multiaddr { + m, _ := ma.StringCast(str) + return m +} + func TestConnectionGater(t *testing.T) { ds := datastore.NewMapDatastore() @@ -76,32 +81,32 @@ func TestConnectionGater(t *testing.T) { } // test addr and subnet blocking - allow = cg.InterceptAddrDial(peerB, ma.StringCast("/ip4/1.2.3.4/tcp/1234")) + allow = cg.InterceptAddrDial(peerB, tStringCast("/ip4/1.2.3.4/tcp/1234")) if !allow { t.Fatal("expected gater to allow peerB in 1.2.3.4") } - allow = cg.InterceptAccept(&mockConnMultiaddrs{local: nil, remote: ma.StringCast("/ip4/1.2.3.4/tcp/1234")}) + allow = cg.InterceptAccept(&mockConnMultiaddrs{local: nil, remote: tStringCast("/ip4/1.2.3.4/tcp/1234")}) if !allow { t.Fatal("expected gater to allow peerB in 1.2.3.4") } - allow = cg.InterceptAddrDial(peerB, ma.StringCast("/ip4/1.2.3.5/tcp/1234")) + allow = cg.InterceptAddrDial(peerB, tStringCast("/ip4/1.2.3.5/tcp/1234")) if !allow { t.Fatal("expected gater to allow peerB in 1.2.3.5") } - allow = cg.InterceptAccept(&mockConnMultiaddrs{local: nil, remote: ma.StringCast("/ip4/1.2.3.5/tcp/1234")}) + allow = cg.InterceptAccept(&mockConnMultiaddrs{local: nil, remote: tStringCast("/ip4/1.2.3.5/tcp/1234")}) if !allow { t.Fatal("expected gater to allow peerB in 1.2.3.5") } - allow = cg.InterceptAddrDial(peerB, ma.StringCast("/ip4/2.3.4.5/tcp/1234")) + allow = cg.InterceptAddrDial(peerB, tStringCast("/ip4/2.3.4.5/tcp/1234")) if !allow { t.Fatal("expected gater to allow peerB in 2.3.4.5") } - allow = cg.InterceptAccept(&mockConnMultiaddrs{local: nil, remote: ma.StringCast("/ip4/2.3.4.5/tcp/1234")}) + allow = cg.InterceptAccept(&mockConnMultiaddrs{local: nil, remote: tStringCast("/ip4/2.3.4.5/tcp/1234")}) if !allow { t.Fatal("expected gater to allow peerB in 2.3.4.5") } @@ -111,32 +116,32 @@ func TestConnectionGater(t *testing.T) { t.Fatal(err) } - allow = cg.InterceptAddrDial(peerB, ma.StringCast("/ip4/1.2.3.4/tcp/1234")) + allow = cg.InterceptAddrDial(peerB, tStringCast("/ip4/1.2.3.4/tcp/1234")) if allow { t.Fatal("expected gater to deny peerB in 1.2.3.4") } - allow = cg.InterceptAccept(&mockConnMultiaddrs{local: nil, remote: ma.StringCast("/ip4/1.2.3.4/tcp/1234")}) + allow = cg.InterceptAccept(&mockConnMultiaddrs{local: nil, remote: tStringCast("/ip4/1.2.3.4/tcp/1234")}) if allow { t.Fatal("expected gater to deny peerB in 1.2.3.4") } - allow = cg.InterceptAddrDial(peerB, ma.StringCast("/ip4/1.2.3.5/tcp/1234")) + allow = cg.InterceptAddrDial(peerB, tStringCast("/ip4/1.2.3.5/tcp/1234")) if !allow { t.Fatal("expected gater to allow peerB in 1.2.3.5") } - allow = cg.InterceptAccept(&mockConnMultiaddrs{local: nil, remote: ma.StringCast("/ip4/1.2.3.5/tcp/1234")}) + allow = cg.InterceptAccept(&mockConnMultiaddrs{local: nil, remote: tStringCast("/ip4/1.2.3.5/tcp/1234")}) if !allow { t.Fatal("expected gater to allow peerB in 1.2.3.5") } - allow = 
cg.InterceptAddrDial(peerB, ma.StringCast("/ip4/2.3.4.5/tcp/1234")) + allow = cg.InterceptAddrDial(peerB, tStringCast("/ip4/2.3.4.5/tcp/1234")) if !allow { t.Fatal("expected gater to allow peerB in 2.3.4.5") } - allow = cg.InterceptAccept(&mockConnMultiaddrs{local: nil, remote: ma.StringCast("/ip4/2.3.4.5/tcp/1234")}) + allow = cg.InterceptAccept(&mockConnMultiaddrs{local: nil, remote: tStringCast("/ip4/2.3.4.5/tcp/1234")}) if !allow { t.Fatal("expected gater to allow peerB in 2.3.4.5") } @@ -146,22 +151,22 @@ func TestConnectionGater(t *testing.T) { t.Fatal(err) } - allow = cg.InterceptAddrDial(peerB, ma.StringCast("/ip4/1.2.3.5/tcp/1234")) + allow = cg.InterceptAddrDial(peerB, tStringCast("/ip4/1.2.3.5/tcp/1234")) if allow { t.Fatal("expected gater to deny peerB in 1.2.3.5") } - allow = cg.InterceptAccept(&mockConnMultiaddrs{local: nil, remote: ma.StringCast("/ip4/1.2.3.5/tcp/1234")}) + allow = cg.InterceptAccept(&mockConnMultiaddrs{local: nil, remote: tStringCast("/ip4/1.2.3.5/tcp/1234")}) if allow { t.Fatal("expected gater to deny peerB in 1.2.3.5") } - allow = cg.InterceptAddrDial(peerB, ma.StringCast("/ip4/2.3.4.5/tcp/1234")) + allow = cg.InterceptAddrDial(peerB, tStringCast("/ip4/2.3.4.5/tcp/1234")) if !allow { t.Fatal("expected gater to allow peerB in 2.3.4.5") } - allow = cg.InterceptAccept(&mockConnMultiaddrs{local: nil, remote: ma.StringCast("/ip4/2.3.4.5/tcp/1234")}) + allow = cg.InterceptAccept(&mockConnMultiaddrs{local: nil, remote: tStringCast("/ip4/2.3.4.5/tcp/1234")}) if !allow { t.Fatal("expected gater to allow peerB in 2.3.4.5") } @@ -208,32 +213,32 @@ func TestConnectionGater(t *testing.T) { t.Fatal("expected gater to allow peerB") } - allow = cg.InterceptAddrDial(peerB, ma.StringCast("/ip4/1.2.3.4/tcp/1234")) + allow = cg.InterceptAddrDial(peerB, tStringCast("/ip4/1.2.3.4/tcp/1234")) if allow { t.Fatal("expected gater to deny peerB in 1.2.3.4") } - allow = cg.InterceptAccept(&mockConnMultiaddrs{local: nil, remote: ma.StringCast("/ip4/1.2.3.4/tcp/1234")}) + allow = cg.InterceptAccept(&mockConnMultiaddrs{local: nil, remote: tStringCast("/ip4/1.2.3.4/tcp/1234")}) if allow { t.Fatal("expected gater to deny peerB in 1.2.3.4") } - allow = cg.InterceptAddrDial(peerB, ma.StringCast("/ip4/1.2.3.5/tcp/1234")) + allow = cg.InterceptAddrDial(peerB, tStringCast("/ip4/1.2.3.5/tcp/1234")) if allow { t.Fatal("expected gater to deny peerB in 1.2.3.5") } - allow = cg.InterceptAccept(&mockConnMultiaddrs{local: nil, remote: ma.StringCast("/ip4/1.2.3.5/tcp/1234")}) + allow = cg.InterceptAccept(&mockConnMultiaddrs{local: nil, remote: tStringCast("/ip4/1.2.3.5/tcp/1234")}) if allow { t.Fatal("expected gater to deny peerB in 1.2.3.5") } - allow = cg.InterceptAddrDial(peerB, ma.StringCast("/ip4/2.3.4.5/tcp/1234")) + allow = cg.InterceptAddrDial(peerB, tStringCast("/ip4/2.3.4.5/tcp/1234")) if !allow { t.Fatal("expected gater to allow peerB in 2.3.4.5") } - allow = cg.InterceptAccept(&mockConnMultiaddrs{local: nil, remote: ma.StringCast("/ip4/2.3.4.5/tcp/1234")}) + allow = cg.InterceptAccept(&mockConnMultiaddrs{local: nil, remote: tStringCast("/ip4/2.3.4.5/tcp/1234")}) if !allow { t.Fatal("expected gater to allow peerB in 2.3.4.5") } @@ -274,32 +279,32 @@ func TestConnectionGater(t *testing.T) { t.Fatal("expected gater to allow peerB") } - allow = cg.InterceptAddrDial(peerB, ma.StringCast("/ip4/1.2.3.4/tcp/1234")) + allow = cg.InterceptAddrDial(peerB, tStringCast("/ip4/1.2.3.4/tcp/1234")) if !allow { t.Fatal("expected gater to allow peerB in 1.2.3.4") } - allow = 
cg.InterceptAccept(&mockConnMultiaddrs{local: nil, remote: ma.StringCast("/ip4/1.2.3.4/tcp/1234")}) + allow = cg.InterceptAccept(&mockConnMultiaddrs{local: nil, remote: tStringCast("/ip4/1.2.3.4/tcp/1234")}) if !allow { t.Fatal("expected gater to allow peerB in 1.2.3.4") } - allow = cg.InterceptAddrDial(peerB, ma.StringCast("/ip4/1.2.3.5/tcp/1234")) + allow = cg.InterceptAddrDial(peerB, tStringCast("/ip4/1.2.3.5/tcp/1234")) if !allow { t.Fatal("expected gater to allow peerB in 1.2.3.5") } - allow = cg.InterceptAccept(&mockConnMultiaddrs{local: nil, remote: ma.StringCast("/ip4/1.2.3.5/tcp/1234")}) + allow = cg.InterceptAccept(&mockConnMultiaddrs{local: nil, remote: tStringCast("/ip4/1.2.3.5/tcp/1234")}) if !allow { t.Fatal("expected gater to allow peerB in 1.2.3.5") } - allow = cg.InterceptAddrDial(peerB, ma.StringCast("/ip4/2.3.4.5/tcp/1234")) + allow = cg.InterceptAddrDial(peerB, tStringCast("/ip4/2.3.4.5/tcp/1234")) if !allow { t.Fatal("expected gater to allow peerB in 2.3.4.5") } - allow = cg.InterceptAccept(&mockConnMultiaddrs{local: nil, remote: ma.StringCast("/ip4/2.3.4.5/tcp/1234")}) + allow = cg.InterceptAccept(&mockConnMultiaddrs{local: nil, remote: tStringCast("/ip4/2.3.4.5/tcp/1234")}) if !allow { t.Fatal("expected gater to allow peerB in 2.3.4.5") } @@ -330,32 +335,32 @@ func TestConnectionGater(t *testing.T) { t.Fatal("expected gater to allow peerB") } - allow = cg.InterceptAddrDial(peerB, ma.StringCast("/ip4/1.2.3.4/tcp/1234")) + allow = cg.InterceptAddrDial(peerB, tStringCast("/ip4/1.2.3.4/tcp/1234")) if !allow { t.Fatal("expected gater to allow peerB in 1.2.3.4") } - allow = cg.InterceptAccept(&mockConnMultiaddrs{local: nil, remote: ma.StringCast("/ip4/1.2.3.4/tcp/1234")}) + allow = cg.InterceptAccept(&mockConnMultiaddrs{local: nil, remote: tStringCast("/ip4/1.2.3.4/tcp/1234")}) if !allow { t.Fatal("expected gater to allow peerB in 1.2.3.4") } - allow = cg.InterceptAddrDial(peerB, ma.StringCast("/ip4/1.2.3.5/tcp/1234")) + allow = cg.InterceptAddrDial(peerB, tStringCast("/ip4/1.2.3.5/tcp/1234")) if !allow { t.Fatal("expected gater to allow peerB in 1.2.3.5") } - allow = cg.InterceptAccept(&mockConnMultiaddrs{local: nil, remote: ma.StringCast("/ip4/1.2.3.5/tcp/1234")}) + allow = cg.InterceptAccept(&mockConnMultiaddrs{local: nil, remote: tStringCast("/ip4/1.2.3.5/tcp/1234")}) if !allow { t.Fatal("expected gater to allow peerB in 1.2.3.5") } - allow = cg.InterceptAddrDial(peerB, ma.StringCast("/ip4/2.3.4.5/tcp/1234")) + allow = cg.InterceptAddrDial(peerB, tStringCast("/ip4/2.3.4.5/tcp/1234")) if !allow { t.Fatal("expected gater to allow peerB in 2.3.4.5") } - allow = cg.InterceptAccept(&mockConnMultiaddrs{local: nil, remote: ma.StringCast("/ip4/2.3.4.5/tcp/1234")}) + allow = cg.InterceptAccept(&mockConnMultiaddrs{local: nil, remote: tStringCast("/ip4/2.3.4.5/tcp/1234")}) if !allow { t.Fatal("expected gater to allow peerB in 2.3.4.5") } diff --git a/go-libp2p/p2p/net/swarm/black_hole_detector.go b/go-libp2p/p2p/net/swarm/black_hole_detector.go index dd7849e..00328f5 100644 --- a/go-libp2p/p2p/net/swarm/black_hole_detector.go +++ b/go-libp2p/p2p/net/swarm/black_hole_detector.go @@ -181,7 +181,7 @@ type blackHoleDetector struct { func (d *blackHoleDetector) FilterAddrs(addrs []ma.Multiaddr) (valid []ma.Multiaddr, blackHoled []ma.Multiaddr) { hasUDP, hasIPv6 := false, false for _, a := range addrs { - if !manet.IsPublicAddr(a) { + if is, err := manet.IsPublicAddr(a); !is || err != nil { continue } if isProtocolAddr(a, ma.P_UDP) { @@ -206,7 +206,7 @@ func (d *blackHoleDetector) 
FilterAddrs(addrs []ma.Multiaddr) (valid []ma.Multia return ma.FilterAddrs( addrs, func(a ma.Multiaddr) bool { - if !manet.IsPublicAddr(a) { + if is, err := manet.IsPublicAddr(a); !is && err == nil { return true } // allow all UDP addresses while probing irrespective of IPv6 black hole state @@ -233,7 +233,7 @@ func (d *blackHoleDetector) FilterAddrs(addrs []ma.Multiaddr) (valid []ma.Multia // RecordResult updates the state of the relevant `blackHoleFilter`s for addr func (d *blackHoleDetector) RecordResult(addr ma.Multiaddr, success bool) { - if !manet.IsPublicAddr(addr) { + if is, err := manet.IsPublicAddr(addr); !is && err == nil { return } if d.udp != nil && isProtocolAddr(addr, ma.P_UDP) { diff --git a/go-libp2p/p2p/net/swarm/black_hole_detector_test.go b/go-libp2p/p2p/net/swarm/black_hole_detector_test.go index 1ab2cbe..80ef45d 100644 --- a/go-libp2p/p2p/net/swarm/black_hole_detector_test.go +++ b/go-libp2p/p2p/net/swarm/black_hole_detector_test.go @@ -78,11 +78,15 @@ func TestBlackHoleDetectorInApplicableAddress(t *testing.T) { udpConfig := blackHoleConfig{Enabled: true, N: 10, MinSuccesses: 5} ipv6Config := blackHoleConfig{Enabled: true, N: 10, MinSuccesses: 5} bhd := newBlackHoleDetector(udpConfig, ipv6Config, nil) + m1, _ := ma.StringCast("/ip4/1.2.3.4/tcp/1234") + m2, _ := ma.StringCast("/ip4/1.2.3.4/tcp/1233") + m3, _ := ma.StringCast("/ip6/::1/udp/1234/quic-v1") + m4, _ := ma.StringCast("/ip4/192.168.1.5/udp/1234/quic-v1") addrs := []ma.Multiaddr{ - ma.StringCast("/ip4/1.2.3.4/tcp/1234"), - ma.StringCast("/ip4/1.2.3.4/tcp/1233"), - ma.StringCast("/ip6/::1/udp/1234/quic-v1"), - ma.StringCast("/ip4/192.168.1.5/udp/1234/quic-v1"), + m1, + m2, + m3, + m4, } for i := 0; i < 1000; i++ { filteredAddrs, _ := bhd.FilterAddrs(addrs) @@ -96,8 +100,8 @@ func TestBlackHoleDetectorInApplicableAddress(t *testing.T) { func TestBlackHoleDetectorUDPDisabled(t *testing.T) { ipv6Config := blackHoleConfig{Enabled: true, N: 10, MinSuccesses: 5} bhd := newBlackHoleDetector(blackHoleConfig{Enabled: false}, ipv6Config, nil) - publicAddr := ma.StringCast("/ip4/1.2.3.4/udp/1234/quic-v1") - privAddr := ma.StringCast("/ip4/192.168.1.5/udp/1234/quic-v1") + publicAddr, _ := ma.StringCast("/ip4/1.2.3.4/udp/1234/quic-v1") + privAddr, _ := ma.StringCast("/ip4/192.168.1.5/udp/1234/quic-v1") for i := 0; i < 100; i++ { bhd.RecordResult(publicAddr, false) } @@ -112,8 +116,8 @@ func TestBlackHoleDetectorUDPDisabled(t *testing.T) { func TestBlackHoleDetectorIPv6Disabled(t *testing.T) { udpConfig := blackHoleConfig{Enabled: true, N: 10, MinSuccesses: 5} bhd := newBlackHoleDetector(udpConfig, blackHoleConfig{Enabled: false}, nil) - publicAddr := ma.StringCast("/ip6/2001::1/tcp/1234") - privAddr := ma.StringCast("/ip6/::1/tcp/1234") + publicAddr, _ := ma.StringCast("/ip6/2001::1/tcp/1234") + privAddr, _ := ma.StringCast("/ip6/::1/tcp/1234") for i := 0; i < 100; i++ { bhd.RecordResult(publicAddr, false) } @@ -131,7 +135,7 @@ func TestBlackHoleDetectorProbes(t *testing.T) { udp: &blackHoleFilter{n: 2, minSuccesses: 1, name: "udp"}, ipv6: &blackHoleFilter{n: 3, minSuccesses: 1, name: "ipv6"}, } - udp6Addr := ma.StringCast("/ip6/2001::1/udp/1234/quic-v1") + udp6Addr, _ := ma.StringCast("/ip6/2001::1/udp/1234/quic-v1") addrs := []ma.Multiaddr{udp6Addr} for i := 0; i < 3; i++ { bhd.RecordResult(udp6Addr, false) @@ -152,14 +156,14 @@ func TestBlackHoleDetectorProbes(t *testing.T) { } func TestBlackHoleDetectorAddrFiltering(t *testing.T) { - udp6Pub := ma.StringCast("/ip6/2001::1/udp/1234/quic-v1") - udp6Pri := 
ma.StringCast("/ip6/::1/udp/1234/quic-v1") - udp4Pub := ma.StringCast("/ip4/1.2.3.4/udp/1234/quic-v1") - udp4Pri := ma.StringCast("/ip4/192.168.1.5/udp/1234/quic-v1") - tcp6Pub := ma.StringCast("/ip6/2001::1/tcp/1234/quic-v1") - tcp6Pri := ma.StringCast("/ip6/::1/tcp/1234/quic-v1") - tcp4Pub := ma.StringCast("/ip4/1.2.3.4/tcp/1234/quic-v1") - tcp4Pri := ma.StringCast("/ip4/192.168.1.5/tcp/1234/quic-v1") + udp6Pub, _ := ma.StringCast("/ip6/2001::1/udp/1234/quic-v1") + udp6Pri, _ := ma.StringCast("/ip6/::1/udp/1234/quic-v1") + udp4Pub, _ := ma.StringCast("/ip4/1.2.3.4/udp/1234/quic-v1") + udp4Pri, _ := ma.StringCast("/ip4/192.168.1.5/udp/1234/quic-v1") + tcp6Pub, _ := ma.StringCast("/ip6/2001::1/tcp/1234/quic-v1") + tcp6Pri, _ := ma.StringCast("/ip6/::1/tcp/1234/quic-v1") + tcp4Pub, _ := ma.StringCast("/ip4/1.2.3.4/tcp/1234/quic-v1") + tcp4Pri, _ := ma.StringCast("/ip4/192.168.1.5/tcp/1234/quic-v1") makeBHD := func(udpBlocked, ipv6Blocked bool) *blackHoleDetector { bhd := &blackHoleDetector{ diff --git a/go-libp2p/p2p/net/swarm/dial_error_test.go b/go-libp2p/p2p/net/swarm/dial_error_test.go index 3231d6f..c949a93 100644 --- a/go-libp2p/p2p/net/swarm/dial_error_test.go +++ b/go-libp2p/p2p/net/swarm/dial_error_test.go @@ -10,7 +10,7 @@ import ( ) func TestTransportError(t *testing.T) { - aa := ma.StringCast("/ip4/1.2.3.4/tcp/1234") + aa, _ := ma.StringCast("/ip4/1.2.3.4/tcp/1234") te := &TransportError{Address: aa, Cause: ErrDialBackoff} require.ErrorIs(t, te, ErrDialBackoff, "TransportError should implement Unwrap") } @@ -21,8 +21,8 @@ func TestDialError(t *testing.T) { "DialError Unwrap should handle DialError.Cause") require.ErrorIs(t, de, de, "DialError Unwrap should handle match to self") - aa := ma.StringCast("/ip4/1.2.3.4/tcp/1234") - ab := ma.StringCast("/ip6/1::1/udp/1234/quic-v1") + aa, _ := ma.StringCast("/ip4/1.2.3.4/tcp/1234") + ab, _ := ma.StringCast("/ip6/1::1/udp/1234/quic-v1") de = &DialError{ Peer: "pid", DialErrors: []TransportError{ diff --git a/go-libp2p/p2p/net/swarm/dial_ranker.go b/go-libp2p/p2p/net/swarm/dial_ranker.go index 7e58876..f282310 100644 --- a/go-libp2p/p2p/net/swarm/dial_ranker.go +++ b/go-libp2p/p2p/net/swarm/dial_ranker.go @@ -73,7 +73,7 @@ func NoDelayDialRanker(addrs []ma.Multiaddr) []network.AddrDelay { // We dial lowest ports first as they are more likely to be the listen port. 
func DefaultDialRanker(addrs []ma.Multiaddr) []network.AddrDelay { relay, addrs := filterAddrs(addrs, isRelayAddr) - pvt, addrs := filterAddrs(addrs, manet.IsPrivateAddr) + pvt, addrs := filterAddrs(addrs, func(a ma.Multiaddr) bool { is, err := manet.IsPrivateAddr(a); return is && err == nil }) public, addrs := filterAddrs(addrs, func(a ma.Multiaddr) bool { return isProtocolAddr(a, ma.P_IP4) || isProtocolAddr(a, ma.P_IP6) }) var relayOffset time.Duration @@ -235,7 +235,10 @@ func score(a ma.Multiaddr) int { func isProtocolAddr(a ma.Multiaddr, p int) bool { found := false - ma.ForEach(a, func(c ma.Component) bool { + ma.ForEach(a, func(c ma.Component, e error) bool { + if e != nil { + return false + } if c.Protocol().Code == p { found = true return false diff --git a/go-libp2p/p2p/net/swarm/dial_ranker_test.go b/go-libp2p/p2p/net/swarm/dial_ranker_test.go index 5ef3cc2..6222d9c 100644 --- a/go-libp2p/p2p/net/swarm/dial_ranker_test.go +++ b/go-libp2p/p2p/net/swarm/dial_ranker_test.go @@ -20,15 +20,15 @@ func sortAddrDelays(addrDelays []network.AddrDelay) { } func TestNoDelayDialRanker(t *testing.T) { - q1 := ma.StringCast("/ip4/1.2.3.4/udp/1/quic-v1") - q1v1 := ma.StringCast("/ip4/1.2.3.4/udp/1/quic-v1") - wt1 := ma.StringCast("/ip4/1.2.3.4/udp/1/quic-v1/webtransport/") - q2 := ma.StringCast("/ip4/1.2.3.4/udp/2/quic-v1") - q2v1 := ma.StringCast("/ip4/1.2.3.4/udp/2/quic-v1") - q3 := ma.StringCast("/ip4/1.2.3.4/udp/3/quic-v1") - q3v1 := ma.StringCast("/ip4/1.2.3.4/udp/3/quic-v1") - q4 := ma.StringCast("/ip4/1.2.3.4/udp/4/quic-v1") - t1 := ma.StringCast("/ip4/1.2.3.5/tcp/1/") + q1, _ := ma.StringCast("/ip4/1.2.3.4/udp/1/quic-v1") + q1v1, _ := ma.StringCast("/ip4/1.2.3.4/udp/1/quic-v1") + wt1, _ := ma.StringCast("/ip4/1.2.3.4/udp/1/quic-v1/webtransport/") + q2, _ := ma.StringCast("/ip4/1.2.3.4/udp/2/quic-v1") + q2v1, _ := ma.StringCast("/ip4/1.2.3.4/udp/2/quic-v1") + q3, _ := ma.StringCast("/ip4/1.2.3.4/udp/3/quic-v1") + q3v1, _ := ma.StringCast("/ip4/1.2.3.4/udp/3/quic-v1") + q4, _ := ma.StringCast("/ip4/1.2.3.4/udp/4/quic-v1") + t1, _ := ma.StringCast("/ip4/1.2.3.5/tcp/1/") testCase := []struct { name string @@ -70,14 +70,14 @@ func TestNoDelayDialRanker(t *testing.T) { } func TestDelayRankerQUICDelay(t *testing.T) { - q1v1 := ma.StringCast("/ip4/1.2.3.4/udp/1/quic-v1") - wt1 := ma.StringCast("/ip4/1.2.3.4/udp/1/quic-v1/webtransport/") - q2v1 := ma.StringCast("/ip4/1.2.3.4/udp/2/quic-v1") - q3v1 := ma.StringCast("/ip4/1.2.3.4/udp/3/quic-v1") + q1v1, _ := ma.StringCast("/ip4/1.2.3.4/udp/1/quic-v1") + wt1, _ := ma.StringCast("/ip4/1.2.3.4/udp/1/quic-v1/webtransport/") + q2v1, _ := ma.StringCast("/ip4/1.2.3.4/udp/2/quic-v1") + q3v1, _ := ma.StringCast("/ip4/1.2.3.4/udp/3/quic-v1") - q1v16 := ma.StringCast("/ip6/1::2/udp/1/quic-v1") - q2v16 := ma.StringCast("/ip6/1::2/udp/2/quic-v1") - q3v16 := ma.StringCast("/ip6/1::2/udp/3/quic-v1") + q1v16, _ := ma.StringCast("/ip6/1::2/udp/1/quic-v1") + q2v16, _ := ma.StringCast("/ip6/1::2/udp/2/quic-v1") + q3v16, _ := ma.StringCast("/ip6/1::2/udp/3/quic-v1") testCase := []struct { name string @@ -151,17 +151,15 @@ func TestDelayRankerQUICDelay(t *testing.T) { } func TestDelayRankerTCPDelay(t *testing.T) { - q1v1 := ma.StringCast("/ip4/1.2.3.4/udp/1/quic-v1") - q2v1 := ma.StringCast("/ip4/1.2.3.4/udp/2/quic-v1") - - q1v16 := ma.StringCast("/ip6/1::2/udp/1/quic-v1") - q2v16 := ma.StringCast("/ip6/1::2/udp/2/quic-v1") - q3v16 := ma.StringCast("/ip6/1::2/udp/3/quic-v1") - - t1 := ma.StringCast("/ip4/1.2.3.5/tcp/1/") - t1v6 := ma.StringCast("/ip6/1::2/tcp/1") - t2 
:= ma.StringCast("/ip4/1.2.3.4/tcp/2") - t3 := ma.StringCast("/ip4/1.2.3.4/tcp/3") + q1v1, _ := ma.StringCast("/ip4/1.2.3.4/udp/1/quic-v1") + q2v1, _ := ma.StringCast("/ip4/1.2.3.4/udp/2/quic-v1") + q1v16, _ := ma.StringCast("/ip6/1::2/udp/1/quic-v1") + q2v16, _ := ma.StringCast("/ip6/1::2/udp/2/quic-v1") + q3v16, _ := ma.StringCast("/ip6/1::2/udp/3/quic-v1") + t1, _ := ma.StringCast("/ip4/1.2.3.5/tcp/1/") + t1v6, _ := ma.StringCast("/ip6/1::2/tcp/1") + t2, _ := ma.StringCast("/ip4/1.2.3.4/tcp/2") + t3, _ := ma.StringCast("/ip4/1.2.3.4/tcp/3") testCase := []struct { name string @@ -247,12 +245,12 @@ func TestDelayRankerTCPDelay(t *testing.T) { } func TestDelayRankerRelay(t *testing.T) { - q1 := ma.StringCast("/ip4/1.2.3.4/udp/1/quic-v1") - q2 := ma.StringCast("/ip4/1.2.3.4/udp/2/quic-v1") + q1, _ := ma.StringCast("/ip4/1.2.3.4/udp/1/quic-v1") + q2, _ := ma.StringCast("/ip4/1.2.3.4/udp/2/quic-v1") pid := test.RandPeerIDFatal(t) - r1 := ma.StringCast(fmt.Sprintf("/ip4/1.2.3.4/tcp/1/p2p-circuit/p2p/%s", pid)) - r2 := ma.StringCast(fmt.Sprintf("/ip4/1.2.3.4/udp/1/quic/p2p-circuit/p2p/%s", pid)) + r1, _ := ma.StringCast(fmt.Sprintf("/ip4/1.2.3.4/tcp/1/p2p-circuit/p2p/%s", pid)) + r2, _ := ma.StringCast(fmt.Sprintf("/ip4/1.2.3.4/udp/1/quic/p2p-circuit/p2p/%s", pid)) testCase := []struct { name string diff --git a/go-libp2p/p2p/net/swarm/dial_test.go b/go-libp2p/p2p/net/swarm/dial_test.go index 3b139ef..400e6cd 100644 --- a/go-libp2p/p2p/net/swarm/dial_test.go +++ b/go-libp2p/p2p/net/swarm/dial_test.go @@ -61,12 +61,13 @@ func TestBasicDialPeerWithResolver(t *testing.T) { // that the resovler has to resolve this var s2Addrs []ma.Multiaddr for _, a := range s2.ListenAddresses() { - _, rest := ma.SplitFunc(a, func(c ma.Component) bool { + _, rest, _ := ma.SplitFunc(a, func(c ma.Component) bool { return c.Protocol().Code == ma.P_TCP || c.Protocol().Code == ma.P_UDP }, ) if rest != nil { - s2Addrs = append(s2Addrs, ma.StringCast("/dns4/example.com").Encapsulate(rest)) + e, _ := ma.StringCast("/dns4/example.com") + s2Addrs = append(s2Addrs, e.Encapsulate(rest)) } } @@ -646,7 +647,8 @@ func TestDialSelf(t *testing.T) { func TestDialQUICDraft29(t *testing.T) { s := makeDialOnlySwarm(t) id := testutil.RandPeerIDFatal(t) - s.Peerstore().AddAddr(id, ma.StringCast("/ip4/127.0.0.1/udp/1234/quic"), time.Hour) + l, _ := ma.StringCast("/ip4/127.0.0.1/udp/1234/quic") + s.Peerstore().AddAddr(id, l, time.Hour) _, err := s.DialPeer(context.Background(), id) require.ErrorIs(t, err, swarm.ErrQUICDraft29) require.ErrorIs(t, err, swarm.ErrNoTransport) diff --git a/go-libp2p/p2p/net/swarm/dial_worker.go b/go-libp2p/p2p/net/swarm/dial_worker.go index 0cac6e4..b95ca0a 100644 --- a/go-libp2p/p2p/net/swarm/dial_worker.go +++ b/go-libp2p/p2p/net/swarm/dial_worker.go @@ -330,7 +330,7 @@ loop: if res.Kind == tpt.UpdateKindHandshakeProgressed { // Only wait for public addresses to complete dialing since private dials // are quick any way - if manet.IsPublicAddr(res.Addr) { + if is, err := manet.IsPublicAddr(res.Addr); is && err == nil { ad.expectedTCPUpgradeTime = w.cl.Now().Add(PublicTCPDelay) } scheduleNextDial() diff --git a/go-libp2p/p2p/net/swarm/dial_worker_test.go b/go-libp2p/p2p/net/swarm/dial_worker_test.go index 9e9465f..d352861 100644 --- a/go-libp2p/p2p/net/swarm/dial_worker_test.go +++ b/go-libp2p/p2p/net/swarm/dial_worker_test.go @@ -58,11 +58,14 @@ func newPeer(t *testing.T) (crypto.PrivKey, peer.ID) { func makeSwarm(t *testing.T) *Swarm { s := makeSwarmWithNoListenAddrs(t, WithDialTimeout(1*time.Second)) - if 
err := s.Listen(ma.StringCast("/ip4/127.0.0.1/tcp/0")); err != nil { + q, _ := ma.StringCast("/ip4/127.0.0.1/tcp/0") + if err := s.Listen(q); err != nil { t.Fatal(err) } - if err := s.Listen(ma.StringCast("/ip4/127.0.0.1/udp/0/quic-v1")); err != nil { + q, _ = ma.StringCast("/ip4/127.0.0.1/udp/0/quic-v1") + + if err := s.Listen(q); err != nil { t.Fatal(err) } @@ -235,7 +238,9 @@ func TestDialWorkerLoopFailure(t *testing.T) { _, p2 := newPeer(t) - s1.Peerstore().AddAddrs(p2, []ma.Multiaddr{ma.StringCast("/ip4/11.0.0.1/tcp/1234"), ma.StringCast("/ip4/11.0.0.1/udp/1234/quic-v1")}, peerstore.PermanentAddrTTL) + m1, _ := ma.StringCast("/ip4/11.0.0.1/tcp/1234") + m2, _ := ma.StringCast("/ip4/11.0.0.1/udp/1234/quic-v1") + s1.Peerstore().AddAddrs(p2, []ma.Multiaddr{m1, m2}, peerstore.PermanentAddrTTL) reqch := make(chan dialRequest) resch := make(chan dialResponse) @@ -260,7 +265,9 @@ func TestDialWorkerLoopConcurrentFailure(t *testing.T) { _, p2 := newPeer(t) - s1.Peerstore().AddAddrs(p2, []ma.Multiaddr{ma.StringCast("/ip4/11.0.0.1/tcp/1234"), ma.StringCast("/ip4/11.0.0.1/udp/1234/quic-v1")}, peerstore.PermanentAddrTTL) + m1, _ := ma.StringCast("/ip4/11.0.0.1/tcp/1234") + m2, _ := ma.StringCast("/ip4/11.0.0.1/udp/1234/quic-v1") + s1.Peerstore().AddAddrs(p2, []ma.Multiaddr{m1, m2}, peerstore.PermanentAddrTTL) reqch := make(chan dialRequest) worker := newDialWorker(s1, p2, reqch, nil) @@ -308,7 +315,10 @@ func TestDialWorkerLoopConcurrentMix(t *testing.T) { defer s2.Close() s1.Peerstore().AddAddrs(s2.LocalPeer(), s2.ListenAddresses(), peerstore.PermanentAddrTTL) - s1.Peerstore().AddAddrs(s2.LocalPeer(), []ma.Multiaddr{ma.StringCast("/ip4/11.0.0.1/tcp/1234"), ma.StringCast("/ip4/11.0.0.1/udp/1234/quic-v1")}, peerstore.PermanentAddrTTL) + + m1, _ := ma.StringCast("/ip4/11.0.0.1/tcp/1234") + m2, _ := ma.StringCast("/ip4/11.0.0.1/udp/1234/quic-v1") + s1.Peerstore().AddAddrs(s2.LocalPeer(), []ma.Multiaddr{m1, m2}, peerstore.PermanentAddrTTL) reqch := make(chan dialRequest) worker := newDialWorker(s1, s2.LocalPeer(), reqch, nil) @@ -352,7 +362,8 @@ func TestDialWorkerLoopConcurrentFailureStress(t *testing.T) { var addrs []ma.Multiaddr for i := 0; i < 16; i++ { - addrs = append(addrs, ma.StringCast(fmt.Sprintf("/ip4/11.0.0.%d/tcp/%d", i%256, 1234+i))) + m1, _ := ma.StringCast(fmt.Sprintf("/ip4/11.0.0.%d/tcp/%d", i%256, 1234+i)) + addrs = append(addrs, m1) } s1.Peerstore().AddAddrs(p2, addrs, peerstore.PermanentAddrTTL) @@ -398,7 +409,8 @@ func TestDialWorkerLoopConcurrentFailureStress(t *testing.T) { func TestDialQueueNextBatch(t *testing.T) { addrs := make([]ma.Multiaddr, 0) for i := 0; i < 10; i++ { - addrs = append(addrs, ma.StringCast(fmt.Sprintf("/ip4/1.2.3.4/tcp/%d", i))) + m1, _ := ma.StringCast(fmt.Sprintf("/ip4/1.2.3.4/tcp/%d", i)) + addrs = append(addrs, m1) } testcase := []struct { name string @@ -523,8 +535,9 @@ func (s schedulingTestCase) Generate(rand *mrand.Rand, size int) reflect.Value { input := make([]timedDial, size) delays := make(map[time.Duration]struct{}) for i := 0; i < size; i++ { + m1, _ := ma.StringCast(fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", i+10550)) input[i] = timedDial{ - addr: ma.StringCast(fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", i+10550)), + addr: m1, delay: time.Duration(mrand.Intn(100)) * 10 * time.Millisecond, // max 1 second success: false, failAfter: time.Duration(mrand.Intn(100)) * 10 * time.Millisecond, // max 1 second @@ -752,7 +765,8 @@ func TestCheckDialWorkerLoopScheduling(t *testing.T) { for i := 0; i < 10; i++ { for { p := 20000 + i - addrs = append(addrs, 
ma.StringCast(fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", p))) + m1, _ := ma.StringCast(fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", p)) + addrs = append(addrs, m1) break } } @@ -798,7 +812,8 @@ func TestDialWorkerLoopRanking(t *testing.T) { for i := 0; i < 10; i++ { for { p := 20000 + i - addrs = append(addrs, ma.StringCast(fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", p))) + m1, _ := ma.StringCast(fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", p)) + addrs = append(addrs, m1) break } } @@ -915,15 +930,17 @@ func TestDialWorkerLoopSchedulingProperty(t *testing.T) { } func TestDialWorkerLoopQuicOverTCP(t *testing.T) { + m1, _ := ma.StringCast("/ip4/127.0.0.1/udp/20000/quic-v1") + m2, _ := ma.StringCast("/ip4/127.0.0.1/tcp/20000") tc := schedulingTestCase{ input: []timedDial{ { - addr: ma.StringCast("/ip4/127.0.0.1/udp/20000/quic-v1"), + addr: m1, delay: 0, success: true, }, { - addr: ma.StringCast("/ip4/127.0.0.1/tcp/20000"), + addr: m2, delay: 30 * time.Millisecond, success: true, }, @@ -950,14 +967,14 @@ func TestDialWorkerLoopHolePunching(t *testing.T) { defer s2.Close() // t1 will accept and keep the other end waiting - t1 := ma.StringCast("/ip4/127.0.0.1/tcp/10000") + t1, _ := ma.StringCast("/ip4/127.0.0.1/tcp/10000") recvCh := make(chan struct{}) list, ch := makeTCPListener(t, t1, recvCh) // ignore ch because we want to hang forever defer list.Close() defer func() { ch <- struct{}{} }() // close listener // t2 will succeed - t2 := ma.StringCast("/ip4/127.0.0.1/tcp/10001") + t2, _ := ma.StringCast("/ip4/127.0.0.1/tcp/10001") err := s2.AddListenAddr(t2) if err != nil { @@ -1023,8 +1040,8 @@ func TestDialWorkerLoopAddrDedup(t *testing.T) { s2 := makeSwarm(t) defer s1.Close() defer s2.Close() - t1 := ma.StringCast(fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", 10000)) - t2 := ma.StringCast(fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", 10000)) + t1, _ := ma.StringCast(fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", 10000)) + t2, _ := ma.StringCast(fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", 10000)) // acceptAndClose accepts a connection and closes it acceptAndClose := func(a ma.Multiaddr, ch chan struct{}, closech chan struct{}) { @@ -1090,9 +1107,9 @@ func TestDialWorkerLoopTCPConnUpgradeWait(t *testing.T) { defer s2.Close() // Connection to a1 will fail but a1 is a public address so we can test waiting for tcp // connection established dial update. ipv4only.arpa reserved address. - a1 := ma.StringCast(fmt.Sprintf("/ip4/192.0.0.170/tcp/%d", 10001)) + a1, _ := ma.StringCast(fmt.Sprintf("/ip4/192.0.0.170/tcp/%d", 10001)) // Connection to a2 will succeed. 
- a2 := ma.StringCast(fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", 10002)) + a2, _ := ma.StringCast(fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", 10002)) s2.Listen(a2) s1.Peerstore().AddAddrs(s2.LocalPeer(), []ma.Multiaddr{a1, a2}, peerstore.PermanentAddrTTL) diff --git a/go-libp2p/p2p/net/swarm/limiter_test.go b/go-libp2p/p2p/net/swarm/limiter_test.go index 8e61958..b4ebadb 100644 --- a/go-libp2p/p2p/net/swarm/limiter_test.go +++ b/go-libp2p/p2p/net/swarm/limiter_test.go @@ -19,7 +19,8 @@ import ( ) func addrWithPort(p int) ma.Multiaddr { - return ma.StringCast(fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", p)) + m1, _ := ma.StringCast(fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", p)) + return m1 } // in these tests I use addresses with tcp ports over a certain number to @@ -155,7 +156,7 @@ func TestFDLimiting(t *testing.T) { } pid5 := peer.ID("testpeer5") - utpaddr := ma.StringCast("/ip4/127.0.0.1/udp/7777/utp") + utpaddr, _ := ma.StringCast("/ip4/127.0.0.1/udp/7777/utp") // This should complete immediately since utp addresses arent blocked by fd rate limiting l.AddDialJob(&dialJob{ctx: ctx, peer: pid5, addr: utpaddr, resp: resch}) @@ -172,7 +173,7 @@ func TestFDLimiting(t *testing.T) { // A relay address with tcp transport will complete because we do not consume fds for dials // with relay addresses as the fd will be consumed when we actually dial the relay server. pid6 := test.RandPeerIDFatal(t) - relayAddr := ma.StringCast(fmt.Sprintf("/ip4/127.0.0.1/tcp/20/p2p-circuit/p2p/%s", pid6)) + relayAddr, _ := ma.StringCast(fmt.Sprintf("/ip4/127.0.0.1/tcp/20/p2p-circuit/p2p/%s", pid6)) l.AddDialJob(&dialJob{ctx: ctx, peer: pid6, addr: relayAddr, resp: resch}) select { @@ -216,11 +217,13 @@ func TestTokenRedistribution(t *testing.T) { tryDialAddrs(ctx, l, pid, bads, resch) } + m1, _ := ma.StringCast("/ip4/127.0.0.1/tcp/1001") + // add a good dial job for peer 1 l.AddDialJob(&dialJob{ ctx: ctx, peer: pids[1], - addr: ma.StringCast("/ip4/127.0.0.1/tcp/1001"), + addr: m1, resp: resch, }) diff --git a/go-libp2p/p2p/net/swarm/swarm_addr_test.go b/go-libp2p/p2p/net/swarm/swarm_addr_test.go index 435866e..eff7e80 100644 --- a/go-libp2p/p2p/net/swarm/swarm_addr_test.go +++ b/go-libp2p/p2p/net/swarm/swarm_addr_test.go @@ -100,10 +100,17 @@ func TestDialAddressSelection(t *testing.T) { require.NoError(t, err) require.NoError(t, s.AddTransport(circuitTr)) - require.Equal(t, tcpTr, s.TransportForDialing(ma.StringCast("/ip4/127.0.0.1/tcp/1234"))) - require.Equal(t, quicTr, s.TransportForDialing(ma.StringCast("/ip4/127.0.0.1/udp/1234/quic-v1"))) - require.Equal(t, circuitTr, s.TransportForDialing(ma.StringCast(fmt.Sprintf("/ip4/127.0.0.1/udp/1234/quic/p2p-circuit/p2p/%s", id)))) - require.Equal(t, webtransportTr, s.TransportForDialing(ma.StringCast(fmt.Sprintf("/ip4/127.0.0.1/udp/1234/quic-v1/webtransport/certhash/%s", certHash)))) - require.Nil(t, s.TransportForDialing(ma.StringCast("/ip4/1.2.3.4"))) - require.Nil(t, s.TransportForDialing(ma.StringCast("/ip4/1.2.3.4/tcp/443/ws"))) + m1, _ := ma.StringCast("/ip4/127.0.0.1/tcp/1234") + m2, _ := ma.StringCast("/ip4/127.0.0.1/udp/1234/quic-v1") + m3, _ := ma.StringCast(fmt.Sprintf("/ip4/127.0.0.1/udp/1234/quic/p2p-circuit/p2p/%s", id)) + m4, _ := ma.StringCast(fmt.Sprintf("/ip4/127.0.0.1/udp/1234/quic-v1/webtransport/certhash/%s", certHash)) + m5, _ := ma.StringCast("/ip4/1.2.3.4") + m6, _ := ma.StringCast("/ip4/1.2.3.4/tcp/443/ws") + + require.Equal(t, tcpTr, s.TransportForDialing(m1)) + require.Equal(t, quicTr, s.TransportForDialing(m2)) + require.Equal(t, circuitTr, s.TransportForDialing(m3)) 
+ require.Equal(t, webtransportTr, s.TransportForDialing(m4)) + require.Nil(t, s.TransportForDialing(m5)) + require.Nil(t, s.TransportForDialing(m6)) } diff --git a/go-libp2p/p2p/net/swarm/swarm_dial.go b/go-libp2p/p2p/net/swarm/swarm_dial.go index f639ce1..c2e4b71 100644 --- a/go-libp2p/p2p/net/swarm/swarm_dial.go +++ b/go-libp2p/p2p/net/swarm/swarm_dial.go @@ -434,7 +434,10 @@ func (s *Swarm) filterKnownUndialables(p peer.ID, addrs []ma.Multiaddr) (goodAdd var ourAddrs []ma.Multiaddr for _, addr := range lisAddrs { // we're only sure about filtering out /ip4 and /ip6 addresses, so far - ma.ForEach(addr, func(c ma.Component) bool { + ma.ForEach(addr, func(c ma.Component, e error) bool { + if e != nil { + return true + } if c.Protocol().Code == ma.P_IP4 || c.Protocol().Code == ma.P_IP6 { ourAddrs = append(ourAddrs, addr) } @@ -504,9 +507,13 @@ func (s *Swarm) filterKnownUndialables(p peer.ID, addrs []ma.Multiaddr) (goodAdd // limitedDial will start a dial to the given peer when // it is able, respecting the various different types of rate // limiting that occur without using extra goroutines per addr -func (s *Swarm) limitedDial(ctx context.Context, p peer.ID, a ma.Multiaddr, resp chan transport.DialUpdate) { +func (s *Swarm) limitedDial(ctx context.Context, p peer.ID, a ma.Multiaddr, resp chan transport.DialUpdate) error { timeout := s.dialTimeout - if manet.IsPrivateAddr(a) && s.dialTimeoutLocal < s.dialTimeout { + is, err := manet.IsPrivateAddr(a) + if err != nil { + return err + } + if is && s.dialTimeoutLocal < s.dialTimeout { timeout = s.dialTimeoutLocal } s.limiter.AddDialJob(&dialJob{ @@ -516,6 +523,7 @@ func (s *Swarm) limitedDial(ctx context.Context, p peer.ID, a ma.Multiaddr, resp ctx: ctx, timeout: timeout, }) + return nil } // dialAddr is the actual dial for an addr, indirectly invoked through the limiter @@ -582,12 +590,12 @@ func (s *Swarm) dialAddr(ctx context.Context, p peer.ID, addr ma.Multiaddr, updC // For a circuit-relay address, we look at the address of the relay server/proxy // and use the same logic as above to decide. 
func isFdConsumingAddr(addr ma.Multiaddr) bool {
-	first, _ := ma.SplitFunc(addr, func(c ma.Component) bool {
+	first, _, err := ma.SplitFunc(addr, func(c ma.Component) bool {
 		return c.Protocol().Code == ma.P_CIRCUIT
 	})
 
 	// for safety
-	if first == nil {
+	if err != nil || first == nil {
 		return true
 	}
 
diff --git a/go-libp2p/p2p/net/swarm/swarm_dial_test.go b/go-libp2p/p2p/net/swarm/swarm_dial_test.go
index 83f94b9..3d9eb36 100644
--- a/go-libp2p/p2p/net/swarm/swarm_dial_test.go
+++ b/go-libp2p/p2p/net/swarm/swarm_dial_test.go
@@ -29,6 +29,11 @@ import (
 	"github.com/stretchr/testify/require"
 )
 
+func tStringCast(str string) ma.Multiaddr {
+	m, _ := ma.StringCast(str)
+	return m
+}
+
 func TestAddrsForDial(t *testing.T) {
 	mockResolver := madns.MockResolver{IP: make(map[string][]net.IPAddr)}
 	ipaddr, err := net.ResolveIPAddr("ip4", "1.2.3.4")
@@ -63,7 +68,7 @@ func TestAddrsForDial(t *testing.T) {
 
 	otherPeer := test.RandPeerIDFatal(t)
 
-	ps.AddAddr(otherPeer, ma.StringCast("/dns4/example.com/tcp/1234/wss"), time.Hour)
+	ps.AddAddr(otherPeer, tStringCast("/dns4/example.com/tcp/1234/wss"), time.Hour)
 
 	ctx := context.Background()
 	mas, _, err := s.addrsForDial(ctx, otherPeer)
@@ -107,8 +112,8 @@ func TestDedupAddrsForDial(t *testing.T) {
 
 	otherPeer := test.RandPeerIDFatal(t)
 
-	ps.AddAddr(otherPeer, ma.StringCast("/dns4/example.com/tcp/1234"), time.Hour)
-	ps.AddAddr(otherPeer, ma.StringCast("/ip4/1.2.3.4/tcp/1234"), time.Hour)
+	ps.AddAddr(otherPeer, tStringCast("/dns4/example.com/tcp/1234"), time.Hour)
+	ps.AddAddr(otherPeer, tStringCast("/ip4/1.2.3.4/tcp/1234"), time.Hour)
 
 	ctx := context.Background()
 	mas, _, err := s.addrsForDial(ctx, otherPeer)
@@ -164,11 +169,11 @@ func TestAddrResolution(t *testing.T) {
 	p1 := test.RandPeerIDFatal(t)
 	p2 := test.RandPeerIDFatal(t)
 
-	addr1 := ma.StringCast("/dnsaddr/example.com")
-	addr2 := ma.StringCast("/ip4/192.0.2.1/tcp/123")
+	addr1 := tStringCast("/dnsaddr/example.com")
+	addr2 := tStringCast("/ip4/192.0.2.1/tcp/123")
 
-	p2paddr2 := ma.StringCast("/ip4/192.0.2.1/tcp/123/p2p/" + p1.String())
-	p2paddr3 := ma.StringCast("/ip4/192.0.2.1/tcp/123/p2p/" + p2.String())
+	p2paddr2 := tStringCast("/ip4/192.0.2.1/tcp/123/p2p/" + p1.String())
+	p2paddr3 := tStringCast("/ip4/192.0.2.1/tcp/123/p2p/" + p2.String())
 
 	backend := &madns.MockResolver{
 		TXT: map[string][]string{"_dnsaddr.example.com": {
@@ -200,13 +205,13 @@ func TestAddrResolutionRecursive(t *testing.T) {
 	p1 := test.RandPeerIDFatal(t)
 	p2 := test.RandPeerIDFatal(t)
 
-	addr1 := ma.StringCast("/dnsaddr/example.com")
-	addr2 := ma.StringCast("/ip4/192.0.2.1/tcp/123")
-	p2paddr1 := ma.StringCast("/dnsaddr/example.com/p2p/" + p1.String())
-	p2paddr2 := ma.StringCast("/dnsaddr/example.com/p2p/" + p2.String())
-	p2paddr1i := ma.StringCast("/dnsaddr/foo.example.com/p2p/" + p1.String())
-	p2paddr2i := ma.StringCast("/dnsaddr/bar.example.com/p2p/" + p2.String())
-	p2paddr1f := ma.StringCast("/ip4/192.0.2.1/tcp/123/p2p/" + p1.String())
+	addr1 := tStringCast("/dnsaddr/example.com")
+	addr2 := tStringCast("/ip4/192.0.2.1/tcp/123")
+	p2paddr1 := tStringCast("/dnsaddr/example.com/p2p/" + p1.String())
+	p2paddr2 := tStringCast("/dnsaddr/example.com/p2p/" + p2.String())
+	p2paddr1i := tStringCast("/dnsaddr/foo.example.com/p2p/" + p1.String())
+	p2paddr2i := tStringCast("/dnsaddr/bar.example.com/p2p/" + p2.String())
+	p2paddr1f := tStringCast("/ip4/192.0.2.1/tcp/123/p2p/" + p1.String())
 
 	backend := &madns.MockResolver{
 		TXT: map[string][]string{
@@ -266,7 +271,7 @@ func TestAddrResolutionRecursiveTransportSpecific(t *testing.T) {
require.NoError(t, err) s := newTestSwarmWithResolver(t, resolver) - pi1, err := peer.AddrInfoFromP2pAddr(ma.StringCast("/dnsaddr/example.com/p2p/" + p.String())) + pi1, err := peer.AddrInfoFromP2pAddr(tStringCast("/dnsaddr/example.com/p2p/" + p.String())) require.NoError(t, err) tctx, cancel := context.WithTimeout(context.Background(), time.Millisecond*100) @@ -279,21 +284,21 @@ func TestAddrResolutionRecursiveTransportSpecific(t *testing.T) { } func TestAddrsForDialFiltering(t *testing.T) { - q1 := ma.StringCast("/ip4/1.2.3.4/udp/1/quic-v1") - q1v1 := ma.StringCast("/ip4/1.2.3.4/udp/1/quic-v1") - wt1 := ma.StringCast("/ip4/1.2.3.4/udp/1/quic-v1/webtransport/") + q1 := tStringCast("/ip4/1.2.3.4/udp/1/quic-v1") + q1v1 := tStringCast("/ip4/1.2.3.4/udp/1/quic-v1") + wt1 := tStringCast("/ip4/1.2.3.4/udp/1/quic-v1/webtransport/") - q2 := ma.StringCast("/ip4/1.2.3.4/udp/2/quic-v1") - q2v1 := ma.StringCast("/ip4/1.2.3.4/udp/2/quic-v1") - wt2 := ma.StringCast("/ip4/1.2.3.4/udp/2/quic-v1/webtransport/") + q2 := tStringCast("/ip4/1.2.3.4/udp/2/quic-v1") + q2v1 := tStringCast("/ip4/1.2.3.4/udp/2/quic-v1") + wt2 := tStringCast("/ip4/1.2.3.4/udp/2/quic-v1/webtransport/") - q3 := ma.StringCast("/ip4/1.2.3.4/udp/3/quic-v1") + q3 := tStringCast("/ip4/1.2.3.4/udp/3/quic-v1") - t1 := ma.StringCast("/ip4/1.2.3.4/tcp/1") - ws1 := ma.StringCast("/ip4/1.2.3.4/tcp/1/ws") + t1 := tStringCast("/ip4/1.2.3.4/tcp/1") + ws1 := tStringCast("/ip4/1.2.3.4/tcp/1/ws") - unSpecQ := ma.StringCast("/ip4/0.0.0.0/udp/2/quic-v1") - unSpecT := ma.StringCast("/ip6/::/tcp/2/") + unSpecQ := tStringCast("/ip4/0.0.0.0/udp/2/quic-v1") + unSpecT := tStringCast("/ip6/::/tcp/2/") resolver, err := madns.NewResolver(madns.WithDefaultResolver(&madns.MockResolver{})) require.NoError(t, err) @@ -368,7 +373,7 @@ func TestBlackHoledAddrBlocked(t *testing.T) { // All dials to this addr will fail. // manet.IsPublic is aggressive for IPv6 addresses. Use a NAT64 address. 
- addr := ma.StringCast("/ip6/64:ff9b::1.2.3.4/tcp/54321/") + addr := tStringCast("/ip6/64:ff9b::1.2.3.4/tcp/54321/") p, err := test.RandPeerID() if err != nil { diff --git a/go-libp2p/p2p/net/swarm/swarm_event_test.go b/go-libp2p/p2p/net/swarm/swarm_event_test.go index 5010215..87c64ec 100644 --- a/go-libp2p/p2p/net/swarm/swarm_event_test.go +++ b/go-libp2p/p2p/net/swarm/swarm_event_test.go @@ -67,6 +67,11 @@ func TestConnectednessEventsSingleConn(t *testing.T) { checkEvent(t, sub2, event.EvtPeerConnectednessChanged{Peer: s1.LocalPeer(), Connectedness: network.NotConnected}) } +func tStringCast(str string) ma.Multiaddr { + m, _ := ma.StringCast(str) + return m +} + func TestNoDeadlockWhenConsumingConnectednessEvents(t *testing.T) { ctx := context.Background() ctx, cancel := context.WithCancel(ctx) @@ -78,7 +83,7 @@ func TestNoDeadlockWhenConsumingConnectednessEvents(t *testing.T) { listener := swarmt.GenSwarm(t, swarmt.OptDialOnly) addrsToListen := []ma.Multiaddr{ - ma.StringCast("/ip4/127.0.0.1/udp/0/quic-v1"), + tStringCast("/ip4/127.0.0.1/udp/0/quic-v1"), } if err := listener.Listen(addrsToListen...); err != nil { diff --git a/go-libp2p/p2p/net/swarm/swarm_metrics.go b/go-libp2p/p2p/net/swarm/swarm_metrics.go index b5c0f2e..4b687ab 100644 --- a/go-libp2p/p2p/net/swarm/swarm_metrics.go +++ b/go-libp2p/p2p/net/swarm/swarm_metrics.go @@ -187,7 +187,8 @@ func (m *metricsTracer) OpenedConnection(dir network.Direction, p crypto.PubKey, *tags = append(*tags, metricshelper.GetDirection(dir)) *tags = appendConnectionState(*tags, cs) - *tags = append(*tags, metricshelper.GetIPVersion(laddr)) + ipv, _ := metricshelper.GetIPVersion(laddr) + *tags = append(*tags, ipv) connsOpened.WithLabelValues(*tags...).Inc() *tags = (*tags)[:0] @@ -202,7 +203,8 @@ func (m *metricsTracer) ClosedConnection(dir network.Direction, duration time.Du *tags = append(*tags, metricshelper.GetDirection(dir)) *tags = appendConnectionState(*tags, cs) - *tags = append(*tags, metricshelper.GetIPVersion(laddr)) + ipv, _ := metricshelper.GetIPVersion(laddr) + *tags = append(*tags, ipv) connsClosed.WithLabelValues(*tags...).Inc() connDuration.WithLabelValues(*tags...).Observe(duration.Seconds()) } @@ -212,7 +214,8 @@ func (m *metricsTracer) CompletedHandshake(t time.Duration, cs network.Connectio defer metricshelper.PutStringSlice(tags) *tags = appendConnectionState(*tags, cs) - *tags = append(*tags, metricshelper.GetIPVersion(laddr)) + ipv, _ := metricshelper.GetIPVersion(laddr) + *tags = append(*tags, ipv) connHandshakeLatency.WithLabelValues(*tags...).Observe(t.Seconds()) } @@ -246,7 +249,8 @@ func (m *metricsTracer) FailedDialing(addr ma.Multiaddr, dialErr error, cause er defer metricshelper.PutStringSlice(tags) *tags = append(*tags, transport, e) - *tags = append(*tags, metricshelper.GetIPVersion(addr)) + ipv, _ := metricshelper.GetIPVersion(addr) + *tags = append(*tags, ipv) dialError.WithLabelValues(*tags...).Inc() } diff --git a/go-libp2p/p2p/net/swarm/swarm_test.go b/go-libp2p/p2p/net/swarm/swarm_test.go index 3d92690..8e4a93e 100644 --- a/go-libp2p/p2p/net/swarm/swarm_test.go +++ b/go-libp2p/p2p/net/swarm/swarm_test.go @@ -390,7 +390,7 @@ func TestTypedNilConn(t *testing.T) { func TestPreventDialListenAddr(t *testing.T) { s := GenSwarm(t, OptDialOnly) - if err := s.Listen(ma.StringCast("/ip4/0.0.0.0/udp/0/quic-v1")); err != nil { + if err := s.Listen(tStringCast("/ip4/0.0.0.0/udp/0/quic-v1")); err != nil { t.Fatal(err) } addrs, err := s.InterfaceListenAddresses() @@ -544,8 +544,8 @@ func 
TestResourceManagerAcceptStream(t *testing.T) { func TestListenCloseCount(t *testing.T) { s := GenSwarm(t, OptDialOnly) addrsToListen := []ma.Multiaddr{ - ma.StringCast("/ip4/0.0.0.0/tcp/0"), - ma.StringCast("/ip4/0.0.0.0/udp/0/quic-v1"), + tStringCast("/ip4/0.0.0.0/tcp/0"), + tStringCast("/ip4/0.0.0.0/udp/0/quic-v1"), } if err := s.Listen(addrsToListen...); err != nil { @@ -557,7 +557,7 @@ func TestListenCloseCount(t *testing.T) { for _, addr := range listenedAddrs { if _, err := addr.ValueForProtocol(ma.P_QUIC_V1); err == nil { // make a copy of the address to make sure the multiaddr comparison actually works - addrToClose = ma.StringCast(addr.String()) + addrToClose = tStringCast(addr.String()) } } diff --git a/go-libp2p/p2p/net/swarm/testing/testing.go b/go-libp2p/p2p/net/swarm/testing/testing.go index 627114f..ff587b3 100644 --- a/go-libp2p/p2p/net/swarm/testing/testing.go +++ b/go-libp2p/p2p/net/swarm/testing/testing.go @@ -170,7 +170,8 @@ func GenSwarm(t *testing.T, opts ...Option) *swarm.Swarm { t.Fatal(err) } if !cfg.dialOnly { - if err := s.Listen(ma.StringCast("/ip4/127.0.0.1/tcp/0")); err != nil { + l, _ := ma.StringCast("/ip4/127.0.0.1/tcp/0") + if err := s.Listen(l); err != nil { t.Fatal(err) } } @@ -188,7 +189,8 @@ func GenSwarm(t *testing.T, opts ...Option) *swarm.Swarm { t.Fatal(err) } if !cfg.dialOnly { - if err := s.Listen(ma.StringCast("/ip4/127.0.0.1/udp/0/quic-v1")); err != nil { + l, _ := ma.StringCast("/ip4/127.0.0.1/udp/0/quic-v1") + if err := s.Listen(l); err != nil { t.Fatal(err) } } diff --git a/go-libp2p/p2p/protocol/circuitv2/client/dial.go b/go-libp2p/p2p/protocol/circuitv2/client/dial.go index 2716525..789488f 100644 --- a/go-libp2p/p2p/protocol/circuitv2/client/dial.go +++ b/go-libp2p/p2p/protocol/circuitv2/client/dial.go @@ -41,10 +41,14 @@ func isRelayError(err error) bool { // dialer func (c *Client) dial(ctx context.Context, a ma.Multiaddr, p peer.ID) (*Conn, error) { // split /a/p2p-circuit/b into (/a, /p2p-circuit/b) - relayaddr, destaddr := ma.SplitFunc(a, func(c ma.Component) bool { + relayaddr, destaddr, err := ma.SplitFunc(a, func(c ma.Component) bool { return c.Protocol().Code == ma.P_CIRCUIT }) + if err != nil { + return nil, err + } + // If the address contained no /p2p-circuit part, the second part is nil. 
 if destaddr == nil {
 		return nil, fmt.Errorf("%s is not a relay address", a)
@@ -58,7 +62,11 @@ func (c *Client) dial(ctx context.Context, a ma.Multiaddr, p peer.ID) (*Conn, er
 
 	// Strip the /p2p-circuit prefix from the destaddr so that we can pass the destination address
 	// (if present) for active relays
-	_, destaddr = ma.SplitFirst(destaddr)
+	_, destaddr, err = ma.SplitFirst(destaddr)
+	if err != nil {
+		return nil, err
+	}
+
 	if destaddr != nil {
 		dinfo.Addrs = append(dinfo.Addrs, destaddr)
 	}
diff --git a/go-libp2p/p2p/protocol/circuitv2/client/transport.go b/go-libp2p/p2p/protocol/circuitv2/client/transport.go
index 2c9e49f..83eed0c 100644
--- a/go-libp2p/p2p/protocol/circuitv2/client/transport.go
+++ b/go-libp2p/p2p/protocol/circuitv2/client/transport.go
@@ -14,7 +14,7 @@ import (
 )
 
 var circuitProtocol = ma.ProtocolWithCode(ma.P_CIRCUIT)
-var circuitAddr = ma.Cast(circuitProtocol.VCode)
+var circuitAddr, _ = ma.Cast(circuitProtocol.VCode)
 
 // AddTransport constructs a new p2p-circuit/v2 client and adds it as a transport to the
 // host network
diff --git a/go-libp2p/p2p/protocol/circuitv2/relay/relay.go b/go-libp2p/p2p/protocol/circuitv2/relay/relay.go
index 1629a6d..ab12d5b 100644
--- a/go-libp2p/p2p/protocol/circuitv2/relay/relay.go
+++ b/go-libp2p/p2p/protocol/circuitv2/relay/relay.go
@@ -95,7 +95,10 @@ func New(h host.Host, opts ...Option) (*Relay, error) {
 	}
 	r.constraints = newConstraints(&r.rc)
 
-	r.selfAddr = ma.StringCast(fmt.Sprintf("/p2p/%s", h.ID()))
+	r.selfAddr, err = ma.StringCast(fmt.Sprintf("/p2p/%s", h.ID()))
+	if err != nil {
+		return nil, err
+	}
 
 	h.SetStreamHandler(proto.ProtoIDv2Hop, r.handleStream)
 	r.notifiee = &network.NotifyBundle{DisconnectedF: r.disconnected}
@@ -574,7 +577,7 @@ func (r *Relay) makeReservationMsg(p peer.ID, expire time.Time) *pbv2.Reservatio
 
 	var addrBytes [][]byte
 	for _, addr := range r.host.Addrs() {
-		if !manet.IsPublicAddr(addr) {
+		if is, err := manet.IsPublicAddr(addr); err != nil || !is {
 			continue
 		}
 
diff --git a/go-libp2p/p2p/protocol/circuitv2/relay/relay_test.go b/go-libp2p/p2p/protocol/circuitv2/relay/relay_test.go
index e5d32b0..68baea6 100644
--- a/go-libp2p/p2p/protocol/circuitv2/relay/relay_test.go
+++ b/go-libp2p/p2p/protocol/circuitv2/relay/relay_test.go
@@ -68,7 +68,8 @@ func getNetHosts(t *testing.T, ctx context.Context, n int) (hosts []host.Host, u
 		t.Fatal(err)
 	}
 
-	err = netw.Listen(ma.StringCast("/ip4/127.0.0.1/tcp/0"))
+	m, _ := ma.StringCast("/ip4/127.0.0.1/tcp/0")
+	err = netw.Listen(m)
 	if err != nil {
 		t.Fatal(err)
 	}
diff --git a/go-libp2p/p2p/protocol/holepunch/holepunch_test.go b/go-libp2p/p2p/protocol/holepunch/holepunch_test.go
index 23593c7..18874ff 100644
--- a/go-libp2p/p2p/protocol/holepunch/holepunch_test.go
+++ b/go-libp2p/p2p/protocol/holepunch/holepunch_test.go
@@ -75,8 +75,13 @@ func newMockIDService(t *testing.T, h host.Host) identify.IDService {
 	return &mockIDService{IDService: ids}
 }
 
+func tStringCast(str string) ma.Multiaddr {
+	m, _ := ma.StringCast(str)
+	return m
+}
+
 func (s *mockIDService) OwnObservedAddrs() []ma.Multiaddr {
-	return append(s.IDService.OwnObservedAddrs(), ma.StringCast("/ip4/1.1.1.1/tcp/1234"))
+	return append(s.IDService.OwnObservedAddrs(), tStringCast("/ip4/1.1.1.1/tcp/1234"))
 }
 
 func TestNoHolePunchIfDirectConnExists(t *testing.T) {
@@ -276,7 +281,7 @@ func TestFailuresOnResponder(t *testing.T) {
 			w := pbio.NewDelimitedWriter(s)
 			w.WriteMsg(&holepunch_pb.HolePunch{
 				Type:     holepunch_pb.HolePunch_CONNECT.Enum(),
-				ObsAddrs:
addrsToBytes([]ma.Multiaddr{ma.StringCast("/ip4/127.0.0.1/tcp/1234")}), + ObsAddrs: addrsToBytes([]ma.Multiaddr{tStringCast("/ip4/127.0.0.1/tcp/1234")}), }) w.WriteMsg(&holepunch_pb.HolePunch{Type: holepunch_pb.HolePunch_CONNECT.Enum()}) }, @@ -287,7 +292,7 @@ func TestFailuresOnResponder(t *testing.T) { initiator: func(s network.Stream) { pbio.NewDelimitedWriter(s).WriteMsg(&holepunch_pb.HolePunch{ Type: holepunch_pb.HolePunch_CONNECT.Enum(), - ObsAddrs: addrsToBytes([]ma.Multiaddr{ma.StringCast("/ip4/127.0.0.1/tcp/1234")}), + ObsAddrs: addrsToBytes([]ma.Multiaddr{tStringCast("/ip4/127.0.0.1/tcp/1234")}), }) time.Sleep(10 * time.Second) }, @@ -306,7 +311,7 @@ func TestFailuresOnResponder(t *testing.T) { initiator: func(s network.Stream) { pbio.NewDelimitedWriter(s).WriteMsg(&holepunch_pb.HolePunch{ Type: holepunch_pb.HolePunch_CONNECT.Enum(), - ObsAddrs: addrsToBytes([]ma.Multiaddr{ma.StringCast("/ip4/127.0.0.1/tcp/1234")}), + ObsAddrs: addrsToBytes([]ma.Multiaddr{tStringCast("/ip4/127.0.0.1/tcp/1234")}), }) time.Sleep(10 * time.Second) }, @@ -421,7 +426,7 @@ func mkHostWithStaticAutoRelay(t *testing.T, relay host.Host) host.Host { defer func() { manet.Private4 = cpy }() h, err := libp2p.New( - libp2p.ListenAddrs(ma.StringCast("/ip4/127.0.0.1/tcp/0")), + libp2p.ListenAddrs(tStringCast("/ip4/127.0.0.1/tcp/0")), libp2p.EnableRelay(), libp2p.EnableAutoRelayWithStaticRelays([]peer.AddrInfo{pi}), libp2p.ForceReachabilityPrivate(), @@ -446,7 +451,7 @@ func makeRelayedHosts(t *testing.T, h1opt, h2opt []holepunch.Option, addHolePunc h1, _ = mkHostWithHolePunchSvc(t, h1opt...) var err error relay, err = libp2p.New( - libp2p.ListenAddrs(ma.StringCast("/ip4/127.0.0.1/tcp/0")), + libp2p.ListenAddrs(tStringCast("/ip4/127.0.0.1/tcp/0")), libp2p.DisableRelay(), libp2p.ResourceManager(&network.NullResourceManager{}), ) @@ -500,7 +505,7 @@ func addHolePunchService(t *testing.T, h host.Host, opts ...holepunch.Option) *h func mkHostWithHolePunchSvc(t *testing.T, opts ...holepunch.Option) (host.Host, *holepunch.Service) { t.Helper() h, err := libp2p.New( - libp2p.ListenAddrs(ma.StringCast("/ip4/127.0.0.1/tcp/0"), ma.StringCast("/ip6/::1/tcp/0")), + libp2p.ListenAddrs(tStringCast("/ip4/127.0.0.1/tcp/0"), tStringCast("/ip6/::1/tcp/0")), libp2p.ForceReachabilityPrivate(), libp2p.ResourceManager(&network.NullResourceManager{}), ) diff --git a/go-libp2p/p2p/protocol/holepunch/holepuncher.go b/go-libp2p/p2p/protocol/holepunch/holepuncher.go index 479376e..3df7055 100644 --- a/go-libp2p/p2p/protocol/holepunch/holepuncher.go +++ b/go-libp2p/p2p/protocol/holepunch/holepuncher.go @@ -108,7 +108,7 @@ func (hp *holePuncher) directConnect(rp peer.ID) error { // short-circuit hole punching if a direct dial works. 
// attempt a direct connection ONLY if we have a public address for the remote peer for _, a := range hp.host.Peerstore().Addrs(rp) { - if manet.IsPublicAddr(a) && !isRelayAddress(a) { + if is, err := manet.IsPublicAddr(a); err == nil && is && !isRelayAddress(a) { forceDirectConnCtx := network.WithForceDirectDial(hp.ctx, "hole-punching") dialCtx, cancel := context.WithTimeout(forceDirectConnCtx, dialTimeout) diff --git a/go-libp2p/p2p/protocol/holepunch/metrics.go b/go-libp2p/p2p/protocol/holepunch/metrics.go index 92ed20b..a9c67e0 100644 --- a/go-libp2p/p2p/protocol/holepunch/metrics.go +++ b/go-libp2p/p2p/protocol/holepunch/metrics.go @@ -113,19 +113,19 @@ func (mt *metricsTracer) HolePunchFinished(side string, numAttempts int, *tags = append(*tags, side, getNumAttemptString(numAttempts)) var dipv, dtransport string if directConn != nil { - dipv = metricshelper.GetIPVersion(directConn.LocalMultiaddr()) + dipv, _ = metricshelper.GetIPVersion(directConn.LocalMultiaddr()) dtransport = metricshelper.GetTransport(directConn.LocalMultiaddr()) } matchingAddressCount := 0 // calculate holepunch outcome for all the addresses involved for _, la := range localAddrs { - lipv := metricshelper.GetIPVersion(la) + lipv, _ := metricshelper.GetIPVersion(la) ltransport := metricshelper.GetTransport(la) matchingAddress := false for _, ra := range remoteAddrs { - ripv := metricshelper.GetIPVersion(ra) + ripv, _ := metricshelper.GetIPVersion(ra) rtransport := metricshelper.GetTransport(ra) if ripv == lipv && rtransport == ltransport { // the peer reported an address with the same transport diff --git a/go-libp2p/p2p/protocol/holepunch/metrics_test.go b/go-libp2p/p2p/protocol/holepunch/metrics_test.go index 86cb59d..da034eb 100644 --- a/go-libp2p/p2p/protocol/holepunch/metrics_test.go +++ b/go-libp2p/p2p/protocol/holepunch/metrics_test.go @@ -20,12 +20,17 @@ func getCounterValue(t *testing.T, counter *prometheus.CounterVec, labels ...str } -func TestHolePunchOutcomeCounter(t *testing.T) { - t1 := ma.StringCast("/ip4/1.2.3.4/tcp/1") - t2 := ma.StringCast("/ip4/1.2.3.4/tcp/2") +func tStringCast(str string) ma.Multiaddr { + m, _ := ma.StringCast(str) + return m +} - q1v1 := ma.StringCast("/ip4/1.2.3.4/udp/1/quic-v1") - q2v1 := ma.StringCast("/ip4/1.2.3.4/udp/2/quic-v1") +func TestHolePunchOutcomeCounter(t *testing.T) { + t1 := tStringCast("/ip4/1.2.3.4/tcp/1") + t2 := tStringCast("/ip4/1.2.3.4/tcp/2") + + q1v1 := tStringCast("/ip4/1.2.3.4/udp/1/quic-v1") + q2v1 := tStringCast("/ip4/1.2.3.4/udp/2/quic-v1") type testcase struct { name string diff --git a/go-libp2p/p2p/protocol/holepunch/util.go b/go-libp2p/p2p/protocol/holepunch/util.go index 1301356..a63fb9b 100644 --- a/go-libp2p/p2p/protocol/holepunch/util.go +++ b/go-libp2p/p2p/protocol/holepunch/util.go @@ -13,7 +13,7 @@ import ( func containsPublicAddr(addrs []ma.Multiaddr) bool { for _, addr := range addrs { - if isRelayAddress(addr) || !manet.IsPublicAddr(addr) { + if is, err := manet.IsPublicAddr(addr); err != nil || !is || isRelayAddress(addr) { continue } return true diff --git a/go-libp2p/p2p/protocol/identify/id.go b/go-libp2p/p2p/protocol/identify/id.go index a91cc4f..30c6329 100644 --- a/go-libp2p/p2p/protocol/identify/id.go +++ b/go-libp2p/p2p/protocol/identify/id.go @@ -1072,10 +1072,10 @@ func filterAddrs(addrs []ma.Multiaddr, remote ma.Multiaddr) []ma.Multiaddr { if manet.IsIPLoopback(remote) { return addrs } - if manet.IsPrivateAddr(remote) { + if is, err := manet.IsPrivateAddr(remote); is && err == nil { return ma.FilterAddrs(addrs, func(a 
ma.Multiaddr) bool { return !manet.IsIPLoopback(a) }) } - return ma.FilterAddrs(addrs, manet.IsPublicAddr) + return ma.FilterAddrs(addrs, func(a ma.Multiaddr) bool { is, err := manet.IsPublicAddr(a); return is && err == nil }) } func trimHostAddrList(addrs []ma.Multiaddr, maxSize int) []ma.Multiaddr { @@ -1089,13 +1089,16 @@ func trimHostAddrList(addrs []ma.Multiaddr, maxSize int) []ma.Multiaddr { score := func(addr ma.Multiaddr) int { var res int - if manet.IsPublicAddr(addr) { + if is, err := manet.IsPublicAddr(addr); is && err == nil { res |= 1 << 12 } else if !manet.IsIPLoopback(addr) { res |= 1 << 11 } var protocolWeight int - ma.ForEach(addr, func(c ma.Component) bool { + ma.ForEach(addr, func(c ma.Component, e error) bool { + if e != nil { + return false + } switch c.Protocol().Code { case ma.P_QUIC_V1: protocolWeight = 5 diff --git a/go-libp2p/p2p/protocol/identify/id_glass_test.go b/go-libp2p/p2p/protocol/identify/id_glass_test.go index 3eec26c..970b525 100644 --- a/go-libp2p/p2p/protocol/identify/id_glass_test.go +++ b/go-libp2p/p2p/protocol/identify/id_glass_test.go @@ -19,6 +19,11 @@ import ( "github.com/stretchr/testify/require" ) +func tStringCast(str string) ma.Multiaddr { + m, _ := ma.StringCast(str) + return m +} + func TestFastDisconnect(t *testing.T) { // This test checks to see if we correctly abort sending an identify // response if the peer disconnects before we handle the request. @@ -176,11 +181,11 @@ func TestInvalidSignedPeerRecord(t *testing.T) { } func TestIncomingAddrFilter(t *testing.T) { - lhAddr := ma.StringCast("/ip4/127.0.0.1/udp/123/quic-v1") - privAddr := ma.StringCast("/ip4/192.168.1.101/tcp/123") - pubAddr := ma.StringCast("/ip6/2001::1/udp/123/quic-v1") - pubDNSAddr := ma.StringCast("/dns/example.com/udp/123/quic-v1") - privDNSAddr := ma.StringCast("/dns4/localhost/udp/123/quic-v1") + lhAddr := tStringCast("/ip4/127.0.0.1/udp/123/quic-v1") + privAddr := tStringCast("/ip4/192.168.1.101/tcp/123") + pubAddr := tStringCast("/ip6/2001::1/udp/123/quic-v1") + pubDNSAddr := tStringCast("/dns/example.com/udp/123/quic-v1") + privDNSAddr := tStringCast("/dns4/localhost/udp/123/quic-v1") tests := []struct { output []ma.Multiaddr remote ma.Multiaddr diff --git a/go-libp2p/p2p/protocol/identify/id_test.go b/go-libp2p/p2p/protocol/identify/id_test.go index a65d64f..1cd5954 100644 --- a/go-libp2p/p2p/protocol/identify/id_test.go +++ b/go-libp2p/p2p/protocol/identify/id_test.go @@ -446,6 +446,11 @@ func TestIdentifyPushWhileIdentifyingConn(t *testing.T) { } } +func tStringCast(str string) ma.Multiaddr { + m, _ := ma.StringCast(str) + return m +} + func TestIdentifyPushOnAddrChange(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -483,7 +488,7 @@ func TestIdentifyPushOnAddrChange(t *testing.T) { testKnowsAddrs(t, h2, h1p, h1.Peerstore().Addrs(h1p)) // change addr on host 1 and ensure host2 gets a push - lad := ma.StringCast("/ip4/127.0.0.1/tcp/1234") + lad := tStringCast("/ip4/127.0.0.1/tcp/1234") require.NoError(t, h1.Network().Listen(lad)) require.Contains(t, h1.Addrs(), lad) @@ -497,7 +502,7 @@ func TestIdentifyPushOnAddrChange(t *testing.T) { require.True(t, ma.Contains(h2.Peerstore().Addrs(h1p), lad)) // change addr on host2 and ensure host 1 gets a pus - lad = ma.StringCast("/ip4/127.0.0.1/tcp/1235") + lad = tStringCast("/ip4/127.0.0.1/tcp/1235") require.NoError(t, h2.Network().Listen(lad)) require.Contains(t, h2.Addrs(), lad) h1AddrStream := h1.Peerstore().AddrStream(ctx, h2p) @@ -509,7 +514,7 @@ func 
TestIdentifyPushOnAddrChange(t *testing.T) { require.True(t, ma.Contains(h1.Peerstore().Addrs(h2p), lad)) // change addr on host2 again - lad2 := ma.StringCast("/ip4/127.0.0.1/tcp/1236") + lad2 := tStringCast("/ip4/127.0.0.1/tcp/1236") require.NoError(t, h2.Network().Listen(lad2)) require.Contains(t, h2.Addrs(), lad2) emitAddrChangeEvt(t, h2) @@ -776,7 +781,7 @@ func TestLargePushMessage(t *testing.T) { testKnowsAddrs(t, h2, h1p, h1.Peerstore().Addrs(h1p)) // change addr on host 1 and ensure host2 gets a push - lad := ma.StringCast("/ip4/127.0.0.1/tcp/1234") + lad := tStringCast("/ip4/127.0.0.1/tcp/1234") require.NoError(t, h1.Network().Listen(lad)) require.Contains(t, h1.Addrs(), lad) emitAddrChangeEvt(t, h1) @@ -786,7 +791,7 @@ func TestLargePushMessage(t *testing.T) { }, time.Second, 10*time.Millisecond) // change addr on host2 and ensure host 1 gets a pus - lad = ma.StringCast("/ip4/127.0.0.1/tcp/1235") + lad = tStringCast("/ip4/127.0.0.1/tcp/1235") require.NoError(t, h2.Network().Listen(lad)) require.Contains(t, h2.Addrs(), lad) emitAddrChangeEvt(t, h2) @@ -796,7 +801,7 @@ func TestLargePushMessage(t *testing.T) { }, time.Second, 10*time.Millisecond) // change addr on host2 again - lad2 := ma.StringCast("/ip4/127.0.0.1/tcp/1236") + lad2 := tStringCast("/ip4/127.0.0.1/tcp/1236") require.NoError(t, h2.Network().Listen(lad2)) require.Contains(t, h2.Addrs(), lad2) emitAddrChangeEvt(t, h2) @@ -903,7 +908,7 @@ func TestOutOfOrderConnectedNotifs(t *testing.T) { h1, err := libp2p.New(libp2p.NoListenAddrs) require.NoError(t, err) defer h1.Close() - h2, err := libp2p.New(libp2p.ListenAddrs(ma.StringCast("/ip4/127.0.0.1/udp/0/quic-v1"))) + h2, err := libp2p.New(libp2p.ListenAddrs(tStringCast("/ip4/127.0.0.1/udp/0/quic-v1"))) require.NoError(t, err) defer h2.Close() diff --git a/go-libp2p/p2p/protocol/identify/obsaddr.go b/go-libp2p/p2p/protocol/identify/obsaddr.go index 4437c4b..254842a 100644 --- a/go-libp2p/p2p/protocol/identify/obsaddr.go +++ b/go-libp2p/p2p/protocol/identify/obsaddr.go @@ -40,7 +40,7 @@ type thinWaistWithCount struct { func thinWaistForm(a ma.Multiaddr) (thinWaist, error) { i := 0 - tw, rest := ma.SplitFunc(a, func(c ma.Component) bool { + tw, rest, err := ma.SplitFunc(a, func(c ma.Component) bool { if i > 1 { return true } @@ -60,6 +60,9 @@ func thinWaistForm(a ma.Multiaddr) (thinWaist, error) { } return false }) + if err != nil { + return thinWaist{}, err + } if i <= 1 { return thinWaist{}, fmt.Errorf("not a thinwaist address: %s", a) } diff --git a/go-libp2p/p2p/protocol/identify/obsaddr_glass_test.go b/go-libp2p/p2p/protocol/identify/obsaddr_glass_test.go index 31fd4f5..9ded67a 100644 --- a/go-libp2p/p2p/protocol/identify/obsaddr_glass_test.go +++ b/go-libp2p/p2p/protocol/identify/obsaddr_glass_test.go @@ -36,17 +36,17 @@ func (c *mockConn) IsClosed() bool { } func TestShouldRecordObservationWithWebTransport(t *testing.T) { - listenAddr := ma.StringCast("/ip4/0.0.0.0/udp/0/quic-v1/webtransport/certhash/uEgNmb28") - ifaceAddr := ma.StringCast("/ip4/10.0.0.2/udp/9999/quic-v1/webtransport/certhash/uEgNmb28") + listenAddr := tStringCast("/ip4/0.0.0.0/udp/0/quic-v1/webtransport/certhash/uEgNmb28") + ifaceAddr := tStringCast("/ip4/10.0.0.2/udp/9999/quic-v1/webtransport/certhash/uEgNmb28") listenAddrs := func() []ma.Multiaddr { return []ma.Multiaddr{listenAddr} } ifaceListenAddrs := func() ([]ma.Multiaddr, error) { return []ma.Multiaddr{ifaceAddr}, nil } addrs := func() []ma.Multiaddr { return []ma.Multiaddr{listenAddr} } c := &mockConn{ local: listenAddr, - remote: 
ma.StringCast("/ip4/1.2.3.6/udp/1236/quic-v1/webtransport"), + remote: tStringCast("/ip4/1.2.3.6/udp/1236/quic-v1/webtransport"), } - observedAddr := ma.StringCast("/ip4/1.2.3.4/udp/1231/quic-v1/webtransport") + observedAddr := tStringCast("/ip4/1.2.3.4/udp/1231/quic-v1/webtransport") o, err := NewObservedAddrManager(listenAddrs, addrs, ifaceListenAddrs, normalize) require.NoError(t, err) shouldRecord, _, _ := o.shouldRecordObservation(c, observedAddr) @@ -54,10 +54,10 @@ func TestShouldRecordObservationWithWebTransport(t *testing.T) { } func TestShouldRecordObservationWithNAT64Addr(t *testing.T) { - listenAddr1 := ma.StringCast("/ip4/0.0.0.0/tcp/1234") - ifaceAddr1 := ma.StringCast("/ip4/10.0.0.2/tcp/4321") - listenAddr2 := ma.StringCast("/ip6/::/tcp/1234") - ifaceAddr2 := ma.StringCast("/ip6/1::1/tcp/4321") + listenAddr1 := tStringCast("/ip4/0.0.0.0/tcp/1234") + ifaceAddr1 := tStringCast("/ip4/10.0.0.2/tcp/4321") + listenAddr2 := tStringCast("/ip6/::/tcp/1234") + ifaceAddr2 := tStringCast("/ip6/1::1/tcp/4321") var ( listenAddrs = func() []ma.Multiaddr { return []ma.Multiaddr{listenAddr1, listenAddr2} } @@ -66,7 +66,7 @@ func TestShouldRecordObservationWithNAT64Addr(t *testing.T) { ) c := &mockConn{ local: listenAddr1, - remote: ma.StringCast("/ip4/1.2.3.6/tcp/4321"), + remote: tStringCast("/ip4/1.2.3.6/tcp/4321"), } cases := []struct { @@ -75,17 +75,17 @@ func TestShouldRecordObservationWithNAT64Addr(t *testing.T) { failureReason string }{ { - addr: ma.StringCast("/ip4/1.2.3.4/tcp/1234"), + addr: tStringCast("/ip4/1.2.3.4/tcp/1234"), want: true, failureReason: "IPv4 should be observed", }, { - addr: ma.StringCast("/ip6/1::4/tcp/1234"), + addr: tStringCast("/ip6/1::4/tcp/1234"), want: true, failureReason: "public IPv6 address should be observed", }, { - addr: ma.StringCast("/ip6/64:ff9b::192.0.1.2/tcp/1234"), + addr: tStringCast("/ip6/64:ff9b::192.0.1.2/tcp/1234"), want: false, failureReason: "NAT64 IPv6 address shouldn't be observed", }, @@ -138,17 +138,17 @@ func TestThinWaistForm(t *testing.T) { }} for i, tt := range tc { t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { - inputAddr := ma.StringCast(tt.input) + inputAddr := tStringCast(tt.input) tw, err := thinWaistForm(inputAddr) if tt.err { require.Equal(t, tw, thinWaist{}) require.Error(t, err) return } - wantTW := ma.StringCast(tt.tw) + wantTW := tStringCast(tt.tw) var restTW ma.Multiaddr if tt.rest != "" { - restTW = ma.StringCast(tt.rest) + restTW = tStringCast(tt.rest) } require.Equal(t, tw.Addr, inputAddr, "%s %s", tw.Addr, inputAddr) require.Equal(t, wantTW, tw.TW, "%s %s", tw.TW, wantTW) diff --git a/go-libp2p/p2p/protocol/identify/obsaddr_test.go b/go-libp2p/p2p/protocol/identify/obsaddr_test.go index 9c2d8de..de32a6c 100644 --- a/go-libp2p/p2p/protocol/identify/obsaddr_test.go +++ b/go-libp2p/p2p/protocol/identify/obsaddr_test.go @@ -24,7 +24,11 @@ func newConn(local, remote ma.Multiaddr) *mockConn { func normalize(addr ma.Multiaddr) ma.Multiaddr { for { - out, last := ma.SplitLast(addr) + out, last, err := ma.SplitLast(addr) + if err != nil { + return nil + } + if last == nil { return addr } @@ -67,12 +71,12 @@ func addrsEqual(a, b []ma.Multiaddr) bool { } func TestObservedAddrManager(t *testing.T) { - tcp4ListenAddr := ma.StringCast("/ip4/192.168.1.100/tcp/1") - quic4ListenAddr := ma.StringCast("/ip4/0.0.0.0/udp/1/quic-v1") - webTransport4ListenAddr := ma.StringCast("/ip4/0.0.0.0/udp/1/quic-v1/webtransport/certhash/uEgNmb28") - tcp6ListenAddr := ma.StringCast("/ip6/2004::1/tcp/1") - quic6ListenAddr := 
ma.StringCast("/ip6/::/udp/1/quic-v1") - webTransport6ListenAddr := ma.StringCast("/ip6/::/udp/1/quic-v1/webtransport/certhash/uEgNmb28") + tcp4ListenAddr := tStringCast("/ip4/192.168.1.100/tcp/1") + quic4ListenAddr := tStringCast("/ip4/0.0.0.0/udp/1/quic-v1") + webTransport4ListenAddr := tStringCast("/ip4/0.0.0.0/udp/1/quic-v1/webtransport/certhash/uEgNmb28") + tcp6ListenAddr := tStringCast("/ip6/2004::1/tcp/1") + quic6ListenAddr := tStringCast("/ip6/::/udp/1/quic-v1") + webTransport6ListenAddr := tStringCast("/ip6/::/udp/1/quic-v1/webtransport/certhash/uEgNmb28") newObservedAddrMgr := func() *ObservedAddrManager { listenAddrs := []ma.Multiaddr{ tcp4ListenAddr, quic4ListenAddr, webTransport4ListenAddr, tcp6ListenAddr, quic6ListenAddr, webTransport6ListenAddr, @@ -97,11 +101,11 @@ func TestObservedAddrManager(t *testing.T) { t.Run("Single Observation", func(t *testing.T) { o := newObservedAddrMgr() defer o.Close() - observed := ma.StringCast("/ip4/2.2.2.2/tcp/2") - c1 := newConn(tcp4ListenAddr, ma.StringCast("/ip4/1.2.3.1/tcp/1")) - c2 := newConn(tcp4ListenAddr, ma.StringCast("/ip4/1.2.3.2/tcp/1")) - c3 := newConn(tcp4ListenAddr, ma.StringCast("/ip4/1.2.3.3/tcp/1")) - c4 := newConn(tcp4ListenAddr, ma.StringCast("/ip4/1.2.3.4/tcp/1")) + observed := tStringCast("/ip4/2.2.2.2/tcp/2") + c1 := newConn(tcp4ListenAddr, tStringCast("/ip4/1.2.3.1/tcp/1")) + c2 := newConn(tcp4ListenAddr, tStringCast("/ip4/1.2.3.2/tcp/1")) + c3 := newConn(tcp4ListenAddr, tStringCast("/ip4/1.2.3.3/tcp/1")) + c4 := newConn(tcp4ListenAddr, tStringCast("/ip4/1.2.3.4/tcp/1")) o.Record(c1, observed) o.Record(c2, observed) o.Record(c3, observed) @@ -121,12 +125,12 @@ func TestObservedAddrManager(t *testing.T) { t.Run("WebTransport inferred from QUIC", func(t *testing.T) { o := newObservedAddrMgr() defer o.Close() - observedQuic := ma.StringCast("/ip4/2.2.2.2/udp/2/quic-v1") - observedWebTransport := ma.StringCast("/ip4/2.2.2.2/udp/2/quic-v1/webtransport") - c1 := newConn(quic4ListenAddr, ma.StringCast("/ip4/1.2.3.1/udp/1/quic-v1")) - c2 := newConn(quic4ListenAddr, ma.StringCast("/ip4/1.2.3.2/udp/1/quic-v1")) - c3 := newConn(webTransport4ListenAddr, ma.StringCast("/ip4/1.2.3.3/udp/1/quic-v1/webtransport")) - c4 := newConn(webTransport4ListenAddr, ma.StringCast("/ip4/1.2.3.4/udp/1/quic-v1/webtransport")) + observedQuic := tStringCast("/ip4/2.2.2.2/udp/2/quic-v1") + observedWebTransport := tStringCast("/ip4/2.2.2.2/udp/2/quic-v1/webtransport") + c1 := newConn(quic4ListenAddr, tStringCast("/ip4/1.2.3.1/udp/1/quic-v1")) + c2 := newConn(quic4ListenAddr, tStringCast("/ip4/1.2.3.2/udp/1/quic-v1")) + c3 := newConn(webTransport4ListenAddr, tStringCast("/ip4/1.2.3.3/udp/1/quic-v1/webtransport")) + c4 := newConn(webTransport4ListenAddr, tStringCast("/ip4/1.2.3.4/udp/1/quic-v1/webtransport")) o.Record(c1, observedQuic) o.Record(c2, observedQuic) o.Record(c3, observedWebTransport) @@ -147,13 +151,13 @@ func TestObservedAddrManager(t *testing.T) { o := newObservedAddrMgr() defer o.Close() - observedQuic := ma.StringCast("/ip4/2.2.2.2/udp/2/quic-v1") + observedQuic := tStringCast("/ip4/2.2.2.2/udp/2/quic-v1") const N = 4 // ActivationThresh var ob1, ob2 [N]connMultiaddrs for i := 0; i < N; i++ { - ob1[i] = newConn(quic4ListenAddr, ma.StringCast(fmt.Sprintf("/ip4/1.2.3.%d/udp/1/quic-v1", i))) - ob2[i] = newConn(quic4ListenAddr, ma.StringCast(fmt.Sprintf("/ip4/1.2.3.%d/udp/1/quic-v1", i))) + ob1[i] = newConn(quic4ListenAddr, tStringCast(fmt.Sprintf("/ip4/1.2.3.%d/udp/1/quic-v1", i))) + ob2[i] = newConn(quic4ListenAddr, 
tStringCast(fmt.Sprintf("/ip4/1.2.3.%d/udp/1/quic-v1", i))) } for i := 0; i < N-1; i++ { o.Record(ob1[i], observedQuic) @@ -190,14 +194,14 @@ func TestObservedAddrManager(t *testing.T) { o := newObservedAddrMgr() defer o.Close() - observedQuic1 := ma.StringCast("/ip4/2.2.2.2/udp/2/quic-v1") - observedQuic2 := ma.StringCast("/ip4/2.2.2.2/udp/3/quic-v1") + observedQuic1 := tStringCast("/ip4/2.2.2.2/udp/2/quic-v1") + observedQuic2 := tStringCast("/ip4/2.2.2.2/udp/3/quic-v1") const N = 4 // ActivationThresh var ob1, ob2 [N]connMultiaddrs for i := 0; i < N; i++ { - ob1[i] = newConn(quic4ListenAddr, ma.StringCast(fmt.Sprintf("/ip4/1.2.3.%d/udp/1/quic-v1", i))) - ob2[i] = newConn(quic4ListenAddr, ma.StringCast(fmt.Sprintf("/ip4/1.2.3.%d/udp/1/quic-v1", i))) + ob1[i] = newConn(quic4ListenAddr, tStringCast(fmt.Sprintf("/ip4/1.2.3.%d/udp/1/quic-v1", i))) + ob2[i] = newConn(quic4ListenAddr, tStringCast(fmt.Sprintf("/ip4/1.2.3.%d/udp/1/quic-v1", i))) } for i := 0; i < N-1; i++ { o.Record(ob1[i], observedQuic1) @@ -234,15 +238,15 @@ func TestObservedAddrManager(t *testing.T) { t.Run("Old observations discarded", func(t *testing.T) { o := newObservedAddrMgr() defer o.Close() - c1 := newConn(quic4ListenAddr, ma.StringCast("/ip4/1.2.3.1/udp/1/quic-v1")) - c2 := newConn(quic4ListenAddr, ma.StringCast("/ip4/1.2.3.2/udp/1/quic-v1")) - c3 := newConn(webTransport4ListenAddr, ma.StringCast("/ip4/1.2.3.3/udp/1/quic-v1/webtransport")) - c4 := newConn(webTransport4ListenAddr, ma.StringCast("/ip4/1.2.3.4/udp/1/quic-v1/webtransport")) + c1 := newConn(quic4ListenAddr, tStringCast("/ip4/1.2.3.1/udp/1/quic-v1")) + c2 := newConn(quic4ListenAddr, tStringCast("/ip4/1.2.3.2/udp/1/quic-v1")) + c3 := newConn(webTransport4ListenAddr, tStringCast("/ip4/1.2.3.3/udp/1/quic-v1/webtransport")) + c4 := newConn(webTransport4ListenAddr, tStringCast("/ip4/1.2.3.4/udp/1/quic-v1/webtransport")) var observedQuic, observedWebTransport ma.Multiaddr for i := 0; i < 10; i++ { // Change the IP address in each observation - observedQuic = ma.StringCast(fmt.Sprintf("/ip4/2.2.2.%d/udp/2/quic-v1", i)) - observedWebTransport = ma.StringCast(fmt.Sprintf("/ip4/2.2.2.%d/udp/2/quic-v1/webtransport", i)) + observedQuic = tStringCast(fmt.Sprintf("/ip4/2.2.2.%d/udp/2/quic-v1", i)) + observedWebTransport = tStringCast(fmt.Sprintf("/ip4/2.2.2.%d/udp/2/quic-v1/webtransport", i)) o.Record(c1, observedQuic) o.Record(c2, observedQuic) o.Record(c3, observedWebTransport) @@ -276,17 +280,17 @@ func TestObservedAddrManager(t *testing.T) { const N = 100 var tcpConns, quicConns, webTransportConns [N]*mockConn for i := 0; i < N; i++ { - tcpConns[i] = newConn(tcp4ListenAddr, ma.StringCast(fmt.Sprintf("/ip4/1.2.3.%d/tcp/1", i))) - quicConns[i] = newConn(quic4ListenAddr, ma.StringCast(fmt.Sprintf("/ip4/1.2.3.%d/udp/1/quic-v1", i))) - webTransportConns[i] = newConn(webTransport4ListenAddr, ma.StringCast(fmt.Sprintf("/ip4/1.2.3.%d/udp/1/quic-v1/webtransport", i))) + tcpConns[i] = newConn(tcp4ListenAddr, tStringCast(fmt.Sprintf("/ip4/1.2.3.%d/tcp/1", i))) + quicConns[i] = newConn(quic4ListenAddr, tStringCast(fmt.Sprintf("/ip4/1.2.3.%d/udp/1/quic-v1", i))) + webTransportConns[i] = newConn(webTransport4ListenAddr, tStringCast(fmt.Sprintf("/ip4/1.2.3.%d/udp/1/quic-v1/webtransport", i))) } var observedQuic, observedWebTransport, observedTCP ma.Multiaddr for i := 0; i < N; i++ { for j := 0; j < 5; j++ { // ip addr has the form 2.2.. 
- observedQuic = ma.StringCast(fmt.Sprintf("/ip4/2.2.%d.%d/udp/2/quic-v1", i/10, j)) - observedWebTransport = ma.StringCast(fmt.Sprintf("/ip4/2.2.%d.%d/udp/2/quic-v1/webtransport", i/10, j)) - observedTCP = ma.StringCast(fmt.Sprintf("/ip4/2.2.%d.%d/tcp/2", i/10, j)) + observedQuic = tStringCast(fmt.Sprintf("/ip4/2.2.%d.%d/udp/2/quic-v1", i/10, j)) + observedWebTransport = tStringCast(fmt.Sprintf("/ip4/2.2.%d.%d/udp/2/quic-v1/webtransport", i/10, j)) + observedTCP = tStringCast(fmt.Sprintf("/ip4/2.2.%d.%d/tcp/2", i/10, j)) o.Record(tcpConns[i], observedTCP) o.Record(quicConns[i], observedQuic) o.Record(webTransportConns[i], observedWebTransport) @@ -307,9 +311,9 @@ func TestObservedAddrManager(t *testing.T) { // Now we bias a few address counts and check for sorting correctness var resTCPAddrs, resQuicAddrs, resWebTransportAddrs [maxExternalThinWaistAddrsPerLocalAddr]ma.Multiaddr for i := 0; i < maxExternalThinWaistAddrsPerLocalAddr; i++ { - resTCPAddrs[i] = ma.StringCast(fmt.Sprintf("/ip4/2.2.%d.4/tcp/2", 9-i)) - resQuicAddrs[i] = ma.StringCast(fmt.Sprintf("/ip4/2.2.%d.4/udp/2/quic-v1", 9-i)) - resWebTransportAddrs[i] = ma.StringCast(fmt.Sprintf("/ip4/2.2.%d.4/udp/2/quic-v1/webtransport", 9-i)) + resTCPAddrs[i] = tStringCast(fmt.Sprintf("/ip4/2.2.%d.4/tcp/2", 9-i)) + resQuicAddrs[i] = tStringCast(fmt.Sprintf("/ip4/2.2.%d.4/udp/2/quic-v1", 9-i)) + resWebTransportAddrs[i] = tStringCast(fmt.Sprintf("/ip4/2.2.%d.4/udp/2/quic-v1/webtransport", 9-i)) o.Record(tcpConns[i], resTCPAddrs[i]) o.Record(quicConns[i], resQuicAddrs[i]) o.Record(webTransportConns[i], resWebTransportAddrs[i]) @@ -335,11 +339,11 @@ func TestObservedAddrManager(t *testing.T) { t.Run("WebTransport certhash", func(t *testing.T) { o := newObservedAddrMgr() - observedWebTransport := ma.StringCast("/ip4/2.2.2.2/udp/1/quic-v1/webtransport") - c1 := newConn(webTransport4ListenAddr, ma.StringCast("/ip4/1.2.3.1/udp/1/quic-v1/webtransport")) - c2 := newConn(webTransport4ListenAddr, ma.StringCast("/ip4/1.2.3.2/udp/1/quic-v1/webtransport")) - c3 := newConn(webTransport4ListenAddr, ma.StringCast("/ip4/1.2.3.3/udp/1/quic-v1/webtransport")) - c4 := newConn(webTransport4ListenAddr, ma.StringCast("/ip4/1.2.3.4/udp/1/quic-v1/webtransport")) + observedWebTransport := tStringCast("/ip4/2.2.2.2/udp/1/quic-v1/webtransport") + c1 := newConn(webTransport4ListenAddr, tStringCast("/ip4/1.2.3.1/udp/1/quic-v1/webtransport")) + c2 := newConn(webTransport4ListenAddr, tStringCast("/ip4/1.2.3.2/udp/1/quic-v1/webtransport")) + c3 := newConn(webTransport4ListenAddr, tStringCast("/ip4/1.2.3.3/udp/1/quic-v1/webtransport")) + c4 := newConn(webTransport4ListenAddr, tStringCast("/ip4/1.2.3.4/udp/1/quic-v1/webtransport")) o.Record(c1, observedWebTransport) o.Record(c2, observedWebTransport) o.Record(c3, observedWebTransport) @@ -360,10 +364,10 @@ func TestObservedAddrManager(t *testing.T) { o := newObservedAddrMgr() defer o.Close() - observedWebTransport := ma.StringCast("/ip4/2.2.2.2/udp/1/quic-v1/webtransport") + observedWebTransport := tStringCast("/ip4/2.2.2.2/udp/1/quic-v1/webtransport") var udpConns [5 * maxExternalThinWaistAddrsPerLocalAddr]connMultiaddrs for i := 0; i < len(udpConns); i++ { - udpConns[i] = newConn(webTransport4ListenAddr, ma.StringCast(fmt.Sprintf("/ip4/1.2.3.%d/udp/1/quic-v1/webtransport", i))) + udpConns[i] = newConn(webTransport4ListenAddr, tStringCast(fmt.Sprintf("/ip4/1.2.3.%d/udp/1/quic-v1/webtransport", i))) o.Record(udpConns[i], observedWebTransport) time.Sleep(10 * time.Millisecond) } @@ -381,14 +385,14 @@ func 
TestObservedAddrManager(t *testing.T) { const N = 100 var tcpConns, quicConns [N]*mockConn for i := 0; i < N; i++ { - tcpConns[i] = newConn(tcp4ListenAddr, ma.StringCast(fmt.Sprintf("/ip4/1.2.3.%d/tcp/1", i))) - quicConns[i] = newConn(quic4ListenAddr, ma.StringCast(fmt.Sprintf("/ip4/1.2.3.%d/udp/1/quic-v1", i))) + tcpConns[i] = newConn(tcp4ListenAddr, tStringCast(fmt.Sprintf("/ip4/1.2.3.%d/tcp/1", i))) + quicConns[i] = newConn(quic4ListenAddr, tStringCast(fmt.Sprintf("/ip4/1.2.3.%d/udp/1/quic-v1", i))) } var observedQuic, observedTCP ma.Multiaddr for i := 0; i < N; i++ { // ip addr has the form 2.2..2 - observedQuic = ma.StringCast(fmt.Sprintf("/ip4/2.2.%d.2/udp/2/quic-v1", i%20)) - observedTCP = ma.StringCast(fmt.Sprintf("/ip4/2.2.%d.2/tcp/2", i%20)) + observedQuic = tStringCast(fmt.Sprintf("/ip4/2.2.%d.2/udp/2/quic-v1", i%20)) + observedTCP = tStringCast(fmt.Sprintf("/ip4/2.2.%d.2/tcp/2", i%20)) o.Record(tcpConns[i], observedTCP) o.Record(quicConns[i], observedQuic) time.Sleep(10 * time.Millisecond) @@ -415,7 +419,7 @@ func TestObservedAddrManager(t *testing.T) { o := newObservedAddrMgr() defer o.Close() o.maybeRecordObservation(nil, nil) - remoteAddr := ma.StringCast("/ip4/1.2.3.4/tcp/1") + remoteAddr := tStringCast("/ip4/1.2.3.4/tcp/1") o.maybeRecordObservation(newConn(tcp4ListenAddr, remoteAddr), nil) o.maybeRecordObservation(nil, remoteAddr) o.AddrsFor(nil) @@ -442,10 +446,10 @@ func TestObservedAddrManager(t *testing.T) { sub, err := bus.Subscribe(new(event.EvtNATDeviceTypeChanged)) require.NoError(t, err) - observedWebTransport := ma.StringCast("/ip4/2.2.2.2/udp/1/quic-v1/webtransport") + observedWebTransport := tStringCast("/ip4/2.2.2.2/udp/1/quic-v1/webtransport") var udpConns [5 * maxExternalThinWaistAddrsPerLocalAddr]connMultiaddrs for i := 0; i < len(udpConns); i++ { - udpConns[i] = newConn(webTransport4ListenAddr, ma.StringCast(fmt.Sprintf("/ip4/1.2.3.%d/udp/1/quic-v1/webtransport", i))) + udpConns[i] = newConn(webTransport4ListenAddr, tStringCast(fmt.Sprintf("/ip4/1.2.3.%d/udp/1/quic-v1/webtransport", i))) o.Record(udpConns[i], observedWebTransport) time.Sleep(10 * time.Millisecond) } @@ -470,27 +474,27 @@ func TestObservedAddrManager(t *testing.T) { var tcp4Conns, quic4Conns, webTransport4Conns [N]*mockConn var tcp6Conns, quic6Conns, webTransport6Conns [N]*mockConn for i := 0; i < N; i++ { - tcp4Conns[i] = newConn(tcp4ListenAddr, ma.StringCast(fmt.Sprintf("/ip4/1.2.3.%d/tcp/1", i))) - quic4Conns[i] = newConn(quic4ListenAddr, ma.StringCast(fmt.Sprintf("/ip4/1.2.3.%d/udp/1/quic-v1", i))) - webTransport4Conns[i] = newConn(webTransport4ListenAddr, ma.StringCast(fmt.Sprintf("/ip4/1.2.3.%d/udp/1/quic-v1/webtransport", i))) + tcp4Conns[i] = newConn(tcp4ListenAddr, tStringCast(fmt.Sprintf("/ip4/1.2.3.%d/tcp/1", i))) + quic4Conns[i] = newConn(quic4ListenAddr, tStringCast(fmt.Sprintf("/ip4/1.2.3.%d/udp/1/quic-v1", i))) + webTransport4Conns[i] = newConn(webTransport4ListenAddr, tStringCast(fmt.Sprintf("/ip4/1.2.3.%d/udp/1/quic-v1/webtransport", i))) - tcp6Conns[i] = newConn(tcp6ListenAddr, ma.StringCast(fmt.Sprintf("/ip6/20%02x::/tcp/1", i))) - quic6Conns[i] = newConn(quic6ListenAddr, ma.StringCast(fmt.Sprintf("/ip6/20%02x::/udp/1/quic-v1", i))) - webTransport6Conns[i] = newConn(webTransport6ListenAddr, ma.StringCast(fmt.Sprintf("/ip6/20%02x::/udp/1/quic-v1/webtransport", i))) + tcp6Conns[i] = newConn(tcp6ListenAddr, tStringCast(fmt.Sprintf("/ip6/20%02x::/tcp/1", i))) + quic6Conns[i] = newConn(quic6ListenAddr, tStringCast(fmt.Sprintf("/ip6/20%02x::/udp/1/quic-v1", i))) + 
webTransport6Conns[i] = newConn(webTransport6ListenAddr, tStringCast(fmt.Sprintf("/ip6/20%02x::/udp/1/quic-v1/webtransport", i))) } var observedQUIC4, observedWebTransport4, observedTCP4 ma.Multiaddr var observedQUIC6, observedWebTransport6, observedTCP6 ma.Multiaddr for i := 0; i < N; i++ { for j := 0; j < 5; j++ { // ip addr has the form 2.2.. - observedQUIC4 = ma.StringCast(fmt.Sprintf("/ip4/2.2.%d.%d/udp/2/quic-v1", i/10, j)) - observedWebTransport4 = ma.StringCast(fmt.Sprintf("/ip4/2.2.%d.%d/udp/2/quic-v1/webtransport", i/10, j)) - observedTCP4 = ma.StringCast(fmt.Sprintf("/ip4/2.2.%d.%d/tcp/2", i/10, j)) + observedQUIC4 = tStringCast(fmt.Sprintf("/ip4/2.2.%d.%d/udp/2/quic-v1", i/10, j)) + observedWebTransport4 = tStringCast(fmt.Sprintf("/ip4/2.2.%d.%d/udp/2/quic-v1/webtransport", i/10, j)) + observedTCP4 = tStringCast(fmt.Sprintf("/ip4/2.2.%d.%d/tcp/2", i/10, j)) // ip addr has the form 20XX::YY - observedQUIC6 = ma.StringCast(fmt.Sprintf("/ip6/20%02x::%02x/udp/2/quic-v1", i/10, j)) - observedWebTransport6 = ma.StringCast(fmt.Sprintf("/ip6/20%02x::%02x/udp/2/quic-v1/webtransport", i/10, j)) - observedTCP6 = ma.StringCast(fmt.Sprintf("/ip6/20%02x::%02x/tcp/2", i/10, j)) + observedQUIC6 = tStringCast(fmt.Sprintf("/ip6/20%02x::%02x/udp/2/quic-v1", i/10, j)) + observedWebTransport6 = tStringCast(fmt.Sprintf("/ip6/20%02x::%02x/udp/2/quic-v1/webtransport", i/10, j)) + observedTCP6 = tStringCast(fmt.Sprintf("/ip6/20%02x::%02x/tcp/2", i/10, j)) o.maybeRecordObservation(tcp4Conns[i], observedTCP4) o.maybeRecordObservation(quic4Conns[i], observedQUIC4) @@ -516,18 +520,18 @@ func TestObservedAddrManager(t *testing.T) { var resTCPAddrs, resQuicAddrs, resWebTransportAddrs []ma.Multiaddr for i, idx := 0, 0; i < maxExternalThinWaistAddrsPerLocalAddr; i++ { - resTCPAddrs = append(resTCPAddrs, ma.StringCast(fmt.Sprintf("/ip4/2.2.%d.4/tcp/2", 9-i))) - resQuicAddrs = append(resQuicAddrs, ma.StringCast(fmt.Sprintf("/ip4/2.2.%d.4/udp/2/quic-v1", 9-i))) - resWebTransportAddrs = append(resWebTransportAddrs, ma.StringCast(fmt.Sprintf("/ip4/2.2.%d.4/udp/2/quic-v1/webtransport", 9-i))) + resTCPAddrs = append(resTCPAddrs, tStringCast(fmt.Sprintf("/ip4/2.2.%d.4/tcp/2", 9-i))) + resQuicAddrs = append(resQuicAddrs, tStringCast(fmt.Sprintf("/ip4/2.2.%d.4/udp/2/quic-v1", 9-i))) + resWebTransportAddrs = append(resWebTransportAddrs, tStringCast(fmt.Sprintf("/ip4/2.2.%d.4/udp/2/quic-v1/webtransport", 9-i))) o.maybeRecordObservation(tcp4Conns[i], resTCPAddrs[idx]) o.maybeRecordObservation(quic4Conns[i], resQuicAddrs[idx]) o.maybeRecordObservation(webTransport4Conns[i], resWebTransportAddrs[idx]) idx++ - resTCPAddrs = append(resTCPAddrs, ma.StringCast(fmt.Sprintf("/ip6/20%02x::04/tcp/2", 9-i))) - resQuicAddrs = append(resQuicAddrs, ma.StringCast(fmt.Sprintf("/ip6/20%02x::04/udp/2/quic-v1", 9-i))) - resWebTransportAddrs = append(resWebTransportAddrs, ma.StringCast(fmt.Sprintf("/ip6/20%02x::04/udp/2/quic-v1/webtransport", 9-i))) + resTCPAddrs = append(resTCPAddrs, tStringCast(fmt.Sprintf("/ip6/20%02x::04/tcp/2", 9-i))) + resQuicAddrs = append(resQuicAddrs, tStringCast(fmt.Sprintf("/ip6/20%02x::04/udp/2/quic-v1", 9-i))) + resWebTransportAddrs = append(resWebTransportAddrs, tStringCast(fmt.Sprintf("/ip6/20%02x::04/udp/2/quic-v1/webtransport", 9-i))) o.maybeRecordObservation(tcp6Conns[i], resTCPAddrs[idx]) o.maybeRecordObservation(quic6Conns[i], resQuicAddrs[idx]) o.maybeRecordObservation(webTransport6Conns[i], resWebTransportAddrs[idx]) @@ -574,12 +578,12 @@ func FuzzObservedAddrManager(f *testing.F) { "/quic-v1", 
"/quic-v1/webtransport", } - tcp4 := ma.StringCast("/ip4/192.168.1.100/tcp/1") - quic4 := ma.StringCast("/ip4/0.0.0.0/udp/1/quic-v1") - wt4 := ma.StringCast("/ip4/0.0.0.0/udp/1/quic-v1/webtransport/certhash/uEgNmb28") - tcp6 := ma.StringCast("/ip6/1::1/tcp/1") - quic6 := ma.StringCast("/ip6/::/udp/1/quic-v1") - wt6 := ma.StringCast("/ip6/::/udp/1/quic-v1/webtransport/certhash/uEgNmb28") + tcp4 := tStringCast("/ip4/192.168.1.100/tcp/1") + quic4 := tStringCast("/ip4/0.0.0.0/udp/1/quic-v1") + wt4 := tStringCast("/ip4/0.0.0.0/udp/1/quic-v1/webtransport/certhash/uEgNmb28") + tcp6 := tStringCast("/ip6/1::1/tcp/1") + quic6 := tStringCast("/ip6/::/udp/1/quic-v1") + wt6 := tStringCast("/ip6/::/udp/1/quic-v1/webtransport/certhash/uEgNmb28") newObservedAddrMgr := func() *ObservedAddrManager { listenAddrs := []ma.Multiaddr{ tcp4, quic4, wt4, tcp6, quic6, wt6, @@ -602,15 +606,15 @@ func FuzzObservedAddrManager(f *testing.F) { addrs := []ma.Multiaddr{genIPMultiaddr(true), genIPMultiaddr(false)} n := len(addrs) for i := 0; i < n; i++ { - addrs = append(addrs, addrs[i].Encapsulate(ma.StringCast(fmt.Sprintf("/tcp/%d", port)))) - addrs = append(addrs, addrs[i].Encapsulate(ma.StringCast(fmt.Sprintf("/udp/%d", port)))) - addrs = append(addrs, ma.StringCast(fmt.Sprintf("/tcp/%d", port))) - addrs = append(addrs, ma.StringCast(fmt.Sprintf("/udp/%d", port))) + addrs = append(addrs, addrs[i].Encapsulate(tStringCast(fmt.Sprintf("/tcp/%d", port)))) + addrs = append(addrs, addrs[i].Encapsulate(tStringCast(fmt.Sprintf("/udp/%d", port)))) + addrs = append(addrs, tStringCast(fmt.Sprintf("/tcp/%d", port))) + addrs = append(addrs, tStringCast(fmt.Sprintf("/udp/%d", port))) } n = len(addrs) for i := 0; i < n; i++ { for j := 0; j < len(protos); j++ { - protoAddr := ma.StringCast(protos[j]) + protoAddr := tStringCast(protos[j]) addrs = append(addrs, addrs[i].Encapsulate(protoAddr)) addrs = append(addrs, protoAddr) } @@ -635,19 +639,19 @@ func TestObserver(t *testing.T) { want string }{ { - addr: ma.StringCast("/ip4/1.2.3.4/tcp/1"), + addr: tStringCast("/ip4/1.2.3.4/tcp/1"), want: "1.2.3.4", }, { - addr: ma.StringCast("/ip4/192.168.0.1/tcp/1"), + addr: tStringCast("/ip4/192.168.0.1/tcp/1"), want: "192.168.0.1", }, { - addr: ma.StringCast("/ip6/200::1/udp/1/quic-v1"), + addr: tStringCast("/ip6/200::1/udp/1/quic-v1"), want: "200::", }, { - addr: ma.StringCast("/ip6/::1/udp/1/quic-v1"), + addr: tStringCast("/ip6/::1/udp/1/quic-v1"), want: "::", }, } diff --git a/go-libp2p/p2p/protocol/identify/snapshot_test.go b/go-libp2p/p2p/protocol/identify/snapshot_test.go index 55354a4..55cc559 100644 --- a/go-libp2p/p2p/protocol/identify/snapshot_test.go +++ b/go-libp2p/p2p/protocol/identify/snapshot_test.go @@ -13,8 +13,8 @@ import ( ) func TestSnapshotEquality(t *testing.T) { - addr1 := ma.StringCast("/ip4/127.0.0.1/tcp/1234") - addr2 := ma.StringCast("/ip4/127.0.0.1/udp/1234/quic-v1") + addr1 := tStringCast("/ip4/127.0.0.1/tcp/1234") + addr2 := tStringCast("/ip4/127.0.0.1/udp/1234/quic-v1") _, pubKey1, err := crypto.GenerateEd25519Key(rand.Reader) require.NoError(t, err) diff --git a/go-libp2p/p2p/test/basichost/basic_host_test.go b/go-libp2p/p2p/test/basichost/basic_host_test.go index 9cd442d..2f5a243 100644 --- a/go-libp2p/p2p/test/basichost/basic_host_test.go +++ b/go-libp2p/p2p/test/basichost/basic_host_test.go @@ -20,6 +20,11 @@ import ( "github.com/stretchr/testify/require" ) +func tStringCast(str string) ma.Multiaddr { + m, _ := ma.StringCast(str) + return m +} + func TestNoStreamOverTransientConnection(t *testing.T) { h1, err := 
libp2p.New( libp2p.NoListenAddrs, @@ -61,7 +66,7 @@ func TestNoStreamOverTransientConnection(t *testing.T) { _, err = client.Reserve(context.Background(), h2, relay1info) require.NoError(t, err) - relayaddr := ma.StringCast("/p2p/" + relay1info.ID.String() + "/p2p-circuit/p2p/" + h2.ID().String()) + relayaddr := tStringCast("/p2p/" + relay1info.ID.String() + "/p2p-circuit/p2p/" + h2.ID().String()) h2Info := peer.AddrInfo{ ID: h2.ID(), @@ -122,7 +127,7 @@ func TestNewStreamTransientConnection(t *testing.T) { _, err = client.Reserve(context.Background(), h2, relay1info) require.NoError(t, err) - relayaddr := ma.StringCast("/p2p/" + relay1info.ID.String() + "/p2p-circuit/p2p/" + h2.ID().String()) + relayaddr := tStringCast("/p2p/" + relay1info.ID.String() + "/p2p-circuit/p2p/" + h2.ID().String()) h1.Peerstore().AddAddr(h2.ID(), relayaddr, peerstore.TempAddrTTL) @@ -167,8 +172,8 @@ func TestAddrFactorCertHashAppend(t *testing.T) { webrtcAddr := "/ip4/1.2.3.4/udp/2/webrtc-direct" addrsFactory := func(addrs []ma.Multiaddr) []ma.Multiaddr { return append(addrs, - ma.StringCast(wtAddr), - ma.StringCast(webrtcAddr), + tStringCast(wtAddr), + tStringCast(webrtcAddr), ) } h, err := libp2p.New( diff --git a/go-libp2p/p2p/test/notifications/notification_test.go b/go-libp2p/p2p/test/notifications/notification_test.go index eb26a3f..a9d4675 100644 --- a/go-libp2p/p2p/test/notifications/notification_test.go +++ b/go-libp2p/p2p/test/notifications/notification_test.go @@ -14,6 +14,11 @@ import ( "github.com/stretchr/testify/require" ) +func tStringCast(str string) ma.Multiaddr { + m, _ := ma.StringCast(str) + return m +} + func portFromString(t *testing.T, s string) int { t.Helper() p, err := strconv.ParseInt(s, 10, 32) @@ -54,7 +59,7 @@ func TestListenAddressNotif(t *testing.T) { require.Equal(t, []ma.Multiaddr{initialAddr}, listenAddrs) // now start listening on another address - require.NoError(t, h.Network().Listen(ma.StringCast("/ip4/127.0.0.1/udp/0/quic-v1"))) + require.NoError(t, h.Network().Listen(tStringCast("/ip4/127.0.0.1/udp/0/quic-v1"))) var addedAddr ma.Multiaddr select { case e := <-sub.Out(): diff --git a/go-libp2p/p2p/test/swarm/swarm_test.go b/go-libp2p/p2p/test/swarm/swarm_test.go index 10298f5..d92420a 100644 --- a/go-libp2p/p2p/test/swarm/swarm_test.go +++ b/go-libp2p/p2p/test/swarm/swarm_test.go @@ -19,6 +19,11 @@ import ( "github.com/stretchr/testify/require" ) +func tStringCast(str string) ma.Multiaddr { + m, _ := ma.StringCast(str) + return m +} + func TestDialPeerTransientConnection(t *testing.T) { h1, err := libp2p.New( libp2p.NoListenAddrs, @@ -51,7 +56,7 @@ func TestDialPeerTransientConnection(t *testing.T) { _, err = client.Reserve(context.Background(), h2, relay1info) require.NoError(t, err) - relayaddr := ma.StringCast("/p2p/" + relay1info.ID.String() + "/p2p-circuit/p2p/" + h2.ID().String()) + relayaddr := tStringCast("/p2p/" + relay1info.ID.String() + "/p2p-circuit/p2p/" + h2.ID().String()) h1.Peerstore().AddAddr(h2.ID(), relayaddr, peerstore.TempAddrTTL) @@ -106,7 +111,7 @@ func TestNewStreamTransientConnection(t *testing.T) { _, err = client.Reserve(context.Background(), h2, relay1info) require.NoError(t, err) - relayaddr := ma.StringCast("/p2p/" + relay1info.ID.String() + "/p2p-circuit/p2p/" + h2.ID().String()) + relayaddr := tStringCast("/p2p/" + relay1info.ID.String() + "/p2p-circuit/p2p/" + h2.ID().String()) h1.Peerstore().AddAddr(h2.ID(), relayaddr, peerstore.TempAddrTTL) diff --git a/go-libp2p/p2p/test/transport/gating_test.go 
b/go-libp2p/p2p/test/transport/gating_test.go index df53da6..3881c1e 100644 --- a/go-libp2p/p2p/test/transport/gating_test.go +++ b/go-libp2p/p2p/test/transport/gating_test.go @@ -25,7 +25,7 @@ func stripCertHash(addr ma.Multiaddr) ma.Multiaddr { if _, err := addr.ValueForProtocol(ma.P_CERTHASH); err != nil { break } - addr, _ = ma.SplitLast(addr) + addr, _, _ = ma.SplitLast(addr) } return addr } diff --git a/go-libp2p/p2p/test/webtransport/webtransport_test.go b/go-libp2p/p2p/test/webtransport/webtransport_test.go index e9c612b..089e678 100644 --- a/go-libp2p/p2p/test/webtransport/webtransport_test.go +++ b/go-libp2p/p2p/test/webtransport/webtransport_test.go @@ -15,7 +15,10 @@ import ( func extractCertHashes(addr ma.Multiaddr) []string { var certHashesStr []string - ma.ForEach(addr, func(c ma.Component) bool { + ma.ForEach(addr, func(c ma.Component, e error) bool { + if e != nil { + return false + } if c.Protocol().Code == ma.P_CERTHASH { certHashesStr = append(certHashesStr, c.Value()) } @@ -24,6 +27,11 @@ func extractCertHashes(addr ma.Multiaddr) []string { return certHashesStr } +func tStringCast(str string) ma.Multiaddr { + m, _ := ma.StringCast(str) + return m +} + func TestDeterministicCertsAfterReboot(t *testing.T) { priv, _, err := test.RandTestKeyPair(ic.Ed25519, 256) require.NoError(t, err) @@ -33,7 +41,7 @@ func TestDeterministicCertsAfterReboot(t *testing.T) { cl.Add(time.Hour * 24 * 365) h, err := libp2p.New(libp2p.NoTransports, libp2p.Transport(libp2pwebtransport.New, libp2pwebtransport.WithClock(cl)), libp2p.Identity(priv)) require.NoError(t, err) - err = h.Network().Listen(ma.StringCast("/ip4/127.0.0.1/udp/0/quic-v1/webtransport")) + err = h.Network().Listen(tStringCast("/ip4/127.0.0.1/udp/0/quic-v1/webtransport")) require.NoError(t, err) prevCerthashes := extractCertHashes(h.Addrs()[0]) @@ -42,7 +50,7 @@ func TestDeterministicCertsAfterReboot(t *testing.T) { h, err = libp2p.New(libp2p.NoTransports, libp2p.Transport(libp2pwebtransport.New, libp2pwebtransport.WithClock(cl)), libp2p.Identity(priv)) require.NoError(t, err) defer h.Close() - err = h.Network().Listen(ma.StringCast("/ip4/127.0.0.1/udp/0/quic-v1/webtransport")) + err = h.Network().Listen(tStringCast("/ip4/127.0.0.1/udp/0/quic-v1/webtransport")) require.NoError(t, err) nextCertHashes := extractCertHashes(h.Addrs()[0]) diff --git a/go-libp2p/p2p/transport/quic/cmd/lib/lib_test.go b/go-libp2p/p2p/transport/quic/cmd/lib/lib_test.go index 1058b07..8ce8ce8 100644 --- a/go-libp2p/p2p/transport/quic/cmd/lib/lib_test.go +++ b/go-libp2p/p2p/transport/quic/cmd/lib/lib_test.go @@ -13,7 +13,7 @@ func TestCmd(t *testing.T) { l := <-serverLocation - ip, rest := multiaddr.SplitFirst(l.Addrs[0]) + ip, rest, _ := multiaddr.SplitFirst(l.Addrs[0]) if ip.Protocol().Code == multiaddr.P_IP4 && ip.Value() == "0.0.0.0" { // Windows can't dial to 0.0.0.0 so replace with localhost var err error diff --git a/go-libp2p/p2p/transport/quic/conn_test.go b/go-libp2p/p2p/transport/quic/conn_test.go index d3e27a7..0124021 100644 --- a/go-libp2p/p2p/transport/quic/conn_test.go +++ b/go-libp2p/p2p/transport/quic/conn_test.go @@ -39,6 +39,11 @@ var connTestCases = []*connTestCase{ {"reuseport_off", []quicreuse.Option{quicreuse.DisableReuseport()}}, } +func tStringCast(str string) ma.Multiaddr { + m, _ := ma.StringCast(str) + return m +} + func createPeer(t *testing.T) (peer.ID, ic.PrivKey) { var priv ic.PrivKey var err error @@ -62,7 +67,7 @@ func createPeer(t *testing.T) (peer.ID, ic.PrivKey) { func runServer(t *testing.T, tr tpt.Transport, addr 
string) tpt.Listener { t.Helper() - ln, err := tr.Listen(ma.StringCast(addr)) + ln, err := tr.Listen(tStringCast(addr)) require.NoError(t, err) return ln } @@ -143,7 +148,7 @@ func testResourceManagerSuccess(t *testing.T, tc *connTestCase) { serverTransport, err := NewTransport(serverKey, newConnManager(t, tc.Options...), nil, nil, serverRcmgr) require.NoError(t, err) defer serverTransport.(io.Closer).Close() - ln, err := serverTransport.Listen(ma.StringCast("/ip4/127.0.0.1/udp/0/quic-v1")) + ln, err := serverTransport.Listen(tStringCast("/ip4/127.0.0.1/udp/0/quic-v1")) require.NoError(t, err) defer ln.Close() @@ -194,7 +199,7 @@ func testResourceManagerDialDenied(t *testing.T, tc *connTestCase) { defer clientTransport.(io.Closer).Close() connScope := mocknetwork.NewMockConnManagementScope(ctrl) - target := ma.StringCast("/ip4/127.0.0.1/udp/1234/quic-v1") + target := tStringCast("/ip4/127.0.0.1/udp/1234/quic-v1") rcmgr.EXPECT().OpenConnection(network.DirOutbound, false, target).Return(connScope, nil) rerr := errors.New("nope") @@ -237,7 +242,7 @@ func testResourceManagerAcceptDenied(t *testing.T, tc *connTestCase) { serverTransport, err := NewTransport(serverKey, newConnManager(t, tc.Options...), nil, nil, serverRcmgr) require.NoError(t, err) defer serverTransport.(io.Closer).Close() - ln, err := serverTransport.Listen(ma.StringCast("/ip4/127.0.0.1/udp/0/quic-v1")) + ln, err := serverTransport.Listen(tStringCast("/ip4/127.0.0.1/udp/0/quic-v1")) require.NoError(t, err) defer ln.Close() connChan := make(chan tpt.CapableConn) @@ -573,7 +578,7 @@ func testStatelessReset(t *testing.T, tc *connTestCase) { proxy.Close() // Start another listener (on a different port). - ln, err = serverTransport.Listen(ma.StringCast("/ip4/127.0.0.1/udp/0/quic-v1")) + ln, err = serverTransport.Listen(tStringCast("/ip4/127.0.0.1/udp/0/quic-v1")) require.NoError(t, err) defer ln.Close() // Now that the new server is up, re-enable packet forwarding. 
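The hunks above all apply one mechanical rewrite: in the forked go-multiaddr, `ma.StringCast` no longer panics on malformed input and instead returns `(ma.Multiaddr, error)`, so each test package gains a small `tStringCast` wrapper that discards the error. That is defensible only because every input is a hard-coded literal known to parse. A minimal sketch of the pattern, assuming the forked signature (the package name is hypothetical, for illustration only):

```go
package quictest // hypothetical package name, for illustration only

import (
	ma "github.com/multiformats/go-multiaddr"
)

// tStringCast mirrors the helper this patch adds to each test package.
// Dropping the error is acceptable here solely because callers pass
// compile-time constant multiaddrs that are known to be valid.
func tStringCast(str string) ma.Multiaddr {
	m, _ := ma.StringCast(str)
	return m
}
```

Non-test code takes the opposite approach and propagates the error, as the `quic_multiaddr.go` and websocket hunks below show.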
diff --git a/go-libp2p/p2p/transport/quic/listener_test.go b/go-libp2p/p2p/transport/quic/listener_test.go index d739c82..6454e95 100644 --- a/go-libp2p/p2p/transport/quic/listener_test.go +++ b/go-libp2p/p2p/transport/quic/listener_test.go @@ -34,7 +34,7 @@ func TestListenAddr(t *testing.T) { defer tr.(io.Closer).Close() t.Run("for IPv4", func(t *testing.T) { - localAddrV1 := ma.StringCast("/ip4/127.0.0.1/udp/0/quic-v1") + localAddrV1 := tStringCast("/ip4/127.0.0.1/udp/0/quic-v1") ln, err := tr.Listen(localAddrV1) require.NoError(t, err) defer ln.Close() @@ -49,7 +49,7 @@ func TestListenAddr(t *testing.T) { }) t.Run("for IPv6", func(t *testing.T) { - localAddrV1 := ma.StringCast("/ip6/::/udp/0/quic-v1") + localAddrV1 := tStringCast("/ip6/::/udp/0/quic-v1") ln, err := tr.Listen(localAddrV1) require.NoError(t, err) defer ln.Close() @@ -66,7 +66,7 @@ func TestListenAddr(t *testing.T) { func TestAccepting(t *testing.T) { tr := newTransport(t, nil) defer tr.(io.Closer).Close() - ln, err := tr.Listen(ma.StringCast("/ip4/127.0.0.1/udp/0/quic-v1")) + ln, err := tr.Listen(tStringCast("/ip4/127.0.0.1/udp/0/quic-v1")) require.NoError(t, err) done := make(chan struct{}) go func() { @@ -90,7 +90,7 @@ func TestAccepting(t *testing.T) { func TestAcceptAfterClose(t *testing.T) { tr := newTransport(t, nil) defer tr.(io.Closer).Close() - ln, err := tr.Listen(ma.StringCast("/ip4/127.0.0.1/udp/0/quic-v1")) + ln, err := tr.Listen(tStringCast("/ip4/127.0.0.1/udp/0/quic-v1")) require.NoError(t, err) require.NoError(t, ln.Close()) _, err = ln.Accept() @@ -102,7 +102,7 @@ func TestCorrectNumberOfVirtualListeners(t *testing.T) { tpt := tr.(*transport) defer tr.(io.Closer).Close() - localAddrV1 := ma.StringCast("/ip4/127.0.0.1/udp/0/quic-v1") + localAddrV1 := tStringCast("/ip4/127.0.0.1/udp/0/quic-v1") ln, err := tr.Listen(localAddrV1) require.NoError(t, err) udpAddr, _, err := quicreuse.FromQuicMultiaddr(localAddrV1) diff --git a/go-libp2p/p2p/transport/quicreuse/connmgr_test.go b/go-libp2p/p2p/transport/quicreuse/connmgr_test.go index f3576a3..918999d 100644 --- a/go-libp2p/p2p/transport/quicreuse/connmgr_test.go +++ b/go-libp2p/p2p/transport/quicreuse/connmgr_test.go @@ -20,6 +20,11 @@ import ( "github.com/stretchr/testify/require" ) +func tStringCast(str string) ma.Multiaddr { + m, _ := ma.StringCast(str) + return m +} + func checkClosed(t *testing.T, cm *ConnManager) { for _, r := range []*reuse{cm.reuseUDP4, cm.reuseUDP6} { if r == nil { @@ -63,16 +68,16 @@ func testListenOnSameProto(t *testing.T, enableReuseport bool) { var tlsConf tls.Config tlsConf.NextProtos = []string{alpn} - ln1, err := cm.ListenQUIC(ma.StringCast("/ip4/127.0.0.1/udp/0/quic-v1"), &tls.Config{NextProtos: []string{alpn}}, nil) + ln1, err := cm.ListenQUIC(tStringCast("/ip4/127.0.0.1/udp/0/quic-v1"), &tls.Config{NextProtos: []string{alpn}}, nil) require.NoError(t, err) defer ln1.Close() - addr := ma.StringCast(fmt.Sprintf("/ip4/127.0.0.1/udp/%d/quic-v1", ln1.Addr().(*net.UDPAddr).Port)) + addr := tStringCast(fmt.Sprintf("/ip4/127.0.0.1/udp/%d/quic-v1", ln1.Addr().(*net.UDPAddr).Port)) _, err = cm.ListenQUIC(addr, &tls.Config{NextProtos: []string{alpn}}, nil) require.EqualError(t, err, "already listening for protocol "+alpn) // listening on a different address works - ln2, err := cm.ListenQUIC(ma.StringCast("/ip4/127.0.0.1/udp/0/quic-v1"), &tls.Config{NextProtos: []string{alpn}}, nil) + ln2, err := cm.ListenQUIC(tStringCast("/ip4/127.0.0.1/udp/0/quic-v1"), &tls.Config{NextProtos: []string{alpn}}, nil) require.NoError(t, err) defer 
ln2.Close() } @@ -87,7 +92,7 @@ func TestConnectionPassedToQUICForListening(t *testing.T) { require.NoError(t, err) defer cm.Close() - raddr := ma.StringCast("/ip4/127.0.0.1/udp/0/quic-v1") + raddr := tStringCast("/ip4/127.0.0.1/udp/0/quic-v1") naddr, _, err := FromQuicMultiaddr(raddr) require.NoError(t, err) @@ -105,7 +110,7 @@ func TestConnectionPassedToQUICForListening(t *testing.T) { } func TestAcceptErrorGetCleanedUp(t *testing.T) { - raddr := ma.StringCast("/ip4/127.0.0.1/udp/0/quic-v1") + raddr := tStringCast("/ip4/127.0.0.1/udp/0/quic-v1") cm, err := NewConnManager(quic.StatelessResetKey{}, quic.TokenGeneratorKey{}, DisableReuseport()) require.NoError(t, err) @@ -147,7 +152,7 @@ func TestConnectionPassedToQUICForDialing(t *testing.T) { require.NoError(t, err) defer cm.Close() - raddr := ma.StringCast("/ip4/127.0.0.1/udp/1234/quic-v1") + raddr := tStringCast("/ip4/127.0.0.1/udp/1234/quic-v1") naddr, _, err := FromQuicMultiaddr(raddr) require.NoError(t, err) @@ -222,12 +227,12 @@ func testListener(t *testing.T, enableReuseport bool) { require.NoError(t, err) id1, tlsConf1 := getTLSConfForProto(t, "proto1") - ln1, err := cm.ListenQUIC(ma.StringCast("/ip4/127.0.0.1/udp/0/quic-v1"), tlsConf1, nil) + ln1, err := cm.ListenQUIC(tStringCast("/ip4/127.0.0.1/udp/0/quic-v1"), tlsConf1, nil) require.NoError(t, err) id2, tlsConf2 := getTLSConfForProto(t, "proto2") ln2, err := cm.ListenQUIC( - ma.StringCast(fmt.Sprintf("/ip4/127.0.0.1/udp/%d/quic-v1", ln1.Addr().(*net.UDPAddr).Port)), + tStringCast(fmt.Sprintf("/ip4/127.0.0.1/udp/%d/quic-v1", ln1.Addr().(*net.UDPAddr).Port)), tlsConf2, nil, ) diff --git a/go-libp2p/p2p/transport/quicreuse/quic_multiaddr.go b/go-libp2p/p2p/transport/quicreuse/quic_multiaddr.go index 3da4721..29e2a53 100644 --- a/go-libp2p/p2p/transport/quicreuse/quic_multiaddr.go +++ b/go-libp2p/p2p/transport/quicreuse/quic_multiaddr.go @@ -10,7 +10,7 @@ import ( ) var ( - quicV1MA = ma.StringCast("/quic-v1") + quicV1MA, _ = ma.StringCast("/quic-v1") ) func ToQuicMultiaddr(na net.Addr, version quic.VersionNumber) (ma.Multiaddr, error) { @@ -29,7 +29,12 @@ func ToQuicMultiaddr(na net.Addr, version quic.VersionNumber) (ma.Multiaddr, err func FromQuicMultiaddr(addr ma.Multiaddr) (*net.UDPAddr, quic.VersionNumber, error) { var version quic.VersionNumber var partsBeforeQUIC []ma.Multiaddr - ma.ForEach(addr, func(c ma.Component) bool { + var err error + ma.ForEach(addr, func(c ma.Component, e error) bool { + if e != nil { + err = e + return false + } switch c.Protocol().Code { case ma.P_QUIC_V1: version = quic.Version1 @@ -39,6 +44,9 @@ func FromQuicMultiaddr(addr ma.Multiaddr) (*net.UDPAddr, quic.VersionNumber, err return true } }) + if err != nil { + return nil, version, err + } if len(partsBeforeQUIC) == 0 { return nil, version, errors.New("no addr before QUIC component") } diff --git a/go-libp2p/p2p/transport/tcp/tcp_test.go b/go-libp2p/p2p/transport/tcp/tcp_test.go index a57a65e..42c2a4c 100644 --- a/go-libp2p/p2p/transport/tcp/tcp_test.go +++ b/go-libp2p/p2p/transport/tcp/tcp_test.go @@ -24,6 +24,11 @@ import ( var muxers = []tptu.StreamMuxer{{ID: "/yamux", Muxer: yamux.DefaultTransport}} +func tStringCast(str string) ma.Multiaddr { + m, _ := ma.StringCast(str) + return m +} + func TestTcpTransport(t *testing.T) { for i := 0; i < 2; i++ { peerA, ia := makeInsecureMuxer(t) @@ -74,7 +79,7 @@ func TestResourceManager(t *testing.T) { require.NoError(t, err) ta, err := NewTCPTransport(ua, nil) require.NoError(t, err) - ln, err := ta.Listen(ma.StringCast("/ip4/127.0.0.1/tcp/0")) + ln, 
err := ta.Listen(tStringCast("/ip4/127.0.0.1/tcp/0")) require.NoError(t, err) defer ln.Close() @@ -156,7 +161,7 @@ func TestDialWithUpdates(t *testing.T) { require.NoError(t, err) ta, err := NewTCPTransport(ua, nil) require.NoError(t, err) - ln, err := ta.Listen(ma.StringCast("/ip4/127.0.0.1/tcp/0")) + ln, err := ta.Listen(tStringCast("/ip4/127.0.0.1/tcp/0")) require.NoError(t, err) defer ln.Close() @@ -173,7 +178,7 @@ func TestDialWithUpdates(t *testing.T) { require.NoError(t, err) acceptAndClose := func() manet.Listener { - li, err := manet.Listen(ma.StringCast("/ip4/127.0.0.1/tcp/0")) + li, err := manet.Listen(tStringCast("/ip4/127.0.0.1/tcp/0")) if err != nil { t.Fatal(err) } diff --git a/go-libp2p/p2p/transport/webrtc/listener.go b/go-libp2p/p2p/transport/webrtc/listener.go index 1834fc8..ba68e60 100644 --- a/go-libp2p/p2p/transport/webrtc/listener.go +++ b/go-libp2p/p2p/transport/webrtc/listener.go @@ -153,7 +153,10 @@ func (l *listener) handleCandidate(ctx context.Context, candidate udpmux.Candida return nil, err } if l.transport.gater != nil { - localAddr, _ := ma.SplitFunc(l.localMultiaddr, func(c ma.Component) bool { return c.Protocol().Code == ma.P_CERTHASH }) + localAddr, _, err := ma.SplitFunc(l.localMultiaddr, func(c ma.Component) bool { return c.Protocol().Code == ma.P_CERTHASH }) + if err != nil { + return nil, err + } if !l.transport.gater.InterceptAccept(&connMultiaddrs{local: localAddr, remote: remoteMultiaddr}) { // The connection attempt is rejected before we can send the client an error. // This means that the connection attempt will time out. @@ -264,7 +267,11 @@ func (l *listener) setupConnection( return nil, err } - localMultiaddrWithoutCerthash, _ := ma.SplitFunc(l.localMultiaddr, func(c ma.Component) bool { return c.Protocol().Code == ma.P_CERTHASH }) + localMultiaddrWithoutCerthash, _, err := ma.SplitFunc(l.localMultiaddr, func(c ma.Component) bool { return c.Protocol().Code == ma.P_CERTHASH }) + if err != nil { + return nil, err + } + conn, err := newConnection( network.DirInbound, w.PeerConnection, diff --git a/go-libp2p/p2p/transport/webrtc/transport.go b/go-libp2p/p2p/transport/webrtc/transport.go index b04753e..9613beb 100644 --- a/go-libp2p/p2p/transport/webrtc/transport.go +++ b/go-libp2p/p2p/transport/webrtc/transport.go @@ -186,7 +186,11 @@ func (t *WebRTCTransport) CanDial(addr ma.Multiaddr) bool { // be multiplexed on the same port as other UDP based transports like QUIC and WebTransport. // See https://github.com/libp2p/go-libp2p/issues/2446 for details. 
func (t *WebRTCTransport) Listen(addr ma.Multiaddr) (tpt.Listener, error) { - addr, wrtcComponent := ma.SplitLast(addr) + addr, wrtcComponent, err := ma.SplitLast(addr) + if err != nil { + return nil, err + } + isWebrtc := wrtcComponent.Equal(webrtcComponent) if !isWebrtc { return nil, fmt.Errorf("must listen on webrtc multiaddr") @@ -386,7 +390,10 @@ func (t *WebRTCTransport) dial(ctx context.Context, scope network.ConnManagement if err != nil { return nil, err } - remoteMultiaddrWithoutCerthash, _ := ma.SplitFunc(remoteMultiaddr, func(c ma.Component) bool { return c.Protocol().Code == ma.P_CERTHASH }) + remoteMultiaddrWithoutCerthash, _, err := ma.SplitFunc(remoteMultiaddr, func(c ma.Component) bool { return c.Protocol().Code == ma.P_CERTHASH }) + if err != nil { + return nil, err + } conn, err := newConnection( network.DirOutbound, @@ -623,7 +630,11 @@ func newWebRTCConnection(settings webrtc.SettingEngine, config webrtc.Configurat func IsWebRTCDirectMultiaddr(addr ma.Multiaddr) (bool, int) { var foundUDP, foundWebRTC bool certHashCount := 0 - ma.ForEach(addr, func(c ma.Component) bool { + ma.ForEach(addr, func(c ma.Component, e error) bool { + if e != nil { + return false + } + if !foundUDP { if c.Protocol().Code == ma.P_UDP { foundUDP = true diff --git a/go-libp2p/p2p/transport/webrtc/transport_test.go b/go-libp2p/p2p/transport/webrtc/transport_test.go index a3054a8..1f507ea 100644 --- a/go-libp2p/p2p/transport/webrtc/transport_test.go +++ b/go-libp2p/p2p/transport/webrtc/transport_test.go @@ -27,6 +27,11 @@ import ( "golang.org/x/crypto/sha3" ) +func tStringCast(str string) ma.Multiaddr { + m, _ := ma.StringCast(str) + return m +} + func getTransport(t *testing.T, opts ...Option) (*WebRTCTransport, peer.ID) { t.Helper() privKey, _, err := crypto.GenerateKeyPair(crypto.Ed25519, -1) @@ -78,14 +83,14 @@ func TestIsWebRTCDirectMultiaddr(t *testing.T) { } for _, addr := range invalid { - a := ma.StringCast(addr) + a := tStringCast(addr) isValid, n := IsWebRTCDirectMultiaddr(a) require.Equal(t, 0, n) require.False(t, isValid) } for _, tc := range valid { - a := ma.StringCast(tc.addr) + a := tStringCast(tc.addr) isValid, n := IsWebRTCDirectMultiaddr(a) require.Equal(t, tc.count, n) require.True(t, isValid) @@ -107,12 +112,12 @@ func TestTransportWebRTC_CanDial(t *testing.T) { } for _, addr := range invalid { - a := ma.StringCast(addr) + a := tStringCast(addr) require.False(t, tr.CanDial(a)) } for _, addr := range valid { - a := ma.StringCast(addr) + a := tStringCast(addr) require.True(t, tr.CanDial(a), addr) } } @@ -124,7 +129,7 @@ func TestTransportAddCertHasher(t *testing.T) { "/ip6/1::3/udp/2/webrtc-direct", } for _, a := range addrs { - addr, added := tr.AddCertHashes(ma.StringCast(a)) + addr, added := tr.AddCertHashes(tStringCast(a)) require.True(t, added) _, err := addr.ValueForProtocol(ma.P_CERTHASH) require.NoError(t, err) @@ -169,7 +174,7 @@ func TestTransportWebRTC_DialFailsOnUnsupportedHashFunction(t *testing.T) { func TestTransportWebRTC_CanListenSingle(t *testing.T) { tr, listeningPeer := getTransport(t) tr1, connectingPeer := getTransport(t) - listenMultiaddr := ma.StringCast("/ip4/127.0.0.1/udp/0/webrtc-direct") + listenMultiaddr := tStringCast("/ip4/127.0.0.1/udp/0/webrtc-direct") listener, err := tr.Listen(listenMultiaddr) require.NoError(t, err) @@ -211,7 +216,7 @@ func TestTransportWebRTC_CanListenMultiple(t *testing.T) { count := 3 tr, listeningPeer := getTransport(t, WithListenerMaxInFlightConnections(uint32(count))) - listenMultiaddr := 
ma.StringCast("/ip4/127.0.0.1/udp/0/webrtc-direct") + listenMultiaddr := tStringCast("/ip4/127.0.0.1/udp/0/webrtc-direct") listener, err := tr.Listen(listenMultiaddr) require.NoError(t, err) defer listener.Close() @@ -256,7 +261,7 @@ func TestTransportWebRTC_CanListenMultiple(t *testing.T) { func TestTransportWebRTC_CanCreateSuccessiveConnections(t *testing.T) { tr, listeningPeer := getTransport(t) - listenMultiaddr := ma.StringCast("/ip4/127.0.0.1/udp/0/webrtc-direct") + listenMultiaddr := tStringCast("/ip4/127.0.0.1/udp/0/webrtc-direct") listener, err := tr.Listen(listenMultiaddr) require.NoError(t, err) defer listener.Close() @@ -287,7 +292,7 @@ func TestTransportWebRTC_CanCreateSuccessiveConnections(t *testing.T) { func TestTransportWebRTC_ListenerCanCreateStreams(t *testing.T) { tr, listeningPeer := getTransport(t) tr1, connectingPeer := getTransport(t) - listenMultiaddr := ma.StringCast("/ip4/127.0.0.1/udp/0/webrtc-direct") + listenMultiaddr := tStringCast("/ip4/127.0.0.1/udp/0/webrtc-direct") listener, err := tr.Listen(listenMultiaddr) require.NoError(t, err) defer listener.Close() @@ -332,7 +337,7 @@ func TestTransportWebRTC_ListenerCanCreateStreams(t *testing.T) { func TestTransportWebRTC_DialerCanCreateStreams(t *testing.T) { tr, listeningPeer := getTransport(t) - listenMultiaddr := ma.StringCast("/ip4/127.0.0.1/udp/0/webrtc-direct") + listenMultiaddr := tStringCast("/ip4/127.0.0.1/udp/0/webrtc-direct") listener, err := tr.Listen(listenMultiaddr) require.NoError(t, err) defer listener.Close() @@ -378,7 +383,7 @@ func TestTransportWebRTC_DialerCanCreateStreams(t *testing.T) { func TestTransportWebRTC_DialerCanCreateStreamsMultiple(t *testing.T) { tr, listeningPeer := getTransport(t) - listenMultiaddr := ma.StringCast("/ip4/127.0.0.1/udp/0/webrtc-direct") + listenMultiaddr := tStringCast("/ip4/127.0.0.1/udp/0/webrtc-direct") listener, err := tr.Listen(listenMultiaddr) require.NoError(t, err) defer listener.Close() @@ -469,7 +474,7 @@ func TestTransportWebRTC_DialerCanCreateStreamsMultiple(t *testing.T) { func TestTransportWebRTC_Deadline(t *testing.T) { tr, listeningPeer := getTransport(t) - listenMultiaddr := ma.StringCast("/ip4/127.0.0.1/udp/0/webrtc-direct") + listenMultiaddr := tStringCast("/ip4/127.0.0.1/udp/0/webrtc-direct") listener, err := tr.Listen(listenMultiaddr) require.NoError(t, err) defer listener.Close() @@ -532,7 +537,7 @@ func TestTransportWebRTC_Deadline(t *testing.T) { func TestTransportWebRTC_StreamWriteBufferContention(t *testing.T) { tr, listeningPeer := getTransport(t) - listenMultiaddr := ma.StringCast("/ip4/127.0.0.1/udp/0/webrtc-direct") + listenMultiaddr := tStringCast("/ip4/127.0.0.1/udp/0/webrtc-direct") listener, err := tr.Listen(listenMultiaddr) require.NoError(t, err) defer listener.Close() @@ -581,7 +586,7 @@ func TestTransportWebRTC_StreamWriteBufferContention(t *testing.T) { func TestTransportWebRTC_RemoteReadsAfterClose(t *testing.T) { tr, listeningPeer := getTransport(t) - listenMultiaddr := ma.StringCast("/ip4/127.0.0.1/udp/0/webrtc-direct") + listenMultiaddr := tStringCast("/ip4/127.0.0.1/udp/0/webrtc-direct") listener, err := tr.Listen(listenMultiaddr) require.NoError(t, err) defer listener.Close() @@ -634,7 +639,7 @@ func TestTransportWebRTC_RemoteReadsAfterClose(t *testing.T) { func TestTransportWebRTC_RemoteReadsAfterClose2(t *testing.T) { tr, listeningPeer := getTransport(t) - listenMultiaddr := ma.StringCast("/ip4/127.0.0.1/udp/0/webrtc-direct") + listenMultiaddr := tStringCast("/ip4/127.0.0.1/udp/0/webrtc-direct") listener, err := 
tr.Listen(listenMultiaddr) require.NoError(t, err) defer listener.Close() @@ -685,7 +690,7 @@ func TestTransportWebRTC_RemoteReadsAfterClose2(t *testing.T) { func TestTransportWebRTC_Close(t *testing.T) { tr, listeningPeer := getTransport(t) - listenMultiaddr := ma.StringCast("/ip4/127.0.0.1/udp/0/webrtc-direct") + listenMultiaddr := tStringCast("/ip4/127.0.0.1/udp/0/webrtc-direct") listener, err := tr.Listen(listenMultiaddr) require.NoError(t, err) defer listener.Close() @@ -726,7 +731,7 @@ func TestTransportWebRTC_Close(t *testing.T) { func TestTransportWebRTC_PeerConnectionDTLSFailed(t *testing.T) { tr, listeningPeer := getTransport(t) - listenMultiaddr := ma.StringCast("/ip4/127.0.0.1/udp/0/webrtc-direct") + listenMultiaddr := tStringCast("/ip4/127.0.0.1/udp/0/webrtc-direct") ln, err := tr.Listen(listenMultiaddr) require.NoError(t, err) defer ln.Close() @@ -739,7 +744,7 @@ func TestTransportWebRTC_PeerConnectionDTLSFailed(t *testing.T) { require.NoError(t, err) badCerthash, err := ma.NewMultiaddr(fmt.Sprintf("/certhash/%s", badEncodedCerthash)) require.NoError(t, err) - badMultiaddr, _ := ma.SplitFunc(ln.Multiaddr(), func(c ma.Component) bool { return c.Protocol().Code == ma.P_CERTHASH }) + badMultiaddr, _, _ := ma.SplitFunc(ln.Multiaddr(), func(c ma.Component) bool { return c.Protocol().Code == ma.P_CERTHASH }) badMultiaddr = badMultiaddr.Encapsulate(badCerthash) tr1, _ := getTransport(t) @@ -755,7 +760,7 @@ func TestConnectionTimeoutOnListener(t *testing.T) { tr.peerConnectionTimeouts.Failed = 150 * time.Millisecond tr.peerConnectionTimeouts.Keepalive = 50 * time.Millisecond - listenMultiaddr := ma.StringCast("/ip4/127.0.0.1/udp/0/webrtc-direct") + listenMultiaddr := tStringCast("/ip4/127.0.0.1/udp/0/webrtc-direct") ln, err := tr.Listen(listenMultiaddr) require.NoError(t, err) defer ln.Close() @@ -774,7 +779,7 @@ func TestConnectionTimeoutOnListener(t *testing.T) { defer cancel() addr, err := manet.FromNetAddr(proxy.LocalAddr()) require.NoError(t, err) - _, webrtcComponent := ma.SplitFunc(ln.Multiaddr(), func(c ma.Component) bool { return c.Protocol().Code == ma.P_WEBRTC_DIRECT }) + _, webrtcComponent, _ := ma.SplitFunc(ln.Multiaddr(), func(c ma.Component) bool { return c.Protocol().Code == ma.P_WEBRTC_DIRECT }) addr = addr.Encapsulate(webrtcComponent) conn, err := tr1.Dial(ctx, addr, listeningPeer) require.NoError(t, err) @@ -834,7 +839,7 @@ func TestMaxInFlightRequests(t *testing.T) { tr, listeningPeer := getTransport(t, WithListenerMaxInFlightConnections(count), ) - ln, err := tr.Listen(ma.StringCast("/ip4/127.0.0.1/udp/0/webrtc-direct")) + ln, err := tr.Listen(tStringCast("/ip4/127.0.0.1/udp/0/webrtc-direct")) require.NoError(t, err) defer ln.Close() diff --git a/go-libp2p/p2p/transport/websocket/addrs.go b/go-libp2p/p2p/transport/websocket/addrs.go index 5fea856..46289bc 100644 --- a/go-libp2p/p2p/transport/websocket/addrs.go +++ b/go-libp2p/p2p/transport/websocket/addrs.go @@ -156,9 +156,10 @@ func parseWebsocketMultiaddr(a ma.Multiaddr) (parsedWebsocketMultiaddr, error) { // If this is not a wss then withoutWs is the rest of the multiaddr out.restMultiaddr = withoutWs for { + var err error var head *ma.Component - rest, head = ma.SplitLast(rest) - if head == nil || rest == nil { + rest, head, err = ma.SplitLast(rest) + if head == nil || rest == nil || err != nil { break } diff --git a/go-libp2p/p2p/transport/websocket/addrs_test.go b/go-libp2p/p2p/transport/websocket/addrs_test.go index 3c5ba50..4e263d3 100644 --- a/go-libp2p/p2p/transport/websocket/addrs_test.go +++ 
b/go-libp2p/p2p/transport/websocket/addrs_test.go @@ -9,6 +9,11 @@ import ( ma "github.com/multiformats/go-multiaddr" ) +func tStringCast(str string) ma.Multiaddr { + m, _ := ma.StringCast(str) + return m +} + func TestMultiaddrParsing(t *testing.T) { addr, err := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/5555/ws") if err != nil { @@ -69,13 +74,13 @@ func TestConvertWebsocketMultiaddrToNetAddr(t *testing.T) { } func TestListeningOnDNSAddr(t *testing.T) { - ln, err := newListener(ma.StringCast("/dns/localhost/tcp/0/ws"), nil) + ln, err := newListener(tStringCast("/dns/localhost/tcp/0/ws"), nil) require.NoError(t, err) addr := ln.Multiaddr() - first, rest := ma.SplitFirst(addr) + first, rest, _ := ma.SplitFirst(addr) require.Equal(t, ma.P_DNS, first.Protocol().Code) require.Equal(t, "localhost", first.Value()) - next, _ := ma.SplitFirst(rest) + next, _, _ := ma.SplitFirst(rest) require.Equal(t, ma.P_TCP, next.Protocol().Code) require.NotEqual(t, 0, next.Value()) } diff --git a/go-libp2p/p2p/transport/websocket/listener.go b/go-libp2p/p2p/transport/websocket/listener.go index d7a1b88..64c7f82 100644 --- a/go-libp2p/p2p/transport/websocket/listener.go +++ b/go-libp2p/p2p/transport/websocket/listener.go @@ -63,11 +63,17 @@ func newListener(a ma.Multiaddr, tlsConf *tls.Config) (*listener, error) { if err != nil { return nil, err } - first, _ := ma.SplitFirst(a) + first, _, err := ma.SplitFirst(a) + if err != nil { + return nil, err + } // Don't resolve dns addresses. // We want to be able to announce domain names, so the peer can validate the TLS certificate. if c := first.Protocol().Code; c == ma.P_DNS || c == ma.P_DNS4 || c == ma.P_DNS6 || c == ma.P_DNSADDR { - _, last := ma.SplitFirst(laddr) + _, last, err := ma.SplitFirst(laddr) + if err != nil { + return nil, err + } laddr = first.Encapsulate(last) } parsed.restMultiaddr = laddr diff --git a/go-libp2p/p2p/transport/websocket/websocket.go b/go-libp2p/p2p/transport/websocket/websocket.go index 5142ca9..5cb1b23 100644 --- a/go-libp2p/p2p/transport/websocket/websocket.go +++ b/go-libp2p/p2p/transport/websocket/websocket.go @@ -38,10 +38,10 @@ var dialMatcher = mafmt.And( mafmt.Base(ma.P_WSS))) var ( - wssComponent = ma.StringCast("/wss") - tlsWsComponent = ma.StringCast("/tls/ws") - tlsComponent = ma.StringCast("/tls") - wsComponent = ma.StringCast("/ws") + wssComponent, _ = ma.StringCast("/wss") + tlsWsComponent, _ = ma.StringCast("/tls/ws") + tlsComponent, _ = ma.StringCast("/tls") + wsComponent, _ = ma.StringCast("/ws") ) func init() { @@ -134,7 +134,11 @@ func (t *WebsocketTransport) Resolve(_ context.Context, maddr ma.Multiaddr) ([]m if parsed.sni == nil { var err error // We don't have an sni component, we'll use dns/dnsaddr - ma.ForEach(parsed.restMultiaddr, func(c ma.Component) bool { + ma.ForEach(parsed.restMultiaddr, func(c ma.Component, e error) bool { + if e != nil { + err = e + return false + } switch c.Protocol().Code { case ma.P_DNS, ma.P_DNS4, ma.P_DNS6: // err shouldn't happen since this means we couldn't parse a dns hostname for an sni value. 
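The `ma.ForEach` rewrites here and in `quic_multiaddr.go` above share one shape: the callback now receives a per-component parse error, and callers that care about it hoist it into an enclosing variable and abort the walk. A compact sketch under the forked signature; `collectValues` and its package are hypothetical helpers, not part of this patch:

```go
package mahelpers // hypothetical package, for illustration only

import (
	ma "github.com/multiformats/go-multiaddr"
)

// collectValues gathers the value of every component whose protocol
// matches code, surfacing the first per-component error that ForEach
// reports instead of silently skipping it.
func collectValues(addr ma.Multiaddr, code int) ([]string, error) {
	var vals []string
	var err error
	ma.ForEach(addr, func(c ma.Component, e error) bool {
		if e != nil {
			err = e
			return false // stop the walk on the first parse error
		}
		if c.Protocol().Code == code {
			vals = append(vals, c.Value())
		}
		return true // keep walking
	})
	return vals, err
}
```

The same convention drives the three-value `SplitFirst`/`SplitLast`/`SplitFunc` signatures used throughout these hunks: the extra final return value is a parse error that used to surface as a panic.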
diff --git a/go-libp2p/p2p/transport/websocket/websocket_test.go b/go-libp2p/p2p/transport/websocket/websocket_test.go index 2023ee3..b113143 100644 --- a/go-libp2p/p2p/transport/websocket/websocket_test.go +++ b/go-libp2p/p2p/transport/websocket/websocket_test.go @@ -81,7 +81,7 @@ func newSecureMuxer(t *testing.T) (peer.ID, []sec.SecureTransport) { func lastComponent(t *testing.T, a ma.Multiaddr) ma.Multiaddr { t.Helper() - _, wscomponent := ma.SplitLast(a) + _, wscomponent, _ := ma.SplitLast(a) require.NotNil(t, wscomponent) if wscomponent.Equal(wsComponent) { return wsComponent @@ -117,25 +117,25 @@ func generateTLSConfig(t *testing.T) *tls.Config { func TestCanDial(t *testing.T) { d := &WebsocketTransport{} - if !d.CanDial(ma.StringCast("/ip4/127.0.0.1/tcp/5555/ws")) { + if !d.CanDial(tStringCast("/ip4/127.0.0.1/tcp/5555/ws")) { t.Fatal("expected to match websocket maddr, but did not") } - if !d.CanDial(ma.StringCast("/ip4/127.0.0.1/tcp/5555/wss")) { + if !d.CanDial(tStringCast("/ip4/127.0.0.1/tcp/5555/wss")) { t.Fatal("expected to match secure websocket maddr, but did not") } - if d.CanDial(ma.StringCast("/ip4/127.0.0.1/tcp/5555")) { + if d.CanDial(tStringCast("/ip4/127.0.0.1/tcp/5555")) { t.Fatal("expected to not match tcp maddr, but did") } - if !d.CanDial(ma.StringCast("/ip4/127.0.0.1/tcp/5555/tls/ws")) { + if !d.CanDial(tStringCast("/ip4/127.0.0.1/tcp/5555/tls/ws")) { t.Fatal("expected to match secure websocket maddr, but did not") } - if !d.CanDial(ma.StringCast("/ip4/127.0.0.1/tcp/5555/tls/sni/example.com/ws")) { + if !d.CanDial(tStringCast("/ip4/127.0.0.1/tcp/5555/tls/sni/example.com/ws")) { t.Fatal("expected to match secure websocket maddr with sni, but did not") } - if !d.CanDial(ma.StringCast("/dns4/example.com/tcp/5555/tls/sni/example.com/ws")) { + if !d.CanDial(tStringCast("/dns4/example.com/tcp/5555/tls/sni/example.com/ws")) { t.Fatal("expected to match secure websocket maddr with sni, but did not") } - if !d.CanDial(ma.StringCast("/dnsaddr/example.com/tcp/5555/tls/sni/example.com/ws")) { + if !d.CanDial(tStringCast("/dnsaddr/example.com/tcp/5555/tls/sni/example.com/ws")) { t.Fatal("expected to match secure websocket maddr with sni, but did not") } } @@ -233,7 +233,7 @@ func TestHostHeaderWss(t *testing.T) { _, port, err := net.SplitHostPort(l.Addr().String()) require.NoError(t, err) - serverMA := ma.StringCast("/ip4/127.0.0.1/tcp/" + port + "/tls/sni/example.com/ws") + serverMA := tStringCast("/ip4/127.0.0.1/tcp/" + port + "/tls/sni/example.com/ws") tlsConfig := &tls.Config{InsecureSkipVerify: true} // Our test server doesn't have a cert signed by a CA _, u := newSecureUpgrader(t) @@ -251,7 +251,7 @@ func TestHostHeaderWss(t *testing.T) { } func TestDialWss(t *testing.T) { - serverMA, rid, errChan := testWSSServer(t, ma.StringCast("/ip4/127.0.0.1/tcp/0/tls/sni/example.com/ws")) + serverMA, rid, errChan := testWSSServer(t, tStringCast("/ip4/127.0.0.1/tcp/0/tls/sni/example.com/ws")) require.Contains(t, serverMA.String(), "tls") tlsConfig := &tls.Config{InsecureSkipVerify: true} // Our test server doesn't have a cert signed by a CA @@ -275,7 +275,7 @@ func TestDialWss(t *testing.T) { } func TestDialWssNoClientCert(t *testing.T) { - serverMA, rid, _ := testWSSServer(t, ma.StringCast("/ip4/127.0.0.1/tcp/0/tls/sni/example.com/ws")) + serverMA, rid, _ := testWSSServer(t, tStringCast("/ip4/127.0.0.1/tcp/0/tls/sni/example.com/ws")) require.Contains(t, serverMA.String(), "tls") _, u := newSecureUpgrader(t) @@ -374,10 +374,10 @@ func connectAndExchangeData(t *testing.T, laddr 
ma.Multiaddr, secure bool) { func TestWebsocketConnection(t *testing.T) { t.Run("unencrypted", func(t *testing.T) { - connectAndExchangeData(t, ma.StringCast("/ip4/127.0.0.1/tcp/0/ws"), false) + connectAndExchangeData(t, tStringCast("/ip4/127.0.0.1/tcp/0/ws"), false) }) t.Run("encrypted", func(t *testing.T) { - connectAndExchangeData(t, ma.StringCast("/ip4/127.0.0.1/tcp/0/wss"), true) + connectAndExchangeData(t, tStringCast("/ip4/127.0.0.1/tcp/0/wss"), true) }) } @@ -385,7 +385,7 @@ func TestWebsocketListenSecureFailWithoutTLSConfig(t *testing.T) { _, u := newUpgrader(t) tpt, err := New(u, &network.NullResourceManager{}) require.NoError(t, err) - addr := ma.StringCast("/ip4/127.0.0.1/tcp/0/wss") + addr := tStringCast("/ip4/127.0.0.1/tcp/0/wss") _, err = tpt.Listen(addr) require.EqualError(t, err, fmt.Sprintf("cannot listen on wss address %s without a tls.Config", addr)) } @@ -395,9 +395,9 @@ func TestWebsocketListenSecureAndInsecure(t *testing.T) { server, err := New(serverUpgrader, &network.NullResourceManager{}, WithTLSConfig(generateTLSConfig(t))) require.NoError(t, err) - lnInsecure, err := server.Listen(ma.StringCast("/ip4/127.0.0.1/tcp/0/ws")) + lnInsecure, err := server.Listen(tStringCast("/ip4/127.0.0.1/tcp/0/ws")) require.NoError(t, err) - lnSecure, err := server.Listen(ma.StringCast("/ip4/127.0.0.1/tcp/0/wss")) + lnSecure, err := server.Listen(tStringCast("/ip4/127.0.0.1/tcp/0/wss")) require.NoError(t, err) t.Run("insecure", func(t *testing.T) { @@ -439,7 +439,7 @@ func TestConcurrentClose(t *testing.T) { _, u := newUpgrader(t) tpt, err := New(u, &network.NullResourceManager{}) require.NoError(t, err) - l, err := tpt.maListen(ma.StringCast("/ip4/127.0.0.1/tcp/0/ws")) + l, err := tpt.maListen(tStringCast("/ip4/127.0.0.1/tcp/0/ws")) if err != nil { t.Fatal(err) } @@ -479,7 +479,7 @@ func TestWriteZero(t *testing.T) { if err != nil { t.Fatal(err) } - l, err := tpt.maListen(ma.StringCast("/ip4/127.0.0.1/tcp/0/ws")) + l, err := tpt.maListen(tStringCast("/ip4/127.0.0.1/tcp/0/ws")) if err != nil { t.Fatal(err) } @@ -537,7 +537,7 @@ func TestResolveMultiaddr(t *testing.T) { for unresolved, expectedMA := range testCases { t.Run(unresolved, func(t *testing.T) { - m1 := ma.StringCast(unresolved) + m1 := tStringCast(unresolved) wsTpt := WebsocketTransport{} ctx := context.Background() diff --git a/go-libp2p/p2p/transport/webtransport/cert_manager_test.go b/go-libp2p/p2p/transport/webtransport/cert_manager_test.go index 942d471..29ed96a 100644 --- a/go-libp2p/p2p/transport/webtransport/cert_manager_test.go +++ b/go-libp2p/p2p/transport/webtransport/cert_manager_test.go @@ -24,7 +24,7 @@ func certificateHashFromTLSConfig(c *tls.Config) [32]byte { func splitMultiaddr(addr ma.Multiaddr) []ma.Component { var components []ma.Component - ma.ForEach(addr, func(c ma.Component) bool { + ma.ForEach(addr, func(c ma.Component, e error) bool { components = append(components, c) return true }) diff --git a/go-libp2p/p2p/transport/webtransport/multiaddr.go b/go-libp2p/p2p/transport/webtransport/multiaddr.go index bd90638..47c5b02 100644 --- a/go-libp2p/p2p/transport/webtransport/multiaddr.go +++ b/go-libp2p/p2p/transport/webtransport/multiaddr.go @@ -12,7 +12,7 @@ import ( "github.com/multiformats/go-multihash" ) -var webtransportMA = ma.StringCast("/quic-v1/webtransport") +var webtransportMA, _ = ma.StringCast("/quic-v1/webtransport") func toWebtransportMultiaddr(na net.Addr) (ma.Multiaddr, error) { addr, err := manet.FromNetAddr(na) @@ -43,12 +43,20 @@ func stringToWebtransportMultiaddr(str string) 
(ma.Multiaddr, error) { func extractCertHashes(addr ma.Multiaddr) ([]multihash.DecodedMultihash, error) { certHashesStr := make([]string, 0, 2) - ma.ForEach(addr, func(c ma.Component) bool { + var err error + ma.ForEach(addr, func(c ma.Component, e error) bool { + if e != nil { + err = e + return false + } if c.Protocol().Code == ma.P_CERTHASH { certHashesStr = append(certHashesStr, c.Value()) } return true }) + if err != nil { + return nil, err + } certHashes := make([]multihash.DecodedMultihash, 0, len(certHashesStr)) for _, s := range certHashesStr { _, ch, err := multibase.Decode(s) @@ -88,7 +96,10 @@ func IsWebtransportMultiaddr(multiaddr ma.Multiaddr) (bool, int) { state := init certhashCount := 0 - ma.ForEach(multiaddr, func(c ma.Component) bool { + ma.ForEach(multiaddr, func(c ma.Component, e error) bool { + if e != nil { + return false + } switch c.Protocol().Code { case ma.P_UDP: if state == init { diff --git a/go-libp2p/p2p/transport/webtransport/multiaddr_test.go b/go-libp2p/p2p/transport/webtransport/multiaddr_test.go index 3f0a3ec..9875ead 100644 --- a/go-libp2p/p2p/transport/webtransport/multiaddr_test.go +++ b/go-libp2p/p2p/transport/webtransport/multiaddr_test.go @@ -12,6 +12,11 @@ import ( "github.com/stretchr/testify/require" ) +func tStringCast(str string) ma.Multiaddr { + m, _ := ma.StringCast(str) + return m +} + func TestWebtransportMultiaddr(t *testing.T) { t.Run("valid", func(t *testing.T) { addr, err := toWebtransportMultiaddr(&net.UDPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 1337}) @@ -67,7 +72,7 @@ func TestExtractCertHashes(t *testing.T) { {addr: fmt.Sprintf("/ip4/127.0.0.1/udp/1234/quic-v1/webtransport/certhash/%s", fooHash), hashes: []string{"foo"}}, {addr: fmt.Sprintf("/ip4/127.0.0.1/udp/1234/quic-v1/webtransport/certhash/%s/certhash/%s", fooHash, barHash), hashes: []string{"foo", "bar"}}, } { - ch, err := extractCertHashes(ma.StringCast(tc.addr)) + ch, err := extractCertHashes(tStringCast(tc.addr)) require.NoError(t, err) require.Len(t, ch, len(tc.hashes)) for i, h := range tc.hashes { @@ -88,7 +93,7 @@ func TestWebtransportResolve(t *testing.T) { for _, tc := range testCases { t.Run(tc, func(t *testing.T) { - outMa, err := tpt.Resolve(ctx, ma.StringCast(tc)) + outMa, err := tpt.Resolve(ctx, tStringCast(tc)) require.NoError(t, err) sni, err := outMa[0].ValueForProtocol(ma.P_SNI) require.NoError(t, err) @@ -97,7 +102,7 @@ func TestWebtransportResolve(t *testing.T) { } t.Run("No sni", func(t *testing.T) { - outMa, err := tpt.Resolve(ctx, ma.StringCast("/ip4/127.0.0.1/udp/1337/quic-v1/webtransport")) + outMa, err := tpt.Resolve(ctx, tStringCast("/ip4/127.0.0.1/udp/1337/quic-v1/webtransport")) require.NoError(t, err) _, err = outMa[0].ValueForProtocol(ma.P_SNI) require.Error(t, err) @@ -123,7 +128,7 @@ func TestIsWebtransportMultiaddr(t *testing.T) { for _, tc := range testCases { t.Run(tc.addr, func(t *testing.T) { - got, n := IsWebtransportMultiaddr(ma.StringCast(tc.addr)) + got, n := IsWebtransportMultiaddr(tStringCast(tc.addr)) require.Equal(t, tc.want, got) require.Equal(t, tc.certhashCount, n) }) diff --git a/go-libp2p/p2p/transport/webtransport/transport.go b/go-libp2p/p2p/transport/webtransport/transport.go index 9717270..78f0bbf 100644 --- a/go-libp2p/p2p/transport/webtransport/transport.go +++ b/go-libp2p/p2p/transport/webtransport/transport.go @@ -158,7 +158,11 @@ func (t *transport) dialWithScope(ctx context.Context, raddr ma.Multiaddr, p pee return nil, err } - maddr, _ := ma.SplitFunc(raddr, func(c ma.Component) bool { return c.Protocol().Code == 
ma.P_WEBTRANSPORT }) + maddr, _, err := ma.SplitFunc(raddr, func(c ma.Component) bool { return c.Protocol().Code == ma.P_WEBTRANSPORT }) + if err != nil { + return nil, err + } + sess, err := t.dial(ctx, maddr, url, sni, certHashes) if err != nil { return nil, err @@ -372,7 +376,10 @@ func (t *transport) removeConn(sess *webtransport.Session) { // DNS-like component, then that will be returned for the sni and // foundSniComponent will be false (since we didn't find an actual sni component). func extractSNI(maddr ma.Multiaddr) (sni string, foundSniComponent bool) { - ma.ForEach(maddr, func(c ma.Component) bool { + ma.ForEach(maddr, func(c ma.Component, e error) bool { + if e != nil { + return false + } switch c.Protocol().Code { case ma.P_SNI: sni = c.Value() @@ -397,10 +404,17 @@ func (t *transport) Resolve(_ context.Context, maddr ma.Multiaddr) ([]ma.Multiad return []ma.Multiaddr{maddr}, nil } - beforeQuicMA, afterIncludingQuicMA := ma.SplitFunc(maddr, func(c ma.Component) bool { + beforeQuicMA, afterIncludingQuicMA, err := ma.SplitFunc(maddr, func(c ma.Component) bool { return c.Protocol().Code == ma.P_QUIC_V1 }) - quicComponent, afterQuicMA := ma.SplitFirst(afterIncludingQuicMA) + if err != nil { + return nil, err + } + + quicComponent, afterQuicMA, err := ma.SplitFirst(afterIncludingQuicMA) + if err != nil { + return nil, err + } sniComponent, err := ma.NewComponent(ma.ProtocolWithCode(ma.P_SNI).Name, sni) if err != nil { return nil, err diff --git a/go-libp2p/p2p/transport/webtransport/transport_test.go b/go-libp2p/p2p/transport/webtransport/transport_test.go index f6c850a..42b3f30 100644 --- a/go-libp2p/p2p/transport/webtransport/transport_test.go +++ b/go-libp2p/p2p/transport/webtransport/transport_test.go @@ -63,7 +63,7 @@ func randomMultihash(t *testing.T) string { func extractCertHashes(addr ma.Multiaddr) []string { var certHashesStr []string - ma.ForEach(addr, func(c ma.Component) bool { + ma.ForEach(addr, func(c ma.Component, e error) bool { if c.Protocol().Code == ma.P_CERTHASH { certHashesStr = append(certHashesStr, c.Value()) } @@ -78,7 +78,7 @@ func stripCertHashes(addr ma.Multiaddr) ma.Multiaddr { if err != nil { return addr } - addr, _ = ma.SplitLast(addr) + addr, _, _ = ma.SplitLast(addr) } } @@ -103,12 +103,17 @@ func newConnManager(t *testing.T, opts ...quicreuse.Option) *quicreuse.ConnManag return cm } +func tStringCast(str string) ma.Multiaddr { + m, _ := ma.StringCast(str) + return m +} + func TestTransport(t *testing.T) { serverID, serverKey := newIdentity(t) tr, err := libp2pwebtransport.New(serverKey, nil, newConnManager(t), nil, nil) require.NoError(t, err) defer tr.(io.Closer).Close() - ln, err := tr.Listen(ma.StringCast("/ip4/127.0.0.1/udp/0/quic-v1/webtransport")) + ln, err := tr.Listen(tStringCast("/ip4/127.0.0.1/udp/0/quic-v1/webtransport")) require.NoError(t, err) defer ln.Close() @@ -132,7 +137,7 @@ func TestTransport(t *testing.T) { require.NoError(t, err) _, port, err := net.SplitHostPort(addr) require.NoError(t, err) - require.Equal(t, ma.StringCast(fmt.Sprintf("/ip4/127.0.0.1/udp/%s/quic-v1/webtransport", port)), conn.RemoteMultiaddr()) + require.Equal(t, tStringCast(fmt.Sprintf("/ip4/127.0.0.1/udp/%s/quic-v1/webtransport", port)), conn.RemoteMultiaddr()) addrChan <- conn.RemoteMultiaddr() }() @@ -154,7 +159,7 @@ func TestHashVerification(t *testing.T) { tr, err := libp2pwebtransport.New(serverKey, nil, newConnManager(t), nil, &network.NullResourceManager{}) require.NoError(t, err) defer tr.(io.Closer).Close() - ln, err := 
tr.Listen(ma.StringCast("/ip4/127.0.0.1/udp/0/quic-v1/webtransport")) + ln, err := tr.Listen(tStringCast("/ip4/127.0.0.1/udp/0/quic-v1/webtransport")) require.NoError(t, err) done := make(chan struct{}) go func() { @@ -192,16 +197,16 @@ func TestHashVerification(t *testing.T) { func TestCanDial(t *testing.T) { valid := []ma.Multiaddr{ - ma.StringCast("/ip4/127.0.0.1/udp/1234/quic-v1/webtransport/certhash/" + randomMultihash(t)), - ma.StringCast("/ip6/b16b:8255:efc6:9cd5:1a54:ee86:2d7a:c2e6/udp/1234/quic-v1/webtransport/certhash/" + randomMultihash(t)), - ma.StringCast(fmt.Sprintf("/ip4/127.0.0.1/udp/1234/quic-v1/webtransport/certhash/%s/certhash/%s/certhash/%s", randomMultihash(t), randomMultihash(t), randomMultihash(t))), - ma.StringCast("/ip4/127.0.0.1/udp/1234/quic-v1/webtransport"), // no certificate hash + tStringCast("/ip4/127.0.0.1/udp/1234/quic-v1/webtransport/certhash/" + randomMultihash(t)), + tStringCast("/ip6/b16b:8255:efc6:9cd5:1a54:ee86:2d7a:c2e6/udp/1234/quic-v1/webtransport/certhash/" + randomMultihash(t)), + tStringCast(fmt.Sprintf("/ip4/127.0.0.1/udp/1234/quic-v1/webtransport/certhash/%s/certhash/%s/certhash/%s", randomMultihash(t), randomMultihash(t), randomMultihash(t))), + tStringCast("/ip4/127.0.0.1/udp/1234/quic-v1/webtransport"), // no certificate hash } invalid := []ma.Multiaddr{ - ma.StringCast("/ip4/127.0.0.1/udp/1234"), // missing webtransport - ma.StringCast("/ip4/127.0.0.1/udp/1234/webtransport"), // missing quic - ma.StringCast("/ip4/127.0.0.1/tcp/1234/webtransport"), // WebTransport over TCP? Is this a joke? + tStringCast("/ip4/127.0.0.1/udp/1234"), // missing webtransport + tStringCast("/ip4/127.0.0.1/udp/1234/webtransport"), // missing quic + tStringCast("/ip4/127.0.0.1/tcp/1234/webtransport"), // WebTransport over TCP? Is this a joke? } _, key := newIdentity(t) @@ -219,14 +224,14 @@ func TestCanDial(t *testing.T) { func TestListenAddrValidity(t *testing.T) { valid := []ma.Multiaddr{ - ma.StringCast("/ip6/::/udp/0/quic-v1/webtransport/"), + tStringCast("/ip6/::/udp/0/quic-v1/webtransport/"), } invalid := []ma.Multiaddr{ - ma.StringCast("/ip4/127.0.0.1/udp/0"), // missing webtransport - ma.StringCast("/ip4/127.0.0.1/udp/0/webtransport"), // missing quic - ma.StringCast("/ip4/127.0.0.1/tcp/0/webtransport"), // WebTransport over TCP? Is this a joke? - ma.StringCast("/ip4/127.0.0.1/udp/0/quic-v1/webtransport/certhash/" + randomMultihash(t)), // We can't listen on a specific certhash + tStringCast("/ip4/127.0.0.1/udp/0"), // missing webtransport + tStringCast("/ip4/127.0.0.1/udp/0/webtransport"), // missing quic + tStringCast("/ip4/127.0.0.1/tcp/0/webtransport"), // WebTransport over TCP? Is this a joke? 
+ tStringCast("/ip4/127.0.0.1/udp/0/quic-v1/webtransport/certhash/" + randomMultihash(t)), // We can't listen on a specific certhash } _, key := newIdentity(t) @@ -251,9 +256,9 @@ func TestListenerAddrs(t *testing.T) { require.NoError(t, err) defer tr.(io.Closer).Close() - ln1, err := tr.Listen(ma.StringCast("/ip4/127.0.0.1/udp/0/quic-v1/webtransport")) + ln1, err := tr.Listen(tStringCast("/ip4/127.0.0.1/udp/0/quic-v1/webtransport")) require.NoError(t, err) - ln2, err := tr.Listen(ma.StringCast("/ip4/127.0.0.1/udp/0/quic-v1/webtransport")) + ln2, err := tr.Listen(tStringCast("/ip4/127.0.0.1/udp/0/quic-v1/webtransport")) require.NoError(t, err) hashes1 := extractCertHashes(ln1.Multiaddr()) require.Len(t, hashes1, 2) @@ -266,7 +271,7 @@ func TestResourceManagerDialing(t *testing.T) { defer ctrl.Finish() rcmgr := mocknetwork.NewMockResourceManager(ctrl) - addr := ma.StringCast("/ip4/127.0.0.1/udp/0/quic-v1/webtransport") + addr := tStringCast("/ip4/127.0.0.1/udp/0/quic-v1/webtransport") p := peer.ID("foobar") _, key := newIdentity(t) @@ -300,7 +305,7 @@ func TestResourceManagerListening(t *testing.T) { rcmgr := mocknetwork.NewMockResourceManager(ctrl) tr, err := libp2pwebtransport.New(key, nil, newConnManager(t), nil, rcmgr) require.NoError(t, err) - ln, err := tr.Listen(ma.StringCast("/ip4/127.0.0.1/udp/0/quic-v1/webtransport")) + ln, err := tr.Listen(tStringCast("/ip4/127.0.0.1/udp/0/quic-v1/webtransport")) require.NoError(t, err) defer ln.Close() @@ -326,7 +331,7 @@ func TestResourceManagerListening(t *testing.T) { rcmgr := mocknetwork.NewMockResourceManager(ctrl) tr, err := libp2pwebtransport.New(key, nil, newConnManager(t), nil, rcmgr) require.NoError(t, err) - ln, err := tr.Listen(ma.StringCast("/ip4/127.0.0.1/udp/0/quic-v1/webtransport")) + ln, err := tr.Listen(tStringCast("/ip4/127.0.0.1/udp/0/quic-v1/webtransport")) require.NoError(t, err) defer ln.Close() @@ -371,7 +376,7 @@ func TestConnectionGaterDialing(t *testing.T) { tr, err := libp2pwebtransport.New(serverKey, nil, newConnManager(t), nil, &network.NullResourceManager{}) require.NoError(t, err) defer tr.(io.Closer).Close() - ln, err := tr.Listen(ma.StringCast("/ip4/127.0.0.1/udp/0/quic-v1/webtransport")) + ln, err := tr.Listen(tStringCast("/ip4/127.0.0.1/udp/0/quic-v1/webtransport")) require.NoError(t, err) defer ln.Close() @@ -395,7 +400,7 @@ func TestConnectionGaterInterceptAccept(t *testing.T) { tr, err := libp2pwebtransport.New(serverKey, nil, newConnManager(t), connGater, &network.NullResourceManager{}) require.NoError(t, err) defer tr.(io.Closer).Close() - ln, err := tr.Listen(ma.StringCast("/ip4/127.0.0.1/udp/0/quic-v1/webtransport")) + ln, err := tr.Listen(tStringCast("/ip4/127.0.0.1/udp/0/quic-v1/webtransport")) require.NoError(t, err) defer ln.Close() @@ -421,7 +426,7 @@ func TestConnectionGaterInterceptSecured(t *testing.T) { tr, err := libp2pwebtransport.New(serverKey, nil, newConnManager(t), connGater, &network.NullResourceManager{}) require.NoError(t, err) defer tr.(io.Closer).Close() - ln, err := tr.Listen(ma.StringCast("/ip4/127.0.0.1/udp/0/quic-v1/webtransport")) + ln, err := tr.Listen(tStringCast("/ip4/127.0.0.1/udp/0/quic-v1/webtransport")) require.NoError(t, err) defer ln.Close() @@ -457,7 +462,7 @@ func TestAcceptQueueFilledUp(t *testing.T) { tr, err := libp2pwebtransport.New(serverKey, nil, newConnManager(t), nil, &network.NullResourceManager{}) require.NoError(t, err) defer tr.(io.Closer).Close() - ln, err := tr.Listen(ma.StringCast("/ip4/127.0.0.1/udp/0/quic-v1/webtransport")) + ln, err := 
tr.Listen(tStringCast("/ip4/127.0.0.1/udp/0/quic-v1/webtransport")) require.NoError(t, err) defer ln.Close() @@ -554,7 +559,7 @@ func TestFlowControlWindowIncrease(t *testing.T) { tr, err := libp2pwebtransport.New(serverKey, nil, newConnManager(t), nil, serverRcmgr) require.NoError(t, err) defer tr.(io.Closer).Close() - ln, err := tr.Listen(ma.StringCast("/ip4/127.0.0.1/udp/0/quic-v1/webtransport")) + ln, err := tr.Listen(tStringCast("/ip4/127.0.0.1/udp/0/quic-v1/webtransport")) require.NoError(t, err) defer ln.Close() @@ -585,7 +590,7 @@ func TestFlowControlWindowIncrease(t *testing.T) { var addr ma.Multiaddr for _, comp := range ma.Split(ln.Multiaddr()) { if _, err := comp.ValueForProtocol(ma.P_UDP); err == nil { - addr = addr.Encapsulate(ma.StringCast(fmt.Sprintf("/udp/%d", proxy.LocalPort()))) + addr = addr.Encapsulate(tStringCast(fmt.Sprintf("/udp/%d", proxy.LocalPort()))) continue } if addr == nil { @@ -664,7 +669,7 @@ func serverSendsBackValidCert(t *testing.T, timeSinceUnixEpoch time.Duration, ke require.NoError(t, err) tr, err := libp2pwebtransport.New(priv, nil, newConnManager(t), nil, &network.NullResourceManager{}, libp2pwebtransport.WithClock(cl)) require.NoError(t, err) - l, err := tr.Listen(ma.StringCast("/ip4/127.0.0.1/udp/0/quic-v1/webtransport")) + l, err := tr.Listen(tStringCast("/ip4/127.0.0.1/udp/0/quic-v1/webtransport")) require.NoError(t, err) defer l.Close() @@ -746,7 +751,7 @@ func TestServerRotatesCertCorrectly(t *testing.T) { return false } - l, err := tr.Listen(ma.StringCast("/ip4/127.0.0.1/udp/0/quic-v1/webtransport")) + l, err := tr.Listen(tStringCast("/ip4/127.0.0.1/udp/0/quic-v1/webtransport")) if err != nil { return false } @@ -760,14 +765,14 @@ func TestServerRotatesCertCorrectly(t *testing.T) { return false } - l, err = tr.Listen(ma.StringCast("/ip4/127.0.0.1/udp/0/quic-v1/webtransport")) + l, err = tr.Listen(tStringCast("/ip4/127.0.0.1/udp/0/quic-v1/webtransport")) if err != nil { return false } defer l.Close() var found bool - ma.ForEach(l.Multiaddr(), func(c ma.Component) bool { + ma.ForEach(l.Multiaddr(), func(c ma.Component, e error) bool { if c.Protocol().Code == ma.P_CERTHASH { for _, prevCerthash := range certhashes { if c.Value() == prevCerthash { @@ -794,7 +799,7 @@ func TestServerRotatesCertCorrectlyAfterSteps(t *testing.T) { tr, err := libp2pwebtransport.New(priv, nil, newConnManager(t), nil, &network.NullResourceManager{}, libp2pwebtransport.WithClock(cl)) require.NoError(t, err) - l, err := tr.Listen(ma.StringCast("/ip4/127.0.0.1/udp/0/quic-v1/webtransport")) + l, err := tr.Listen(tStringCast("/ip4/127.0.0.1/udp/0/quic-v1/webtransport")) require.NoError(t, err) certhashes := extractCertHashes(l.Multiaddr()) @@ -806,11 +811,11 @@ func TestServerRotatesCertCorrectlyAfterSteps(t *testing.T) { cl.Add(24 * time.Hour) tr, err := libp2pwebtransport.New(priv, nil, newConnManager(t), nil, &network.NullResourceManager{}, libp2pwebtransport.WithClock(cl)) require.NoError(t, err) - l, err := tr.Listen(ma.StringCast("/ip4/127.0.0.1/udp/0/quic-v1/webtransport")) + l, err := tr.Listen(tStringCast("/ip4/127.0.0.1/udp/0/quic-v1/webtransport")) require.NoError(t, err) var found bool - ma.ForEach(l.Multiaddr(), func(c ma.Component) bool { + ma.ForEach(l.Multiaddr(), func(c ma.Component, e error) bool { if c.Protocol().Code == ma.P_CERTHASH { for _, prevCerthash := range certhashes { if prevCerthash == c.Value() { diff --git a/go-multiaddr-dns/.gitignore b/go-multiaddr-dns/.gitignore new file mode 100644 index 0000000..4621ab7 --- /dev/null +++ 
b/go-multiaddr-dns/.gitignore @@ -0,0 +1 @@ +/madns/madns diff --git a/go-multiaddr-dns/LICENSE b/go-multiaddr-dns/LICENSE new file mode 100644 index 0000000..c7386b3 --- /dev/null +++ b/go-multiaddr-dns/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Juan Batiz-Benet + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/go-multiaddr-dns/README.md b/go-multiaddr-dns/README.md new file mode 100644 index 0000000..3958c3c --- /dev/null +++ b/go-multiaddr-dns/README.md @@ -0,0 +1,57 @@ +# go-multiaddr-dns + +> Resolve /dns4, /dns6, and /dnsaddr multiaddrs. + +```sh +> madns /dnsaddr/ipfs.io/ipfs/QmSoLju6m7xTh3DuokvT3886QRYqxAzb1kShaanJgW36yx +/ip4/104.236.151.122/tcp/4001/ipfs/QmSoLju6m7xTh3DuokvT3886QRYqxAzb1kShaanJgW36yx +/ip6/2604:a880:1:20::1d9:6001/tcp/4001/ipfs/QmSoLju6m7xTh3DuokvT3886QRYqxAzb1kShaanJgW36yx +/ip6/fc3d:9a4e:3c96:2fd2:1afa:18fe:8dd2:b602/tcp/4001/ipfs/QmSoLju6m7xTh3DuokvT3886QRYqxAzb1kShaanJgW36yx +/dns4/jupiter.i.ipfs.io/tcp/4001/ipfs/QmSoLju6m7xTh3DuokvT3886QRYqxAzb1kShaanJgW36yx +/dns6/jupiter.i.ipfs.io/tcp/4001/ipfs/QmSoLju6m7xTh3DuokvT3886QRYqxAzb1kShaanJgW36yx +``` + + +In more detail: + +```sh +> madns /dns6/example.net +/ip6/2001:db8::a3 +/ip6/2001:db8::a4 +... + +> madns /dns4/example.net/tcp/443/wss +/ip4/192.0.2.1/tcp/443/wss +/ip4/192.0.2.2/tcp/443/wss + +# No-op if it's not a dns-ish address. + +> madns /ip4/127.0.0.1/tcp/8080 +/ip4/127.0.0.1/tcp/8080 + +# /dnsaddr resolves by looking up TXT records. + +> dig +short TXT _dnsaddr.example.net +"dnsaddr=/ip6/2001:db8::a3/tcp/443/wss/ipfs/Qmfoo" +"dnsaddr=/ip6/2001:db8::a4/tcp/443/wss/ipfs/Qmbar" +"dnsaddr=/ip4/192.0.2.1/tcp/443/wss/ipfs/Qmfoo" +"dnsaddr=/ip4/192.0.2.2/tcp/443/wss/ipfs/Qmbar" +... + +# /dnsaddr returns addrs which encapsulate whatever /dnsaddr encapsulates too. + +> madns example.net/ipfs/Qmfoo +info: changing query to /dnsaddr/example.net/ipfs/Qmfoo +/ip6/2001:db8::a3/tcp/443/wss/ipfs/Qmfoo +/ip4/192.0.2.1/tcp/443/wss/ipfs/Qmfoo + +# TODO -p filters by protocol stacks. 
+ +> madns -p /ip6/tcp/wss /dnsaddr/example.net +/ip6/2001:db8::a3/tcp/443/wss/ipfs/Qmfoo +/ip6/2001:db8::a4/tcp/443/wss/ipfs/Qmbar + +# TODO -c filters by CIDR +> madns -c /ip4/104.236.76.0/ipcidr/24 /dnsaddr/example.net +/ip4/192.0.2.2/tcp/443/wss/ipfs/Qmbar +``` diff --git a/go-multiaddr-dns/go.mod b/go-multiaddr-dns/go.mod new file mode 100644 index 0000000..c12d1e0 --- /dev/null +++ b/go-multiaddr-dns/go.mod @@ -0,0 +1,28 @@ +module github.com/multiformats/go-multiaddr-dns + +replace github.com/multiformats/go-multiaddr => ../go-multiaddr + +require ( + github.com/miekg/dns v1.1.41 + github.com/multiformats/go-multiaddr v0.3.3 +) + +require ( + github.com/ipfs/go-cid v0.0.7 // indirect + github.com/klauspost/cpuid/v2 v2.2.6 // indirect + github.com/minio/sha256-simd v1.0.1 // indirect + github.com/mr-tron/base58 v1.2.0 // indirect + github.com/multiformats/go-base32 v0.1.0 // indirect + github.com/multiformats/go-base36 v0.2.0 // indirect + github.com/multiformats/go-multibase v0.2.0 // indirect + github.com/multiformats/go-multihash v0.2.3 // indirect + github.com/multiformats/go-varint v0.0.7 // indirect + github.com/spaolacci/murmur3 v1.1.0 // indirect + golang.org/x/crypto v0.18.0 // indirect + golang.org/x/exp v0.0.0-20230725012225-302865e7556b // indirect + golang.org/x/net v0.10.0 // indirect + golang.org/x/sys v0.16.0 // indirect + lukechampine.com/blake3 v1.2.1 // indirect +) + +go 1.21 diff --git a/go-multiaddr-dns/go.sum b/go-multiaddr-dns/go.sum new file mode 100644 index 0000000..e78f0c6 --- /dev/null +++ b/go-multiaddr-dns/go.sum @@ -0,0 +1,64 @@ +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/ipfs/go-cid v0.0.7 h1:ysQJVJA3fNDF1qigJbsSQOdjhVLsOEoPdh0+R97k3jY= +github.com/ipfs/go-cid v0.0.7/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I= +github.com/klauspost/cpuid/v2 v2.2.6 h1:ndNyv040zDGIDh8thGkXYjnFtiN02M1PVVF+JE/48xc= +github.com/klauspost/cpuid/v2 v2.2.6/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= +github.com/miekg/dns v1.1.41 h1:WMszZWJG0XmzbK9FEmzH2TVcqYzFesusSIB41b8KHxY= +github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= +github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ= +github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= +github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM= +github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8= +github.com/mr-tron/base58 v1.1.0/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= +github.com/mr-tron/base58 v1.1.3/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= +github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o= +github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= +github.com/multiformats/go-base32 v0.0.3/go.mod h1:pLiuGC8y0QR3Ue4Zug5UzK9LjgbkL8NSQj0zQ5Nz/AA= +github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE= +github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI= +github.com/multiformats/go-base36 v0.1.0/go.mod h1:kFGE83c6s80PklsHO9sRn2NCoffoRdUUOENyW/Vv6sM= +github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0= +github.com/multiformats/go-base36 
v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4= +github.com/multiformats/go-multibase v0.0.3/go.mod h1:5+1R4eQrT3PkYZ24C3W2Ue2tPwIdYQD509ZjSb5y9Oc= +github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g= +github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk= +github.com/multiformats/go-multihash v0.0.13/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= +github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U= +github.com/multiformats/go-multihash v0.2.3/go.mod h1:dXgKXCXjBzdscBLk9JkjINiEsCKRVch90MdaGiKsvSM= +github.com/multiformats/go-varint v0.0.5/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= +github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8= +github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= +github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.18.0 h1:PGVlW0xEltQnzFZ55hkuX5+KLyrMYhHld1YHO4AKcdc= +golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= +golang.org/x/exp v0.0.0-20230725012225-302865e7556b h1:tK7yjGqVRzYdXsBcfD2MLhFAhHfDgGLm2rY1ub7FA9k= +golang.org/x/exp v0.0.0-20230725012225-302865e7556b/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU= +golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod 
h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +gopkg.in/yaml.v3 v3.0.0 h1:hjy8E9ON/egN1tAYqKb61G10WtihqetD4sz2H+8nIeA= +gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +lukechampine.com/blake3 v1.2.1 h1:YuqqRuaqsGV71BV/nm9xlI0MKUv4QC54jQnBChWbGnI= +lukechampine.com/blake3 v1.2.1/go.mod h1:0OFRp7fBtAylGVCO40o87sbupkyIGgbpv1+M1k1LM6k= diff --git a/go-multiaddr-dns/madns/main.go b/go-multiaddr-dns/madns/main.go new file mode 100644 index 0000000..e9c65a8 --- /dev/null +++ b/go-multiaddr-dns/madns/main.go @@ -0,0 +1,44 @@ +package main + +import ( + "context" + "fmt" + "os" + "strings" + + ma "github.com/multiformats/go-multiaddr" + madns "github.com/multiformats/go-multiaddr-dns" +) + +func main() { + if len(os.Args) != 2 { + fmt.Print("usage: madns /dnsaddr/example.com\n" + + " madns /dnsaddr/example.com/ipfs/Qmfoobar\n" + + " madns /dns6/example.com\n" + + " madns /dns6/example.com/tcp/443/wss\n" + + " madns /dns4/example.com\n") + os.Exit(1) + } + + query := os.Args[1] + if !strings.HasPrefix(query, "/") { + query = "/dnsaddr/" + query + fmt.Fprintf(os.Stderr, "madns: changing query to %s\n", query) + } + + maddr, err := ma.NewMultiaddr(query) + if err != nil { + fmt.Printf("error: %s\n", err) + os.Exit(1) + } + + rmaddrs, err := madns.Resolve(context.Background(), maddr) + if err != nil { + fmt.Printf("error: %s (result=%+v)\n", err, rmaddrs) + os.Exit(1) + } + + for _, r := range rmaddrs { + fmt.Println(r.String()) + } +} diff --git a/go-multiaddr-dns/mock.go b/go-multiaddr-dns/mock.go new file mode 100644 index 0000000..3a054f8 --- /dev/null +++ b/go-multiaddr-dns/mock.go @@ -0,0 +1,31 @@ +package madns + +import ( + "context" + "net" +) + +type MockResolver struct { + IP map[string][]net.IPAddr + TXT map[string][]string +} + +var _ BasicResolver = (*MockResolver)(nil) + +func (r *MockResolver) LookupIPAddr(ctx context.Context, name string) ([]net.IPAddr, error) { + results, ok := r.IP[name] + if ok { + return results, nil + } else { + return []net.IPAddr{}, nil + } +} + +func (r *MockResolver) LookupTXT(ctx context.Context, name string) ([]string, error) { + results, ok := r.TXT[name] + if ok { + return results, nil + } else { + return []string{}, nil + } +} diff --git a/go-multiaddr-dns/resolve.go b/go-multiaddr-dns/resolve.go new file mode 100644 index 0000000..915765a --- /dev/null +++ b/go-multiaddr-dns/resolve.go @@ -0,0 +1,296 @@ +package madns + +import ( + "context" + "net" + "strings" + + "github.com/miekg/dns" + ma "github.com/multiformats/go-multiaddr" +) + +var ( + dnsaddrProtocol = ma.ProtocolWithCode(ma.P_DNSADDR) + dns4Protocol = ma.ProtocolWithCode(ma.P_DNS4) + dns6Protocol = ma.ProtocolWithCode(ma.P_DNS6) + dnsProtocol = ma.ProtocolWithCode(ma.P_DNS) +) + +var ResolvableProtocols = []ma.Protocol{dnsaddrProtocol, dns4Protocol, dns6Protocol, dnsProtocol} +var DefaultResolver = &Resolver{def: net.DefaultResolver} + +const dnsaddrTXTPrefix = "dnsaddr=" + +// BasicResolver is a low level interface for DNS resolution +type BasicResolver interface { + LookupIPAddr(context.Context, string) ([]net.IPAddr, error) + LookupTXT(context.Context, string) ([]string, error) +} + +// Resolver is an object capable of resolving dns multiaddrs by using one or more BasicResolvers; +// it supports custom per domain/TLD resolvers. +// It also implements the BasicResolver interface so that it can act as a custom per domain/TLD +// resolver. 
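+//
+// A minimal usage sketch (ctx, maddr, and myResolver, a hypothetical
+// BasicResolver for the .test TLD, are assumed to be supplied by the caller):
+//
+//	rslv, err := NewResolver(WithDomainResolver("test.", myResolver))
+//	if err != nil {
+//		// handle the error
+//	}
+//	addrs, err := rslv.Resolve(ctx, maddr)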
+type Resolver struct {
+	def    BasicResolver
+	custom map[string]BasicResolver
+}
+
+var _ BasicResolver = (*Resolver)(nil)
+
+// NewResolver creates a new Resolver instance with the specified options
+func NewResolver(opts ...Option) (*Resolver, error) {
+	r := &Resolver{def: net.DefaultResolver}
+	for _, opt := range opts {
+		err := opt(r)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return r, nil
+}
+
+type Option func(*Resolver) error
+
+// WithDefaultResolver is an option that specifies the default basic resolver,
+// which resolves any TLD that doesn't have a custom resolver.
+// Defaults to net.DefaultResolver
+func WithDefaultResolver(def BasicResolver) Option {
+	return func(r *Resolver) error {
+		r.def = def
+		return nil
+	}
+}
+
+// WithDomainResolver specifies a custom resolver for a domain/TLD.
+// Custom resolver selection matches domains left to right, with more specific resolvers
+// superseding generic ones.
+func WithDomainResolver(domain string, rslv BasicResolver) Option {
+	return func(r *Resolver) error {
+		if r.custom == nil {
+			r.custom = make(map[string]BasicResolver)
+		}
+		fqdn := dns.Fqdn(domain)
+		r.custom[fqdn] = rslv
+		return nil
+	}
+}
+
+func (r *Resolver) getResolver(domain string) BasicResolver {
+	fqdn := dns.Fqdn(domain)
+
+	// we match left-to-right, with more specific resolvers superseding generic ones.
+	// So for a domain a.b.c, we will try a.b.c, b.c, c, and fall back to the default if
+	// there is no match
+	rslv, ok := r.custom[fqdn]
+	if ok {
+		return rslv
+	}
+
+	for i := strings.Index(fqdn, "."); i != -1; i = strings.Index(fqdn, ".") {
+		fqdn = fqdn[i+1:]
+		if fqdn == "" {
+			// the . is the default resolver
+			break
+		}
+
+		rslv, ok = r.custom[fqdn]
+		if ok {
+			return rslv
+		}
+	}
+
+	return r.def
+}
+
+// Resolve resolves a DNS multiaddr.
+func (r *Resolver) Resolve(ctx context.Context, maddr ma.Multiaddr) ([]ma.Multiaddr, error) {
+	var results []ma.Multiaddr
+	for i := 0; maddr != nil; i++ {
+		var keep ma.Multiaddr
+		var err error
+
+		// Find the next dns component.
+		keep, maddr, err = ma.SplitFunc(maddr, func(c ma.Component) bool {
+			switch c.Protocol().Code {
+			case dnsProtocol.Code, dns4Protocol.Code, dns6Protocol.Code, dnsaddrProtocol.Code:
+				return true
+			default:
+				return false
+			}
+		})
+
+		if err != nil {
+			return nil, err
+		}
+
+		// Keep everything before the dns component.
+		if keep != nil {
+			if len(results) == 0 {
+				results = []ma.Multiaddr{keep}
+			} else {
+				for i, r := range results {
+					results[i] = r.Encapsulate(keep)
+				}
+			}
+		}
+
+		// If the rest is empty, we've hit the end (there _was_ no dns component).
+		if maddr == nil {
+			break
+		}
+
+		// split off the dns component.
+		var resolve *ma.Component
+		resolve, maddr, err = ma.SplitFirst(maddr)
+		if err != nil {
+			return nil, err
+		}
+
+		proto := resolve.Protocol()
+		value := resolve.Value()
+		rslv := r.getResolver(value)
+
+		// resolve the dns component
+		var resolved []ma.Multiaddr
+		switch proto.Code {
+		case dns4Protocol.Code, dns6Protocol.Code, dnsProtocol.Code:
+			// The dns, dns4, and dns6 resolvers simply resolve each
+			// dns* component into an ipv4/ipv6 address.
+
+			v4only := proto.Code == dns4Protocol.Code
+			v6only := proto.Code == dns6Protocol.Code
+
+			// XXX: Unfortunately, go does a pretty terrible job of
+			// differentiating between IPv6 and IPv4. A v4-in-v6
+			// AAAA record will _look_ like an A record to us and
+			// there's nothing we can do about that.
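+			//
+			// Illustrative note: with the mock data used in the tests
+			// below, /dns4/example.com keeps only A-style results such
+			// as 192.0.2.1, /dns6/example.com keeps only AAAA-style
+			// results such as 2001:db8::a3, and plain /dns/example.com
+			// keeps both.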
+ records, err := rslv.LookupIPAddr(ctx, value) + if err != nil { + return nil, err + } + + // Convert each DNS record into a multiaddr. If the + // protocol is dns4, throw away any IPv6 addresses. If + // the protocol is dns6, throw away any IPv4 addresses. + + for _, r := range records { + var ( + rmaddr ma.Multiaddr + err error + ) + ip4 := r.IP.To4() + if ip4 == nil { + if v4only { + continue + } + rmaddr, err = ma.NewMultiaddr("/ip6/" + r.IP.String()) + } else { + if v6only { + continue + } + rmaddr, err = ma.NewMultiaddr("/ip4/" + ip4.String()) + } + if err != nil { + return nil, err + } + resolved = append(resolved, rmaddr) + } + case dnsaddrProtocol.Code: + // The dnsaddr resolver is a bit more complicated. We: + // + // 1. Lookup the dnsaddr txt record on _dnsaddr.DOMAIN.TLD + // 2. Take everything _after_ the `/dnsaddr/DOMAIN.TLD` + // part of the multiaddr. + // 3. Find the dnsaddr records (if any) with suffixes + // matching the result of step 2. + + // First, lookup the TXT record + records, err := rslv.LookupTXT(ctx, "_dnsaddr."+value) + if err != nil { + return nil, err + } + + // Then, calculate the length of the suffix we're + // looking for. + length := 0 + if maddr != nil { + length = addrLen(maddr) + } + + for _, r := range records { + // Ignore non dnsaddr TXT records. + if !strings.HasPrefix(r, dnsaddrTXTPrefix) { + continue + } + + // Extract and decode the multiaddr. + rmaddr, err := ma.NewMultiaddr(r[len(dnsaddrTXTPrefix):]) + if err != nil { + // discard multiaddrs we don't understand. + // XXX: Is this right? It's the best we + // can do for now, really. + continue + } + + // If we have a suffix to match on. + if maddr != nil { + // Make sure the new address is at least + // as long as the suffix we're looking + // for. + rmlen := addrLen(rmaddr) + if rmlen < length { + // not long enough. + continue + } + + // Matches everything after the /dnsaddr/... with the end of the + // dnsaddr record: + // + // v----------rmlen-----------------v + // /ip4/1.2.3.4/tcp/1234/p2p/QmFoobar + // /p2p/QmFoobar + // ^--(rmlen - length)--^---length--^ + if !maddr.Equal(offset(rmaddr, rmlen-length)) { + continue + } + } + + resolved = append(resolved, rmaddr) + } + + // consumes the rest of the multiaddr as part of the "match" process. + maddr = nil + default: + panic("unreachable") + } + + if len(resolved) == 0 { + return nil, nil + } else if len(results) == 0 { + results = resolved + } else { + // We take the cross product here as we don't have any + // better way to represent "ORs" in multiaddrs. 
For + // example, `/dns/foo.com/p2p-circuit/dns/bar.com` could + // resolve to: + // + // * /ip4/1.1.1.1/p2p-circuit/ip4/2.1.1.1 + // * /ip4/1.1.1.1/p2p-circuit/ip4/2.1.1.2 + // * /ip4/1.1.1.2/p2p-circuit/ip4/2.1.1.1 + // * /ip4/1.1.1.2/p2p-circuit/ip4/2.1.1.2 + results = cross(results, resolved) + } + } + + return results, nil +} + +func (r *Resolver) LookupIPAddr(ctx context.Context, domain string) ([]net.IPAddr, error) { + return r.getResolver(domain).LookupIPAddr(ctx, domain) +} + +func (r *Resolver) LookupTXT(ctx context.Context, txt string) ([]string, error) { + return r.getResolver(txt).LookupTXT(ctx, txt) +} diff --git a/go-multiaddr-dns/resolve_test.go b/go-multiaddr-dns/resolve_test.go new file mode 100644 index 0000000..2ffc404 --- /dev/null +++ b/go-multiaddr-dns/resolve_test.go @@ -0,0 +1,335 @@ +package madns + +import ( + "context" + "net" + "testing" + + ma "github.com/multiformats/go-multiaddr" +) + +var ip4a = net.IPAddr{IP: net.ParseIP("192.0.2.1")} +var ip4b = net.IPAddr{IP: net.ParseIP("192.0.2.2")} +var ip6a = net.IPAddr{IP: net.ParseIP("2001:db8::a3")} +var ip6b = net.IPAddr{IP: net.ParseIP("2001:db8::a4")} + +var ip4ma, _ = ma.StringCast("/ip4/" + ip4a.IP.String()) +var ip4mb, _ = ma.StringCast("/ip4/" + ip4b.IP.String()) +var ip6ma, _ = ma.StringCast("/ip6/" + ip6a.IP.String()) +var ip6mb, _ = ma.StringCast("/ip6/" + ip6b.IP.String()) + +var textc, _ = ma.StringCast("/tcp/123/http") +var textd, _ = ma.StringCast("/tcp/123") +var texte, _ = ma.StringCast("/tcp/789/http") + +var txtmc = ma.Join(ip4ma, textc) +var txtmd = ma.Join(ip4ma, textd) +var txtme = ma.Join(ip4ma, texte) + +var txta = "dnsaddr=" + ip4ma.String() +var txtb = "dnsaddr=" + ip6ma.String() +var txtc = "dnsaddr=" + txtmc.String() +var txtd = "dnsaddr=" + txtmd.String() +var txte = "dnsaddr=" + txtme.String() + +func makeResolver() *Resolver { + mock := &MockResolver{ + IP: map[string][]net.IPAddr{ + "example.com": {ip4a, ip4b, ip6a, ip6b}, + }, + TXT: map[string][]string{ + "_dnsaddr.example.com": {txta, txtb}, + "_dnsaddr.matching.com": {txtc, txtd, txte, "not a dnsaddr", "dnsaddr=/foobar"}, + }, + } + resolver := &Resolver{def: mock} + return resolver +} + +func TestMatches(t *testing.T) { + s, _ := ma.StringCast("/tcp/1234/dns6/example.com") + if !Matches(s) { + // Pretend this is a p2p-circuit address. Unfortunately, we'd + // need to depend on the circuit package to parse it. 
+		t.Fatalf("expected match, didn't: /tcp/1234/dns6/example.com")
+	}
+	s, _ = ma.StringCast("/dns/example.com")
+	if !Matches(s) {
+		t.Fatalf("expected match, didn't: /dns/example.com")
+	}
+	s, _ = ma.StringCast("/dns4/example.com")
+	if !Matches(s) {
+		t.Fatalf("expected match, didn't: /dns4/example.com")
+	}
+	s, _ = ma.StringCast("/dns6/example.com")
+	if !Matches(s) {
+		t.Fatalf("expected match, didn't: /dns6/example.com")
+	}
+	s, _ = ma.StringCast("/dnsaddr/example.com")
+	if !Matches(s) {
+		t.Fatalf("expected match, didn't: /dnsaddr/example.com")
+	}
+	if Matches(ip4ma) {
+		t.Fatalf("expected no-match, but did: %s", ip4ma.String())
+	}
+}
+
+func TestSimpleIPResolve(t *testing.T) {
+	ctx := context.Background()
+	resolver := makeResolver()
+
+	s, _ := ma.StringCast("/dns4/example.com")
+	addrs4, err := resolver.Resolve(ctx, s)
+	if err != nil {
+		t.Error(err)
+	}
+	if len(addrs4) != 2 || !addrs4[0].Equal(ip4ma) || !addrs4[1].Equal(ip4mb) {
+		t.Fatalf("expected [%s %s], got %+v", ip4ma, ip4mb, addrs4)
+	}
+
+	s, _ = ma.StringCast("/dns6/example.com")
+	addrs6, err := resolver.Resolve(ctx, s)
+	if err != nil {
+		t.Error(err)
+	}
+	if len(addrs6) != 2 || !addrs6[0].Equal(ip6ma) || !addrs6[1].Equal(ip6mb) {
+		t.Fatalf("expected [%s %s], got %+v", ip6ma, ip6mb, addrs6)
+	}
+
+	s, _ = ma.StringCast("/dns/example.com")
+	addrs, err := resolver.Resolve(ctx, s)
+	if err != nil {
+		t.Error(err)
+	}
+	for i, expected := range []ma.Multiaddr{ip4ma, ip4mb, ip6ma, ip6mb} {
+		if !expected.Equal(addrs[i]) {
+			t.Fatalf("%d: expected %s, got %s", i, expected, addrs[i])
+		}
+	}
+}
+
+func TestResolveMultiple(t *testing.T) {
+	ctx := context.Background()
+	resolver := makeResolver()
+
+	s, _ := ma.StringCast("/dns4/example.com/quic/dns6/example.com")
+	addrs, err := resolver.Resolve(ctx, s)
+	if err != nil {
+		t.Error(err)
+	}
+	for i, x := range []ma.Multiaddr{ip4ma, ip4mb} {
+		for j, y := range []ma.Multiaddr{ip6ma, ip6mb} {
+			s, _ = ma.StringCast("/quic")
+			expected := ma.Join(x, s, y)
+			actual := addrs[i*2+j]
+			if !expected.Equal(actual) {
+				t.Fatalf("expected %s, got %s", expected, actual)
+			}
+		}
+	}
+}
+
+func TestResolveMultipleAdjacent(t *testing.T) {
+	ctx := context.Background()
+	resolver := makeResolver()
+	s, _ := ma.StringCast("/dns4/example.com/dns6/example.com")
+	addrs, err := resolver.Resolve(ctx, s)
+	if err != nil {
+		t.Error(err)
+	}
+	for i, x := range []ma.Multiaddr{ip4ma, ip4mb} {
+		for j, y := range []ma.Multiaddr{ip6ma, ip6mb} {
+			expected := ma.Join(x, y)
+			actual := addrs[i*2+j]
+			if !expected.Equal(actual) {
+				t.Fatalf("expected %s, got %s", expected, actual)
+			}
+		}
+	}
+}
+
+func TestResolveMultipleSandwich(t *testing.T) {
+	ctx := context.Background()
+	resolver := makeResolver()
+	s, _ := ma.StringCast("/quic/dns4/example.com/dns6/example.com/http")
+	addrs, err := resolver.Resolve(ctx, s)
+	if err != nil {
+		t.Error(err)
+	}
+	for i, x := range []ma.Multiaddr{ip4ma, ip4mb} {
+		for j, y := range []ma.Multiaddr{ip6ma, ip6mb} {
+			q, _ := ma.StringCast("/quic")
+			s, _ := ma.StringCast("/http")
+			expected := ma.Join(q, x, y, s)
+			actual := addrs[i*2+j]
+			if !expected.Equal(actual) {
+				t.Fatalf("expected %s, got %s", expected, actual)
+			}
+		}
+	}
+}
+
+func TestSimpleTXTResolve(t *testing.T) {
+	ctx := context.Background()
+	resolver := makeResolver()
+	s, _ := ma.StringCast("/dnsaddr/example.com")
+	addrs, err := resolver.Resolve(ctx, s)
+	if err != nil {
+		t.Error(err)
+	}
+	if len(addrs) != 2 || !addrs[0].Equal(ip4ma) || !addrs[1].Equal(ip6ma) {
+		t.Fatalf("expected [%s %s], got %+v", ip4ma, ip6ma, addrs)
+	}
+}
+
+func TestNonResolvable(t *testing.T) {
+	ctx := context.Background()
+	resolver := makeResolver()
+
+	addrs, err := resolver.Resolve(ctx, ip4ma)
+	if err != nil {
+		t.Error(err)
+	}
+	if len(addrs) != 1 || !addrs[0].Equal(ip4ma) {
+		t.Fatalf("expected [%s], got %+v", ip4ma, addrs)
+	}
+}
+
+func TestLongMatch(t *testing.T) {
+	ctx := context.Background()
+	resolver := makeResolver()
+	s, _ := ma.StringCast("/dnsaddr/example.com/quic/quic/quic/quic")
+	res, err := resolver.Resolve(ctx, s)
+	if err != nil {
+		t.Error(err)
+	}
+	if len(res) != 0 {
+		t.Error("expected no results")
+	}
+}
+
+func TestEmptyResult(t *testing.T) {
+	ctx := context.Background()
+	resolver := makeResolver()
+	s, _ := ma.StringCast("/dnsaddr/none.com")
+	addrs, err := resolver.Resolve(ctx, s)
+	if err != nil {
+		t.Error(err)
+	}
+	if len(addrs) > 0 {
+		t.Fatalf("expected [], got %+v", addrs)
+	}
+}
+
+func TestDnsaddrMatching(t *testing.T) {
+	ctx := context.Background()
+	resolver := makeResolver()
+	s, _ := ma.StringCast("/dnsaddr/matching.com/tcp/123/http")
+	addrs, err := resolver.Resolve(ctx, s)
+	if err != nil {
+		t.Error(err)
+	}
+	if len(addrs) != 1 || !addrs[0].Equal(txtmc) {
+		t.Fatalf("expected [%s], got %+v", txtmc, addrs)
+	}
+	s, _ = ma.StringCast("/dnsaddr/matching.com/tcp/123")
+	addrs, err = resolver.Resolve(ctx, s)
+	if err != nil {
+		t.Error(err)
+	}
+	if len(addrs) != 1 || !addrs[0].Equal(txtmd) {
+		t.Fatalf("expected [%s], got %+v", txtmd, addrs)
+	}
+}
+
+func TestBadDomain(t *testing.T) {
+	s, _ := ma.StringCast("/dns4/example.com")
+	bts := s.Bytes()
+	bts[len(bts)-5] = '/'
+	_, err := ma.NewMultiaddrBytes(bts)
+	if err == nil {
+		t.Error("expected malformed address to fail to parse")
+	}
+}
+
+func TestCustomResolver(t *testing.T) {
+	ip1 := net.IPAddr{IP: net.ParseIP("1.2.3.4")}
+	ip2 := net.IPAddr{IP: net.ParseIP("2.3.4.5")}
+	ip3 := net.IPAddr{IP: net.ParseIP("3.4.5.6")}
+	ip4 := net.IPAddr{IP: net.ParseIP("4.5.6.8")}
+	ip5 := net.IPAddr{IP: net.ParseIP("5.6.8.9")}
+	ip6 := net.IPAddr{IP: net.ParseIP("6.8.9.10")}
+	def := &MockResolver{
+		IP: map[string][]net.IPAddr{
+			"example.com": {ip1},
+		},
+	}
+	custom1 := &MockResolver{
+		IP: map[string][]net.IPAddr{
+			"custom.test":         {ip2},
+			"another.custom.test": {ip3},
+			"more.custom.test":    {ip6},
+		},
+	}
+	custom2 := &MockResolver{
+		IP: map[string][]net.IPAddr{
+			"more.custom.test":      {ip4},
+			"some.more.custom.test": {ip5},
+		},
+	}
+
+	rslv, err := NewResolver(
+		WithDefaultResolver(def),
+		WithDomainResolver("custom.test", custom1),
+		WithDomainResolver("more.custom.test", custom2),
+	)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	ctx := context.Background()
+	res, err := rslv.LookupIPAddr(ctx, "example.com")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if len(res) != 1 || !res[0].IP.Equal(ip1.IP) {
+		t.Fatal("expected result to be ip1")
+	}
+
+	res, err = rslv.LookupIPAddr(ctx, "custom.test")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if len(res) != 1 || !res[0].IP.Equal(ip2.IP) {
+		t.Fatal("expected result to be ip2")
+	}
+
+	res, err = rslv.LookupIPAddr(ctx, "another.custom.test")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if len(res) != 1 || !res[0].IP.Equal(ip3.IP) {
+		t.Fatal("expected result to be ip3")
+	}
+
+	res, err = rslv.LookupIPAddr(ctx, "more.custom.test")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if len(res) != 1 || !res[0].IP.Equal(ip4.IP) {
+		t.Fatal("expected result to be ip4")
+	}
+
+	res, err = rslv.LookupIPAddr(ctx, "some.more.custom.test")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if
len(res) != 1 || !res[0].IP.Equal(ip5.IP) { + t.Fatal("expected result to be ip5") + } +} diff --git a/go-multiaddr-dns/util.go b/go-multiaddr-dns/util.go new file mode 100644 index 0000000..c458de0 --- /dev/null +++ b/go-multiaddr-dns/util.go @@ -0,0 +1,67 @@ +package madns + +import ( + "context" + + ma "github.com/multiformats/go-multiaddr" +) + +func Matches(maddr ma.Multiaddr) (matches bool) { + ma.ForEach(maddr, func(c ma.Component, e error) bool { + if e != nil { + return false + } + switch c.Protocol().Code { + case dnsProtocol.Code, dns4Protocol.Code, dns6Protocol.Code, dnsaddrProtocol.Code: + matches = true + } + return !matches + }) + return matches +} + +func Resolve(ctx context.Context, maddr ma.Multiaddr) ([]ma.Multiaddr, error) { + return DefaultResolver.Resolve(ctx, maddr) +} + +// counts the number of components in the multiaddr +func addrLen(maddr ma.Multiaddr) int { + length := 0 + ma.ForEach(maddr, func(_ ma.Component, e error) bool { + if e != nil { + length = 0 + return false + } + length++ + return true + }) + return length +} + +// trims `offset` components from the beginning of the multiaddr. +func offset(maddr ma.Multiaddr, offset int) ma.Multiaddr { + _, after, err := ma.SplitFunc(maddr, func(c ma.Component) bool { + if offset == 0 { + return true + } + offset-- + return false + }) + if err != nil { + return nil + } + return after +} + +// takes the cross product of two sets of multiaddrs +// +// assumes `a` is non-empty. +func cross(a, b []ma.Multiaddr) []ma.Multiaddr { + res := make([]ma.Multiaddr, 0, len(a)*len(b)) + for _, x := range a { + for _, y := range b { + res = append(res, x.Encapsulate(y)) + } + } + return res +} diff --git a/go-multiaddr-dns/version.json b/go-multiaddr-dns/version.json new file mode 100644 index 0000000..9ed87d7 --- /dev/null +++ b/go-multiaddr-dns/version.json @@ -0,0 +1,3 @@ +{ + "version": "v0.3.1" +} diff --git a/go-multiaddr/.gitignore b/go-multiaddr/.gitignore new file mode 100644 index 0000000..699d271 --- /dev/null +++ b/go-multiaddr/.gitignore @@ -0,0 +1,3 @@ +.vscode/ +multiaddr/multiaddr +tmp/ diff --git a/go-multiaddr/LICENSE b/go-multiaddr/LICENSE new file mode 100644 index 0000000..c7386b3 --- /dev/null +++ b/go-multiaddr/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Juan Batiz-Benet + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
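A quick end-to-end sketch of the go-multiaddr-dns API added above, wiring the MockResolver from mock.go into a Resolver and resolving a /dns4 address (the example.com fixture mirrors resolve_test.go; this is illustrative only, not part of the patch):

```go
package main

import (
	"context"
	"fmt"
	"net"

	ma "github.com/multiformats/go-multiaddr"
	madns "github.com/multiformats/go-multiaddr-dns"
)

func main() {
	// Mock DNS data, mirroring the fixtures in resolve_test.go.
	mock := &madns.MockResolver{
		IP: map[string][]net.IPAddr{
			"example.com": {{IP: net.ParseIP("192.0.2.1")}},
		},
	}

	rslv, err := madns.NewResolver(madns.WithDefaultResolver(mock))
	if err != nil {
		fmt.Println("error:", err)
		return
	}

	maddr, err := ma.NewMultiaddr("/dns4/example.com/tcp/443")
	if err != nil {
		fmt.Println("error:", err)
		return
	}

	// Resolve substitutes the dns4 component and re-encapsulates /tcp/443.
	addrs, err := rslv.Resolve(context.Background(), maddr)
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println(addrs) // [/ip4/192.0.2.1/tcp/443]
}
```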
diff --git a/go-multiaddr/README.md b/go-multiaddr/README.md
new file mode 100644
index 0000000..df2766a
--- /dev/null
+++ b/go-multiaddr/README.md
@@ -0,0 +1,117 @@
+# go-multiaddr
+
+[![](https://img.shields.io/badge/made%20by-Protocol%20Labs-blue.svg?style=flat-square)](http://ipn.io)
+[![](https://img.shields.io/badge/project-multiformats-blue.svg?style=flat-square)](https://github.com/multiformats/multiformats)
+[![](https://img.shields.io/badge/freenode-%23ipfs-blue.svg?style=flat-square)](https://webchat.freenode.net/?channels=%23ipfs)
+[![](https://img.shields.io/badge/readme%20style-standard-brightgreen.svg?style=flat-square)](https://github.com/RichardLitt/standard-readme)
+[![GoDoc](https://godoc.org/github.com/multiformats/go-multiaddr?status.svg)](https://godoc.org/github.com/multiformats/go-multiaddr)
+[![Travis CI](https://img.shields.io/travis/multiformats/go-multiaddr.svg?style=flat-square&branch=master)](https://travis-ci.org/multiformats/go-multiaddr)
+[![codecov.io](https://img.shields.io/codecov/c/github/multiformats/go-multiaddr.svg?style=flat-square&branch=master)](https://codecov.io/github/multiformats/go-multiaddr?branch=master)
+
+> [multiaddr](https://github.com/multiformats/multiaddr) implementation in go
+
+Multiaddr is a standard way to represent addresses that:
+
+- Support any standard network protocols.
+- Self-describe (include protocols).
+- Have a binary packed format.
+- Have a nice string representation.
+- Encapsulate well.
+
+## Table of Contents
+
+- [Install](#install)
+- [Usage](#usage)
+  - [Example](#example)
+    - [Simple](#simple)
+    - [Protocols](#protocols)
+    - [En/decapsulate](#endecapsulate)
+    - [Tunneling](#tunneling)
+- [Contribute](#contribute)
+- [License](#license)
+
+## Install
+
+```sh
+go get github.com/multiformats/go-multiaddr
+```
+
+## Usage
+
+### Example
+
+#### Simple
+
+```go
+import ma "github.com/multiformats/go-multiaddr"
+
+// construct from a string (err signals parse failure)
+m1, err := ma.NewMultiaddr("/ip4/127.0.0.1/udp/1234")
+
+// construct from bytes (err signals parse failure)
+m2, err := ma.NewMultiaddrBytes(m1.Bytes())
+
+// true
+strings.Equal(m1.String(), "/ip4/127.0.0.1/udp/1234")
+strings.Equal(m1.String(), m2.String())
+bytes.Equal(m1.Bytes(), m2.Bytes())
+m1.Equal(m2)
+m2.Equal(m1)
+```
+
+#### Protocols
+
+```go
+// get the multiaddr protocol description objects
+m1.Protocols()
+// []Protocol{
+//	Protocol{ Code: 4, Name: 'ip4', Size: 32},
+//	Protocol{ Code: 17, Name: 'udp', Size: 16},
+// }
+```
+
+#### En/decapsulate
+
+```go
+import ma "github.com/multiformats/go-multiaddr"
+
+m, err := ma.NewMultiaddr("/ip4/127.0.0.1/udp/1234")
+// <Multiaddr /ip4/127.0.0.1/udp/1234>
+
+sctpMA, err := ma.NewMultiaddr("/sctp/5678")
+
+m.Encapsulate(sctpMA)
+// <Multiaddr /ip4/127.0.0.1/udp/1234/sctp/5678>
+
+udpMA, err := ma.NewMultiaddr("/udp/1234")
+
+m.Decapsulate(udpMA) // up to and including the last occurrence of subaddr
+// <Multiaddr /ip4/127.0.0.1>
+```
+
+#### Tunneling
+
+Multiaddr allows expressing tunnels very nicely.
+
+```go
+printer, _ := ma.NewMultiaddr("/ip4/192.168.0.13/tcp/80")
+proxy, _ := ma.NewMultiaddr("/ip4/10.20.30.40/tcp/443")
+printerOverProxy := proxy.Encapsulate(printer)
+// /ip4/10.20.30.40/tcp/443/ip4/192.168.0.13/tcp/80
+
+proxyAgain := printerOverProxy.Decapsulate(printer)
+// /ip4/10.20.30.40/tcp/443
+```
+
+## Contribute
+
+Contributions welcome. Please check out [the issues](https://github.com/multiformats/go-multiaddr/issues).
+ +Check out our [contributing document](https://github.com/multiformats/multiformats/blob/master/contributing.md) for more information on how we work, and about contributing in general. Please be aware that all interactions related to multiformats are subject to the IPFS [Code of Conduct](https://github.com/ipfs/community/blob/master/code-of-conduct.md). + +Small note: If editing the README, please conform to the [standard-readme](https://github.com/RichardLitt/standard-readme) specification. + +## License + +[MIT](LICENSE) © 2014 Juan Batiz-Benet diff --git a/go-multiaddr/codec.go b/go-multiaddr/codec.go new file mode 100644 index 0000000..f2951c4 --- /dev/null +++ b/go-multiaddr/codec.go @@ -0,0 +1,181 @@ +package multiaddr + +import ( + "bytes" + "fmt" + "strings" + + "github.com/multiformats/go-varint" +) + +func stringToBytes(s string) ([]byte, error) { + // consume trailing slashes + s = strings.TrimRight(s, "/") + + var b bytes.Buffer + sp := strings.Split(s, "/") + + if sp[0] != "" { + return nil, fmt.Errorf("failed to parse multiaddr %q: must begin with /", s) + } + + // consume first empty elem + sp = sp[1:] + + if len(sp) == 0 { + return nil, fmt.Errorf("failed to parse multiaddr %q: empty multiaddr", s) + } + + for len(sp) > 0 { + name := sp[0] + p := ProtocolWithName(name) + if p.Code == 0 { + return nil, fmt.Errorf("failed to parse multiaddr %q: unknown protocol %s", s, sp[0]) + } + _, _ = b.Write(p.VCode) + sp = sp[1:] + + if p.Size == 0 { // no length. + continue + } + + if len(sp) < 1 { + return nil, fmt.Errorf("failed to parse multiaddr %q: unexpected end of multiaddr", s) + } + + if p.Path { + // it's a path protocol (terminal). + // consume the rest of the address as the next component. + sp = []string{"/" + strings.Join(sp, "/")} + } + + a, err := p.Transcoder.StringToBytes(sp[0]) + if err != nil { + return nil, fmt.Errorf("failed to parse multiaddr %q: invalid value %q for protocol %s: %s", s, sp[0], p.Name, err) + } + if p.Size < 0 { // varint size. 
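+			// Illustrative note: variable-sized values get an unsigned
+			// varint length prefix; e.g. a 16-byte value is prefixed
+			// with the single byte 0x10.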
+ _, _ = b.Write(varint.ToUvarint(uint64(len(a)))) + } + b.Write(a) + sp = sp[1:] + } + + return b.Bytes(), nil +} + +func validateBytes(b []byte) (err error) { + if len(b) == 0 { + return fmt.Errorf("empty multiaddr") + } + for len(b) > 0 { + code, n, err := ReadVarintCode(b) + if err != nil { + return err + } + + b = b[n:] + p := ProtocolWithCode(code) + if p.Code == 0 { + return fmt.Errorf("no protocol with code %d", code) + } + + if p.Size == 0 { + continue + } + + n, size, err := sizeForAddr(p, b) + if err != nil { + return err + } + + b = b[n:] + + if len(b) < size || size < 0 { + return fmt.Errorf("invalid value for size %d", len(b)) + } + if p.Path && len(b) != size { + return fmt.Errorf("invalid size of component for path protocol %d: expected %d", size, len(b)) + } + + err = p.Transcoder.ValidateBytes(b[:size]) + if err != nil { + return err + } + + b = b[size:] + } + + return nil +} + +func readComponent(b []byte) (int, Component, error) { + var offset int + code, n, err := ReadVarintCode(b) + if err != nil { + return 0, Component{}, err + } + offset += n + + p := ProtocolWithCode(code) + if p.Code == 0 { + return 0, Component{}, fmt.Errorf("no protocol with code %d", code) + } + + if p.Size == 0 { + return offset, Component{ + bytes: b[:offset], + offset: offset, + protocol: p, + }, nil + } + + n, size, err := sizeForAddr(p, b[offset:]) + if err != nil { + return 0, Component{}, err + } + + offset += n + + if len(b[offset:]) < size || size < 0 { + return 0, Component{}, fmt.Errorf("invalid value for size %d", len(b[offset:])) + } + + return offset + size, Component{ + bytes: b[:offset+size], + protocol: p, + offset: offset, + }, nil +} + +func bytesToString(b []byte) (ret string, err error) { + if len(b) == 0 { + return "", fmt.Errorf("empty multiaddr") + } + var buf strings.Builder + + for len(b) > 0 { + n, c, err := readComponent(b) + if err != nil { + return "", err + } + b = b[n:] + c.writeTo(&buf) + } + + return buf.String(), nil +} + +func sizeForAddr(p Protocol, b []byte) (skip, size int, err error) { + switch { + case p.Size > 0: + return 0, (p.Size / 8), nil + case p.Size == 0: + return 0, 0, nil + default: + size, n, err := ReadVarintCode(b) + if err != nil { + return 0, 0, err + } + return n, size, nil + } +} diff --git a/go-multiaddr/codecov.yml b/go-multiaddr/codecov.yml new file mode 100644 index 0000000..ca8100a --- /dev/null +++ b/go-multiaddr/codecov.yml @@ -0,0 +1,2 @@ +ignore: + - "multiaddr" diff --git a/go-multiaddr/component.go b/go-multiaddr/component.go new file mode 100644 index 0000000..633ba8d --- /dev/null +++ b/go-multiaddr/component.go @@ -0,0 +1,184 @@ +package multiaddr + +import ( + "bytes" + "encoding/binary" + "encoding/json" + "fmt" + "strings" + + "github.com/multiformats/go-varint" +) + +// Component is a single multiaddr Component. 
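+//
+// A component is the smallest unit a multiaddr can be split into; e.g.
+// /ip4/127.0.0.1/tcp/80 splits into the components /ip4/127.0.0.1 and /tcp/80.
+// A short sketch of constructing one directly with NewComponent (defined
+// later in this file):
+//
+//	c, err := NewComponent("tcp", "80")
+//	if err != nil {
+//		// handle the error
+//	}
+//	_ = c.String() // "/tcp/80"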
+type Component struct { + bytes []byte + protocol Protocol + offset int +} + +func (c *Component) Bytes() []byte { + return c.bytes +} + +func (c *Component) MarshalBinary() ([]byte, error) { + return c.Bytes(), nil +} + +func (c *Component) UnmarshalBinary(data []byte) error { + _, comp, err := readComponent(data) + if err != nil { + return err + } + *c = comp + return nil +} + +func (c *Component) MarshalText() ([]byte, error) { + return []byte(c.String()), nil +} + +func (c *Component) UnmarshalText(data []byte) error { + bytes, err := stringToBytes(string(data)) + if err != nil { + return err + } + _, comp, err := readComponent(bytes) + if err != nil { + return err + } + *c = comp + return nil +} + +func (c *Component) MarshalJSON() ([]byte, error) { + txt, err := c.MarshalText() + if err != nil { + return nil, err + } + + return json.Marshal(string(txt)) +} + +func (m *Component) UnmarshalJSON(data []byte) error { + var v string + if err := json.Unmarshal(data, &v); err != nil { + return err + } + + return m.UnmarshalText([]byte(v)) +} + +func (c *Component) Equal(o Multiaddr) bool { + if o == nil { + return false + } + return bytes.Equal(c.bytes, o.Bytes()) +} + +func (c *Component) Protocols() []Protocol { + return []Protocol{c.protocol} +} + +func (c *Component) Decapsulate(o Multiaddr) Multiaddr { + if c.Equal(o) { + return nil + } + return c +} + +func (c *Component) Encapsulate(o Multiaddr) Multiaddr { + m := &multiaddr{bytes: c.bytes} + return m.Encapsulate(o) +} + +func (c *Component) ValueForProtocol(code int) (string, error) { + if c.protocol.Code != code { + return "", ErrProtocolNotFound + } + return c.Value(), nil +} + +func (c *Component) Protocol() Protocol { + return c.protocol +} + +func (c *Component) RawValue() []byte { + return c.bytes[c.offset:] +} + +func (c *Component) Value() string { + if c.protocol.Transcoder == nil { + return "" + } + value, err := c.protocol.Transcoder.BytesToString(c.bytes[c.offset:]) + if err != nil { + return "" + } + return value +} + +func (c *Component) String() string { + var b strings.Builder + c.writeTo(&b) + return b.String() +} + +// writeTo is an efficient, private function for string-formatting a multiaddr. +// Trust me, we tend to allocate a lot when doing this. +func (c *Component) writeTo(b *strings.Builder) { + b.WriteByte('/') + b.WriteString(c.protocol.Name) + value := c.Value() + if len(value) == 0 { + return + } + if !(c.protocol.Path && value[0] == '/') { + b.WriteByte('/') + } + b.WriteString(value) +} + +// NewComponent constructs a new multiaddr component +func NewComponent(protocol, value string) (*Component, error) { + p := ProtocolWithName(protocol) + if p.Code == 0 { + return nil, fmt.Errorf("unsupported protocol: %s", protocol) + } + if p.Transcoder != nil { + bts, err := p.Transcoder.StringToBytes(value) + if err != nil { + return nil, err + } + return newComponent(p, bts), nil + } else if value != "" { + return nil, fmt.Errorf("protocol %s doesn't take a value", p.Name) + } + return newComponent(p, nil), nil + // TODO: handle path /? 
+} + +func newComponent(protocol Protocol, bvalue []byte) *Component { + size := len(bvalue) + size += len(protocol.VCode) + if protocol.Size < 0 { + size += varint.UvarintSize(uint64(len(bvalue))) + } + maddr := make([]byte, size) + var offset int + offset += copy(maddr[offset:], protocol.VCode) + if protocol.Size < 0 { + offset += binary.PutUvarint(maddr[offset:], uint64(len(bvalue))) + } + copy(maddr[offset:], bvalue) + + if len(maddr) != offset+len(bvalue) { + return nil + } + + return &Component{ + bytes: maddr, + protocol: protocol, + offset: offset, + } +} diff --git a/go-multiaddr/doc.go b/go-multiaddr/doc.go new file mode 100644 index 0000000..b80f3ab --- /dev/null +++ b/go-multiaddr/doc.go @@ -0,0 +1,35 @@ +/* +Package multiaddr provides an implementation of the Multiaddr network +address format. Multiaddr emphasizes explicitness, self-description, and +portability. It allows applications to treat addresses as opaque tokens, +and to avoid making assumptions about the address representation (e.g. length). +Learn more at https://github.com/multiformats/multiaddr + +Basic Use: + + import ( + "bytes" + "strings" + ma "github.com/multiformats/go-multiaddr" + ) + + // construct from a string (err signals parse failure) + m1, err := ma.NewMultiaddr("/ip4/127.0.0.1/udp/1234") + + // construct from bytes (err signals parse failure) + m2, err := ma.NewMultiaddrBytes(m1.Bytes()) + + // true + strings.Equal(m1.String(), "/ip4/127.0.0.1/udp/1234") + strings.Equal(m1.String(), m2.String()) + bytes.Equal(m1.Bytes(), m2.Bytes()) + m1.Equal(m2) + m2.Equal(m1) + + // tunneling (en/decap) + printer, _ := ma.NewMultiaddr("/ip4/192.168.0.13/tcp/80") + proxy, _ := ma.NewMultiaddr("/ip4/10.20.30.40/tcp/443") + printerOverProxy := proxy.Encapsulate(printer) + proxyAgain := printerOverProxy.Decapsulate(printer) +*/ +package multiaddr diff --git a/go-multiaddr/filter.go b/go-multiaddr/filter.go new file mode 100644 index 0000000..00e6a4b --- /dev/null +++ b/go-multiaddr/filter.go @@ -0,0 +1,153 @@ +package multiaddr + +import ( + "net" + "sync" +) + +// Action is an enum modelling all possible filter actions. +type Action int32 + +const ( + ActionNone Action = iota // zero value. + ActionAccept + ActionDeny +) + +type filterEntry struct { + f net.IPNet + action Action +} + +// Filters is a structure representing a collection of accept/deny +// net.IPNet filters, together with the DefaultAction flag, which +// represents the default filter policy. +// +// Note that the last policy added to the Filters is authoritative. +type Filters struct { + DefaultAction Action + + mu sync.RWMutex + filters []*filterEntry +} + +// NewFilters constructs and returns a new set of net.IPNet filters. +// By default, the new filter accepts all addresses. +func NewFilters() *Filters { + return &Filters{ + DefaultAction: ActionAccept, + filters: make([]*filterEntry, 0), + } +} + +func (fs *Filters) find(ipnet net.IPNet) (int, *filterEntry) { + s := ipnet.String() + for idx, ft := range fs.filters { + if ft.f.String() == s { + return idx, ft + } + } + return -1, nil +} + +// AddFilter adds a rule to the Filters set, enforcing the desired action for +// the provided IPNet mask. 
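+//
+// A short sketch of denying a subnet and then checking an address against the
+// rule (maddr is assumed to be an /ip4 multiaddr inside 192.0.2.0/24):
+//
+//	fs := NewFilters()
+//	_, ipnet, _ := net.ParseCIDR("192.0.2.0/24")
+//	fs.AddFilter(*ipnet, ActionDeny)
+//	blocked := fs.AddrBlocked(maddr) // true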
+func (fs *Filters) AddFilter(ipnet net.IPNet, action Action) { + fs.mu.Lock() + + if _, f := fs.find(ipnet); f != nil { + f.action = action + } else { + fs.filters = append(fs.filters, &filterEntry{ipnet, action}) + } + + fs.mu.Unlock() +} + +// RemoveLiteral removes the first filter associated with the supplied IPNet, +// returning whether something was removed or not. It makes no distinction +// between whether the rule is an accept or a deny. +func (fs *Filters) RemoveLiteral(ipnet net.IPNet) (removed bool) { + fs.mu.Lock() + + if idx, _ := fs.find(ipnet); idx != -1 { + fs.filters = append(fs.filters[:idx], fs.filters[idx+1:]...) + fs.mu.Unlock() + return true + } + + fs.mu.Unlock() + return false +} + +// AddrBlocked parses a ma.Multiaddr and, if a valid netip is found, it applies the +// Filter set rules, returning true if the given address should be denied, and false if +// the given address is accepted. +// +// If a parsing error occurs, or no filter matches, the Filters' +// default is returned. +// +// TODO: currently, the last filter to match wins always, but it shouldn't be that way. +// +// Instead, the highest-specific last filter should win; that way more specific filters +// override more general ones. +func (fs *Filters) AddrBlocked(a Multiaddr) (deny bool) { + var ( + netip net.IP + found bool + ) + + ForEach(a, func(c Component, e error) bool { + if e != nil { + return false + } + + switch c.Protocol().Code { + case P_IP6ZONE: + return true + case P_IP6, P_IP4: + found = true + netip = net.IP(c.RawValue()) + return false + default: + return false + } + }) + + if !found { + return fs.DefaultAction == ActionDeny + } + + fs.mu.RLock() + + action := fs.DefaultAction + for _, ft := range fs.filters { + if ft.f.Contains(netip) { + action = ft.action + } + } + + fs.mu.RUnlock() + return action == ActionDeny +} + +func (fs *Filters) ActionForFilter(ipnet net.IPNet) (action Action, ok bool) { + if _, f := fs.find(ipnet); f != nil { + return f.action, true + } + return ActionNone, false +} + +// FiltersForAction returns the filters associated with the indicated action. 
+func (fs *Filters) FiltersForAction(action Action) (result []net.IPNet) {
+	fs.mu.RLock()
+
+	for _, ff := range fs.filters {
+		if ff.action == action {
+			result = append(result, ff.f)
+		}
+	}
+
+	fs.mu.RUnlock()
+	return result
+}
diff --git a/go-multiaddr/filter_test.go b/go-multiaddr/filter_test.go
new file mode 100644
index 0000000..82b974d
--- /dev/null
+++ b/go-multiaddr/filter_test.go
@@ -0,0 +1,202 @@
+package multiaddr
+
+import (
+	"net"
+	"testing"
+)
+
+func TestFilterListing(t *testing.T) {
+	f := NewFilters()
+	expected := map[string]bool{
+		"1.2.3.0/24":  true,
+		"4.3.2.1/32":  true,
+		"fd00::/8":    true,
+		"fc00::1/128": true,
+	}
+	for cidr := range expected {
+		_, ipnet, _ := net.ParseCIDR(cidr)
+		f.AddFilter(*ipnet, ActionDeny)
+	}
+
+	for _, filter := range f.FiltersForAction(ActionDeny) {
+		cidr := filter.String()
+		if expected[cidr] {
+			delete(expected, cidr)
+		} else {
+			t.Errorf("unexpected filter %s", cidr)
+		}
+	}
+	for cidr := range expected {
+		t.Errorf("expected filter %s", cidr)
+	}
+}
+
+func TestFilterBlocking(t *testing.T) {
+	f := NewFilters()
+
+	_, ipnet, _ := net.ParseCIDR("0.1.2.3/24")
+	f.AddFilter(*ipnet, ActionDeny)
+	filters := f.FiltersForAction(ActionDeny)
+	if len(filters) != 1 {
+		t.Fatal("Expected only 1 filter")
+	}
+
+	if a, ok := f.ActionForFilter(*ipnet); !ok || a != ActionDeny {
+		t.Fatal("Expected filter with DENY action")
+	}
+
+	if !f.RemoveLiteral(filters[0]) {
+		t.Error("expected true value from RemoveLiteral")
+	}
+
+	for _, cidr := range []string{
+		"1.2.3.0/24",
+		"4.3.2.1/32",
+		"fd00::/8",
+		"fc00::1/128",
+	} {
+		_, ipnet, _ := net.ParseCIDR(cidr)
+		f.AddFilter(*ipnet, ActionDeny)
+	}
+
+	// These addresses should all be blocked
+	for _, blocked := range []string{
+		"/ip4/1.2.3.4/tcp/123",
+		"/ip4/4.3.2.1/udp/123",
+		"/ip6/fd00::2/tcp/321",
+		"/ip6/fc00::1/udp/321",
+	} {
+		maddr, err := NewMultiaddr(blocked)
+		if err != nil {
+			t.Error(err)
+		}
+		if !f.AddrBlocked(maddr) {
+			t.Fatalf("expected %s to be blocked", blocked)
+		}
+	}
+
+	// test that other net intervals are not blocked
+	for _, addr := range []string{
+		"/ip4/1.2.4.1/tcp/123",
+		"/ip4/4.3.2.2/udp/123",
+		"/ip6/fe00::1/tcp/321",
+		"/ip6/fc00::2/udp/321",
+	} {
+		maddr, err := NewMultiaddr(addr)
+		if err != nil {
+			t.Error(err)
+		}
+		if f.AddrBlocked(maddr) {
+			t.Fatalf("expected %s to not be blocked", addr)
+		}
+	}
+}
+
+func TestFilterWhitelisting(t *testing.T) {
+	f := NewFilters()
+
+	// Add default reject filter
+	f.DefaultAction = ActionDeny
+
+	// Add a whitelist
+	_, ipnet, _ := net.ParseCIDR("1.2.3.0/24")
+	f.AddFilter(*ipnet, ActionAccept)
+
+	if a, ok := f.ActionForFilter(*ipnet); !ok || a != ActionAccept {
+		t.Fatal("Expected filter with ACCEPT action")
+	}
+
+	// That /24 should now be allowed
+	for _, addr := range []string{
+		"/ip4/1.2.3.1/tcp/123",
+		"/ip4/1.2.3.254/tcp/123",
+		"/ip4/1.2.3.254/udp/321",
+	} {
+		maddr, err := NewMultiaddr(addr)
+		if err != nil {
+			t.Error(err)
+		}
+		if f.AddrBlocked(maddr) {
+			t.Fatalf("expected %s to be whitelisted", addr)
+		}
+	}
+
+	// No policy matches these maddrs, they should be blocked by default
+	for _, blocked := range []string{
+		"/ip4/1.2.4.4/tcp/123",
+		"/ip4/4.3.2.1/udp/123",
+		"/ip6/fd00::2/tcp/321",
+		"/ip6/fc00::1/udp/321",
+	} {
+		maddr, err := NewMultiaddr(blocked)
+		if err != nil {
+			t.Error(err)
+		}
+		if !f.AddrBlocked(maddr) {
+			t.Fatalf("expected %s to be blocked", blocked)
+		}
+	}
+}
+
+func TestFiltersRemoveRules(t *testing.T) {
+	f := NewFilters()
+
+	ips := []string{
+		"/ip4/1.2.3.1/tcp/123",
"/ip4/1.2.3.254/tcp/123", + } + + // Add default reject filter + f.DefaultAction = ActionDeny + + // Add whitelisting + _, ipnet, _ := net.ParseCIDR("1.2.3.0/24") + f.AddFilter(*ipnet, ActionAccept) + + if a, ok := f.ActionForFilter(*ipnet); !ok || a != ActionAccept { + t.Fatal("Expected filter with ACCEPT action") + } + + // these are all whitelisted, should be OK + for _, addr := range ips { + maddr, err := NewMultiaddr(addr) + if err != nil { + t.Error(err) + } + if f.AddrBlocked(maddr) { + t.Fatalf("expected %s to be whitelisted", addr) + } + } + + // Test removing the filter. It'll remove multiple, so make a dupe & + // a complement + f.AddFilter(*ipnet, ActionDeny) + + // Show that they all apply, these are now blacklisted & should fail + for _, addr := range ips { + maddr, err := NewMultiaddr(addr) + if err != nil { + t.Error(err) + } + if !f.AddrBlocked(maddr) { + t.Fatalf("expected %s to be blacklisted", addr) + } + } + + // remove those rules + if !f.RemoveLiteral(*ipnet) { + t.Error("expected true value from RemoveLiteral") + } + + // our default is reject, so the 1.2.3.0/24 should be rejected now, + // along with everything else + for _, addr := range ips { + maddr, err := NewMultiaddr(addr) + if err != nil { + t.Error(err) + } + if !f.AddrBlocked(maddr) { + t.Fatalf("expected %s to be blocked", addr) + } + } +} diff --git a/go-multiaddr/go.mod b/go-multiaddr/go.mod new file mode 100644 index 0000000..b40db43 --- /dev/null +++ b/go-multiaddr/go.mod @@ -0,0 +1,27 @@ +module github.com/multiformats/go-multiaddr + +go 1.21 + +require ( + github.com/ipfs/go-cid v0.0.7 + github.com/multiformats/go-multibase v0.2.0 + github.com/multiformats/go-multihash v0.2.3 + github.com/multiformats/go-varint v0.0.7 + github.com/stretchr/testify v1.7.0 + golang.org/x/exp v0.0.0-20230725012225-302865e7556b +) + +require ( + github.com/davecgh/go-spew v1.1.0 // indirect + github.com/klauspost/cpuid/v2 v2.2.6 // indirect + github.com/minio/sha256-simd v1.0.1 // indirect + github.com/mr-tron/base58 v1.2.0 // indirect + github.com/multiformats/go-base32 v0.1.0 // indirect + github.com/multiformats/go-base36 v0.2.0 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/spaolacci/murmur3 v1.1.0 // indirect + golang.org/x/crypto v0.18.0 // indirect + golang.org/x/sys v0.16.0 // indirect + gopkg.in/yaml.v3 v3.0.0 // indirect + lukechampine.com/blake3 v1.2.1 // indirect +) diff --git a/go-multiaddr/go.sum b/go-multiaddr/go.sum new file mode 100644 index 0000000..076449b --- /dev/null +++ b/go-multiaddr/go.sum @@ -0,0 +1,56 @@ +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/ipfs/go-cid v0.0.7 h1:ysQJVJA3fNDF1qigJbsSQOdjhVLsOEoPdh0+R97k3jY= +github.com/ipfs/go-cid v0.0.7/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I= +github.com/klauspost/cpuid/v2 v2.2.6 h1:ndNyv040zDGIDh8thGkXYjnFtiN02M1PVVF+JE/48xc= +github.com/klauspost/cpuid/v2 v2.2.6/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= +github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ= +github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= +github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM= +github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8= +github.com/mr-tron/base58 v1.1.0/go.mod 
h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= +github.com/mr-tron/base58 v1.1.3/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= +github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o= +github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= +github.com/multiformats/go-base32 v0.0.3/go.mod h1:pLiuGC8y0QR3Ue4Zug5UzK9LjgbkL8NSQj0zQ5Nz/AA= +github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE= +github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI= +github.com/multiformats/go-base36 v0.1.0/go.mod h1:kFGE83c6s80PklsHO9sRn2NCoffoRdUUOENyW/Vv6sM= +github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0= +github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4= +github.com/multiformats/go-multibase v0.0.3/go.mod h1:5+1R4eQrT3PkYZ24C3W2Ue2tPwIdYQD509ZjSb5y9Oc= +github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g= +github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk= +github.com/multiformats/go-multihash v0.0.13/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= +github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U= +github.com/multiformats/go-multihash v0.2.3/go.mod h1:dXgKXCXjBzdscBLk9JkjINiEsCKRVch90MdaGiKsvSM= +github.com/multiformats/go-varint v0.0.5/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= +github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8= +github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= +github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.18.0 h1:PGVlW0xEltQnzFZ55hkuX5+KLyrMYhHld1YHO4AKcdc= +golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= +golang.org/x/exp v0.0.0-20230725012225-302865e7556b h1:tK7yjGqVRzYdXsBcfD2MLhFAhHfDgGLm2rY1ub7FA9k= +golang.org/x/exp v0.0.0-20230725012225-302865e7556b/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU= +golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/text v0.3.0/go.mod 
h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0 h1:hjy8E9ON/egN1tAYqKb61G10WtihqetD4sz2H+8nIeA= +gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +lukechampine.com/blake3 v1.2.1 h1:YuqqRuaqsGV71BV/nm9xlI0MKUv4QC54jQnBChWbGnI= +lukechampine.com/blake3 v1.2.1/go.mod h1:0OFRp7fBtAylGVCO40o87sbupkyIGgbpv1+M1k1LM6k= diff --git a/go-multiaddr/interface.go b/go-multiaddr/interface.go new file mode 100644 index 0000000..b7c3efd --- /dev/null +++ b/go-multiaddr/interface.go @@ -0,0 +1,61 @@ +package multiaddr + +import ( + "encoding" + "encoding/json" +) + +/* +Multiaddr is a cross-protocol, cross-platform format for representing +internet addresses. It emphasizes explicitness and self-description. +Learn more here: https://github.com/multiformats/multiaddr + +Multiaddrs have both a binary and string representation. + + import ma "github.com/multiformats/go-multiaddr" + + addr, err := ma.NewMultiaddr("/ip4/1.2.3.4/tcp/80") + // err non-nil when parsing failed. +*/ +type Multiaddr interface { + json.Marshaler + json.Unmarshaler + encoding.TextMarshaler + encoding.TextUnmarshaler + encoding.BinaryMarshaler + encoding.BinaryUnmarshaler + + // Equal returns whether two Multiaddrs are exactly equal + Equal(Multiaddr) bool + + // Bytes returns the []byte representation of this Multiaddr + // + // This function may expose immutable, internal state. Do not modify. + Bytes() []byte + + // String returns the string representation of this Multiaddr + String() string + + // Protocols returns the list of Protocols this Multiaddr includes + Protocols() []Protocol + + // Encapsulate wraps this Multiaddr around another. For example: + // + // /ip4/1.2.3.4 encapsulate /tcp/80 = /ip4/1.2.3.4/tcp/80 + // + Encapsulate(Multiaddr) Multiaddr + + // Decapsulate removes a Multiaddr wrapping. For example: + // + // /ip4/1.2.3.4/tcp/80 decapsulate /tcp/80 = /ip4/1.2.3.4 + // /ip4/1.2.3.4/tcp/80 decapsulate /udp/80 = /ip4/1.2.3.4/tcp/80 + // /ip4/1.2.3.4/tcp/80 decapsulate /ip4/1.2.3.4 = nil + // + Decapsulate(Multiaddr) Multiaddr + + // ValueForProtocol returns the value (if any) following the specified protocol + // + // Note: protocols can appear multiple times in a single multiaddr. + // Consider using `ForEach` to walk over the addr manually. + ValueForProtocol(code int) (string, error) +} diff --git a/go-multiaddr/multiaddr.go b/go-multiaddr/multiaddr.go new file mode 100644 index 0000000..5df340d --- /dev/null +++ b/go-multiaddr/multiaddr.go @@ -0,0 +1,258 @@ +package multiaddr + +import ( + "bytes" + "encoding/json" + "fmt" + + "golang.org/x/exp/slices" +) + +// multiaddr is the data structure representing a Multiaddr +type multiaddr struct { + bytes []byte +} + +// NewMultiaddr parses and validates an input string, returning a *Multiaddr +func NewMultiaddr(s string) (a Multiaddr, err error) { + b, err := stringToBytes(s) + if err != nil { + return nil, err + } + return &multiaddr{bytes: b}, nil +} + +// NewMultiaddrBytes initializes a Multiaddr from a byte representation. +// It validates it as an input string. 
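+//
+// A round-trip sketch through the binary form:
+//
+//	m1, err := NewMultiaddr("/ip4/127.0.0.1/udp/1234")
+//	if err != nil {
+//		// handle the error
+//	}
+//	m2, err := NewMultiaddrBytes(m1.Bytes())
+//	// on success, m1.Equal(m2) is true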
+func NewMultiaddrBytes(b []byte) (a Multiaddr, err error) {
+	if err := validateBytes(b); err != nil {
+		return nil, err
+	}
+
+	return &multiaddr{bytes: b}, nil
+}
+
+// Equal tests whether two multiaddrs are equal
+func (m *multiaddr) Equal(m2 Multiaddr) bool {
+	if m2 == nil {
+		return false
+	}
+	return bytes.Equal(m.bytes, m2.Bytes())
+}
+
+// Bytes returns the []byte representation of this Multiaddr
+//
+// Do not modify the returned buffer, it may be shared.
+func (m *multiaddr) Bytes() []byte {
+	return m.bytes
+}
+
+// String returns the string representation of a Multiaddr
+func (m *multiaddr) String() string {
+	s, err := bytesToString(m.bytes)
+	if err != nil {
+		return ""
+	}
+	return s
+}
+
+func (m *multiaddr) MarshalBinary() ([]byte, error) {
+	return m.Bytes(), nil
+}
+
+func (m *multiaddr) UnmarshalBinary(data []byte) error {
+	new, err := NewMultiaddrBytes(data)
+	if err != nil {
+		return err
+	}
+	*m = *(new.(*multiaddr))
+	return nil
+}
+
+func (m *multiaddr) MarshalText() ([]byte, error) {
+	s, err := bytesToString(m.bytes)
+	if err != nil {
+		return nil, err
+	}
+
+	return []byte(s), nil
+}
+
+func (m *multiaddr) UnmarshalText(data []byte) error {
+	new, err := NewMultiaddr(string(data))
+	if err != nil {
+		return err
+	}
+	*m = *(new.(*multiaddr))
+	return nil
+}
+
+func (m *multiaddr) MarshalJSON() ([]byte, error) {
+	s, err := bytesToString(m.bytes)
+	if err != nil {
+		return nil, err
+	}
+
+	return json.Marshal(s)
+}
+
+func (m *multiaddr) UnmarshalJSON(data []byte) error {
+	var v string
+	if err := json.Unmarshal(data, &v); err != nil {
+		return err
+	}
+	// Check the error before dereferencing: NewMultiaddr returns a nil
+	// Multiaddr on failure, and the type assertion would panic on it.
+	new, err := NewMultiaddr(v)
+	if err != nil {
+		return err
+	}
+	*m = *(new.(*multiaddr))
+	return nil
+}
+
+// Protocols returns the list of protocols this Multiaddr has.
+func (m *multiaddr) Protocols() []Protocol {
+	ps := make([]Protocol, 0, 8)
+	b := m.bytes
+	for len(b) > 0 {
+		code, n, err := ReadVarintCode(b)
+		if err != nil {
+			return []Protocol{}
+		}
+
+		p := ProtocolWithCode(code)
+		if p.Code == 0 {
+			return []Protocol{}
+		}
+		ps = append(ps, p)
+		b = b[n:]
+
+		n, size, err := sizeForAddr(p, b)
+		if err != nil {
+			return []Protocol{}
+		}
+
+		b = b[n+size:]
+	}
+	return ps
+}
+
+// Encapsulate wraps a given Multiaddr, returning the resulting joined Multiaddr
+func (m *multiaddr) Encapsulate(o Multiaddr) Multiaddr {
+	if o == nil {
+		return m
+	}
+
+	mb := m.bytes
+	ob := o.Bytes()
+
+	b := make([]byte, len(mb)+len(ob))
+	copy(b, mb)
+	copy(b[len(mb):], ob)
+	return &multiaddr{bytes: b}
+}
+
+// Decapsulate unwraps Multiaddr up until the given Multiaddr is found.
+func (m *multiaddr) Decapsulate(right Multiaddr) Multiaddr {
+	if right == nil {
+		return m
+	}
+
+	leftParts := Split(m)
+	rightParts := Split(right)
+
+	lastIndex := -1
+	for i := range leftParts {
+		foundMatch := false
+		for j, rightC := range rightParts {
+			if len(leftParts) <= i+j {
+				foundMatch = false
+				break
+			}
+
+			foundMatch = rightC.Equal(leftParts[i+j])
+			if !foundMatch {
+				break
+			}
+		}
+
+		if foundMatch {
+			lastIndex = i
+		}
+	}
+
+	if lastIndex == 0 {
+		return nil
+	}
+
+	if lastIndex < 0 {
+		// if multiaddr not contained, returns a copy.
+		cpy := make([]byte, len(m.bytes))
+		copy(cpy, m.bytes)
+		return &multiaddr{bytes: cpy}
+	}
+
+	return Join(leftParts[:lastIndex]...)
+}
+
+var ErrProtocolNotFound = fmt.Errorf("protocol not found in multiaddr")
+
+func (m *multiaddr) ValueForProtocol(code int) (value string, err error) {
+	err = ErrProtocolNotFound
+	ForEach(m, func(c Component, e error) bool {
+		if e != nil {
+			err = e
+			return false
+		}
+		if c.Protocol().Code == code {
+			value = c.Value()
+			err = nil
+			return false
+		}
+		return true
+	})
+	return
+}
+
+// FilterAddrs removes certain addresses from a, according to the given filters.
+// An address is kept only if all filters return true for it.
+func FilterAddrs(a []Multiaddr, filters ...func(Multiaddr) bool) []Multiaddr {
+	b := make([]Multiaddr, 0, len(a))
+addrloop:
+	for _, addr := range a {
+		for _, filter := range filters {
+			if !filter(addr) {
+				continue addrloop
+			}
+		}
+		b = append(b, addr)
+	}
+	return b
+}
+
+// Contains reports whether addr is contained in addrs.
+func Contains(addrs []Multiaddr, addr Multiaddr) bool {
+	for _, a := range addrs {
+		if addr.Equal(a) {
+			return true
+		}
+	}
+	return false
+}
+
+// Unique deduplicates addresses in place, leaving only the unique addresses.
+// It doesn't allocate.
+func Unique(addrs []Multiaddr) []Multiaddr {
+	if len(addrs) == 0 {
+		return addrs
+	}
+	// Use the new slices package here, as the sort function doesn't allocate (sort.Slice does).
+	slices.SortFunc(addrs, func(a, b Multiaddr) int { return bytes.Compare(a.Bytes(), b.Bytes()) })
+	idx := 1
+	for i := 1; i < len(addrs); i++ {
+		if !addrs[i-1].Equal(addrs[i]) {
+			addrs[idx] = addrs[i]
+			idx++
+		}
+	}
+	for i := idx; i < len(addrs); i++ {
+		addrs[i] = nil
+	}
+	return addrs[:idx]
+}
diff --git a/go-multiaddr/multiaddr/main.go b/go-multiaddr/multiaddr/main.go
new file mode 100644
index 0000000..c4fa28d
--- /dev/null
+++ b/go-multiaddr/multiaddr/main.go
@@ -0,0 +1,100 @@
+package main
+
+import (
+	"encoding/hex"
+	"flag"
+	"fmt"
+	"os"
+	"strings"
+
+	maddr "github.com/multiformats/go-multiaddr"
+)
+
+var (
+	flagHelp bool
+)
+
+func main() {
+	flag.Usage = func() {
+		usage := `usage: %s [options] ADDR
+
+Print details about the given multiaddr.
+ +Options: +` + fmt.Fprintf(os.Stderr, usage, os.Args[0]) + flag.PrintDefaults() + } + + flag.BoolVar(&flagHelp, "h", false, "display help message") + flag.Parse() + + if flagHelp || len(flag.Args()) == 0 { + flag.Usage() + os.Exit(0) + } + + addrStr := flag.Args()[0] + var addr maddr.Multiaddr + var merr error + if strings.HasPrefix(addrStr, "0x") { + addrBytes, err := hex.DecodeString(addrStr[2:]) + if err != nil { + fmt.Fprintf(os.Stderr, "parse error: %s\n", err) + os.Exit(1) + } + addr, merr = maddr.NewMultiaddrBytes(addrBytes) + } else { + addr, merr = maddr.NewMultiaddr(addrStr) + } + if merr != nil { + fmt.Fprintf(os.Stderr, "parse error: %s\n", merr) + os.Exit(1) + } + + infoCommand(addr) +} + +func infoCommand(addr maddr.Multiaddr) { + var compsJson []string + maddr.ForEach(addr, func(comp maddr.Component, e error) bool { + if e != nil { + panic(e) + } + lengthPrefix := "" + if comp.Protocol().Size == maddr.LengthPrefixedVarSize { + v, err := maddr.CodeToVarint(len(comp.RawValue())) + if err != nil { + panic(err) + } + lengthPrefix = "0x" + hex.EncodeToString(v) + } + + compsJson = append(compsJson, `{`+ + fmt.Sprintf(`"string": "%s", `, comp.String())+ + fmt.Sprintf(`"stringSize": "%d", `, len(comp.String()))+ + fmt.Sprintf(`"packed": "0x%x", `, comp.Bytes())+ + fmt.Sprintf(`"packedSize": "%d", `, len(comp.Bytes()))+ + fmt.Sprintf(`"value": %#v, `, comp.Value())+ + fmt.Sprintf(`"rawValue": "0x%x", `, comp.RawValue())+ + fmt.Sprintf(`"valueSize": "%d", `, len(comp.RawValue()))+ + fmt.Sprintf(`"protocol": "%s", `, comp.Protocol().Name)+ + fmt.Sprintf(`"codec": "%d", `, comp.Protocol().Code)+ + fmt.Sprintf(`"uvarint": "0x%x", `, comp.Protocol().VCode)+ + fmt.Sprintf(`"lengthPrefix": "%s"`, lengthPrefix)+ + `}`) + return true + }) + + addrJson := `{ + "string": "%[1]s", + "stringSize": "%[2]d", + "packed": "0x%[3]x", + "packedSize": "%[4]d", + "components": [ + %[5]s + ] +}` + fmt.Fprintf(os.Stdout, addrJson+"\n", + addr.String(), len(addr.String()), addr.Bytes(), len(addr.Bytes()), strings.Join(compsJson, ",\n ")) +} diff --git a/go-multiaddr/multiaddr_test.go b/go-multiaddr/multiaddr_test.go new file mode 100644 index 0000000..8550b27 --- /dev/null +++ b/go-multiaddr/multiaddr_test.go @@ -0,0 +1,1064 @@ +package multiaddr + +import ( + "bytes" + "encoding/hex" + "fmt" + "math" + "math/rand" + "testing" + + "github.com/ipfs/go-cid" + mh "github.com/multiformats/go-multihash" + "github.com/stretchr/testify/require" +) + +func newMultiaddr(t *testing.T, a string) Multiaddr { + m, err := NewMultiaddr(a) + if err != nil { + t.Error(err) + } + return m +} + +func TestConstructFails(t *testing.T) { + cases := []string{ + "/ip4", + "/ip4/::1", + "/ip4/fdpsofodsajfdoisa", + "/ip4/::/ipcidr/256", + "/ip6/::/ipcidr/1026", + "/ip6", + "/ip6zone", + "/ip6zone/", + "/ip6zone//ip6/fe80::1", + "/udp", + "/tcp", + "/sctp", + "/udp/65536", + "/tcp/65536", + "/quic/65536", + "/quic-v1/65536", + "/onion/9imaq4ygg2iegci7:80", + "/onion/aaimaq4ygg2iegci7:80", + "/onion/timaq4ygg2iegci7:0", + "/onion/timaq4ygg2iegci7:-1", + "/onion/timaq4ygg2iegci7", + "/onion/timaq4ygg2iegci@:666", + "/onion3/9ww6ybal4bd7szmgncyruucpgfkqahzddi37ktceo3ah7ngmcopnpyyd:80", + "/onion3/vww6ybal4bd7szmgncyruucpgfkqahzddi37ktceo3ah7ngmcopnpyyd7:80", + "/onion3/vww6ybal4bd7szmgncyruucpgfkqahzddi37ktceo3ah7ngmcopnpyyd:0", + "/onion3/vww6ybal4bd7szmgncyruucpgfkqahzddi37ktceo3ah7ngmcopnpyyd:-1", + "/onion3/vww6ybal4bd7szmgncyruucpgfkqahzddi37ktceo3ah7ngmcopnpyyd", + 
"/onion3/vww6ybal4bd7szmgncyruucpgfkqahzddi37ktceo3ah7ngmcopnpyy@:666", + "/garlic64/jT~IyXaoauTni6N4517EG8mrFUKpy0IlgZh-EY9csMAk82Odatmzr~YTZy8Hv7u~wvkg75EFNOyqb~nAPg-khyp2TS~ObUz8WlqYAM2VlEzJ7wJB91P-cUlKF18zSzVoJFmsrcQHZCirSbWoOknS6iNmsGRh5KVZsBEfp1Dg3gwTipTRIx7Vl5Vy~1OSKQVjYiGZS9q8RL0MF~7xFiKxZDLbPxk0AK9TzGGqm~wMTI2HS0Gm4Ycy8LYPVmLvGonIBYndg2bJC7WLuF6tVjVquiokSVDKFwq70BCUU5AU-EvdOD5KEOAM7mPfw-gJUG4tm1TtvcobrObqoRnmhXPTBTN5H7qDD12AvlwFGnfAlBXjuP4xOUAISL5SRLiulrsMSiT4GcugSI80mF6sdB0zWRgL1yyvoVWeTBn1TqjO27alr95DGTluuSqrNAxgpQzCKEWAyzrQkBfo2avGAmmz2NaHaAvYbOg0QSJz1PLjv2jdPW~ofiQmrGWM1cd~1cCqAAAA7:80", + "/garlic64/jT~IyXaoauTni6N4517EG8mrFUKpy0IlgZh-EY9csMAk82Odatmzr~YTZy8Hv7u~wvkg75EFNOyqb~nAPg-khyp2TS~ObUz8WlqYAM2VlEzJ7wJB91P-cUlKF18zSzVoJFmsrcQHZCirSbWoOknS6iNmsGRh5KVZsBEfp1Dg3gwTipTRIx7Vl5Vy~1OSKQVjYiGZS9q8RL0MF~7xFiKxZDLbPxk0AK9TzGGqm~wMTI2HS0Gm4Ycy8LYPVmLvGonIBYndg2bJC7WLuF6tVjVquiokSVDKFwq70BCUU5AU-EvdOD5KEOAM7mPfw-gJUG4tm1TtvcobrObqoRnmhXPTBTN5H7qDD12AvlwFGnfAlBXjuP4xOUAISL5SRLiulrsMSiT4GcugSI80mF6sdB0zWRgL1yyvoVWeTBn1TqjO27alr95DGTluuSqrNAxgpQzCKEWAyzrQkBfo2avGAmmz2NaHaAvYbOg0QSJz1PLjv2jdPW~ofiQmrGWM1cd~1cCqAAAA:0", + "/garlic64/jT~IyXaoauTni6N4517EG8mrFUKpy0IlgZh-EY9csMAk82Odatmzr~YTZy8Hv7u~wvkg75EFNOyqb~nAPg-khyp2TS~ObUz8WlqYAM2VlEzJ7wJB91P-cUlKF18zSzVoJFmsrcQHZCirSbWoOknS6iNmsGRh5KVZsBEfp1Dg3gwTipTRIx7Vl5Vy~1OSKQVjYiGZS9q8RL0MF~7xFiKxZDLbPxk0AK9TzGGqm~wMTI2HS0Gm4Ycy8LYPVmLvGonIBYndg2bJC7WLuF6tVjVquiokSVDKFwq70BCUU5AU-EvdOD5KEOAM7mPfw-gJUG4tm1TtvcobrObqoRnmhXPTBTN5H7qDD12AvlwFGnfAlBXjuP4xOUAISL5SRLiulrsMSiT4GcugSI80mF6sdB0zWRgL1yyvoVWeTBn1TqjO27alr95DGTluuSqrNAxgpQzCKEWAyzrQkBfo2avGAmmz2NaHaAvYbOg0QSJz1PLjv2jdPW~ofiQmrGWM1cd~1cCqAAAA:0", + "/garlic64/jT~IyXaoauTni6N4517EG8mrFUKpy0IlgZh-EY9csMAk82Odatmzr~YTZy8Hv7u~wvkg75EFNOyqb~nAPg-khyp2TS~ObUz8WlqYAM2VlEzJ7wJB91P-cUlKF18zSzVoJFmsrcQHZCirSbWoOknS6iNmsGRh5KVZsBEfp1Dg3gwTipTRIx7Vl5Vy~1OSKQVjYiGZS9q8RL0MF~7xFiKxZDLbPxk0AK9TzGGqm~wMTI2HS0Gm4Ycy8LYPVmLvGonIBYndg2bJC7WLuF6tVjVquiokSVDKFwq70BCUU5AU-EvdOD5KEOAM7mPfw-gJUG4tm1TtvcobrObqoRnmhXPTBTN5H7qDD12AvlwFGnfAlBXjuP4xOUAISL5SRLiulrsMSiT4GcugSI80mF6sdB0zWRgL1yyvoVWeTBn1TqjO27alr95DGTluuSqrNAxgpQzCKEWAyzrQkBfo2avGAmmz2NaHaAvYbOg0QSJz1PLjv2jdPW~ofiQmrGWM1cd~1cCqAAAA:-1", + "/garlic64/jT~IyXaoauTni6N4517EG8mrFUKpy0IlgZh-EY9csMAk82Odatmzr~YTZy8Hv7u~wvkg75EFNOyqb~nAPg-khyp2TS~ObUz8WlqYAM2VlEzJ7wJB91P-cUlKF18zSzVoJFmsrcQHZCirSbWoOknS6iNmsGRh5KVZsBEfp1Dg3gwTipTRIx7Vl5Vy~1OSKQVjYiGZS9q8RL0MF~7xFiKxZDLbPxk0AK9TzGGqm~wMTI2HS0Gm4Ycy8LYPVmLvGonIBYndg2bJC7WLuF6tVjVquiokSVDKFwq70BCUU5AU-EvdOD5KEOAM7mPfw-gJUG4tm1TtvcobrObqoRnmhXPTBTN5H7qDD12AvlwFGnfAlBXjuP4xOUAISL5SRLiulrsMSiT4GcugSI80mF6sdB0zWRgL1yyvoVWeTBn1TqjO27alr95DGTluuSqrNAxgpQzCKEWAyzrQkBfo2avGAmmz2NaHaAvYbOg0QSJz1PLjv2jdPW~ofiQmrGWM1cd~1cCqAAAA@:666", + "/garlic64/jT~IyXaoauTni6N4517EG8mrFUKpy0IlgZh-EY9csMAk82Odatmzr~YTZy8Hv7u~wvkg75EFNOyqb~nAPg-khyp2TS~ObUz8WlqYAM2VlEzJ7wJB91P-cUlKF18zSzVoJFmsrcQHZCirSbWoOknS6iNmsGRh5KVZsBEfp1Dg3gwTipTRIx7Vl5Vy~1OSKQVjYiGZS9q8RL0MF~7xFiKxZDLbPxk0AK9TzGGqm~wMTI2HS0Gm4Ycy8LYPVmLvGonIBYndg2bJC7WLuF6tVjVquiokSVDKFwq70BCUU5AU-EvdOD5KEOAM7mPfw-gJUG4tm1TtvcobrObqoRnmhXPTBTN5H7qDD12AvlwFGnfAlBXjuP4xOUAISL5SRLiulrsMSiT4GcugSI80mF6sdB0zWRgL1yyvoVWeTBn1TqjO27alr95DGTluuSqrNAxgpQzCKEWAyzrQkBfo2avGAmmz2NaHaAvYbOg0QSJz1PLjv2jdPW~ofiQmrGWM1cd~1cCqAAAA7:80", + 
"/garlic64/jT~IyXaoauTni6N4517EG8mrFUKpy0IlgZh-EY9csMAk82Odatmzr~YTZy8Hv7u~wvkg75EFNOyqb~nAPg-khyp2TS~ObUz8WlqYAM2VlEzJ7wJB91P-cUlKF18zSzVoJFmsrcQHZCirSbWoOknS6iNmsGRh5KVZsBEfp1Dg3gwTipTRIx7Vl5Vy~1OSKQVjYiGZS9q8RL0MF~7xFiKxZDLbPxk0AK9TzGGqm~wMTI2HS0Gm4Ycy8LYPVmLvGonIBYndg2bJC7WLuF6tVjVquiokSVDKFwq70BCUU5AU-EvdOD5KEOAM7mPfw-gJUG4tm1TtvcobrObqoRnmhXPTBTN5H7qDD12AvlwFGnfAlBXjuP4xOUAISL5SRLiulrsMSiT4GcugSI80mF6sdB0zWRgL1yyvoVWeTBn1TqjO27alr95DGTluuSqrNAxgpQzCKEWAyzrQkBfo2avGAmmz2NaHaAvYbOg0QSJz1PLjv2jdPW~ofiQmrGWM1cd~1cCqAAAA:0", + "/garlic64/jT~IyXaoauTni6N4517EG8mrFUKpy0IlgZh-EY9csMAk82Odatmzr~YTZy8Hv7u~wvkg75EFNOyqb~nAPg-khyp2TS~ObUz8WlqYAM2VlEzJ7wJB91P-cUlKF18zSzVoJFmsrcQHZCirSbWoOknS6iNmsGRh5KVZsBEfp1Dg3gwTipTRIx7Vl5Vy~1OSKQVjYiGZS9q8RL0MF~7xFiKxZDLbPxk0AK9TzGGqm~wMTI2HS0Gm4Ycy8LYPVmLvGonIBYndg2bJC7WLuF6tVjVquiokSVDKFwq70BCUU5AU-EvdOD5KEOAM7mPfw-gJUG4tm1TtvcobrObqoRnmhXPTBTN5H7qDD12AvlwFGnfAlBXjuP4xOUAISL5SRLiulrsMSiT4GcugSI80mF6sdB0zWRgL1yyvoVWeTBn1TqjO27alr95DGTluuSqrNAxgpQzCKEWAyzrQkBfo2avGAmmz2NaHaAvYbOg0QSJz1PLjv2jdPW~ofiQmrGWM1cd~1cCqAAAA:0", + "/garlic64/jT~IyXaoauTni6N4517EG8mrFUKpy0IlgZh-EY9csMAk82Odatmzr~YTZy8Hv7u~wvkg75EFNOyqb~nAPg-khyp2TS~ObUz8WlqYAM2VlEzJ7wJB91P-cUlKF18zSzVoJFmsrcQHZCirSbWoOknS6iNmsGRh5KVZsBEfp1Dg3gwTipTRIx7Vl5Vy~1OSKQVjYiGZS9q8RL0MF~7xFiKxZDLbPxk0AK9TzGGqm~wMTI2HS0Gm4Ycy8LYPVmLvGonIBYndg2bJC7WLuF6tVjVquiokSVDKFwq70BCUU5AU-EvdOD5KEOAM7mPfw-gJUG4tm1TtvcobrObqoRnmhXPTBTN5H7qDD12AvlwFGnfAlBXjuP4xOUAISL5SRLiulrsMSiT4GcugSI80mF6sdB0zWRgL1yyvoVWeTBn1TqjO27alr95DGTluuSqrNAxgpQzCKEWAyzrQkBfo2avGAmmz2NaHaAvYbOg0QSJz1PLjv2jdPW~ofiQmrGWM1cd~1cCqAAAA:-1", + "/garlic64/jT~IyXaoauTni6N4517EG8mrFUKpy0IlgZh-EY9csMAk82Odatmzr~YTZy8Hv7u~wvkg75EFNOyqb~nAPg-khyp2TS~ObUz8WlqYAM2VlEzJ7wJB91P-cUlKF18zSzVoJFmsrcQHZCirSbWoOknS6iNmsGRh5KVZsBEfp1Dg3gwTipTRIx7Vl5Vy~1OSKQVjYiGZS9q8RL0MF~7xFiKxZDLbPxk0AK9TzGGqm~wMTI2HS0Gm4Ycy8LYPVmLvGonIBYndg2bJC7WLuF6tVjVquiokSVDKFwq70BCUU5AU-EvdOD5KEOAM7mPfw-gJUG4tm1TtvcobrObqoRnmhXPTBTN5H7qDD12AvlwFGnfAlBXjuP4xOUAISL5SRLiulrsMSiT4GcugSI80mF6sdB0zWRgL1yyvoVWeTBn1TqjO27alr95DGTluuSqrNAxgpQzCKEWAyzrQkBfo2avGAmmz2NaHaAvYbOg0QSJz1PLjv2jdPW~ofiQmrGWM1cd~1cCqAAAA@:666", + "/garlic32/566niximlxdzpanmn4qouucvua3k7neniwss47li5r6ugoertzu", + "/garlic32/566niximlxdzpanmn4qouucvua3k7neniwss47li5r6ugoertzu77", + "/garlic32/566niximlxdzpanmn4qouucvua3k7neniwss47li5r6ugoertzu:80", + "/garlic32/566niximlxdzpanmn4qouucvua3k7neniwss47li5r6ugoertzuq:-1", + "/garlic32/566niximlxdzpanmn4qouucvua3k7neniwss47li5r6ugoertzu@", + "/udp/1234/sctp", + "/udp/1234/udt/1234", + "/udp/1234/utp/1234", + "/ip4/127.0.0.1/udp/jfodsajfidosajfoidsa", + "/ip4/127.0.0.1/udp", + "/ip4/127.0.0.1/tcp/jfodsajfidosajfoidsa", + "/ip4/127.0.0.1/tcp", + "/ip4/127.0.0.1/quic/1234", + "/ip4/127.0.0.1/quic-v1/1234", + "/ip4/127.0.0.1/udp/1234/quic-v1/webtransport/certhash", + "/ip4/127.0.0.1/udp/1234/quic-v1/webtransport/certhash/b2uaraocy6yrdblb4sfptaddgimjmmp", // 1 character missing from certhash + "/ip4/127.0.0.1/ipfs", + "/ip4/127.0.0.1/ipfs/tcp", + "/ip4/127.0.0.1/p2p", + "/ip4/127.0.0.1/p2p/tcp", + "/unix", + "/ip4/1.2.3.4/tcp/80/unix", + "/ip4/1.2.3.4/tcp/-1", + "/ip4/127.0.0.1/tcp/9090/http/p2p-webcrt-direct", + "/", + "", + "/p2p/QmxoHT6iViN5xAjoz1VZ553cL31U9F94ht3QvWR1FrEbZY", // sha256 multihash with digest len > 32 + } + + for _, a := range cases { + if _, err := NewMultiaddr(a); err == nil { + t.Errorf("should have failed: %s - %s", a, err) + } + } +} + +func TestEmptyMultiaddr(t *testing.T) { + _, err := NewMultiaddrBytes([]byte{}) + if err == nil { + t.Fatal("should have failed to parse 
empty multiaddr") + } +} + +var good = []string{ + "/ip4/1.2.3.4", + "/ip4/0.0.0.0", + "/ip4/192.0.2.0/ipcidr/24", + "/ip6/::1", + "/ip6/2601:9:4f81:9700:803e:ca65:66e8:c21", + "/ip6/2601:9:4f81:9700:803e:ca65:66e8:c21/udp/1234/quic", + "/ip6/2601:9:4f81:9700:803e:ca65:66e8:c21/udp/1234/quic-v1", + "/ip6/2001:db8::/ipcidr/32", + "/ip6zone/x/ip6/fe80::1", + "/ip6zone/x%y/ip6/fe80::1", + "/ip6zone/x%y/ip6/::", + "/ip6zone/x/ip6/fe80::1/udp/1234/quic", + "/ip6zone/x/ip6/fe80::1/udp/1234/quic-v1", + "/onion/timaq4ygg2iegci7:1234", + "/onion/timaq4ygg2iegci7:80/http", + "/onion3/vww6ybal4bd7szmgncyruucpgfkqahzddi37ktceo3ah7ngmcopnpyyd:1234", + "/onion3/vww6ybal4bd7szmgncyruucpgfkqahzddi37ktceo3ah7ngmcopnpyyd:80/http", + "/garlic64/jT~IyXaoauTni6N4517EG8mrFUKpy0IlgZh-EY9csMAk82Odatmzr~YTZy8Hv7u~wvkg75EFNOyqb~nAPg-khyp2TS~ObUz8WlqYAM2VlEzJ7wJB91P-cUlKF18zSzVoJFmsrcQHZCirSbWoOknS6iNmsGRh5KVZsBEfp1Dg3gwTipTRIx7Vl5Vy~1OSKQVjYiGZS9q8RL0MF~7xFiKxZDLbPxk0AK9TzGGqm~wMTI2HS0Gm4Ycy8LYPVmLvGonIBYndg2bJC7WLuF6tVjVquiokSVDKFwq70BCUU5AU-EvdOD5KEOAM7mPfw-gJUG4tm1TtvcobrObqoRnmhXPTBTN5H7qDD12AvlwFGnfAlBXjuP4xOUAISL5SRLiulrsMSiT4GcugSI80mF6sdB0zWRgL1yyvoVWeTBn1TqjO27alr95DGTluuSqrNAxgpQzCKEWAyzrQkBfo2avGAmmz2NaHaAvYbOg0QSJz1PLjv2jdPW~ofiQmrGWM1cd~1cCqAAAA", + "/garlic64/jT~IyXaoauTni6N4517EG8mrFUKpy0IlgZh-EY9csMAk82Odatmzr~YTZy8Hv7u~wvkg75EFNOyqb~nAPg-khyp2TS~ObUz8WlqYAM2VlEzJ7wJB91P-cUlKF18zSzVoJFmsrcQHZCirSbWoOknS6iNmsGRh5KVZsBEfp1Dg3gwTipTRIx7Vl5Vy~1OSKQVjYiGZS9q8RL0MF~7xFiKxZDLbPxk0AK9TzGGqm~wMTI2HS0Gm4Ycy8LYPVmLvGonIBYndg2bJC7WLuF6tVjVquiokSVDKFwq70BCUU5AU-EvdOD5KEOAM7mPfw-gJUG4tm1TtvcobrObqoRnmhXPTBTN5H7qDD12AvlwFGnfAlBXjuP4xOUAISL5SRLiulrsMSiT4GcugSI80mF6sdB0zWRgL1yyvoVWeTBn1TqjO27alr95DGTluuSqrNAxgpQzCKEWAyzrQkBfo2avGAmmz2NaHaAvYbOg0QSJz1PLjv2jdPW~ofiQmrGWM1cd~1cCqAAAA/http", + "/garlic64/jT~IyXaoauTni6N4517EG8mrFUKpy0IlgZh-EY9csMAk82Odatmzr~YTZy8Hv7u~wvkg75EFNOyqb~nAPg-khyp2TS~ObUz8WlqYAM2VlEzJ7wJB91P-cUlKF18zSzVoJFmsrcQHZCirSbWoOknS6iNmsGRh5KVZsBEfp1Dg3gwTipTRIx7Vl5Vy~1OSKQVjYiGZS9q8RL0MF~7xFiKxZDLbPxk0AK9TzGGqm~wMTI2HS0Gm4Ycy8LYPVmLvGonIBYndg2bJC7WLuF6tVjVquiokSVDKFwq70BCUU5AU-EvdOD5KEOAM7mPfw-gJUG4tm1TtvcobrObqoRnmhXPTBTN5H7qDD12AvlwFGnfAlBXjuP4xOUAISL5SRLiulrsMSiT4GcugSI80mF6sdB0zWRgL1yyvoVWeTBn1TqjO27alr95DGTluuSqrNAxgpQzCKEWAyzrQkBfo2avGAmmz2NaHaAvYbOg0QSJz1PLjv2jdPW~ofiQmrGWM1cd~1cCqAAAA/udp/8080", + "/garlic64/jT~IyXaoauTni6N4517EG8mrFUKpy0IlgZh-EY9csMAk82Odatmzr~YTZy8Hv7u~wvkg75EFNOyqb~nAPg-khyp2TS~ObUz8WlqYAM2VlEzJ7wJB91P-cUlKF18zSzVoJFmsrcQHZCirSbWoOknS6iNmsGRh5KVZsBEfp1Dg3gwTipTRIx7Vl5Vy~1OSKQVjYiGZS9q8RL0MF~7xFiKxZDLbPxk0AK9TzGGqm~wMTI2HS0Gm4Ycy8LYPVmLvGonIBYndg2bJC7WLuF6tVjVquiokSVDKFwq70BCUU5AU-EvdOD5KEOAM7mPfw-gJUG4tm1TtvcobrObqoRnmhXPTBTN5H7qDD12AvlwFGnfAlBXjuP4xOUAISL5SRLiulrsMSiT4GcugSI80mF6sdB0zWRgL1yyvoVWeTBn1TqjO27alr95DGTluuSqrNAxgpQzCKEWAyzrQkBfo2avGAmmz2NaHaAvYbOg0QSJz1PLjv2jdPW~ofiQmrGWM1cd~1cCqAAAA/tcp/8080", + "/garlic32/566niximlxdzpanmn4qouucvua3k7neniwss47li5r6ugoertzuq", + "/garlic32/566niximlxdzpanmn4qouucvua3k7neniwss47li5r6ugoertzuqzwas", + "/garlic32/566niximlxdzpanmn4qouucvua3k7neniwss47li5r6ugoertzuqzwassw", + "/garlic32/566niximlxdzpanmn4qouucvua3k7neniwss47li5r6ugoertzuq/http", + "/garlic32/566niximlxdzpanmn4qouucvua3k7neniwss47li5r6ugoertzuq/tcp/8080", + "/garlic32/566niximlxdzpanmn4qouucvua3k7neniwss47li5r6ugoertzuq/udp/8080", + "/udp/0", + "/tcp/0", + "/sctp/0", + "/udp/1234", + "/tcp/1234", + "/sctp/1234", + "/udp/65535", + "/tcp/65535", + "/ipfs/QmcgpsyWgH8Y8ajJz1Cu72KnS5uo2Aa2LpzU7kinSupNKC", + 
"/ipfs/k2k4r8oqamigqdo6o7hsbfwd45y70oyynp98usk7zmyfrzpqxh1pohl7", + "/p2p/QmcgpsyWgH8Y8ajJz1Cu72KnS5uo2Aa2LpzU7kinSupNKC", + "/p2p/k2k4r8oqamigqdo6o7hsbfwd45y70oyynp98usk7zmyfrzpqxh1pohl7", + "/p2p/bafzbeigvf25ytwc3akrijfecaotc74udrhcxzh2cx3we5qqnw5vgrei4bm", + "/p2p/12D3KooWCryG7Mon9orvQxcS1rYZjotPgpwoJNHHKcLLfE4Hf5mV", + "/p2p/k51qzi5uqu5dhb6l8spkdx7yxafegfkee5by8h7lmjh2ehc2sgg34z7c15vzqs", + "/p2p/bafzaajaiaejcalj543iwv2d7pkjt7ykvefrkfu7qjfi6sduakhso4lay6abn2d5u", + "/udp/1234/sctp/1234", + "/udp/1234/udt", + "/udp/1234/utp", + "/tcp/1234/http", + "/tcp/1234/tls/http", + "/tcp/1234/https", + "/ipfs/QmcgpsyWgH8Y8ajJz1Cu72KnS5uo2Aa2LpzU7kinSupNKC/tcp/1234", + "/ipfs/k2k4r8oqamigqdo6o7hsbfwd45y70oyynp98usk7zmyfrzpqxh1pohl7/tcp/1234", + "/p2p/QmcgpsyWgH8Y8ajJz1Cu72KnS5uo2Aa2LpzU7kinSupNKC/tcp/1234", + "/p2p/k2k4r8oqamigqdo6o7hsbfwd45y70oyynp98usk7zmyfrzpqxh1pohl7/tcp/1234", + "/ip4/127.0.0.1/udp/1234", + "/ip4/127.0.0.1/udp/0", + "/ip4/127.0.0.1/tcp/1234", + "/ip4/127.0.0.1/tcp/1234/", + "/ip4/127.0.0.1/udp/1234/quic", + "/ip4/127.0.0.1/udp/1234/quic-v1", + "/ip4/127.0.0.1/udp/1234/quic-v1/webtransport", + "/ip4/127.0.0.1/udp/1234/quic-v1/webtransport/certhash/b2uaraocy6yrdblb4sfptaddgimjmmpy", + "/ip4/127.0.0.1/udp/1234/quic-v1/webtransport/certhash/b2uaraocy6yrdblb4sfptaddgimjmmpy/certhash/zQmbWTwYGcmdyK9CYfNBcfs9nhZs17a6FQ4Y8oea278xx41", + "/ip4/127.0.0.1/ipfs/QmcgpsyWgH8Y8ajJz1Cu72KnS5uo2Aa2LpzU7kinSupNKC", + "/ip4/127.0.0.1/ipfs/QmcgpsyWgH8Y8ajJz1Cu72KnS5uo2Aa2LpzU7kinSupNKC/tcp/1234", + "/ip4/127.0.0.1/ipfs/k2k4r8oqamigqdo6o7hsbfwd45y70oyynp98usk7zmyfrzpqxh1pohl7", + "/ip4/127.0.0.1/ipfs/k2k4r8oqamigqdo6o7hsbfwd45y70oyynp98usk7zmyfrzpqxh1pohl7/tcp/1234", + "/ip4/127.0.0.1/p2p/QmcgpsyWgH8Y8ajJz1Cu72KnS5uo2Aa2LpzU7kinSupNKC", + "/ip4/127.0.0.1/p2p/QmcgpsyWgH8Y8ajJz1Cu72KnS5uo2Aa2LpzU7kinSupNKC/tcp/1234", + "/ip4/127.0.0.1/p2p/k2k4r8oqamigqdo6o7hsbfwd45y70oyynp98usk7zmyfrzpqxh1pohl7", + "/ip4/127.0.0.1/p2p/k2k4r8oqamigqdo6o7hsbfwd45y70oyynp98usk7zmyfrzpqxh1pohl7/tcp/1234", + "/unix/a/b/c/d/e", + "/unix/stdio", + "/ip4/1.2.3.4/tcp/80/unix/a/b/c/d/e/f", + "/ip4/127.0.0.1/ipfs/QmcgpsyWgH8Y8ajJz1Cu72KnS5uo2Aa2LpzU7kinSupNKC/tcp/1234/unix/stdio", + "/ip4/127.0.0.1/ipfs/k2k4r8oqamigqdo6o7hsbfwd45y70oyynp98usk7zmyfrzpqxh1pohl7/tcp/1234/unix/stdio", + "/ip4/127.0.0.1/p2p/QmcgpsyWgH8Y8ajJz1Cu72KnS5uo2Aa2LpzU7kinSupNKC/tcp/1234/unix/stdio", + "/ip4/127.0.0.1/p2p/k2k4r8oqamigqdo6o7hsbfwd45y70oyynp98usk7zmyfrzpqxh1pohl7/tcp/1234/unix/stdio", + "/ip4/127.0.0.1/tcp/9090/http/p2p-webrtc-direct", + "/ip4/127.0.0.1/tcp/127/ws", + "/ip4/127.0.0.1/tcp/127/ws", + "/ip4/127.0.0.1/tcp/127/tls", + "/ip4/127.0.0.1/tcp/127/tls/ws", + "/ip4/127.0.0.1/tcp/127/noise", + "/ip4/127.0.0.1/tcp/127/wss", + "/ip4/127.0.0.1/tcp/127/wss", + "/ip4/127.0.0.1/tcp/127/webrtc-direct", + "/ip4/127.0.0.1/tcp/127/webrtc", + "/http-path/tmp%2Fbar", + "/http-path/tmp%2Fbar%2Fbaz", + "/http-path/foo", + "/ip4/127.0.0.1/tcp/0/p2p/12D3KooWCryG7Mon9orvQxcS1rYZjotPgpwoJNHHKcLLfE4Hf5mV/http-path/foo", + "/ip4/127.0.0.1/tcp/443/tls/sni/example.com/http/http-path/foo", +} + +func TestConstructSucceeds(t *testing.T) { + for _, a := range good { + if _, err := NewMultiaddr(a); err != nil { + t.Errorf("should have succeeded: %s -- %s", a, err) + } + } +} + +func TestEqual(t *testing.T) { + m1 := newMultiaddr(t, "/ip4/127.0.0.1/udp/1234") + m2 := newMultiaddr(t, "/ip4/127.0.0.1/tcp/1234") + m3 := newMultiaddr(t, "/ip4/127.0.0.1/tcp/1234") + m4 := newMultiaddr(t, "/ip4/127.0.0.1/tcp/1234/") + + if m1.Equal(m2) { + t.Error("should not be equal") 
+ } + + if m2.Equal(m1) { + t.Error("should not be equal") + } + + if !m2.Equal(m3) { + t.Error("should be equal") + } + + if !m3.Equal(m2) { + t.Error("should be equal") + } + + if !m1.Equal(m1) { + t.Error("should be equal") + } + + if !m2.Equal(m4) { + t.Error("should be equal") + } + + if !m4.Equal(m3) { + t.Error("should be equal") + } +} + +// TestNilInterface makes sure funcs that accept a multiaddr interface don't +// panic if it's passed a nil interface. +func TestNilInterface(t *testing.T) { + m1 := newMultiaddr(t, "/ip4/127.0.0.1/udp/1234") + var m2 Multiaddr + m1.Equal(m2) + m1.Encapsulate(m2) + m1.Decapsulate(m2) + + // Test components + c, _, _ := SplitFirst(m1) + c.Equal(m2) + c.Encapsulate(m2) + c.Decapsulate(m2) + + // Util funcs + _ = Split(m2) + _, _, _ = SplitFirst(m2) + _, _, _ = SplitLast(m2) + ForEach(m2, func(c Component, e error) bool { return true }) +} + +func TestStringToBytes(t *testing.T) { + + testString := func(s string, h string) { + b1, err := hex.DecodeString(h) + if err != nil { + t.Error("failed to decode hex", h) + } + + // t.Log("196", h, []byte(b1)) + + b2, err := stringToBytes(s) + if err != nil { + t.Error("failed to convert", s, err) + } + + if !bytes.Equal(b1, b2) { + t.Error("failed to convert \n", s, "to\n", hex.EncodeToString(b1), "got\n", hex.EncodeToString(b2)) + } + + if err := validateBytes(b2); err != nil { + t.Error(err, "len:", len(b2)) + } + } + + testString("/ip4/127.0.0.1/udp/1234", "047f000001910204d2") + testString("/ip4/127.0.0.1/tcp/4321", "047f0000010610e1") + testString("/ip4/127.0.0.1/udp/1234/ip4/127.0.0.1/tcp/4321", "047f000001910204d2047f0000010610e1") + testString("/onion/aaimaq4ygg2iegci:80", "bc030010c0439831b48218480050") + testString("/onion3/vww6ybal4bd7szmgncyruucpgfkqahzddi37ktceo3ah7ngmcopnpyyd:1234", "bd03adadec040be047f9658668b11a504f3155001f231a37f54c4476c07fb4cc139ed7e30304d2") + testString("/garlic64/jT~IyXaoauTni6N4517EG8mrFUKpy0IlgZh-EY9csMAk82Odatmzr~YTZy8Hv7u~wvkg75EFNOyqb~nAPg-khyp2TS~ObUz8WlqYAM2VlEzJ7wJB91P-cUlKF18zSzVoJFmsrcQHZCirSbWoOknS6iNmsGRh5KVZsBEfp1Dg3gwTipTRIx7Vl5Vy~1OSKQVjYiGZS9q8RL0MF~7xFiKxZDLbPxk0AK9TzGGqm~wMTI2HS0Gm4Ycy8LYPVmLvGonIBYndg2bJC7WLuF6tVjVquiokSVDKFwq70BCUU5AU-EvdOD5KEOAM7mPfw-gJUG4tm1TtvcobrObqoRnmhXPTBTN5H7qDD12AvlwFGnfAlBXjuP4xOUAISL5SRLiulrsMSiT4GcugSI80mF6sdB0zWRgL1yyvoVWeTBn1TqjO27alr95DGTluuSqrNAxgpQzCKEWAyzrQkBfo2avGAmmz2NaHaAvYbOg0QSJz1PLjv2jdPW~ofiQmrGWM1cd~1cCqAAAA", + "be0383038d3fc8c976a86ae4e78ba378e75ec41bc9ab1542a9cb422581987e118f5cb0c024f3639d6ad9b3aff613672f07bfbbbfc2f920ef910534ecaa6ff9c03e0fa4872a764d2fce6d4cfc5a5a9800cd95944cc9ef0241f753fe71494a175f334b35682459acadc4076428ab49b5a83a49d2ea2366b06461e4a559b0111fa750e0de0c138a94d1231ed5979572ff53922905636221994bdabc44bd0c17fef11622b16432db3f193400af53cc61aa9bfc0c4c8d874b41a6e18732f0b60f5662ef1a89c80589dd8366c90bb58bb85ead56356aba2a244950ca170abbd01094539014f84bdd383e4a10e00cee63dfc3e809506e2d9b54edbdca1bace6eaa119e68573d30533791fba830f5d80be5c051a77c09415e3b8fe3139400848be5244b8ae96bb0c4a24f819cba0488f34985eac741d3359180bd72cafa1559e4c19f54ea8cedbb6a5afde4319396eb92aab340c60a50cc2284580cb3ad09017e8d9abc60269b3d8d687680bd86ce834412273d4f2e3bf68dd3d6fe87e2426ac658cd5c77fd5c0aa000000") + testString("/garlic32/566niximlxdzpanmn4qouucvua3k7neniwss47li5r6ugoertzuq", + "bf0320efbcd45d0c5dc79781ac6f20ea5055a036afb48d45a52e7d68ec7d4338919e69") + +} + +func TestBytesToString(t *testing.T) { + + testString := func(s1 string, h string) { + t.Helper() + b, err := hex.DecodeString(h) + if err != nil { + t.Error("failed to 
decode hex", h) + } + + if err := validateBytes(b); err != nil { + t.Error(err) + } + + s2, err := bytesToString(b) + if err != nil { + t.Log("236", s1, ":", string(h), ":", s2) + t.Error("failed to convert", b, err) + } + + if s1 != s2 { + t.Error("failed to convert", b, "to", s1, "got", s2) + } + } + + testString("/ip4/127.0.0.1/udp/1234", "047f000001910204d2") + testString("/ip4/127.0.0.1/tcp/4321", "047f0000010610e1") + testString("/ip4/127.0.0.1/udp/1234/ip4/127.0.0.1/tcp/4321", "047f000001910204d2047f0000010610e1") + testString("/onion/aaimaq4ygg2iegci:80", "bc030010c0439831b48218480050") + testString("/onion3/vww6ybal4bd7szmgncyruucpgfkqahzddi37ktceo3ah7ngmcopnpyyd:1234", "bd03adadec040be047f9658668b11a504f3155001f231a37f54c4476c07fb4cc139ed7e30304d2") + testString("/garlic64/jT~IyXaoauTni6N4517EG8mrFUKpy0IlgZh-EY9csMAk82Odatmzr~YTZy8Hv7u~wvkg75EFNOyqb~nAPg-khyp2TS~ObUz8WlqYAM2VlEzJ7wJB91P-cUlKF18zSzVoJFmsrcQHZCirSbWoOknS6iNmsGRh5KVZsBEfp1Dg3gwTipTRIx7Vl5Vy~1OSKQVjYiGZS9q8RL0MF~7xFiKxZDLbPxk0AK9TzGGqm~wMTI2HS0Gm4Ycy8LYPVmLvGonIBYndg2bJC7WLuF6tVjVquiokSVDKFwq70BCUU5AU-EvdOD5KEOAM7mPfw-gJUG4tm1TtvcobrObqoRnmhXPTBTN5H7qDD12AvlwFGnfAlBXjuP4xOUAISL5SRLiulrsMSiT4GcugSI80mF6sdB0zWRgL1yyvoVWeTBn1TqjO27alr95DGTluuSqrNAxgpQzCKEWAyzrQkBfo2avGAmmz2NaHaAvYbOg0QSJz1PLjv2jdPW~ofiQmrGWM1cd~1cCqAAAA", + "be0383038d3fc8c976a86ae4e78ba378e75ec41bc9ab1542a9cb422581987e118f5cb0c024f3639d6ad9b3aff613672f07bfbbbfc2f920ef910534ecaa6ff9c03e0fa4872a764d2fce6d4cfc5a5a9800cd95944cc9ef0241f753fe71494a175f334b35682459acadc4076428ab49b5a83a49d2ea2366b06461e4a559b0111fa750e0de0c138a94d1231ed5979572ff53922905636221994bdabc44bd0c17fef11622b16432db3f193400af53cc61aa9bfc0c4c8d874b41a6e18732f0b60f5662ef1a89c80589dd8366c90bb58bb85ead56356aba2a244950ca170abbd01094539014f84bdd383e4a10e00cee63dfc3e809506e2d9b54edbdca1bace6eaa119e68573d30533791fba830f5d80be5c051a77c09415e3b8fe3139400848be5244b8ae96bb0c4a24f819cba0488f34985eac741d3359180bd72cafa1559e4c19f54ea8cedbb6a5afde4319396eb92aab340c60a50cc2284580cb3ad09017e8d9abc60269b3d8d687680bd86ce834412273d4f2e3bf68dd3d6fe87e2426ac658cd5c77fd5c0aa000000") + testString("/garlic32/566niximlxdzpanmn4qouucvua3k7neniwss47li5r6ugoertzuq", + "bf0320efbcd45d0c5dc79781ac6f20ea5055a036afb48d45a52e7d68ec7d4338919e69") +} + +func TestBytesSplitAndJoin(t *testing.T) { + + testString := func(s string, res []string) { + m, err := NewMultiaddr(s) + if err != nil { + t.Fatal("failed to convert", s, err) + } + + split := Split(m) + if len(split) != len(res) { + t.Error("not enough split components", split) + return + } + + for i, a := range split { + if a.String() != res[i] { + t.Errorf("split component failed: %s != %s", a, res[i]) + } + } + + joined := Join(split...) 
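+		// Joining the split components back together should reproduce the
+		// original multiaddr byte-for-byte.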
+		if !m.Equal(joined) {
+			t.Errorf("joined components failed: %s != %s", m, joined)
+		}
+	}
+
+	testString("/ip4/1.2.3.4/udp/1234", []string{"/ip4/1.2.3.4", "/udp/1234"})
+	testString("/ip4/1.2.3.4/tcp/1/ip4/2.3.4.5/udp/2",
+		[]string{"/ip4/1.2.3.4", "/tcp/1", "/ip4/2.3.4.5", "/udp/2"})
+	testString("/ip4/1.2.3.4/utp/ip4/2.3.4.5/udp/2/udt",
+		[]string{"/ip4/1.2.3.4", "/utp", "/ip4/2.3.4.5", "/udp/2", "/udt"})
+}
+
+func TestProtocols(t *testing.T) {
+	m, err := NewMultiaddr("/ip4/127.0.0.1/udp/1234")
+	if err != nil {
+		t.Error("failed to construct", "/ip4/127.0.0.1/udp/1234")
+	}
+
+	ps := m.Protocols()
+	if ps[0].Code != ProtocolWithName("ip4").Code {
+		t.Error(ps[0], ProtocolWithName("ip4"))
+		t.Error("failed to get ip4 protocol")
+	}
+
+	if ps[1].Code != ProtocolWithName("udp").Code {
+		t.Error(ps[1], ProtocolWithName("udp"))
+		t.Error("failed to get udp protocol")
+	}
+}
+
+func TestProtocolsWithString(t *testing.T) {
+	pwn := ProtocolWithName
+	good := map[string][]Protocol{
+		"/ip4":                    {pwn("ip4")},
+		"/ip4/tcp":                {pwn("ip4"), pwn("tcp")},
+		"ip4/tcp/udp/ip6":         {pwn("ip4"), pwn("tcp"), pwn("udp"), pwn("ip6")},
+		"////////ip4/tcp":         {pwn("ip4"), pwn("tcp")},
+		"ip4/udp/////////":        {pwn("ip4"), pwn("udp")},
+		"////////ip4/tcp////////": {pwn("ip4"), pwn("tcp")},
+	}
+
+	for s, ps1 := range good {
+		ps2, err := ProtocolsWithString(s)
+		if err != nil {
+			t.Errorf("ProtocolsWithString(%s) should have succeeded", s)
+		}
+
+		for i, ps1p := range ps1 {
+			ps2p := ps2[i]
+			if ps1p.Code != ps2p.Code {
+				t.Errorf("mismatch: %s != %s, %s", ps1p.Name, ps2p.Name, s)
+			}
+		}
+	}
+
+	bad := []string{
+		"dsijafd",                           // bogus proto
+		"/ip4/tcp/fidosafoidsa",             // bogus proto
+		"////////ip4/tcp/21432141/////////", // bogus proto
+		"////////ip4///////tcp/////////",    // empty protos in between
+	}
+
+	for _, s := range bad {
+		if _, err := ProtocolsWithString(s); err == nil {
+			t.Errorf("ProtocolsWithString(%s) should have failed", s)
+		}
+	}
+}
+
+func TestEncapsulate(t *testing.T) {
+	m, err := NewMultiaddr("/ip4/127.0.0.1/udp/1234")
+	if err != nil {
+		t.Error(err)
+	}
+
+	m2, err := NewMultiaddr("/udp/5678")
+	if err != nil {
+		t.Error(err)
+	}
+
+	b := m.Encapsulate(m2)
+	if s := b.String(); s != "/ip4/127.0.0.1/udp/1234/udp/5678" {
+		t.Error("encapsulate /ip4/127.0.0.1/udp/1234/udp/5678 failed.", s)
+	}
+
+	m3, _ := NewMultiaddr("/udp/5678")
+	c := b.Decapsulate(m3)
+	if s := c.String(); s != "/ip4/127.0.0.1/udp/1234" {
+		t.Error("decapsulate /udp failed.", "/ip4/127.0.0.1/udp/1234", s)
+	}
+
+	m4, _ := NewMultiaddr("/ip4/127.0.0.1")
+	d := c.Decapsulate(m4)
+	if d != nil {
+		t.Error("decapsulate /ip4 failed: ", d)
+	}
+}
+
+func TestDecapsulateComment(t *testing.T) {
+	// shows the behavior from the interface comment
+	m, _ := StringCast("/ip4/1.2.3.4/tcp/80")
+	n, _ := StringCast("/tcp/80")
+	rest := m.Decapsulate(n)
+	if rest.String() != "/ip4/1.2.3.4" {
+		t.Fatalf("Documented behavior is not correct. Expected %v saw %v", "/ip4/1.2.3.4", rest.String())
+	}
+
+	m, _ = StringCast("/ip4/1.2.3.4/tcp/80")
+	n, _ = StringCast("/udp/80")
+	rest = m.Decapsulate(n)
+	if !rest.Equal(m) {
+		t.Fatalf("Documented behavior is not correct. Expected %v saw %v", "/ip4/1.2.3.4/tcp/80", rest.String())
+	}
+
+	m, _ = StringCast("/ip4/1.2.3.4/tcp/80")
+	n, _ = StringCast("/ip4/1.2.3.4")
+	rest = m.Decapsulate(n)
+	require.Nil(t, rest, "expected a nil multiaddr if we decapsulate everything")
+}
+
+func TestDecapsulate(t *testing.T) {
+	t.Run("right is nil", func(t *testing.T) {
+		left, _ := StringCast("/ip4/1.2.3.4/tcp/1")
+		var right Multiaddr
+		left.Decapsulate(right)
+	})
+
+	testcases := []struct {
+		left, right, expected string
+	}{
+		{"/ip4/1.2.3.4/tcp/1234", "/ip4/1.2.3.4", ""},
+		{"/ip4/1.2.3.4", "/ip4/1.2.3.4/tcp/1234", "/ip4/1.2.3.4"},
+		{"/ip4/1.2.3.5/tcp/1234", "/ip4/5.3.2.1", "/ip4/1.2.3.5/tcp/1234"},
+		{"/ip4/1.2.3.5/udp/1234/quic-v1", "/udp/1234", "/ip4/1.2.3.5"},
+		{"/ip4/1.2.3.6/udp/1234/quic-v1", "/udp/1234/quic-v1", "/ip4/1.2.3.6"},
+		{"/ip4/1.2.3.7/tcp/1234", "/ws", "/ip4/1.2.3.7/tcp/1234"},
+		{"/dnsaddr/wss.com/tcp/4001", "/ws", "/dnsaddr/wss.com/tcp/4001"},
+		{"/dnsaddr/wss.com/tcp/4001/ws", "/wss", "/dnsaddr/wss.com/tcp/4001/ws"},
+		{"/dnsaddr/wss.com/ws", "/wss", "/dnsaddr/wss.com/ws"},
+		{"/dnsaddr/wss.com/ws", "/dnsaddr/wss.com", ""},
+		{"/dnsaddr/wss.com/tcp/4001/wss", "/wss", "/dnsaddr/wss.com/tcp/4001"},
+	}
+
+	for _, tc := range testcases {
+		t.Run(tc.left, func(t *testing.T) {
+			left, _ := StringCast(tc.left)
+			right, _ := StringCast(tc.right)
+			actualMa := left.Decapsulate(right)
+
+			if tc.expected == "" {
+				require.Nil(t, actualMa, "expected nil")
+				return
+			}
+
+			actual := actualMa.String()
+			expected, _ := StringCast(tc.expected)
+			require.Equal(t, expected.String(), actual)
+		})
+	}
+}
+
+func assertValueForProto(t *testing.T, a Multiaddr, p int, exp string) {
+	t.Logf("checking for %s in %s", ProtocolWithCode(p).Name, a)
+	fv, err := a.ValueForProtocol(p)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if fv != exp {
+		t.Fatalf("expected %q for %d in %s, but got %q instead", exp, p, a, fv)
+	}
+}
+
+func TestGetValue(t *testing.T) {
+	a := newMultiaddr(t, "/ip4/127.0.0.1/utp/tcp/5555/udp/1234/tls/utp/ipfs/QmbHVEEepCi7rn7VL7Exxpd2Ci9NNB6ifvqwhsrbRMgQFP")
+	assertValueForProto(t, a, P_IP4, "127.0.0.1")
+	assertValueForProto(t, a, P_UTP, "")
+	assertValueForProto(t, a, P_TLS, "")
+	assertValueForProto(t, a, P_TCP, "5555")
+	assertValueForProto(t, a, P_UDP, "1234")
+	assertValueForProto(t, a, P_IPFS, "QmbHVEEepCi7rn7VL7Exxpd2Ci9NNB6ifvqwhsrbRMgQFP")
+	assertValueForProto(t, a, P_P2P, "QmbHVEEepCi7rn7VL7Exxpd2Ci9NNB6ifvqwhsrbRMgQFP")
+
+	_, err := a.ValueForProtocol(P_IP6)
+	switch err {
+	case ErrProtocolNotFound:
+		break
+	case nil:
+		t.Fatal("expected value lookup to fail")
+	default:
+		t.Fatalf("expected ErrProtocolNotFound but got: %s", err)
+	}
+
+	a = newMultiaddr(t, "/ip4/0.0.0.0") // only one addr
+	assertValueForProto(t, a, P_IP4, "0.0.0.0")
+
+	a = newMultiaddr(t, "/ip4/0.0.0.0/ip4/0.0.0.0/ip4/0.0.0.0") // same sub-addr
+	assertValueForProto(t, a, P_IP4, "0.0.0.0")
+
+	a = newMultiaddr(t, "/ip4/0.0.0.0/udp/12345/utp") // ending in a no-value one.
+	assertValueForProto(t, a, P_IP4, "0.0.0.0")
+	assertValueForProto(t, a, P_UDP, "12345")
+	assertValueForProto(t, a, P_UTP, "")
+
+	a = newMultiaddr(t, "/ip4/0.0.0.0/unix/a/b/c/d") // ending in a path one.
+ assertValueForProto(t, a, P_IP4, "0.0.0.0") + assertValueForProto(t, a, P_UNIX, "/a/b/c/d") +} + +func FuzzNewMultiaddrBytes(f *testing.F) { + for _, v := range good { + ma, err := NewMultiaddr(v) + if err != nil { + f.Fatal(err) + } + f.Add(ma.Bytes()) + } + + f.Fuzz(func(t *testing.T, b []byte) { + // just checking that it doesn't panic + ma, err := NewMultiaddrBytes(b) + if err == nil { + // for any valid multiaddrs, make sure these calls don't panic + ma.Protocols() + roundTripBytes(t, ma) + roundTripString(t, ma) + } + }) +} + +func FuzzNewMultiaddrString(f *testing.F) { + for _, v := range good { + if _, err := NewMultiaddr(v); err != nil { + // Validate maddrs + f.Fatal(err) + } + f.Add(v) + } + f.Fuzz(func(t *testing.T, s string) { + // just checking that it doesn't panic + ma, err := NewMultiaddr(s) + if err == nil { + // for any valid multiaddrs, make sure these calls don't panic + ma.Protocols() + roundTripBytes(t, ma) + roundTripString(t, ma) + } + }) +} + +func roundTripBytes(t *testing.T, orig Multiaddr) { + m2, err := NewMultiaddrBytes(orig.Bytes()) + if err != nil { + t.Fatalf("failed to parse maddr back from ma.Bytes, %v: %v", orig, err) + } + if !m2.Equal(orig) { + t.Fatalf("unequal maddr after roundTripBytes %v %v", orig, m2) + } +} + +func roundTripString(t *testing.T, orig Multiaddr) { + m2, err := NewMultiaddr(orig.String()) + if err != nil { + t.Fatalf("failed to parse maddr back from ma.String, %v: %v", orig, err) + } + if !m2.Equal(orig) { + t.Fatalf("unequal maddr after roundTripString %v %v\n% 02x\n% 02x\n", orig, m2, orig.Bytes(), m2.Bytes()) + } +} + +func TestBinaryRepresentation(t *testing.T) { + expected := []byte{0x4, 0x7f, 0x0, 0x0, 0x1, 0x91, 0x2, 0x4, 0xd2} + ma, err := NewMultiaddr("/ip4/127.0.0.1/udp/1234") + if err != nil { + t.Error(err) + } + + if !bytes.Equal(ma.Bytes(), expected) { + t.Errorf("expected %x, got %x", expected, ma.Bytes()) + } +} + +func TestRoundTrip(t *testing.T) { + for _, s := range []string{ + "/unix/a/b/c/d", + "/ip6/::ffff:127.0.0.1/tcp/111", + "/ip4/127.0.0.1/tcp/123", + "/ip4/127.0.0.1/tcp/123/tls", + "/ip4/127.0.0.1/udp/123", + "/ip4/127.0.0.1/udp/123/ip6/::", + "/ip4/127.0.0.1/udp/1234/quic-v1/webtransport/certhash/uEiDDq4_xNyDorZBH3TlGazyJdOWSwvo4PUo5YHFMrvDE8g", + "/p2p/QmbHVEEepCi7rn7VL7Exxpd2Ci9NNB6ifvqwhsrbRMgQFP", + "/p2p/QmbHVEEepCi7rn7VL7Exxpd2Ci9NNB6ifvqwhsrbRMgQFP/unix/a/b/c", + "/http-path/tmp%2Fbar", + } { + ma, err := NewMultiaddr(s) + if err != nil { + t.Errorf("error when parsing %q: %s", s, err) + continue + } + if ma.String() != s { + t.Errorf("failed to round trip %q", s) + } + } +} + +func TestIPFSvP2P(t *testing.T) { + var ( + p2pAddr = "/p2p/QmbHVEEepCi7rn7VL7Exxpd2Ci9NNB6ifvqwhsrbRMgQFP" + ipfsAddr = "/ipfs/QmbHVEEepCi7rn7VL7Exxpd2Ci9NNB6ifvqwhsrbRMgQFP" + ) + + for _, s := range []string{p2pAddr, ipfsAddr} { + ma, err := NewMultiaddr(s) + if err != nil { + t.Errorf("error when parsing %q: %s", s, err) + } + if ma.String() != p2pAddr { + t.Errorf("expected %q, got %q", p2pAddr, ma.String()) + } + } +} + +func TestInvalidP2PAddrBytes(t *testing.T) { + badAddr := "a503221221c05877cbae039d70a5e600ea02c6f9f2942439285c9e344e26f8d280c850fad6" + bts, err := hex.DecodeString(badAddr) + if err != nil { + t.Fatal(err) + } + ma, err := NewMultiaddrBytes(bts) + if err == nil { + t.Error("should have failed") + // Check for panic + _ = ma.String() + } +} + +func TestInvalidP2PAddrString(t *testing.T) { + hashedData, err := mh.Sum([]byte("test"), mh.SHA2_256, -1) + if err != nil { + t.Fatal(err) + } + + // using 
MD5 since it's not a valid data codec + unknownCodecCID := cid.NewCidV1(mh.MD5, hashedData).String() + + badStringAddrs := []string{ + "/p2p/k2k4r8oqamigqdo6o7hsbfwd45y70oyynp98usk7zmyfrzpqxh1pohl-", // invalid multibase encoding + "/p2p/?unknownmultibase", // invalid multibase encoding + "/p2p/k2jmtxwoe2phm1hbqp0e7nufqf6umvuu2e9qd7ana7h411a0haqj6i2z", // non-libp2p-key codec + "/p2p/" + unknownCodecCID, // impossible codec + } + for _, a := range badStringAddrs { + ma, err := NewMultiaddr(a) + if err == nil { + t.Error("should have failed") + // Check for panic + _ = ma.String() + } + } +} + +func TestZone(t *testing.T) { + ip6String := "/ip6zone/eth0/ip6/::1" + ip6Bytes := []byte{ + 0x2a, 4, + 'e', 't', 'h', '0', + 0x29, + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 1, + } + + ma, err := NewMultiaddr(ip6String) + if err != nil { + t.Error(err) + } + if !bytes.Equal(ma.Bytes(), ip6Bytes) { + t.Errorf("expected %x, got %x", ip6Bytes, ma.Bytes()) + } + + ma2, err2 := NewMultiaddrBytes(ip6Bytes) + if err2 != nil { + t.Error(err) + } + if ma2.String() != ip6String { + t.Errorf("expected %s, got %s", ip6String, ma2.String()) + } +} + +func TestBinaryMarshaler(t *testing.T) { + addr := newMultiaddr(t, "/ip4/0.0.0.0/tcp/4001/tls") + b, err := addr.MarshalBinary() + if err != nil { + t.Fatal(err) + } + + var addr2 multiaddr + if err = addr2.UnmarshalBinary(b); err != nil { + t.Fatal(err) + } + if !addr.Equal(&addr2) { + t.Error("expected equal addresses in circular marshaling test") + } +} + +func TestTextMarshaler(t *testing.T) { + addr := newMultiaddr(t, "/ip4/0.0.0.0/tcp/4001/tls") + b, err := addr.MarshalText() + if err != nil { + t.Fatal(err) + } + + var addr2 multiaddr + if err = addr2.UnmarshalText(b); err != nil { + t.Fatal(err) + } + if !addr.Equal(&addr2) { + t.Error("expected equal addresses in circular marshaling test") + } +} + +func TestJSONMarshaler(t *testing.T) { + addr := newMultiaddr(t, "/ip4/0.0.0.0/tcp/4001/tls") + b, err := addr.MarshalJSON() + if err != nil { + t.Fatal(err) + } + + var addr2 multiaddr + if err = addr2.UnmarshalJSON(b); err != nil { + t.Fatal(err) + } + if !addr.Equal(&addr2) { + t.Error("expected equal addresses in circular marshaling test") + } +} + +func TestComponentBinaryMarshaler(t *testing.T) { + comp, err := NewComponent("ip4", "0.0.0.0") + if err != nil { + t.Fatal(err) + } + b, err := comp.MarshalBinary() + if err != nil { + t.Fatal(err) + } + + comp2 := &Component{} + if err = comp2.UnmarshalBinary(b); err != nil { + t.Fatal(err) + } + if !comp.Equal(comp2) { + t.Error("expected equal components in circular marshaling test") + } +} + +func TestComponentTextMarshaler(t *testing.T) { + comp, err := NewComponent("ip4", "0.0.0.0") + if err != nil { + t.Fatal(err) + } + b, err := comp.MarshalText() + if err != nil { + t.Fatal(err) + } + + comp2 := &Component{} + if err = comp2.UnmarshalText(b); err != nil { + t.Fatal(err) + } + if !comp.Equal(comp2) { + t.Error("expected equal components in circular marshaling test") + } +} + +func TestComponentJSONMarshaler(t *testing.T) { + comp, err := NewComponent("ip4", "0.0.0.0") + if err != nil { + t.Fatal(err) + } + b, err := comp.MarshalJSON() + if err != nil { + t.Fatal(err) + } + + comp2 := &Component{} + if err = comp2.UnmarshalJSON(b); err != nil { + t.Fatal(err) + } + if !comp.Equal(comp2) { + t.Error("expected equal components in circular marshaling test") + } +} + +func TestFilterAddrs(t *testing.T) { + bad := []Multiaddr{ + newMultiaddr(t, "/ip6/fe80::1/tcp/1234"), + newMultiaddr(t, 
"/ip6/fe80::100/tcp/1234"), + } + good := []Multiaddr{ + newMultiaddr(t, "/ip4/127.0.0.1/tcp/1234"), + newMultiaddr(t, "/ip4/1.1.1.1/tcp/999"), + newMultiaddr(t, "/ip4/1.2.3.4/udp/1234/utp"), + } + goodAndBad := append(good, bad...) + + filter := func(addr Multiaddr) bool { + return addr.Protocols()[0].Code == P_IP4 + } + + require.Empty(t, FilterAddrs(bad, filter)) + require.ElementsMatch(t, FilterAddrs(good, filter), good) + require.ElementsMatch(t, FilterAddrs(goodAndBad, filter), good) +} + +func TestContains(t *testing.T) { + a1 := newMultiaddr(t, "/ip4/127.0.0.1/tcp/1234") + a2 := newMultiaddr(t, "/ip4/1.1.1.1/tcp/999") + a3 := newMultiaddr(t, "/ip4/1.2.3.4/udp/443/quic") + a4 := newMultiaddr(t, "/ip4/1.2.3.4/udp/443/quic-v1") + addrs := []Multiaddr{a1, a2, a3, a4} + + require.True(t, Contains(addrs, a1)) + require.True(t, Contains(addrs, a2)) + require.True(t, Contains(addrs, a3)) + require.True(t, Contains(addrs, a4)) + require.False(t, Contains(addrs, newMultiaddr(t, "/ip4/4.3.2.1/udp/1234/utp"))) + require.False(t, Contains(nil, a1)) +} + +func TestUniqueAddrs(t *testing.T) { + tcpAddr, _ := StringCast("/ip4/127.0.0.1/tcp/1234") + quicAddr, _ := StringCast("/ip4/127.0.0.1/udp/1234/quic-v1") + wsAddr, _ := StringCast("/ip4/127.0.0.1/tcp/1234/ws") + + type testcase struct { + in, out []Multiaddr + } + + for i, tc := range []testcase{ + {in: nil, out: nil}, + {in: []Multiaddr{tcpAddr}, out: []Multiaddr{tcpAddr}}, + {in: []Multiaddr{tcpAddr, tcpAddr, tcpAddr}, out: []Multiaddr{tcpAddr}}, + {in: []Multiaddr{tcpAddr, quicAddr, tcpAddr}, out: []Multiaddr{tcpAddr, quicAddr}}, + {in: []Multiaddr{tcpAddr, quicAddr, wsAddr}, out: []Multiaddr{tcpAddr, quicAddr, wsAddr}}, + } { + tc := tc + t.Run(fmt.Sprintf("test %d", i), func(t *testing.T) { + deduped := Unique(tc.in) + for _, a := range tc.out { + require.Contains(t, deduped, a) + } + }) + } +} + +func BenchmarkUniqueAddrs(b *testing.B) { + b.ReportAllocs() + var addrs []Multiaddr + r := rand.New(rand.NewSource(1234)) + for i := 0; i < 100; i++ { + tcpAddr, _ := StringCast(fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", r.Intn(math.MaxUint16))) + quicAddr, _ := StringCast(fmt.Sprintf("/ip4/127.0.0.1/udp/%d/quic-v1", r.Intn(math.MaxUint16))) + wsAddr, _ := StringCast(fmt.Sprintf("/ip4/127.0.0.1/tcp/%d/ws", r.Intn(math.MaxUint16))) + addrs = append(addrs, tcpAddr, tcpAddr, quicAddr, quicAddr, wsAddr) + } + for _, sz := range []int{10, 20, 30, 50, 100} { + b.Run(fmt.Sprintf("%d", sz), func(b *testing.B) { + items := make([]Multiaddr, sz) + for i := 0; i < b.N; i++ { + copy(items, addrs[:sz]) + Unique(items) + } + }) + } +} + +func TestDNS(t *testing.T) { + b := []byte("7*000000000000000000000000000000000000000000") + a, err := NewMultiaddrBytes(b) + if err != nil { + t.Fatal(err) + } + aa, _ := StringCast(a.String()) + if !a.Equal(aa) { + t.Fatal("expected equality") + } +} + +func TestHTTPPath(t *testing.T) { + t.Run("bad addr", func(t *testing.T) { + badAddr := "/http-path/thisIsMissingAfullByte%f" + _, err := NewMultiaddr(badAddr) + require.Error(t, err) + }) + + t.Run("only reads the http-path part", func(t *testing.T) { + addr := "/http-path/tmp%2Fbar/p2p-circuit" // The http-path only reference the part immediately after it. 
It does not include the rest of the multiaddr (like the /path component sometimes does) + m, err := NewMultiaddr(addr) + require.NoError(t, err) + m.ValueForProtocol(P_HTTP_PATH) + v, err := m.ValueForProtocol(P_HTTP_PATH) + require.NoError(t, err) + require.Equal(t, "tmp%2Fbar", v) + }) + + t.Run("round trip", func(t *testing.T) { + cases := []string{ + "/http-path/tmp%2Fbar", + "/http-path/tmp%2Fbar%2Fbaz", + "/http-path/foo", + "/ip4/127.0.0.1/tcp/0/p2p/12D3KooWCryG7Mon9orvQxcS1rYZjotPgpwoJNHHKcLLfE4Hf5mV/http-path/foo", + "/ip4/127.0.0.1/tcp/443/tls/sni/example.com/http/http-path/foo", + } + for _, c := range cases { + m, err := NewMultiaddr(c) + require.NoError(t, err) + require.Equal(t, c, m.String()) + } + }) + + t.Run("value for protocol", func(t *testing.T) { + m, _ := StringCast("/http-path/tmp%2Fbar") + v, err := m.ValueForProtocol(P_HTTP_PATH) + require.NoError(t, err) + // This gives us the url escaped version + require.Equal(t, "tmp%2Fbar", v) + + // If we want the raw unescaped version, we can use the component and read it + _, component, _ := SplitLast(m) + require.Equal(t, "tmp/bar", string(component.RawValue())) + }) +} + +func FuzzSplitRoundtrip(f *testing.F) { + for _, v := range good { + f.Add(v) + } + otherMultiaddr, _ := StringCast("/udp/1337") + + f.Fuzz(func(t *testing.T, addrStr string) { + addr, err := NewMultiaddr(addrStr) + if err != nil { + t.Skip() // Skip inputs that are not valid multiaddrs + } + + // Test SplitFirst + first, rest, _ := SplitFirst(addr) + joined := Join(first, rest) + require.Equal(t, addr, joined, "SplitFirst and Join should round-trip") + + // Test SplitLast + rest, last, _ := SplitLast(addr) + joined = Join(rest, last) + require.Equal(t, addr, joined, "SplitLast and Join should round-trip") + + p := addr.Protocols() + if len(p) == 0 { + t.Skip() + } + + tryPubMethods := func(a Multiaddr) { + if a == nil { + return + } + _ = a.Equal(otherMultiaddr) + _ = a.Bytes() + _ = a.String() + _ = a.Protocols() + _ = a.Encapsulate(otherMultiaddr) + _ = a.Decapsulate(otherMultiaddr) + _, _ = a.ValueForProtocol(P_TCP) + } + + for _, proto := range p { + splitFunc := func(c Component) bool { + return c.Protocol().Code == proto.Code + } + beforeC, after, _ := SplitFirst(addr) + joined = Join(beforeC, after) + require.Equal(t, addr, joined) + tryPubMethods(after) + + before, afterC, _ := SplitLast(addr) + joined = Join(before, afterC) + require.Equal(t, addr, joined) + tryPubMethods(before) + + before, after, _ = SplitFunc(addr, splitFunc) + joined = Join(before, after) + require.Equal(t, addr, joined) + tryPubMethods(before) + tryPubMethods(after) + } + }) +} diff --git a/go-multiaddr/net/convert.go b/go-multiaddr/net/convert.go new file mode 100644 index 0000000..1747c06 --- /dev/null +++ b/go-multiaddr/net/convert.go @@ -0,0 +1,375 @@ +package manet + +import ( + "errors" + "fmt" + "net" + "path/filepath" + "runtime" + "strings" + + ma "github.com/multiformats/go-multiaddr" +) + +var errIncorrectNetAddr = fmt.Errorf("incorrect network addr conversion") +var errNotIP = fmt.Errorf("multiaddr does not start with an IP address") + +// FromNetAddr converts a net.Addr type to a Multiaddr. +func FromNetAddr(a net.Addr) (ma.Multiaddr, error) { + return defaultCodecs.FromNetAddr(a) +} + +// FromNetAddr converts a net.Addr to Multiaddress. 
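+// A minimal sketch of typical use (the net.TCPAddr below is illustrative;
+// any net.Addr whose Network() has a registered parser works):
+//
+//	maddr, err := FromNetAddr(&net.TCPAddr{IP: net.ParseIP("127.0.0.1"), Port: 4001})
+//	// on success, maddr is /ip4/127.0.0.1/tcp/4001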
+func (cm *CodecMap) FromNetAddr(a net.Addr) (ma.Multiaddr, error) {
+	if a == nil {
+		return nil, fmt.Errorf("nil net.Addr")
+	}
+	p, err := cm.getAddrParser(a.Network())
+	if err != nil {
+		return nil, err
+	}
+
+	return p(a)
+}
+
+// ToNetAddr converts a Multiaddr to a net.Addr
+// Must be ThinWaist. Acceptable protocol stacks are:
+// /ip{4,6}/{tcp, udp}
+func ToNetAddr(maddr ma.Multiaddr) (net.Addr, error) {
+	return defaultCodecs.ToNetAddr(maddr)
+}
+
+// ToNetAddr converts a Multiaddress to a standard net.Addr.
+func (cm *CodecMap) ToNetAddr(maddr ma.Multiaddr) (net.Addr, error) {
+	protos := maddr.Protocols()
+	if len(protos) == 0 {
+		return nil, fmt.Errorf("no protocols in multiaddr")
+	}
+	final := protos[len(protos)-1]
+
+	p, err := cm.getMaddrParser(final.Name)
+	if err != nil {
+		return nil, err
+	}
+
+	return p(maddr)
+}
+
+// MultiaddrToIPNet converts a multiaddr to an IPNet. Useful for seeing if another IP address is contained within this multiaddr network+mask.
+func MultiaddrToIPNet(m ma.Multiaddr) (*net.IPNet, error) {
+	var ipString string
+	var mask string
+
+	var err error
+	ma.ForEach(m, func(c ma.Component, e error) bool {
+		if e != nil {
+			err = e
+			return false
+		}
+		if c.Protocol().Code == ma.P_IP4 || c.Protocol().Code == ma.P_IP6 {
+			ipString = c.Value()
+		}
+		if c.Protocol().Code == ma.P_IPCIDR {
+			mask = c.Value()
+		}
+		return ipString == "" || mask == ""
+	})
+
+	if err != nil {
+		return nil, err
+	}
+
+	if ipString == "" {
+		return nil, errors.New("no ip protocol found")
+	}
+
+	if mask == "" {
+		return nil, errors.New("no mask found")
+	}
+
+	_, ipnet, err := net.ParseCIDR(ipString + "/" + mask)
+	return ipnet, err
+}
+
+func parseBasicNetMaddr(maddr ma.Multiaddr) (net.Addr, error) {
+	network, host, err := DialArgs(maddr)
+	if err != nil {
+		return nil, err
+	}
+
+	switch network {
+	case "tcp", "tcp4", "tcp6":
+		return net.ResolveTCPAddr(network, host)
+	case "udp", "udp4", "udp6":
+		return net.ResolveUDPAddr(network, host)
+	case "ip", "ip4", "ip6":
+		return net.ResolveIPAddr(network, host)
+	case "unix":
+		return net.ResolveUnixAddr(network, host)
+	}
+
+	return nil, fmt.Errorf("network not supported: %s", network)
+}
+
+func FromIPAndZone(ip net.IP, zone string) (ma.Multiaddr, error) {
+	switch {
+	case ip.To4() != nil:
+		return ma.NewComponent("ip4", ip.String())
+	case ip.To16() != nil:
+		ip6, err := ma.NewComponent("ip6", ip.String())
+		if err != nil {
+			return nil, err
+		}
+		if zone == "" {
+			return ip6, nil
+		} else {
+			zone, err := ma.NewComponent("ip6zone", zone)
+			if err != nil {
+				return nil, err
+			}
+			return zone.Encapsulate(ip6), nil
+		}
+	default:
+		return nil, errIncorrectNetAddr
+	}
+}
+
+// FromIP converts a net.IP type to a Multiaddr.
+func FromIP(ip net.IP) (ma.Multiaddr, error) {
+	return FromIPAndZone(ip, "")
+}
+
+// ToIP converts a Multiaddr to a net.IP when possible
+func ToIP(addr ma.Multiaddr) (net.IP, error) {
+	var ip net.IP
+	ma.ForEach(addr, func(c ma.Component, e error) bool {
+		if e != nil {
+			return false
+		}
+		switch c.Protocol().Code {
+		case ma.P_IP6ZONE:
+			// we can't return these anyways.
+			return true
+		case ma.P_IP6, ma.P_IP4:
+			ip = net.IP(c.RawValue())
+			return false
+		}
+		return false
+	})
+	if ip == nil {
+		return nil, errNotIP
+	}
+	return ip, nil
+}
+
+// DialArgs is a convenience function that returns network and address as
+// expected by net.Dial. See https://godoc.org/net#Dial for an overview of
+// possible return values (we do not support the unixpacket ones yet). Unix
+// addresses do not, at present, compose.
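+//
+// A minimal usage sketch:
+//
+//	network, host, err := DialArgs(m)
+//	if err != nil {
+//		return err
+//	}
+//	conn, err := net.Dial(network, host) // e.g. net.Dial("tcp4", "127.0.0.1:1234")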
+func DialArgs(m ma.Multiaddr) (string, string, error) { + zone, network, ip, port, hostname, err := dialArgComponents(m) + if err != nil { + return "", "", err + } + + // If we have a hostname (dns*), we don't want any fancy ipv6 formatting + // logic (zone, brackets, etc.). + if hostname { + switch network { + case "ip", "ip4", "ip6": + return network, ip, nil + case "tcp", "tcp4", "tcp6", "udp", "udp4", "udp6": + return network, ip + ":" + port, nil + } + // Hostname is only true when network is one of the above. + return "", "", errors.New("unreachable") + } + + switch network { + case "ip6": + if zone != "" { + ip += "%" + zone + } + fallthrough + case "ip4": + return network, ip, nil + case "tcp4", "udp4": + return network, ip + ":" + port, nil + case "tcp6", "udp6": + if zone != "" { + ip += "%" + zone + } + return network, "[" + ip + "]" + ":" + port, nil + case "unix": + if runtime.GOOS == "windows" { + // convert /c:/... to c:\... + ip = filepath.FromSlash(strings.TrimLeft(ip, "/")) + } + return network, ip, nil + default: + return "", "", fmt.Errorf("%s is not a 'thin waist' address", m) + } +} + +// dialArgComponents extracts the raw pieces used in dialing a Multiaddr +func dialArgComponents(m ma.Multiaddr) (zone, network, ip, port string, hostname bool, err error) { + ma.ForEach(m, func(c ma.Component, e error) bool { + if e != nil { + err = e + return false + } + + switch network { + case "": + switch c.Protocol().Code { + case ma.P_IP6ZONE: + if zone != "" { + err = fmt.Errorf("%s has multiple zones", m) + return false + } + zone = c.Value() + return true + case ma.P_IP6: + network = "ip6" + ip = c.Value() + return true + case ma.P_IP4: + if zone != "" { + err = fmt.Errorf("%s has ip4 with zone", m) + return false + } + network = "ip4" + ip = c.Value() + return true + case ma.P_DNS: + network = "ip" + hostname = true + ip = c.Value() + return true + case ma.P_DNS4: + network = "ip4" + hostname = true + ip = c.Value() + return true + case ma.P_DNS6: + network = "ip6" + hostname = true + ip = c.Value() + return true + case ma.P_UNIX: + network = "unix" + ip = c.Value() + return false + } + case "ip": + switch c.Protocol().Code { + case ma.P_UDP: + network = "udp" + case ma.P_TCP: + network = "tcp" + default: + return false + } + port = c.Value() + case "ip4": + switch c.Protocol().Code { + case ma.P_UDP: + network = "udp4" + case ma.P_TCP: + network = "tcp4" + default: + return false + } + port = c.Value() + case "ip6": + switch c.Protocol().Code { + case ma.P_UDP: + network = "udp6" + case ma.P_TCP: + network = "tcp6" + default: + return false + } + port = c.Value() + } + // Done. 
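+		// Once a port (or unix path) has been parsed the dial target is
+		// complete; anything after it is irrelevant here, so stop walking.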
+ return false + }) + return +} + +func parseTCPNetAddr(a net.Addr) (ma.Multiaddr, error) { + ac, ok := a.(*net.TCPAddr) + if !ok { + return nil, errIncorrectNetAddr + } + + // Get IP Addr + ipm, err := FromIPAndZone(ac.IP, ac.Zone) + if err != nil { + return nil, errIncorrectNetAddr + } + + // Get TCP Addr + tcpm, err := ma.NewMultiaddr(fmt.Sprintf("/tcp/%d", ac.Port)) + if err != nil { + return nil, errIncorrectNetAddr + } + + // Encapsulate + return ipm.Encapsulate(tcpm), nil +} + +func parseUDPNetAddr(a net.Addr) (ma.Multiaddr, error) { + ac, ok := a.(*net.UDPAddr) + if !ok { + return nil, errIncorrectNetAddr + } + + // Get IP Addr + ipm, err := FromIPAndZone(ac.IP, ac.Zone) + if err != nil { + return nil, errIncorrectNetAddr + } + + // Get UDP Addr + udpm, err := ma.NewMultiaddr(fmt.Sprintf("/udp/%d", ac.Port)) + if err != nil { + return nil, errIncorrectNetAddr + } + + // Encapsulate + return ipm.Encapsulate(udpm), nil +} + +func parseIPNetAddr(a net.Addr) (ma.Multiaddr, error) { + ac, ok := a.(*net.IPAddr) + if !ok { + return nil, errIncorrectNetAddr + } + return FromIPAndZone(ac.IP, ac.Zone) +} + +func parseIPPlusNetAddr(a net.Addr) (ma.Multiaddr, error) { + ac, ok := a.(*net.IPNet) + if !ok { + return nil, errIncorrectNetAddr + } + return FromIP(ac.IP) +} + +func parseUnixNetAddr(a net.Addr) (ma.Multiaddr, error) { + ac, ok := a.(*net.UnixAddr) + if !ok { + return nil, errIncorrectNetAddr + } + + path := ac.Name + if runtime.GOOS == "windows" { + // Convert c:\foobar\... to c:/foobar/... + path = filepath.ToSlash(path) + } + if len(path) == 0 || path[0] != '/' { + // convert "" and "c:/..." to "/..." + path = "/" + path + } + + return ma.NewComponent("unix", path) +} diff --git a/go-multiaddr/net/convert_test.go b/go-multiaddr/net/convert_test.go new file mode 100644 index 0000000..b757787 --- /dev/null +++ b/go-multiaddr/net/convert_test.go @@ -0,0 +1,266 @@ +package manet + +import ( + "net" + "runtime" + "testing" + + ma "github.com/multiformats/go-multiaddr" +) + +type GenFunc func() (ma.Multiaddr, error) + +func testConvert(t *testing.T, s string, gen GenFunc) { + m, err := gen() + if err != nil { + t.Fatal("failed to generate.") + } + + if s2 := m.String(); err != nil || s2 != s { + t.Fatal("failed to convert: " + s + " != " + s2) + } +} + +func testToNetAddr(t *testing.T, maddr, ntwk, addr string) { + m, err := ma.NewMultiaddr(maddr) + if err != nil { + t.Fatal("failed to generate.") + } + + naddr, err := ToNetAddr(m) + if addr == "" { // should fail + if err == nil { + t.Fatalf("failed to error: %s", m) + } + return + } + + // shouldn't fail + if err != nil { + t.Fatalf("failed to convert to net addr: %s", m) + } + + if naddr.String() != addr { + t.Fatalf("naddr.Address() == %s != %s", naddr, addr) + } + + if naddr.Network() != ntwk { + t.Fatalf("naddr.Network() == %s != %s", naddr.Network(), ntwk) + } + + // should convert properly + switch ntwk { + case "tcp": + taddr := naddr.(*net.TCPAddr) + if ip, err := ToIP(m); err != nil || !taddr.IP.Equal(ip) { + t.Fatalf("ToIP() and ToNetAddr diverged: %s != %s", taddr, ip) + } + case "udp": + uaddr := naddr.(*net.UDPAddr) + if ip, err := ToIP(m); err != nil || !uaddr.IP.Equal(ip) { + t.Fatalf("ToIP() and ToNetAddr diverged: %s != %s", uaddr, ip) + } + case "ip": + ipaddr := naddr.(*net.IPAddr) + if ip, err := ToIP(m); err != nil || !ipaddr.IP.Equal(ip) { + t.Fatalf("ToIP() and ToNetAddr diverged: %s != %s", ipaddr, ip) + } + } +} + +func TestFromIP4(t *testing.T) { + testConvert(t, "/ip4/10.20.30.40", func() (ma.Multiaddr, 
error) { + return FromNetAddr(&net.IPAddr{IP: net.ParseIP("10.20.30.40")}) + }) +} + +func TestFromUnix(t *testing.T) { + path := "/C:/foo/bar" + if runtime.GOOS == "windows" { + path = `C:\foo\bar` + } + testConvert(t, "/unix/C:/foo/bar", func() (ma.Multiaddr, error) { + return FromNetAddr(&net.UnixAddr{Name: path, Net: "unix"}) + }) +} + +func TestToUnix(t *testing.T) { + path := "/C:/foo/bar" + if runtime.GOOS == "windows" { + path = `C:\foo\bar` + } + testToNetAddr(t, "/unix/C:/foo/bar", "unix", path) +} + +func TestFromIP6(t *testing.T) { + testConvert(t, "/ip6/2001:4860:0:2001::68", func() (ma.Multiaddr, error) { + return FromNetAddr(&net.IPAddr{IP: net.ParseIP("2001:4860:0:2001::68")}) + }) +} + +func TestFromTCP(t *testing.T) { + testConvert(t, "/ip4/10.20.30.40/tcp/1234", func() (ma.Multiaddr, error) { + return FromNetAddr(&net.TCPAddr{ + IP: net.ParseIP("10.20.30.40"), + Port: 1234, + }) + }) +} + +func TestFromUDP(t *testing.T) { + testConvert(t, "/ip4/10.20.30.40/udp/1234", func() (ma.Multiaddr, error) { + return FromNetAddr(&net.UDPAddr{ + IP: net.ParseIP("10.20.30.40"), + Port: 1234, + }) + }) +} + +func TestThinWaist(t *testing.T) { + addrs := map[string]bool{ + "/ip4/127.0.0.1/udp/1234": true, + "/ip4/127.0.0.1/tcp/1234": true, + "/ip4/127.0.0.1/udp/1234/tcp/1234": true, + "/ip4/127.0.0.1/tcp/12345/ip4/1.2.3.4": true, + "/ip6/::1/tcp/80": true, + "/ip6/::1/udp/80": true, + "/ip6/::1": true, + "/ip6zone/hello/ip6/fe80::1/tcp/80": true, + "/ip6zone/hello/ip6/fe80::1": true, + "/tcp/1234/ip4/1.2.3.4": false, + "/tcp/1234": false, + "/tcp/1234/udp/1234": false, + "/ip4/1.2.3.4/ip4/2.3.4.5": true, + "/ip6/fe80::1/ip4/2.3.4.5": true, + "/ip6zone/hello/ip6/fe80::1/ip4/2.3.4.5": true, + + // Invalid ip6zone usage: + "/ip6zone/hello": false, + "/ip6zone/hello/ip4/1.1.1.1": false, + } + + for a, res := range addrs { + m, err := ma.NewMultiaddr(a) + if err != nil { + t.Fatalf("failed to construct Multiaddr: %s", a) + } + + if IsThinWaist(m) != res { + t.Fatalf("IsThinWaist(%s) != %v", a, res) + } + } +} + +func TestDialArgs(t *testing.T) { + test := func(e_maddr, e_nw, e_host string) { + m, err := ma.NewMultiaddr(e_maddr) + if err != nil { + t.Fatal("failed to construct", e_maddr) + } + + nw, host, err := DialArgs(m) + if err != nil { + t.Fatal("failed to get dial args", e_maddr, m, err) + } + + if nw != e_nw { + t.Error("failed to get udp network Dial Arg", e_nw, nw) + } + + if host != e_host { + t.Error("failed to get host:port Dial Arg", e_host, host) + } + } + + test_error := func(e_maddr string) { + m, err := ma.NewMultiaddr(e_maddr) + if err != nil { + t.Fatal("failed to construct", e_maddr) + } + + _, _, err = DialArgs(m) + if err == nil { + t.Fatal("expected DialArgs to fail on", e_maddr) + } + } + + test("/ip4/127.0.0.1/udp/1234", "udp4", "127.0.0.1:1234") + test("/ip4/127.0.0.1/tcp/4321", "tcp4", "127.0.0.1:4321") + test("/ip6/::1/udp/1234", "udp6", "[::1]:1234") + test("/ip6/::1/tcp/4321", "tcp6", "[::1]:4321") + test("/ip6/::1", "ip6", "::1") // Just an IP + test("/ip4/1.2.3.4", "ip4", "1.2.3.4") // Just an IP + test("/ip6zone/foo/ip6/::1/tcp/4321", "tcp6", "[::1%foo]:4321") // zone + test("/ip6zone/foo/ip6/::1/udp/4321", "udp6", "[::1%foo]:4321") // zone + test("/ip6zone/foo/ip6/::1", "ip6", "::1%foo") // no TCP + test_error("/ip6zone/foo/ip4/127.0.0.1") // IP4 doesn't take zone + test("/ip6zone/foo/ip6/::1/ip6zone/bar", "ip6", "::1%foo") // IP over IP + test_error("/ip6zone/foo/ip6zone/bar/ip6/::1") // Only one zone per IP6 + test("/dns/abc.com/tcp/1234", "tcp", 
"abc.com:1234") // DNS4:port + test("/dns4/abc.com/tcp/1234", "tcp4", "abc.com:1234") // DNS4:port + test("/dns4/abc.com", "ip4", "abc.com") // Just DNS4 + test("/dns6/abc.com/udp/1234", "udp6", "abc.com:1234") // DNS6:port + test("/dns6/abc.com", "ip6", "abc.com") // Just DNS6 +} + +func TestMultiaddrToIPNet(t *testing.T) { + type testCase struct { + name string + ma string + ips []string + contained []bool + } + + testCases := []testCase{ + { + name: "basic", + ma: "/ip4/1.2.3.0/ipcidr/24", + ips: []string{"1.2.3.4", "1.2.3.9", "2.1.1.1"}, + contained: []bool{true, true, false}, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + ma, _ := ma.StringCast(tc.ma) + + ipnet, err := MultiaddrToIPNet(ma) + if err != nil { + t.Fatalf("failed to parse multiaddr %v into ipnet", ma) + } + for i, ipString := range tc.ips { + ip := net.ParseIP(ipString) + if ip == nil { + t.Fatalf("failed to parse IP %s", ipString) + } + if ipnet.Contains(ip) != tc.contained[i] { + t.Fatalf("Contains check failed. Expected %v got %v", tc.contained[i], ipnet.Contains(ip)) + } + } + }) + } +} + +func TestFailMultiaddrToIPNet(t *testing.T) { + type testCase struct { + name string + ma string + } + + testCases := []testCase{ + {name: "missing ip addr", ma: "/ipcidr/24"}, + {name: "wrong mask", ma: "/ip4/1.2.3.0/ipcidr/128"}, + {name: "wrong mask", ma: "/ip6/::/ipcidr/255"}, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + ma, _ := ma.StringCast(tc.ma) + + _, err := MultiaddrToIPNet(ma) + if err == nil { + t.Fatalf("Expected error when parsing: %s", tc.ma) + } + }) + } +} diff --git a/go-multiaddr/net/doc.go b/go-multiaddr/net/doc.go new file mode 100644 index 0000000..040ad3f --- /dev/null +++ b/go-multiaddr/net/doc.go @@ -0,0 +1,5 @@ +// Package manet provides Multiaddr specific versions of common +// functions in stdlib's net package. This means wrappers of +// standard net symbols like net.Dial and net.Listen, as well +// as conversion to/from net.Addr. +package manet diff --git a/go-multiaddr/net/ip.go b/go-multiaddr/net/ip.go new file mode 100644 index 0000000..cbf07bb --- /dev/null +++ b/go-multiaddr/net/ip.go @@ -0,0 +1,132 @@ +package manet + +import ( + "net" + + ma "github.com/multiformats/go-multiaddr" +) + +// Loopback Addresses +var ( + // IP4Loopback is the ip4 loopback multiaddr + IP4Loopback, _ = ma.StringCast("/ip4/127.0.0.1") + + // IP6Loopback is the ip6 loopback multiaddr + IP6Loopback, _ = ma.StringCast("/ip6/::1") + + // IP4MappedIP6Loopback is the IPv4 Mapped IPv6 loopback address. + IP4MappedIP6Loopback, _ = ma.StringCast("/ip6/::ffff:127.0.0.1") +) + +// Unspecified Addresses (used for ) +var ( + IP4Unspecified, _ = ma.StringCast("/ip4/0.0.0.0") + IP6Unspecified, _ = ma.StringCast("/ip6/::") +) + +// IsThinWaist returns whether a Multiaddr starts with "Thin Waist" Protocols. +// This means: /{IP4, IP6}[/{TCP, UDP}] +func IsThinWaist(m ma.Multiaddr) bool { + m = zoneless(m) + if m == nil { + return false + } + p := m.Protocols() + + // nothing? not even a waist. + if len(p) == 0 { + return false + } + + if p[0].Code != ma.P_IP4 && p[0].Code != ma.P_IP6 { + return false + } + + // only IP? still counts. 
+	if len(p) == 1 {
+		return true
+	}
+
+	switch p[1].Code {
+	case ma.P_TCP, ma.P_UDP, ma.P_IP4, ma.P_IP6:
+		return true
+	default:
+		return false
+	}
+}
+
+// IsIPLoopback returns whether a Multiaddr starts with a "Loopback" IP address.
+// This means either /ip4/127.*.*.*/*, /ip6/::1/*, /ip6/::ffff:127.*.*.*/*,
+// or /ip6zone/<zone>/ip6/<loopback>/*
+func IsIPLoopback(m ma.Multiaddr) bool {
+	m = zoneless(m)
+	if m == nil {
+		return false
+	}
+	c, _, _ := ma.SplitFirst(m)
+	if c == nil {
+		return false
+	}
+	switch c.Protocol().Code {
+	case ma.P_IP4, ma.P_IP6:
+		return net.IP(c.RawValue()).IsLoopback()
+	}
+	return false
+}
+
+// IsIP6LinkLocal returns whether a Multiaddr starts with an IPv6 link-local
+// multiaddress (with zero or one leading zone). These addresses are
+// non-routable.
+func IsIP6LinkLocal(m ma.Multiaddr) bool {
+	m = zoneless(m)
+	if m == nil {
+		return false
+	}
+	c, _, _ := ma.SplitFirst(m)
+	if c == nil || c.Protocol().Code != ma.P_IP6 {
+		return false
+	}
+	ip := net.IP(c.RawValue())
+	return ip.IsLinkLocalMulticast() || ip.IsLinkLocalUnicast()
+}
+
+// IsIPUnspecified returns whether a Multiaddr starts with an Unspecified IP address.
+// This means either /ip4/0.0.0.0/* or /ip6/::/*
+func IsIPUnspecified(m ma.Multiaddr) bool {
+	m = zoneless(m)
+	if m == nil {
+		return false
+	}
+	c, _, _ := ma.SplitFirst(m)
+	return net.IP(c.RawValue()).IsUnspecified()
+}
+
+// If m matches [zone,ip6,...], return [ip6,...]
+// else if m matches [], [zone], or [zone,...], return nil
+// else return m
+func zoneless(m ma.Multiaddr) ma.Multiaddr {
+	head, tail, _ := ma.SplitFirst(m)
+	if head == nil {
+		return nil
+	}
+	if head.Protocol().Code == ma.P_IP6ZONE {
+		if tail == nil {
+			return nil
+		}
+		tailhead, _, _ := ma.SplitFirst(tail)
+		if tailhead.Protocol().Code != ma.P_IP6 {
+			return nil
+		}
+		return tail
+	} else {
+		return m
+	}
+}
+
+// IsNAT64IPv4ConvertedIPv6Addr returns whether addr is a well-known prefix "64:ff9b::/96" addr
+// used for NAT64 Translation.
See RFC 6052 +func IsNAT64IPv4ConvertedIPv6Addr(addr ma.Multiaddr) bool { + c, _, _ := ma.SplitFirst(addr) + return c != nil && c.Protocol().Code == ma.P_IP6 && + inAddrRange(c.RawValue(), nat64) +} diff --git a/go-multiaddr/net/ip_test.go b/go-multiaddr/net/ip_test.go new file mode 100644 index 0000000..9084919 --- /dev/null +++ b/go-multiaddr/net/ip_test.go @@ -0,0 +1,60 @@ +package manet + +import ( + "fmt" + "testing" + + ma "github.com/multiformats/go-multiaddr" +) + +func TestIsWellKnownPrefixIPv4ConvertedIPv6Address(t *testing.T) { + addr1, _ := ma.StringCast("/ip4/1.2.3.4/tcp/1234") + addr2, _ := ma.StringCast("/ip6/1::4/tcp/1234") + addr3, _ := ma.StringCast("/ip6/::1/tcp/1234") + addr4, _ := ma.StringCast("/ip6/64:ff9b::192.0.1.2/tcp/1234") + addr5, _ := ma.StringCast("/ip6/64:ff9b::1:192.0.1.2/tcp/1234") + addr6, _ := ma.StringCast("/ip6/64:ff9b:1::1:192.0.1.2/tcp/1234") + cases := []struct { + addr ma.Multiaddr + want bool + failureReason string + }{ + { + addr: addr1, + want: false, + failureReason: "ip4 addresses should return false", + }, + { + addr: addr2, + want: false, + failureReason: "ip6 addresses doesn't have well-known prefix", + }, + { + addr: addr3, + want: false, + failureReason: "localhost addresses should return false", + }, + { + addr: addr4, + want: true, + failureReason: "ip6 address begins with well-known prefix", + }, + { + addr: addr5, + want: false, + failureReason: "64:ff9b::1 is not well-known prefix", + }, + { + addr: addr6, + want: true, + failureReason: "64:ff9b:1::1 is allowed for NAT64 translation", + }, + } + for i, tc := range cases { + t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { + if IsNAT64IPv4ConvertedIPv6Addr(tc.addr) != tc.want { + t.Fatalf("%s %s", tc.addr, tc.failureReason) + } + }) + } +} diff --git a/go-multiaddr/net/net.go b/go-multiaddr/net/net.go new file mode 100644 index 0000000..10fcff7 --- /dev/null +++ b/go-multiaddr/net/net.go @@ -0,0 +1,430 @@ +// Package manet provides Multiaddr +// (https://github.com/multiformats/go-multiaddr) specific versions of common +// functions in Go's standard `net` package. This means wrappers of standard +// net symbols like `net.Dial` and `net.Listen`, as well as conversion to +// and from `net.Addr`. +package manet + +import ( + "context" + "fmt" + "net" + + ma "github.com/multiformats/go-multiaddr" +) + +// Conn is the equivalent of a net.Conn object. It is the +// result of calling the Dial or Listen functions in this +// package, with associated local and remote Multiaddrs. +type Conn interface { + net.Conn + + // LocalMultiaddr returns the local Multiaddr associated + // with this connection + LocalMultiaddr() ma.Multiaddr + + // RemoteMultiaddr returns the remote Multiaddr associated + // with this connection + RemoteMultiaddr() ma.Multiaddr +} + +type halfOpen interface { + net.Conn + CloseRead() error + CloseWrite() error +} + +func wrap(nconn net.Conn, laddr, raddr ma.Multiaddr) Conn { + endpts := maEndpoints{ + laddr: laddr, + raddr: raddr, + } + // This sucks. However, it's the only way to reliably expose the + // underlying methods. This way, users that need access to, e.g., + // CloseRead and CloseWrite, can do so via type assertions. 
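+	//
+	// For example, a caller holding a wrapped Conn can recover them with an
+	// anonymous interface assertion (illustrative sketch):
+	//
+	//	if hc, ok := conn.(interface{ CloseWrite() error }); ok {
+	//		_ = hc.CloseWrite() // close just the write side
+	//	}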
+	switch nconn := nconn.(type) {
+	case *net.TCPConn:
+		return &struct {
+			*net.TCPConn
+			maEndpoints
+		}{nconn, endpts}
+	case *net.UDPConn:
+		return &struct {
+			*net.UDPConn
+			maEndpoints
+		}{nconn, endpts}
+	case *net.IPConn:
+		return &struct {
+			*net.IPConn
+			maEndpoints
+		}{nconn, endpts}
+	case *net.UnixConn:
+		return &struct {
+			*net.UnixConn
+			maEndpoints
+		}{nconn, endpts}
+	case halfOpen:
+		return &struct {
+			halfOpen
+			maEndpoints
+		}{nconn, endpts}
+	default:
+		return &struct {
+			net.Conn
+			maEndpoints
+		}{nconn, endpts}
+	}
+}
+
+// WrapNetConn wraps a net.Conn object with a Multiaddr friendly Conn.
+//
+// This function does its best to avoid "hiding" methods exposed by the wrapped
+// type. Guarantees:
+//
+// - If the wrapped connection exposes the "half-open" closer methods
+// (CloseWrite, CloseRead), these will be available on the wrapped connection
+// via type assertions.
+// - If the wrapped connection is a UnixConn, IPConn, TCPConn, or UDPConn, all
+// methods on these wrapped connections will be available via type assertions.
+func WrapNetConn(nconn net.Conn) (Conn, error) {
+	if nconn == nil {
+		return nil, fmt.Errorf("failed to convert nconn.LocalAddr: nil")
+	}
+
+	laddr, err := FromNetAddr(nconn.LocalAddr())
+	if err != nil {
+		return nil, fmt.Errorf("failed to convert nconn.LocalAddr: %s", err)
+	}
+
+	raddr, err := FromNetAddr(nconn.RemoteAddr())
+	if err != nil {
+		return nil, fmt.Errorf("failed to convert nconn.RemoteAddr: %s", err)
+	}
+
+	return wrap(nconn, laddr, raddr), nil
+}
+
+type maEndpoints struct {
+	laddr ma.Multiaddr
+	raddr ma.Multiaddr
+}
+
+// LocalMultiaddr returns the local address associated with
+// this connection
+func (c *maEndpoints) LocalMultiaddr() ma.Multiaddr {
+	return c.laddr
+}
+
+// RemoteMultiaddr returns the remote address associated with
+// this connection
+func (c *maEndpoints) RemoteMultiaddr() ma.Multiaddr {
+	return c.raddr
+}
+
+// Dialer contains options for connecting to an address. It
+// is effectively the same as net.Dialer, but its LocalAddr
+// and RemoteAddr options are Multiaddrs, instead of net.Addrs.
+type Dialer struct {
+
+	// Dialer is just an embedded net.Dialer, with all its options.
+	net.Dialer
+
+	// LocalAddr is the local address to use when dialing an
+	// address. The address must be of a compatible type for the
+	// network being dialed.
+	// If nil, a local address is automatically chosen.
+	LocalAddr ma.Multiaddr
+}
+
+// Dial connects to a remote address, using the options of the
+// Dialer. Dialer uses an underlying net.Dialer to Dial a
+// net.Conn, then wraps that in a Conn object (with local and
+// remote Multiaddrs).
+func (d *Dialer) Dial(remote ma.Multiaddr) (Conn, error) {
+	return d.DialContext(context.Background(), remote)
+}
+
+// DialContext allows providing a custom context to Dial().
+func (d *Dialer) DialContext(ctx context.Context, remote ma.Multiaddr) (Conn, error) {
+	// if a LocalAddr is specified, use it on the embedded dialer.
+	if d.LocalAddr != nil {
+		// convert our multiaddr to net.Addr friendly
+		naddr, err := ToNetAddr(d.LocalAddr)
+		if err != nil {
+			return nil, err
+		}
+
+		// set the dialer's LocalAddr as naddr
+		d.Dialer.LocalAddr = naddr
+	}
+
+	// get the net.Dial friendly arguments from the remote addr
+	rnet, rnaddr, err := DialArgs(remote)
+	if err != nil {
+		return nil, err
+	}
+
+	// ok, Dial!
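+	// (for example, /ip4/127.0.0.1/tcp/80 yields rnet "tcp4" and rnaddr "127.0.0.1:80")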
+	var nconn net.Conn
+	switch rnet {
+	case "tcp", "tcp4", "tcp6", "udp", "udp4", "udp6", "unix":
+		nconn, err = d.Dialer.DialContext(ctx, rnet, rnaddr)
+		if err != nil {
+			return nil, err
+		}
+	default:
+		return nil, fmt.Errorf("unrecognized network: %s", rnet)
+	}
+
+	// get local address (pre-specified or assigned within net.Conn)
+	local := d.LocalAddr
+	// This block helps us avoid parsing addresses in transports (such as unix
+	// sockets) that don't have local addresses when dialing out.
+	if local == nil && nconn.LocalAddr().String() != "" {
+		local, err = FromNetAddr(nconn.LocalAddr())
+		if err != nil {
+			return nil, err
+		}
+	}
+	return wrap(nconn, local, remote), nil
+}
+
+// Dial connects to a remote address. It uses an underlying net.Conn,
+// then wraps it in a Conn object (with local and remote Multiaddrs).
+func Dial(remote ma.Multiaddr) (Conn, error) {
+	return (&Dialer{}).Dial(remote)
+}
+
+// A Listener is a generic network listener for stream-oriented protocols.
+// It uses an embedded net.Listener, overriding net.Listener.Accept to
+// return a Conn and providing the local Multiaddr.
+type Listener interface {
+	// Accept waits for and returns the next connection to the listener.
+	// Returns a Multiaddr friendly Conn
+	Accept() (Conn, error)
+
+	// Close closes the listener.
+	// Any blocked Accept operations will be unblocked and return errors.
+	Close() error
+
+	// Multiaddr returns the listener's (local) Multiaddr.
+	Multiaddr() ma.Multiaddr
+
+	// Addr returns the net.Listener's network address.
+	Addr() net.Addr
+}
+
+type netListenerAdapter struct {
+	Listener
+}
+
+func (nla *netListenerAdapter) Accept() (net.Conn, error) {
+	return nla.Listener.Accept()
+}
+
+// NetListener turns this Listener into a net.Listener.
+//
+// - Connections returned from Accept implement multiaddr/net Conn.
+// - Calling WrapNetListener on the net.Listener returned by this function will
+// return the original (underlying) multiaddr/net Listener.
+func NetListener(l Listener) net.Listener {
+	return &netListenerAdapter{l}
+}
+
+// maListener implements Listener
+type maListener struct {
+	net.Listener
+	laddr ma.Multiaddr
+}
+
+// Accept waits for and returns the next connection to the listener.
+// Returns a Multiaddr friendly Conn
+func (l *maListener) Accept() (Conn, error) {
+	nconn, err := l.Listener.Accept()
+	if err != nil {
+		return nil, err
+	}
+
+	var raddr ma.Multiaddr
+	// This block protects us in transports (i.e. unix sockets) that don't have
+	// remote addresses for inbound connections.
+	if addr := nconn.RemoteAddr(); addr != nil && addr.String() != "" {
+		raddr, err = FromNetAddr(addr)
+		if err != nil {
+			return nil, fmt.Errorf("failed to convert conn.RemoteAddr: %s", err)
+		}
+	}
+
+	var laddr ma.Multiaddr
+	if addr := nconn.LocalAddr(); addr != nil && addr.String() != "" {
+		laddr, err = FromNetAddr(addr)
+		if err != nil {
+			return nil, fmt.Errorf("failed to convert conn.LocalAddr: %s", err)
+		}
+	}
+
+	return wrap(nconn, laddr, raddr), nil
+}
+
+// Multiaddr returns the listener's (local) Multiaddr.
+func (l *maListener) Multiaddr() ma.Multiaddr {
+	return l.laddr
+}
+
+// Addr returns the listener's network address.
+func (l *maListener) Addr() net.Addr {
+	return l.Listener.Addr()
+}
+
+// Listen announces on the local network address laddr.
+// The Multiaddr must be a "ThinWaist" stream-oriented network:
+// ip4/tcp, ip6/tcp, or unix (TODO: unixpacket).
+// See Dial for the syntax of laddr.
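+//
+// A minimal usage sketch (illustrative; error handling elided, tcp port 0
+// picks a free port):
+//
+//	maddr, _ := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/0")
+//	l, _ := Listen(maddr)
+//	conn, _ := l.Accept()
+//	_ = conn.RemoteMultiaddr()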
+func Listen(laddr ma.Multiaddr) (Listener, error) { + + // get the net.Listen friendly arguments from the remote addr + lnet, lnaddr, err := DialArgs(laddr) + if err != nil { + return nil, err + } + + nl, err := net.Listen(lnet, lnaddr) + if err != nil { + return nil, err + } + + // we want to fetch the new multiaddr from the listener, as it may + // have resolved to some other value. WrapNetListener does it for us. + return WrapNetListener(nl) +} + +// WrapNetListener wraps a net.Listener with a manet.Listener. +func WrapNetListener(nl net.Listener) (Listener, error) { + if nla, ok := nl.(*netListenerAdapter); ok { + return nla.Listener, nil + } + + laddr, err := FromNetAddr(nl.Addr()) + if err != nil { + return nil, err + } + + return &maListener{ + Listener: nl, + laddr: laddr, + }, nil +} + +// A PacketConn is a generic packet oriented network connection which uses an +// underlying net.PacketConn, wrapped with the locally bound Multiaddr. +type PacketConn interface { + net.PacketConn + + LocalMultiaddr() ma.Multiaddr + + ReadFromMultiaddr(b []byte) (int, ma.Multiaddr, error) + WriteToMultiaddr(b []byte, maddr ma.Multiaddr) (int, error) +} + +// maPacketConn implements PacketConn +type maPacketConn struct { + net.PacketConn + laddr ma.Multiaddr +} + +var _ PacketConn = (*maPacketConn)(nil) + +// LocalMultiaddr returns the bound local Multiaddr. +func (l *maPacketConn) LocalMultiaddr() ma.Multiaddr { + return l.laddr +} + +func (l *maPacketConn) ReadFromMultiaddr(b []byte) (int, ma.Multiaddr, error) { + n, addr, err := l.ReadFrom(b) + maddr, _ := FromNetAddr(addr) + return n, maddr, err +} + +func (l *maPacketConn) WriteToMultiaddr(b []byte, maddr ma.Multiaddr) (int, error) { + addr, err := ToNetAddr(maddr) + if err != nil { + return 0, err + } + return l.WriteTo(b, addr) +} + +// ListenPacket announces on the local network address laddr. +// The Multiaddr must be a packet driven network, like udp4 or udp6. +// See Dial for the syntax of laddr. +func ListenPacket(laddr ma.Multiaddr) (PacketConn, error) { + lnet, lnaddr, err := DialArgs(laddr) + if err != nil { + return nil, err + } + + pc, err := net.ListenPacket(lnet, lnaddr) + if err != nil { + return nil, err + } + + // We want to fetch the new multiaddr from the listener, as it may + // have resolved to some other value. WrapPacketConn does this. + return WrapPacketConn(pc) +} + +// WrapPacketConn wraps a net.PacketConn with a manet.PacketConn. +func WrapPacketConn(pc net.PacketConn) (PacketConn, error) { + laddr, err := FromNetAddr(pc.LocalAddr()) + if err != nil { + return nil, err + } + + return &maPacketConn{ + PacketConn: pc, + laddr: laddr, + }, nil +} + +// InterfaceMultiaddrs will return the addresses matching net.InterfaceAddrs +func InterfaceMultiaddrs() ([]ma.Multiaddr, error) { + addrs, err := net.InterfaceAddrs() + if err != nil { + return nil, err + } + + maddrs := make([]ma.Multiaddr, len(addrs)) + for i, a := range addrs { + maddrs[i], err = FromNetAddr(a) + if err != nil { + return nil, err + } + } + return maddrs, nil +} + +// AddrMatch returns the Multiaddrs that match the protocol stack on addr +func AddrMatch(match ma.Multiaddr, addrs []ma.Multiaddr) []ma.Multiaddr { + + // we should match transports entirely. 
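+	// (for example, an /ip4/.../tcp/... template only matches addrs whose
+	// protocol stack is exactly ip4/tcp, in that order)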
+ p1s := match.Protocols() + + out := make([]ma.Multiaddr, 0, len(addrs)) + for _, a := range addrs { + p2s := a.Protocols() + if len(p1s) != len(p2s) { + continue + } + + match := true + for i, p2 := range p2s { + if p1s[i].Code != p2.Code { + match = false + break + } + } + if match { + out = append(out, a) + } + } + return out +} diff --git a/go-multiaddr/net/net_test.go b/go-multiaddr/net/net_test.go new file mode 100644 index 0000000..6d72e65 --- /dev/null +++ b/go-multiaddr/net/net_test.go @@ -0,0 +1,693 @@ +package manet + +import ( + "bytes" + "fmt" + "net" + "os" + "path/filepath" + "sync" + "testing" + "time" + + ma "github.com/multiformats/go-multiaddr" + "github.com/stretchr/testify/require" +) + +func newMultiaddr(t *testing.T, m string) ma.Multiaddr { + maddr, err := ma.NewMultiaddr(m) + if err != nil { + t.Fatal("failed to construct multiaddr:", m, err) + } + return maddr +} + +func TestDial(t *testing.T) { + + listener, err := net.Listen("tcp", "127.0.0.1:4321") + if err != nil { + t.Fatal("failed to listen") + } + + var wg sync.WaitGroup + wg.Add(1) + go func() { + cB, err := listener.Accept() + if err != nil { + t.Error("failed to accept") + } + + // echo out + buf := make([]byte, 1024) + for { + _, err := cB.Read(buf) + if err != nil { + break + } + cB.Write(buf) + } + + wg.Done() + }() + + maddr := newMultiaddr(t, "/ip4/127.0.0.1/tcp/4321") + cA, err := Dial(maddr) + if err != nil { + t.Fatal("failed to dial") + } + + buf := make([]byte, 1024) + if _, err := cA.Write([]byte("beep boop")); err != nil { + t.Fatal("failed to write:", err) + } + + if _, err := cA.Read(buf); err != nil { + t.Fatal("failed to read:", buf, err) + } + + if !bytes.Equal(buf[:9], []byte("beep boop")) { + t.Fatal("failed to echo:", buf) + } + + maddr2 := cA.RemoteMultiaddr() + if !maddr2.Equal(maddr) { + t.Fatal("remote multiaddr not equal:", maddr, maddr2) + } + + cA.Close() + wg.Wait() +} + +func TestUnixSockets(t *testing.T) { + dir, err := os.MkdirTemp(os.TempDir(), "manettest") + if err != nil { + t.Fatal(err) + } + path := filepath.Join(dir, "listen.sock") + maddr := newMultiaddr(t, "/unix/"+path) + + listener, err := Listen(maddr) + if err != nil { + t.Fatal(err) + } + + payload := []byte("hello") + + // listen + done := make(chan struct{}, 1) + go func() { + conn, err := listener.Accept() + if err != nil { + t.Error(err) + } + defer conn.Close() + buf := make([]byte, 1024) + n, err := conn.Read(buf) + if err != nil { + t.Error(err) + } + if n != len(payload) { + t.Error("failed to read appropriate number of bytes") + } + if !bytes.Equal(buf[0:n], payload) { + t.Error("payload did not match") + } + done <- struct{}{} + }() + + // dial + conn, err := Dial(maddr) + if err != nil { + t.Fatal(err) + } + n, err := conn.Write(payload) + if err != nil { + t.Fatal(err) + } + if n != len(payload) { + t.Fatal("failed to write appropriate number of bytes") + } + select { + case <-done: + case <-time.After(1 * time.Second): + t.Fatal("timed out waiting for read") + } +} + +func TestListen(t *testing.T) { + + maddr := newMultiaddr(t, "/ip4/127.0.0.1/tcp/4322") + listener, err := Listen(maddr) + if err != nil { + t.Fatal("failed to listen") + } + + var wg sync.WaitGroup + wg.Add(1) + go func() { + cB, err := listener.Accept() + if err != nil { + t.Error("failed to accept") + } + + if !cB.LocalMultiaddr().Equal(maddr) { + t.Error("local multiaddr not equal:", maddr, cB.LocalMultiaddr()) + } + + // echo out + buf := make([]byte, 1024) + for { + _, err := cB.Read(buf) + if err != nil { + break + } + 
cB.Write(buf)
+		}
+
+		wg.Done()
+	}()
+
+	cA, err := net.Dial("tcp", "127.0.0.1:4322")
+	if err != nil {
+		t.Fatal("failed to dial")
+	}
+
+	buf := make([]byte, 1024)
+	if _, err := cA.Write([]byte("beep boop")); err != nil {
+		t.Fatal("failed to write:", err)
+	}
+
+	if _, err := cA.Read(buf); err != nil {
+		t.Fatal("failed to read:", buf, err)
+	}
+
+	if !bytes.Equal(buf[:9], []byte("beep boop")) {
+		t.Fatal("failed to echo:", buf)
+	}
+
+	maddr2, err := FromNetAddr(cA.RemoteAddr())
+	if err != nil {
+		t.Fatal("failed to convert", err)
+	}
+	if !maddr2.Equal(maddr) {
+		t.Fatal("remote multiaddr not equal:", maddr, maddr2)
+	}
+
+	cA.Close()
+	wg.Wait()
+}
+
+func TestListenAddrs(t *testing.T) {
+
+	test := func(addr, resaddr string, succeed bool) {
+		if resaddr == "" {
+			resaddr = addr
+		}
+
+		maddr := newMultiaddr(t, addr)
+		l, err := Listen(maddr)
+		if !succeed {
+			if err == nil {
+				t.Fatal("succeeded in listening", addr)
+			}
+			return
+		}
+		if err != nil {
+			t.Fatal("failed to listen", addr, err)
+		}
+		if l == nil {
+			// stop here rather than dereference a nil listener below
+			t.Fatal("failed to listen", addr, succeed, err)
+		}
+		if l.Multiaddr().String() != resaddr {
+			t.Error("listen addr did not resolve properly", l.Multiaddr().String(), resaddr, succeed, err)
+		}
+
+		if err = l.Close(); err != nil {
+			t.Fatal("failed to close listener", addr, err)
+		}
+	}
+
+	test("/ip4/127.0.0.1/tcp/4324", "", true)
+	test("/ip4/127.0.0.1/udp/4325", "", false)
+	test("/ip4/127.0.0.1/udp/4326/udt", "", false)
+
+	if len(os.Getenv("CI")) > 0 {
+		test("/ip4/0.0.0.0/tcp/4324", "", true)
+		test("/ip4/0.0.0.0/udp/4325", "", false)
+		test("/ip4/0.0.0.0/udp/4326/udt", "", false)
+
+		test("/ip6/::1/tcp/4324", "", true)
+		test("/ip6/::1/udp/4325", "", false)
+		test("/ip6/::1/udp/4326/udt", "", false)
+		test("/ip6/::/tcp/4324", "", true)
+		test("/ip6/::/udp/4325", "", false)
+		test("/ip6/::/udp/4326/udt", "", false)
+
+		/* "An implementation should also support the concept of a "default"
+		 * zone for each scope. And, when supported, the index value zero
+		 * at each scope SHOULD be reserved to mean "use the default zone"."
+		 * -- rfc4007. So, this _should_ work everywhere(?).
+		 */
+		test("/ip6zone/0/ip6/::1/tcp/4324", "/ip6/::1/tcp/4324", true)
+		test("/ip6zone/0/ip6/::1/udp/4324", "", false)
+	} else {
+		t.Skip("all tests only run on CI")
+	}
+}
+
+func TestListenAndDial(t *testing.T) {
+
+	maddr := newMultiaddr(t, "/ip4/127.0.0.1/tcp/4323")
+	listener, err := Listen(maddr)
+	if err != nil {
+		t.Fatal("failed to listen")
+	}
+
+	var wg sync.WaitGroup
+	wg.Add(1)
+	go func() {
+		cB, err := listener.Accept()
+		if err != nil {
+			t.Error("failed to accept")
+		}
+
+		if !cB.LocalMultiaddr().Equal(maddr) {
+			t.Error("local multiaddr not equal:", maddr, cB.LocalMultiaddr())
+		}
+
+		// echo out
+		buf := make([]byte, 1024)
+		for {
+			_, err := cB.Read(buf)
+			if err != nil {
+				break
+			}
+			cB.Write(buf)
+		}
+
+		wg.Done()
+	}()
+
+	cA, err := Dial(newMultiaddr(t, "/ip4/127.0.0.1/tcp/4323"))
+	if err != nil {
+		t.Fatal("failed to dial")
+	}
+
+	buf := make([]byte, 1024)
+	if _, err := cA.Write([]byte("beep boop")); err != nil {
+		t.Fatal("failed to write:", err)
+	}
+
+	if _, err := cA.Read(buf); err != nil {
+		t.Fatal("failed to read:", buf, err)
+	}
+
+	if !bytes.Equal(buf[:9], []byte("beep boop")) {
+		t.Fatal("failed to echo:", buf)
+	}
+
+	maddr2 := cA.RemoteMultiaddr()
+	if !maddr2.Equal(maddr) {
+		t.Fatal("remote multiaddr not equal:", maddr, maddr2)
+	}
+
+	cA.Close()
+	wg.Wait()
+}
+
+func TestListenPacketAndDial(t *testing.T) {
+	maddr := newMultiaddr(t, "/ip4/127.0.0.1/udp/4324")
+	pc, err := ListenPacket(maddr)
+	if err != nil {
+		t.Fatal("failed to listen", err)
+	}
+
+	var wg sync.WaitGroup
+	wg.Add(1)
+
+	go func() {
+		if !pc.LocalMultiaddr().Equal(maddr) {
+			t.Error("connection multiaddr not equal:", maddr, pc.LocalMultiaddr())
+		}
+
+		buffer := make([]byte, 1024)
+		_, addr, err := pc.ReadFrom(buffer)
+		if err != nil {
+			t.Error("failed to read into buffer", err)
+		}
+		pc.WriteTo(buffer, addr)
+
+		wg.Done()
+	}()
+
+	cn, err := Dial(maddr)
+	if err != nil {
+		t.Fatal("failed to dial", err)
+	}
+
+	buf := make([]byte, 1024)
+	if _, err := cn.Write([]byte("beep boop")); err != nil {
+		t.Fatal("failed to write", err)
+	}
+
+	if _, err := cn.Read(buf); err != nil {
+		t.Fatal("failed to read:", buf, err)
+	}
+
+	if !bytes.Equal(buf[:9], []byte("beep boop")) {
+		t.Fatal("failed to echo:", buf)
+	}
+
+	maddr2 := cn.RemoteMultiaddr()
+	if !maddr2.Equal(maddr) {
+		t.Fatal("remote multiaddr not equal:", maddr, maddr2)
+	}
+
+	cn.Close()
+	pc.Close()
+	wg.Wait()
+}
+
+func TestIPLoopback(t *testing.T) {
+	if IP4Loopback.String() != "/ip4/127.0.0.1" {
+		t.Error("IP4Loopback incorrect:", IP4Loopback)
+	}
+
+	if IP6Loopback.String() != "/ip6/::1" {
+		t.Error("IP6Loopback incorrect:", IP6Loopback)
+	}
+
+	if IP4MappedIP6Loopback.String() != "/ip6/::ffff:127.0.0.1" {
+		t.Error("IP4MappedIP6Loopback incorrect:", IP4MappedIP6Loopback)
+	}
+
+	if !IsIPLoopback(IP4Loopback) {
+		t.Error("IsIPLoopback failed (IP4Loopback)")
+	}
+
+	if !IsIPLoopback(newMultiaddr(t, "/ip4/127.1.80.9")) {
+		t.Error("IsIPLoopback failed (/ip4/127.1.80.9)")
+	}
+
+	if IsIPLoopback(newMultiaddr(t, "/ip4/112.123.11.1")) {
+		t.Error("IsIPLoopback false positive (/ip4/112.123.11.1)")
+	}
+
+	if IsIPLoopback(newMultiaddr(t, "/ip4/192.168.0.1/ip6/::1")) {
+		t.Error("IsIPLoopback false positive (/ip4/192.168.0.1/ip6/::1)")
+	}
+
+	if !IsIPLoopback(IP6Loopback) {
+		t.Error("IsIPLoopback failed (IP6Loopback)")
+	}
+
+	if !IsIPLoopback(newMultiaddr(t, "/ip6/127.0.0.1")) {
+		t.Error("IsIPLoopback failed (/ip6/127.0.0.1)")
+	}
+
+	if !IsIPLoopback(newMultiaddr(t, "/ip6/127.99.3.2")) {
+		t.Error("IsIPLoopback failed 
(/ip6/127.99.3.2)") + } + + if IsIPLoopback(newMultiaddr(t, "/ip6/::fffa:127.99.3.2")) { + t.Error("IsIPLoopback false positive (/ip6/::fffa:127.99.3.2)") + } + + if !IsIPLoopback(newMultiaddr(t, "/ip6zone/0/ip6/::1")) { + t.Error("IsIPLoopback failed (/ip6zone/0/ip6/::1)") + } + + if !IsIPLoopback(newMultiaddr(t, "/ip6zone/xxx/ip6/::1")) { + t.Error("IsIPLoopback failed (/ip6zone/xxx/ip6/::1)") + } + + if IsIPLoopback(newMultiaddr(t, "/ip6zone/0/ip6/1::1")) { + t.Errorf("IsIPLoopback false positive (/ip6zone/0/ip6/1::1)") + } +} + +func TestIPUnspecified(t *testing.T) { + if IP4Unspecified.String() != "/ip4/0.0.0.0" { + t.Error("IP4Unspecified incorrect:", IP4Unspecified) + } + + if IP6Unspecified.String() != "/ip6/::" { + t.Error("IP6Unspecified incorrect:", IP6Unspecified) + } + + if !IsIPUnspecified(IP4Unspecified) { + t.Error("IsIPUnspecified failed (IP4Unspecified)") + } + + if !IsIPUnspecified(IP6Unspecified) { + t.Error("IsIPUnspecified failed (IP6Unspecified)") + } + + if !IsIPUnspecified(newMultiaddr(t, "/ip6zone/xxx/ip6/::")) { + t.Error("IsIPUnspecified failed (/ip6zone/xxx/ip6/::)") + } +} + +func TestIP6LinkLocal(t *testing.T) { + for a := 0; a < 65536; a++ { + isLinkLocal := a&0xffc0 == 0xfe80 || a&0xff0f == 0xff02 + m := newMultiaddr(t, fmt.Sprintf("/ip6/%x::1", a)) + if IsIP6LinkLocal(m) != isLinkLocal { + t.Errorf("IsIP6LinkLocal failed (%s != %v)", m, isLinkLocal) + } + } + + if !IsIP6LinkLocal(newMultiaddr(t, "/ip6zone/hello/ip6/fe80::9999")) { + t.Error("IsIP6LinkLocal failed (/ip6/fe80::9999)") + } + + bad := []ma.Multiaddr{ + newMultiaddr(t, "/ip6/fe80::1/tcp/1234"), // link local + newMultiaddr(t, "/ip6/fe80::100/tcp/1234"), // link local + } + good := []ma.Multiaddr{ + newMultiaddr(t, "/ip4/127.0.0.1/tcp/1234"), + newMultiaddr(t, "/ip6/::1/tcp/1234"), + newMultiaddr(t, "/ip4/1.2.3.4/udp/1234/utp"), + } + for _, addr := range bad { + require.True(t, IsIP6LinkLocal(addr), "%s is a link local addr", addr) + } + for _, addr := range good { + require.False(t, IsIP6LinkLocal(addr), "%s is not a link local addr", addr) + } +} + +func TestConvertNetAddr(t *testing.T) { + m1 := newMultiaddr(t, "/ip4/1.2.3.4/tcp/4001") + + n1, err := ToNetAddr(m1) + if err != nil { + t.Fatal(err) + } + + m2, err := FromNetAddr(n1) + if err != nil { + t.Fatal(err) + } + + if m1.String() != m2.String() { + t.Fatal("ToNetAddr + FromNetAddr did not work") + } +} + +func TestWrapNetConn(t *testing.T) { + // test WrapNetConn nil + if _, err := WrapNetConn(nil); err == nil { + t.Error("WrapNetConn(nil) should return an error") + } + + checkErr := func(err error, s string) { + if err != nil { + t.Fatal(s, err) + } + } + + listener, err := net.Listen("tcp", "127.0.0.1:0") + checkErr(err, "failed to listen") + + var wg sync.WaitGroup + defer wg.Wait() + wg.Add(1) + go func() { + defer wg.Done() + cB, err := listener.Accept() + checkErr(err, "failed to accept") + _ = cB.(halfOpen) + cB.Close() + }() + + cA, err := net.Dial("tcp", listener.Addr().String()) + checkErr(err, "failed to dial") + defer cA.Close() + _ = cA.(halfOpen) + + lmaddr, err := FromNetAddr(cA.LocalAddr()) + checkErr(err, "failed to get local addr") + rmaddr, err := FromNetAddr(cA.RemoteAddr()) + checkErr(err, "failed to get remote addr") + + mcA, err := WrapNetConn(cA) + checkErr(err, "failed to wrap conn") + + _ = mcA.(halfOpen) + + if mcA.LocalAddr().String() != cA.LocalAddr().String() { + t.Error("wrapped conn local addr differs") + } + if mcA.RemoteAddr().String() != cA.RemoteAddr().String() { + t.Error("wrapped conn remote addr 
differs") + } + if mcA.LocalMultiaddr().String() != lmaddr.String() { + t.Error("wrapped conn local maddr differs") + } + if mcA.RemoteMultiaddr().String() != rmaddr.String() { + t.Error("wrapped conn remote maddr differs") + } +} + +func TestAddrMatch(t *testing.T) { + + test := func(m ma.Multiaddr, input, expect []ma.Multiaddr) { + actual := AddrMatch(m, input) + testSliceEqual(t, expect, actual) + } + + a := []ma.Multiaddr{ + newMultiaddr(t, "/ip4/1.2.3.4/tcp/1234"), + newMultiaddr(t, "/ip4/1.2.3.4/tcp/2345"), + newMultiaddr(t, "/ip4/1.2.3.4/tcp/1234/tcp/2345"), + newMultiaddr(t, "/ip4/1.2.3.4/tcp/1234/tcp/2345"), + newMultiaddr(t, "/ip4/1.2.3.4/tcp/1234/udp/1234"), + newMultiaddr(t, "/ip4/1.2.3.4/tcp/1234/udp/1234"), + newMultiaddr(t, "/ip4/1.2.3.4/tcp/1234/ip6/::1"), + newMultiaddr(t, "/ip4/1.2.3.4/tcp/1234/ip6/::1"), + newMultiaddr(t, "/ip6/::1/tcp/1234"), + newMultiaddr(t, "/ip6/::1/tcp/2345"), + newMultiaddr(t, "/ip6/::1/tcp/1234/tcp/2345"), + newMultiaddr(t, "/ip6/::1/tcp/1234/tcp/2345"), + newMultiaddr(t, "/ip6/::1/tcp/1234/udp/1234"), + newMultiaddr(t, "/ip6/::1/tcp/1234/udp/1234"), + newMultiaddr(t, "/ip6/::1/tcp/1234/ip6/::1"), + newMultiaddr(t, "/ip6/::1/tcp/1234/ip6/::1"), + } + + test(a[0], a, []ma.Multiaddr{ + newMultiaddr(t, "/ip4/1.2.3.4/tcp/1234"), + newMultiaddr(t, "/ip4/1.2.3.4/tcp/2345"), + }) + test(a[2], a, []ma.Multiaddr{ + newMultiaddr(t, "/ip4/1.2.3.4/tcp/1234/tcp/2345"), + newMultiaddr(t, "/ip4/1.2.3.4/tcp/1234/tcp/2345"), + }) + test(a[4], a, []ma.Multiaddr{ + newMultiaddr(t, "/ip4/1.2.3.4/tcp/1234/udp/1234"), + newMultiaddr(t, "/ip4/1.2.3.4/tcp/1234/udp/1234"), + }) + test(a[6], a, []ma.Multiaddr{ + newMultiaddr(t, "/ip4/1.2.3.4/tcp/1234/ip6/::1"), + newMultiaddr(t, "/ip4/1.2.3.4/tcp/1234/ip6/::1"), + }) + test(a[8], a, []ma.Multiaddr{ + newMultiaddr(t, "/ip6/::1/tcp/1234"), + newMultiaddr(t, "/ip6/::1/tcp/2345"), + }) + test(a[10], a, []ma.Multiaddr{ + newMultiaddr(t, "/ip6/::1/tcp/1234/tcp/2345"), + newMultiaddr(t, "/ip6/::1/tcp/1234/tcp/2345"), + }) + test(a[12], a, []ma.Multiaddr{ + newMultiaddr(t, "/ip6/::1/tcp/1234/udp/1234"), + newMultiaddr(t, "/ip6/::1/tcp/1234/udp/1234"), + }) + test(a[14], a, []ma.Multiaddr{ + newMultiaddr(t, "/ip6/::1/tcp/1234/ip6/::1"), + newMultiaddr(t, "/ip6/::1/tcp/1234/ip6/::1"), + }) + +} + +func testSliceEqual(t *testing.T, a, b []ma.Multiaddr) { + if len(a) != len(b) { + t.Error("differ", a, b) + } + for i, addrA := range a { + if !addrA.Equal(b[i]) { + t.Error("differ", a, b) + } + } +} + +func TestInterfaceAddressesWorks(t *testing.T) { + _, err := InterfaceMultiaddrs() + if err != nil { + t.Fatal(err) + } +} + +func TestNetListener(t *testing.T) { + listener, err := net.Listen("tcp", "127.0.0.1:1234") + if err != nil { + t.Fatal(err) + } + defer listener.Close() + malist, err := WrapNetListener(listener) + if err != nil { + t.Fatal(err) + } + if !malist.Multiaddr().Equal(newMultiaddr(t, "/ip4/127.0.0.1/tcp/1234")) { + t.Fatal("unexpected multiaddr") + } + + go func() { + c, err := Dial(malist.Multiaddr()) + if err != nil { + t.Error("failed to dial") + } + if !c.RemoteMultiaddr().Equal(malist.Multiaddr()) { + t.Error("dialed wrong target") + } + c.Close() + + c, err = Dial(malist.Multiaddr()) + if err != nil { + t.Error("failed to dial") + } + c.Close() + }() + + c, err := malist.Accept() + if err != nil { + t.Fatal(err) + } + c.Close() + netList := NetListener(malist) + malist2, err := WrapNetListener(netList) + if err != nil { + t.Fatal(err) + } + if malist2 != malist { + t.Fatal("expected 
WrapNetListener(NetListener(malist)) == malist") + } + nc, err := netList.Accept() + if err != nil { + t.Fatal(err) + } + if !nc.(Conn).LocalMultiaddr().Equal(malist.Multiaddr()) { + t.Fatal("wrong multiaddr on conn") + } + nc.Close() +} + +func BenchmarkResolveUnspecifiedAddress(b *testing.B) { + b.ReportAllocs() + a, _ := ma.StringCast("/ip4/0.0.0.0/udp/42/quic-v1") + iaddrs, _ := interfaceAddresses() + for i := 0; i < b.N; i++ { + ResolveUnspecifiedAddress(a, iaddrs) + } +} diff --git a/go-multiaddr/net/private.go b/go-multiaddr/net/private.go new file mode 100644 index 0000000..606b2ff --- /dev/null +++ b/go-multiaddr/net/private.go @@ -0,0 +1,218 @@ +package manet + +import ( + "net" + "strings" + + ma "github.com/multiformats/go-multiaddr" +) + +// Private4 and Private6 are well-known private networks +var Private4, Private6 []*net.IPNet +var privateCIDR4 = []string{ + // localhost + "127.0.0.0/8", + // private networks + "10.0.0.0/8", + "100.64.0.0/10", + "172.16.0.0/12", + "192.168.0.0/16", + // link local + "169.254.0.0/16", +} +var privateCIDR6 = []string{ + // localhost + "::1/128", + // ULA reserved + "fc00::/7", + // link local + "fe80::/10", +} + +// Unroutable4 and Unroutable6 are well known unroutable address ranges +var Unroutable4, Unroutable6 []*net.IPNet +var unroutableCIDR4 = []string{ + "0.0.0.0/8", + "192.0.0.0/26", + "192.0.2.0/24", + "192.88.99.0/24", + "198.18.0.0/15", + "198.51.100.0/24", + "203.0.113.0/24", + "224.0.0.0/4", + "240.0.0.0/4", + "255.255.255.255/32", +} +var unroutableCIDR6 = []string{ + "ff00::/8", // multicast + "2001:db8::/32", // documentation +} + +var globalUnicast []*net.IPNet +var globalUnicastCIDR6 = []string{ + "2000::/3", +} + +var nat64CIDRs = []string{ + "64:ff9b:1::/48", // RFC 8215 + "64:ff9b::/96", // RFC 6052 +} + +var nat64 []*net.IPNet + +// unResolvableDomains do not resolve to an IP address. +// Ref: https://en.wikipedia.org/wiki/Special-use_domain_name#Reserved_domain_names +var unResolvableDomains = []string{ + // Reverse DNS Lookup + ".in-addr.arpa", + ".ip6.arpa", + + // RFC 6761: Users MAY assume that queries for "invalid" names will always return NXDOMAIN + // responses + ".invalid", +} + +// privateUseDomains are reserved for private use and have no central authority for consistent +// address resolution +// Ref: https://en.wikipedia.org/wiki/Special-use_domain_name#Reserved_domain_names +var privateUseDomains = []string{ + // RFC 8375: Reserved for home networks + ".home.arpa", + + // MDNS + ".local", + + // RFC 6761: No central authority for .test names + ".test", +} + +// RFC 6761: Users may assume that IPv4 and IPv6 address queries for localhost names will +// always resolve to the respective IP loopback address +const localHostDomain = ".localhost" + +func init() { + Private4 = parseCIDR(privateCIDR4) + Private6 = parseCIDR(privateCIDR6) + Unroutable4 = parseCIDR(unroutableCIDR4) + Unroutable6 = parseCIDR(unroutableCIDR6) + globalUnicast = parseCIDR(globalUnicastCIDR6) + nat64 = parseCIDR(nat64CIDRs) +} + +func parseCIDR(cidrs []string) []*net.IPNet { + ipnets := make([]*net.IPNet, len(cidrs)) + for i, cidr := range cidrs { + _, ipnet, err := net.ParseCIDR(cidr) + if err != nil { + return []*net.IPNet{} + } + ipnets[i] = ipnet + } + return ipnets +} + +// IsPublicAddr returns true if the IP part of the multiaddr is a publicly routable address +// or if it's a dns address without a special use domain e.g. .local. 
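+//
+// For example (per the ranges above): /ip4/1.1.1.1/tcp/80 is public, while
+// /ip4/192.168.1.1/tcp/80 (private range) and /dns/a.localhost (special-use
+// domain) are not.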
+func IsPublicAddr(a ma.Multiaddr) (bool, error) { + isPublic := false + var err error + ma.ForEach(a, func(c ma.Component, e error) bool { + if e != nil { + err = e + return false + } + switch c.Protocol().Code { + case ma.P_IP6ZONE: + return true + case ma.P_IP4: + ip := net.IP(c.RawValue()) + isPublic = !inAddrRange(ip, Private4) && !inAddrRange(ip, Unroutable4) + case ma.P_IP6: + ip := net.IP(c.RawValue()) + // IP6 documentation prefix(part of Unroutable6) is a subset of the ip6 + // global unicast allocation so we ensure that it's not a documentation + // prefix by diffing with Unroutable6 + isPublicUnicastAddr := inAddrRange(ip, globalUnicast) && !inAddrRange(ip, Unroutable6) + if isPublicUnicastAddr { + isPublic = true + return false + } + // The WellKnown NAT64 prefix(RFC 6052) can only reference a public IPv4 + // address. + // The Local use NAT64 prefix(RFC 8215) can reference private IPv4 + // addresses. But since the translation from Local use NAT64 prefix to IPv4 + // address is left to the user we have no way of knowing which IPv4 address + // is referenced. We count these as Public addresses because a false + // negative for this method here is generally worse than a false positive. + isPublic = inAddrRange(ip, nat64) + return false + case ma.P_DNS, ma.P_DNS4, ma.P_DNS6, ma.P_DNSADDR: + dnsAddr := c.Value() + isPublic = true + if isSubdomain(dnsAddr, localHostDomain) { + isPublic = false + return false + } + for _, ud := range unResolvableDomains { + if isSubdomain(dnsAddr, ud) { + isPublic = false + return false + } + } + for _, pd := range privateUseDomains { + if isSubdomain(dnsAddr, pd) { + isPublic = false + break + } + } + } + return false + }) + return isPublic, err +} + +// isSubdomain checks if child is sub domain of parent. It also returns true if child and parent are +// the same domain. +// Parent must have a "." prefix. 
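+//
+// e.g. isSubdomain("foo.localhost", ".localhost") and
+// isSubdomain("localhost", ".localhost") are both true.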
+func isSubdomain(child, parent string) bool {
+	return strings.HasSuffix(child, parent) || child == parent[1:]
+}
+
+// IsPrivateAddr returns true if the IP part of the multiaddr is in a private network
+func IsPrivateAddr(a ma.Multiaddr) (bool, error) {
+	isPrivate := false
+	var err error
+	ma.ForEach(a, func(c ma.Component, e error) bool {
+		if e != nil {
+			err = e
+			return false
+		}
+		switch c.Protocol().Code {
+		case ma.P_IP6ZONE:
+			return true
+		case ma.P_IP4:
+			isPrivate = inAddrRange(net.IP(c.RawValue()), Private4)
+		case ma.P_IP6:
+			isPrivate = inAddrRange(net.IP(c.RawValue()), Private6)
+		case ma.P_DNS, ma.P_DNS4, ma.P_DNS6, ma.P_DNSADDR:
+			dnsAddr := c.Value()
+			if isSubdomain(dnsAddr, localHostDomain) {
+				isPrivate = true
+			}
+			// We don't check for privateUseDomains because private use domains can
+			// resolve to public IP addresses
+		}
+		return false
+	})
+	return isPrivate, err
+}
+
+func inAddrRange(ip net.IP, ipnets []*net.IPNet) bool {
+	for _, ipnet := range ipnets {
+		if ipnet.Contains(ip) {
+			return true
+		}
+	}
+
+	return false
+}
diff --git a/go-multiaddr/net/private_test.go b/go-multiaddr/net/private_test.go
new file mode 100644
index 0000000..2b2944f
--- /dev/null
+++ b/go-multiaddr/net/private_test.go
@@ -0,0 +1,95 @@
+package manet
+
+import (
+	"fmt"
+	"testing"
+
+	ma "github.com/multiformats/go-multiaddr"
+)
+
+func TestIsPublicAddr(t *testing.T) {
+	addr1, _ := ma.StringCast("/ip4/192.168.1.1/tcp/80")
+	addr2, _ := ma.StringCast("/ip4/1.1.1.1/tcp/80")
+	addr3, _ := ma.StringCast("/tcp/80/ip4/1.1.1.1")
+	addr4, _ := ma.StringCast("/dns/node.libp2p.io/udp/1/quic-v1")
+	addr5, _ := ma.StringCast("/dnsaddr/node.libp2p.io/udp/1/quic-v1")
+	addr6, _ := ma.StringCast("/dns/node.libp2p.local/udp/1/quic-v1")
+	addr7, _ := ma.StringCast("/dns/localhost/udp/1/quic-v1")
+	addr8, _ := ma.StringCast("/dns/a.localhost/tcp/1")
+	addr9, _ := ma.StringCast("/ip6/2400::1/tcp/10")
+	addr10, _ := ma.StringCast("/ip6/2001:db8::42/tcp/10")
+	addr11, _ := ma.StringCast("/ip6/64:ff9b::1.1.1.1/tcp/10")
+	tests := []struct {
+		addr      ma.Multiaddr
+		isPublic  bool
+		isPrivate bool
+	}{
+		{
+			addr:      addr1,
+			isPublic:  false,
+			isPrivate: true,
+		},
+		{
+			addr:      addr2,
+			isPublic:  true,
+			isPrivate: false,
+		},
+		{
+			addr:      addr3,
+			isPublic:  false,
+			isPrivate: false,
+		},
+		{
+			addr:      addr4,
+			isPublic:  true,
+			isPrivate: false,
+		},
+		{
+			addr:      addr5,
+			isPublic:  true,
+			isPrivate: false,
+		},
+		{
+			addr:      addr6,
+			isPublic:  false,
+			isPrivate: false, // You can configure .local domains in local networks to return public addrs
+		},
+		{
+			addr:      addr7,
+			isPublic:  false,
+			isPrivate: true,
+		},
+		{
+			addr:      addr8,
+			isPublic:  false,
+			isPrivate: true,
+		},
+		{
+			addr:      addr9,
+			isPublic:  true,
+			isPrivate: false,
+		},
+		{
+			addr:      addr10,
+			isPublic:  false,
+			isPrivate: false,
+		},
+		{
+			addr:      addr11,
+			isPublic:  true,
+			isPrivate: false,
+		},
+	}
+	for i, tt := range tests {
+		t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
+			isPublic, _ := IsPublicAddr(tt.addr)
+			isPrivate, _ := IsPrivateAddr(tt.addr)
+			if isPublic != tt.isPublic {
+				t.Errorf("IsPublicAddr check failed for %s: expected %t, got %t", tt.addr, tt.isPublic, isPublic)
+			}
+			if isPrivate != tt.isPrivate {
+				t.Errorf("IsPrivateAddr check failed for %s: expected %t, got %t", tt.addr, tt.isPrivate, isPrivate)
+			}
+		})
+	}
+}
diff --git a/go-multiaddr/net/registry.go b/go-multiaddr/net/registry.go
new file mode 100644
index 0000000..15c7e20
--- /dev/null
+++ b/go-multiaddr/net/registry.go
@@ -0,0 +1,101 @@
+package manet
+
+import (
"fmt" + "net" + "sync" + + ma "github.com/multiformats/go-multiaddr" +) + +// FromNetAddrFunc is a generic function which converts a net.Addr to Multiaddress +type FromNetAddrFunc func(a net.Addr) (ma.Multiaddr, error) + +// ToNetAddrFunc is a generic function which converts a Multiaddress to net.Addr +type ToNetAddrFunc func(ma ma.Multiaddr) (net.Addr, error) + +var defaultCodecs = NewCodecMap() + +func init() { + RegisterFromNetAddr(parseTCPNetAddr, "tcp", "tcp4", "tcp6") + RegisterFromNetAddr(parseUDPNetAddr, "udp", "udp4", "udp6") + RegisterFromNetAddr(parseIPNetAddr, "ip", "ip4", "ip6") + RegisterFromNetAddr(parseIPPlusNetAddr, "ip+net") + RegisterFromNetAddr(parseUnixNetAddr, "unix") + + RegisterToNetAddr(parseBasicNetMaddr, "tcp", "udp", "ip6", "ip4", "unix") +} + +// CodecMap holds a map of NetCodecs indexed by their Protocol ID +// along with parsers for the addresses they use. +// It is used to keep a list of supported network address codecs (protocols +// which addresses can be converted to and from multiaddresses). +type CodecMap struct { + addrParsers map[string]FromNetAddrFunc + maddrParsers map[string]ToNetAddrFunc + lk sync.Mutex +} + +// NewCodecMap initializes and returns a CodecMap object. +func NewCodecMap() *CodecMap { + return &CodecMap{ + addrParsers: make(map[string]FromNetAddrFunc), + maddrParsers: make(map[string]ToNetAddrFunc), + } +} + +// RegisterFromNetAddr registers a conversion from net.Addr instances to multiaddrs. +func RegisterFromNetAddr(from FromNetAddrFunc, networks ...string) { + defaultCodecs.RegisterFromNetAddr(from, networks...) +} + +// RegisterToNetAddr registers a conversion from multiaddrs to net.Addr instances. +func RegisterToNetAddr(to ToNetAddrFunc, protocols ...string) { + defaultCodecs.RegisterToNetAddr(to, protocols...) 
+}
+
+// RegisterFromNetAddr registers a conversion from net.Addr instances to multiaddrs
+func (cm *CodecMap) RegisterFromNetAddr(from FromNetAddrFunc, networks ...string) {
+	cm.lk.Lock()
+
+	for _, n := range networks {
+		cm.addrParsers[n] = from
+	}
+
+	cm.lk.Unlock()
+}
+
+// RegisterToNetAddr registers a conversion from multiaddrs to net.Addr instances
+func (cm *CodecMap) RegisterToNetAddr(to ToNetAddrFunc, protocols ...string) {
+	cm.lk.Lock()
+
+	for _, p := range protocols {
+		cm.maddrParsers[p] = to
+	}
+
+	cm.lk.Unlock()
+}
+
+func (cm *CodecMap) getAddrParser(net string) (FromNetAddrFunc, error) {
+	// read under the lock and release it before returning on either path
+	cm.lk.Lock()
+	parser, ok := cm.addrParsers[net]
+	cm.lk.Unlock()
+
+	if !ok {
+		return nil, fmt.Errorf("unknown network %v", net)
+	}
+	return parser, nil
+}
+
+func (cm *CodecMap) getMaddrParser(name string) (ToNetAddrFunc, error) {
+	// read under the lock and release it before returning on either path
+	cm.lk.Lock()
+	p, ok := cm.maddrParsers[name]
+	cm.lk.Unlock()
+
+	if !ok {
+		return nil, fmt.Errorf("network not supported: %s", name)
+	}
+	return p, nil
+}
diff --git a/go-multiaddr/net/registry_test.go b/go-multiaddr/net/registry_test.go
new file mode 100644
index 0000000..9aaf4de
--- /dev/null
+++ b/go-multiaddr/net/registry_test.go
@@ -0,0 +1,42 @@
+package manet
+
+import (
+	"net"
+	"testing"
+
+	ma "github.com/multiformats/go-multiaddr"
+)
+
+func TestRegisterFrom(t *testing.T) {
+	cm := NewCodecMap()
+	cm.RegisterFromNetAddr(
+		func(a net.Addr) (ma.Multiaddr, error) { return nil, nil },
+		"test", "iptest", "blahtest",
+	)
+	if _, ok := cm.addrParsers["test"]; !ok {
+		t.Fatal("myproto not properly registered")
+	}
+	if _, ok := cm.addrParsers["iptest"]; !ok {
+		t.Fatal("myproto not properly registered")
+	}
+	if _, ok := cm.addrParsers["blahtest"]; !ok {
+		t.Fatal("myproto not properly registered")
+	}
+}
+
+func TestRegisterTo(t *testing.T) {
+	cm := NewCodecMap()
+	cm.RegisterToNetAddr(
+		func(a ma.Multiaddr) (net.Addr, error) { return nil, nil },
+		"test", "iptest", "blahtest",
+	)
+	if _, ok := cm.maddrParsers["test"]; !ok {
+		t.Fatal("myproto not properly registered")
+	}
+	if _, ok := cm.maddrParsers["iptest"]; !ok {
+		t.Fatal("myproto not properly registered")
+	}
+	if _, ok := cm.maddrParsers["blahtest"]; !ok {
+		t.Fatal("myproto not properly registered")
+	}
+}
diff --git a/go-multiaddr/net/resolve.go b/go-multiaddr/net/resolve.go
new file mode 100644
index 0000000..dfe94af
--- /dev/null
+++ b/go-multiaddr/net/resolve.go
@@ -0,0 +1,94 @@
+package manet
+
+import (
+	"fmt"
+
+	ma "github.com/multiformats/go-multiaddr"
+)
+
+// ResolveUnspecifiedAddress expands an unspecified IP address (/ip4/0.0.0.0, /ip6/::) to
+// use the known local interfaces. If ifaceAddrs is nil, we request interface addresses
+// from the network stack. (this is so you can provide a cached value if resolving many addrs)
+func ResolveUnspecifiedAddress(resolve ma.Multiaddr, ifaceAddrs []ma.Multiaddr) ([]ma.Multiaddr, error) {
+	// split address into its components
+	first, rest, err := ma.SplitFirst(resolve)
+	if err != nil {
+		return nil, err
+	}
+
+	// if first component (ip) is not unspecified, use it as is.
+	if !IsIPUnspecified(first) {
+		return []ma.Multiaddr{resolve}, nil
+	}
+
+	resolveProto := resolve.Protocols()[0].Code
+	out := make([]ma.Multiaddr, 0, len(ifaceAddrs))
+	for _, ia := range ifaceAddrs {
+		iafirst, _, err := ma.SplitFirst(ia)
+		if err != nil {
+			return nil, err
+		}
+
+		// must match the first protocol to be resolved.
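+		// (so an /ip4/0.0.0.0 wildcard is only expanded over ip4 interface
+		// addrs, and /ip6/:: only over ip6 ones)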
+ if iafirst.Protocol().Code != resolveProto { + continue + } + + joined := ia + if rest != nil { + joined = ma.Join(ia, rest) + } + out = append(out, joined) + } + if len(out) < 1 { + return nil, fmt.Errorf("failed to resolve: %s", resolve) + } + return out, nil +} + +// ResolveUnspecifiedAddresses expands unspecified ip addresses (/ip4/0.0.0.0, /ip6/::) to +// use the known local interfaces. +func ResolveUnspecifiedAddresses(unspecAddrs, ifaceAddrs []ma.Multiaddr) ([]ma.Multiaddr, error) { + // todo optimize: only fetch these if we have a "any" addr. + if len(ifaceAddrs) < 1 { + var err error + ifaceAddrs, err = interfaceAddresses() + if err != nil { + return nil, err + } + } + + var outputAddrs []ma.Multiaddr + for _, a := range unspecAddrs { + // unspecified? + resolved, err := ResolveUnspecifiedAddress(a, ifaceAddrs) + if err != nil { + continue // optimistic. if we can't resolve anything, we'll know at the bottom. + } + outputAddrs = append(outputAddrs, resolved...) + } + + if len(outputAddrs) < 1 { + return nil, fmt.Errorf("failed to specify addrs: %s", unspecAddrs) + } + return outputAddrs, nil +} + +// interfaceAddresses returns a list of addresses associated with local machine +// Note: we do not return link local addresses. IP loopback is ok, because we +// may be connecting to other nodes in the same machine. +func interfaceAddresses() ([]ma.Multiaddr, error) { + maddrs, err := InterfaceMultiaddrs() + if err != nil { + return nil, err + } + + var out []ma.Multiaddr + for _, a := range maddrs { + if IsIP6LinkLocal(a) { + continue + } + out = append(out, a) + } + return out, nil +} diff --git a/go-multiaddr/net/resolve_test.go b/go-multiaddr/net/resolve_test.go new file mode 100644 index 0000000..e4af820 --- /dev/null +++ b/go-multiaddr/net/resolve_test.go @@ -0,0 +1,60 @@ +package manet + +import ( + "testing" + + ma "github.com/multiformats/go-multiaddr" + "github.com/stretchr/testify/require" +) + +func TestResolvingAddrs(t *testing.T) { + unspec := []ma.Multiaddr{ + newMultiaddr(t, "/ip4/0.0.0.0/tcp/1234"), + newMultiaddr(t, "/ip4/1.2.3.4/tcp/1234"), + newMultiaddr(t, "/ip6/::/tcp/1234"), + newMultiaddr(t, "/ip6/::100/tcp/1234"), + newMultiaddr(t, "/ip4/0.0.0.0"), + } + + iface := []ma.Multiaddr{ + newMultiaddr(t, "/ip4/127.0.0.1"), + newMultiaddr(t, "/ip4/10.20.30.40"), + newMultiaddr(t, "/ip6/::1"), + newMultiaddr(t, "/ip6/::f"), + } + + spec := []ma.Multiaddr{ + newMultiaddr(t, "/ip4/127.0.0.1/tcp/1234"), + newMultiaddr(t, "/ip4/10.20.30.40/tcp/1234"), + newMultiaddr(t, "/ip4/1.2.3.4/tcp/1234"), + newMultiaddr(t, "/ip6/::1/tcp/1234"), + newMultiaddr(t, "/ip6/::f/tcp/1234"), + newMultiaddr(t, "/ip6/::100/tcp/1234"), + newMultiaddr(t, "/ip4/127.0.0.1"), + newMultiaddr(t, "/ip4/10.20.30.40"), + } + + actual, err := ResolveUnspecifiedAddresses(unspec, iface) + require.NoError(t, err) + require.Equal(t, actual, spec) + + ip4u := []ma.Multiaddr{newMultiaddr(t, "/ip4/0.0.0.0")} + ip4i := []ma.Multiaddr{newMultiaddr(t, "/ip4/1.2.3.4")} + + ip6u := []ma.Multiaddr{newMultiaddr(t, "/ip6/::")} + ip6i := []ma.Multiaddr{newMultiaddr(t, "/ip6/::1")} + + if _, err := ResolveUnspecifiedAddress(ip4u[0], ip6i); err == nil { + t.Fatal("should have failed") + } + if _, err := ResolveUnspecifiedAddress(ip6u[0], ip4i); err == nil { + t.Fatal("should have failed") + } + + if _, err := ResolveUnspecifiedAddresses(ip6u, ip4i); err == nil { + t.Fatal("should have failed") + } + if _, err := ResolveUnspecifiedAddresses(ip4u, ip6i); err == nil { + t.Fatal("should have failed") + } +} diff --git 
a/go-multiaddr/package.json b/go-multiaddr/package.json
new file mode 100644
index 0000000..c493b27
--- /dev/null
+++ b/go-multiaddr/package.json
@@ -0,0 +1,23 @@
+{
+  "author": "multiformats",
+  "bugs": {
+    "url": "https://github.com/multiformats/go-multiaddr/issues"
+  },
+  "gx": {
+    "dvcsimport": "github.com/multiformats/go-multiaddr"
+  },
+  "gxDependencies": [
+    {
+      "hash": "QmerPMzPk1mJVowm8KgmoknWa4yCYvvugMPsgWmDNUvDLW",
+      "name": "go-multihash",
+      "version": "1.0.9"
+    }
+  ],
+  "gxVersion": "0.9.0",
+  "language": "go",
+  "license": "MIT",
+  "name": "go-multiaddr",
+  "releaseCmd": "git commit -a -m \"gx publish $VERSION\"",
+  "version": "1.4.1"
+}
+
diff --git a/go-multiaddr/protocol.go b/go-multiaddr/protocol.go
new file mode 100644
index 0000000..61a2924
--- /dev/null
+++ b/go-multiaddr/protocol.go
@@ -0,0 +1,102 @@
+package multiaddr
+
+import (
+	"fmt"
+	"strings"
+)
+
+// These are special sizes
+const (
+	LengthPrefixedVarSize = -1
+)
+
+// Protocol is a Multiaddr protocol description structure.
+type Protocol struct {
+	// Name is the string representation of the protocol code. E.g., ip4,
+	// ip6, tcp, udp, etc.
+	Name string
+
+	// Code is the protocol's multicodec (a normal, non-varint number).
+	Code int
+
+	// VCode is a precomputed varint encoded version of Code.
+	VCode []byte
+
+	// Size is the size of the argument to this protocol.
+	//
+	// * Size == 0 means this protocol takes no argument.
+	// * Size > 0 means this protocol takes a constant sized argument.
+	// * Size < 0 means this protocol takes a variable length, varint
+	// prefixed argument.
+	Size int // a size of -1 indicates a length-prefixed variable size
+
+	// Path indicates a path protocol (e.g., unix). When parsing multiaddr
+	// strings, path protocols consume the remainder of the address instead
+	// of stopping at the next forward slash.
+	//
+	// Size must be LengthPrefixedVarSize.
+	Path bool
+
+	// Transcoder converts between the byte representation and the string
+	// representation of this protocol's argument (if any).
+	//
+	// This should only be non-nil if Size != 0
+	Transcoder Transcoder
+}
+
+var protocolsByName = map[string]Protocol{}
+var protocolsByCode = map[int]Protocol{}
+
+// Protocols is the list of multiaddr protocols supported by this module.
+var Protocols = []Protocol{}
+
+func AddProtocol(p Protocol) error {
+	if _, ok := protocolsByName[p.Name]; ok {
+		return fmt.Errorf("protocol by the name %q already exists", p.Name)
+	}
+
+	if existing, ok := protocolsByCode[p.Code]; ok {
+		return fmt.Errorf("protocol code %d already taken by %q", p.Code, existing.Name)
+	}
+
+	if p.Size != 0 && p.Transcoder == nil {
+		return fmt.Errorf("protocols with arguments must define transcoders")
+	}
+	if p.Path && p.Size >= 0 {
+		return fmt.Errorf("path protocols must have variable-length sizes")
+	}
+
+	Protocols = append(Protocols, p)
+	protocolsByName[p.Name] = p
+	protocolsByCode[p.Code] = p
+	return nil
+}
+
+// ProtocolWithName returns the Protocol description with given string name.
+func ProtocolWithName(s string) Protocol {
+	return protocolsByName[s]
+}
+
+// ProtocolWithCode returns the Protocol description with given protocol code.
+func ProtocolWithCode(c int) Protocol {
+	return protocolsByCode[c]
+}
+
+// ProtocolsWithString returns a slice of protocols matching given string.
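+// For example (illustrative): ProtocolsWithString("/ip4/tcp") returns the
+// ip4 and tcp Protocol entries, in that order.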
+func ProtocolsWithString(s string) ([]Protocol, error) { + s = strings.Trim(s, "/") + sp := strings.Split(s, "/") + if len(sp) == 0 { + return nil, nil + } + + t := make([]Protocol, len(sp)) + for i, name := range sp { + p := ProtocolWithName(name) + if p.Code == 0 { + return nil, fmt.Errorf("no protocol with name: %s", name) + } + t[i] = p + } + return t, nil +} diff --git a/go-multiaddr/protocols.go b/go-multiaddr/protocols.go new file mode 100644 index 0000000..69c724e --- /dev/null +++ b/go-multiaddr/protocols.go @@ -0,0 +1,371 @@ +package multiaddr + +// You **MUST** register your multicodecs with +// https://github.com/multiformats/multicodec before adding them here. +const ( + P_IP4 = 4 + P_TCP = 6 + P_DNS = 53 // 4 or 6 + P_DNS4 = 54 + P_DNS6 = 55 + P_DNSADDR = 56 + P_UDP = 273 + P_DCCP = 33 + P_IP6 = 41 + P_IP6ZONE = 42 + P_IPCIDR = 43 + P_QUIC = 460 + P_QUIC_V1 = 461 + P_WEBTRANSPORT = 465 + P_CERTHASH = 466 + P_SCTP = 132 + P_CIRCUIT = 290 + P_UDT = 301 + P_UTP = 302 + P_UNIX = 400 + P_P2P = 421 + P_IPFS = P_P2P // alias for backwards compatibility + P_HTTP = 480 + P_HTTP_PATH = 481 + P_HTTPS = 443 // deprecated alias for /tls/http + P_ONION = 444 // also for backwards compatibility + P_ONION3 = 445 + P_GARLIC64 = 446 + P_GARLIC32 = 447 + P_P2P_WEBRTC_DIRECT = 276 // Deprecated. use webrtc-direct instead + P_TLS = 448 + P_SNI = 449 + P_NOISE = 454 + P_WS = 477 + P_WSS = 478 // deprecated alias for /tls/ws + P_PLAINTEXTV2 = 7367777 + P_WEBRTC_DIRECT = 280 + P_WEBRTC = 281 +) + +var ( + codeIP4, _ = CodeToVarint(P_IP4) + codeTCP, _ = CodeToVarint(P_TCP) + codeDNS, _ = CodeToVarint(P_DNS) + codeDNS4, _ = CodeToVarint(P_DNS4) + codeDNS6, _ = CodeToVarint(P_DNS6) + codeDNSADDR, _ = CodeToVarint(P_DNSADDR) + codeUDP, _ = CodeToVarint(P_UDP) + codeDCCP, _ = CodeToVarint(P_DCCP) + codeIP6, _ = CodeToVarint(P_IP6) + codeIPCIDR, _ = CodeToVarint(P_IPCIDR) + codeIP6ZONE, _ = CodeToVarint(P_IP6ZONE) + codeSCTP, _ = CodeToVarint(P_SCTP) + codeCIRCUIT, _ = CodeToVarint(P_CIRCUIT) + codeONION2, _ = CodeToVarint(P_ONION) + codeONION3, _ = CodeToVarint(P_ONION3) + codeGARLIC64, _ = CodeToVarint(P_GARLIC64) + codeGARLIC32, _ = CodeToVarint(P_GARLIC32) + codeUTP, _ = CodeToVarint(P_UTP) + codeUDT, _ = CodeToVarint(P_UDT) + codeQUIC, _ = CodeToVarint(P_QUIC) + codeQUICV1, _ = CodeToVarint(P_QUIC_V1) + codeWEBTRANSPORT, _ = CodeToVarint(P_WEBTRANSPORT) + codeCERTHASH, _ = CodeToVarint(P_CERTHASH) + codeHTTP, _ = CodeToVarint(P_HTTP) + codeHTTPPath, _ = CodeToVarint(P_HTTP_PATH) + codeHTTPS, _ = CodeToVarint(P_HTTPS) + codeP2P, _ = CodeToVarint(P_P2P) + codeUNIX, _ = CodeToVarint(P_UNIX) + codeP2P_WEBRTC_DIRECT, _ = CodeToVarint(P_P2P_WEBRTC_DIRECT) + codeTLS, _ = CodeToVarint(P_TLS) + codeSNI, _ = CodeToVarint(P_SNI) + codeNOISE, _ = CodeToVarint(P_NOISE) + codePlaintextV2, _ = CodeToVarint(P_PLAINTEXTV2) + codeWS, _ = CodeToVarint(P_WS) + codeWSS, _ = CodeToVarint(P_WSS) + codeWebRTCDirect, _ = CodeToVarint(P_WEBRTC_DIRECT) + codeWebRTC, _ = CodeToVarint(P_WEBRTC) + protoIP4 = Protocol{ + Name: "ip4", + Code: P_IP4, + VCode: codeIP4, + Size: 32, + Path: false, + Transcoder: TranscoderIP4, + } + protoTCP = Protocol{ + Name: "tcp", + Code: P_TCP, + VCode: codeTCP, + Size: 16, + Path: false, + Transcoder: TranscoderPort, + } + protoDNS = Protocol{ + Code: P_DNS, + Size: LengthPrefixedVarSize, + Name: "dns", + VCode: codeDNS, + Transcoder: TranscoderDns, + } + protoDNS4 = Protocol{ + Code: P_DNS4, + Size: LengthPrefixedVarSize, + Name: "dns4", + VCode: codeDNS4, + Transcoder: TranscoderDns, + } + 
protoDNS6 = Protocol{ + Code: P_DNS6, + Size: LengthPrefixedVarSize, + Name: "dns6", + VCode: codeDNS6, + Transcoder: TranscoderDns, + } + protoDNSADDR = Protocol{ + Code: P_DNSADDR, + Size: LengthPrefixedVarSize, + Name: "dnsaddr", + VCode: codeDNSADDR, + Transcoder: TranscoderDns, + } + protoUDP = Protocol{ + Name: "udp", + Code: P_UDP, + VCode: codeUDP, + Size: 16, + Path: false, + Transcoder: TranscoderPort, + } + protoDCCP = Protocol{ + Name: "dccp", + Code: P_DCCP, + VCode: codeDCCP, + Size: 16, + Path: false, + Transcoder: TranscoderPort, + } + protoIP6 = Protocol{ + Name: "ip6", + Code: P_IP6, + VCode: codeIP6, + Size: 128, + Transcoder: TranscoderIP6, + } + protoIPCIDR = Protocol{ + Name: "ipcidr", + Code: P_IPCIDR, + VCode: codeIPCIDR, + Size: 8, + Transcoder: TranscoderIPCIDR, + } + // these require varint + protoIP6ZONE = Protocol{ + Name: "ip6zone", + Code: P_IP6ZONE, + VCode: codeIP6ZONE, + Size: LengthPrefixedVarSize, + Path: false, + Transcoder: TranscoderIP6Zone, + } + protoSCTP = Protocol{ + Name: "sctp", + Code: P_SCTP, + VCode: codeSCTP, + Size: 16, + Transcoder: TranscoderPort, + } + + protoCIRCUIT = Protocol{ + Code: P_CIRCUIT, + Size: 0, + Name: "p2p-circuit", + VCode: codeCIRCUIT, + } + + protoONION2 = Protocol{ + Name: "onion", + Code: P_ONION, + VCode: codeONION2, + Size: 96, + Transcoder: TranscoderOnion, + } + protoONION3 = Protocol{ + Name: "onion3", + Code: P_ONION3, + VCode: codeONION3, + Size: 296, + Transcoder: TranscoderOnion3, + } + protoGARLIC64 = Protocol{ + Name: "garlic64", + Code: P_GARLIC64, + VCode: codeGARLIC64, + Size: LengthPrefixedVarSize, + Transcoder: TranscoderGarlic64, + } + protoGARLIC32 = Protocol{ + Name: "garlic32", + Code: P_GARLIC32, + VCode: codeGARLIC32, + Size: LengthPrefixedVarSize, + Transcoder: TranscoderGarlic32, + } + protoUTP = Protocol{ + Name: "utp", + Code: P_UTP, + VCode: codeUTP, + } + protoUDT = Protocol{ + Name: "udt", + Code: P_UDT, + VCode: codeUDT, + } + protoQUIC = Protocol{ + Name: "quic", + Code: P_QUIC, + VCode: codeQUIC, + } + protoQUICV1 = Protocol{ + Name: "quic-v1", + Code: P_QUIC_V1, + VCode: codeQUICV1, + } + protoWEBTRANSPORT = Protocol{ + Name: "webtransport", + Code: P_WEBTRANSPORT, + VCode: codeWEBTRANSPORT, + } + protoCERTHASH = Protocol{ + Name: "certhash", + Code: P_CERTHASH, + VCode: codeCERTHASH, + Size: LengthPrefixedVarSize, + Transcoder: TranscoderCertHash, + } + protoHTTP = Protocol{ + Name: "http", + Code: P_HTTP, + VCode: codeHTTP, + } + protoHTTPPath = Protocol{ + Name: "http-path", + Code: P_HTTP_PATH, + VCode: codeHTTPPath, + Size: LengthPrefixedVarSize, + Transcoder: TranscoderHTTPPath, + } + protoHTTPS = Protocol{ + Name: "https", + Code: P_HTTPS, + VCode: codeHTTPS, + } + protoP2P = Protocol{ + Name: "p2p", + Code: P_P2P, + VCode: codeP2P, + Size: LengthPrefixedVarSize, + Transcoder: TranscoderP2P, + } + protoUNIX = Protocol{ + Name: "unix", + Code: P_UNIX, + VCode: codeUNIX, + Size: LengthPrefixedVarSize, + Path: true, + Transcoder: TranscoderUnix, + } + protoP2P_WEBRTC_DIRECT = Protocol{ + Name: "p2p-webrtc-direct", + Code: P_P2P_WEBRTC_DIRECT, + VCode: codeP2P_WEBRTC_DIRECT, + } + protoTLS = Protocol{ + Name: "tls", + Code: P_TLS, + VCode: codeTLS, + } + protoSNI = Protocol{ + Name: "sni", + Size: LengthPrefixedVarSize, + Code: P_SNI, + VCode: codeSNI, + Transcoder: TranscoderDns, + } + protoNOISE = Protocol{ + Name: "noise", + Code: P_NOISE, + VCode: codeNOISE, + } + protoPlaintextV2 = Protocol{ + Name: "plaintextv2", + Code: P_PLAINTEXTV2, + VCode: codePlaintextV2, + } + protoWS = 
Protocol{ + Name: "ws", + Code: P_WS, + VCode: codeWS, + } + protoWSS = Protocol{ + Name: "wss", + Code: P_WSS, + VCode: codeWSS, + } + protoWebRTCDirect = Protocol{ + Name: "webrtc-direct", + Code: P_WEBRTC_DIRECT, + VCode: codeWebRTCDirect, + } + protoWebRTC = Protocol{ + Name: "webrtc", + Code: P_WEBRTC, + VCode: codeWebRTC, + } +) + +func init() { + for _, p := range []Protocol{ + protoIP4, + protoTCP, + protoDNS, + protoDNS4, + protoDNS6, + protoDNSADDR, + protoUDP, + protoDCCP, + protoIP6, + protoIP6ZONE, + protoIPCIDR, + protoSCTP, + protoCIRCUIT, + protoONION2, + protoONION3, + protoGARLIC64, + protoGARLIC32, + protoUTP, + protoUDT, + protoQUIC, + protoQUICV1, + protoWEBTRANSPORT, + protoCERTHASH, + protoHTTP, + protoHTTPPath, + protoHTTPS, + protoP2P, + protoUNIX, + protoP2P_WEBRTC_DIRECT, + protoTLS, + protoSNI, + protoNOISE, + protoWS, + protoWSS, + protoPlaintextV2, + protoWebRTCDirect, + protoWebRTC, + } { + if err := AddProtocol(p); err != nil { + panic(err) + } + } + + // explicitly set both of these + protocolsByName["p2p"] = protoP2P + protocolsByName["ipfs"] = protoP2P +} diff --git a/go-multiaddr/testdata/fuzz/FuzzNewMultiaddrBytes/0487b63847656fd4 b/go-multiaddr/testdata/fuzz/FuzzNewMultiaddrBytes/0487b63847656fd4 new file mode 100644 index 0000000..013e7dc --- /dev/null +++ b/go-multiaddr/testdata/fuzz/FuzzNewMultiaddrBytes/0487b63847656fd4 @@ -0,0 +1,2 @@ +go test fuzz v1 +[]byte("\xa5\x03&\x12$000000000000000000000000000000000000") diff --git a/go-multiaddr/testdata/fuzz/FuzzNewMultiaddrBytes/04a87ae2740f7195 b/go-multiaddr/testdata/fuzz/FuzzNewMultiaddrBytes/04a87ae2740f7195 new file mode 100644 index 0000000..0292295 --- /dev/null +++ b/go-multiaddr/testdata/fuzz/FuzzNewMultiaddrBytes/04a87ae2740f7195 @@ -0,0 +1,2 @@ +go test fuzz v1 +[]byte("\x90\x03\x06000000") diff --git a/go-multiaddr/testdata/fuzz/FuzzNewMultiaddrBytes/239d3594e0ee93bb b/go-multiaddr/testdata/fuzz/FuzzNewMultiaddrBytes/239d3594e0ee93bb new file mode 100644 index 0000000..7fc603c --- /dev/null +++ b/go-multiaddr/testdata/fuzz/FuzzNewMultiaddrBytes/239d3594e0ee93bb @@ -0,0 +1,2 @@ +go test fuzz v1 +[]byte("\xbd\x0300000000000000000000000000000000000\x00\x00") diff --git a/go-multiaddr/testdata/fuzz/FuzzNewMultiaddrBytes/2ef0d600700564d4 b/go-multiaddr/testdata/fuzz/FuzzNewMultiaddrBytes/2ef0d600700564d4 new file mode 100644 index 0000000..77c10c8 --- /dev/null +++ b/go-multiaddr/testdata/fuzz/FuzzNewMultiaddrBytes/2ef0d600700564d4 @@ -0,0 +1,2 @@ +go test fuzz v1 +[]byte("\x90\x03\x06000000\x90\x03\x06000000") diff --git a/go-multiaddr/testdata/fuzz/FuzzNewMultiaddrBytes/385d14fbb016b8c3 b/go-multiaddr/testdata/fuzz/FuzzNewMultiaddrBytes/385d14fbb016b8c3 new file mode 100644 index 0000000..17866a9 --- /dev/null +++ b/go-multiaddr/testdata/fuzz/FuzzNewMultiaddrBytes/385d14fbb016b8c3 @@ -0,0 +1,2 @@ +go test fuzz v1 +[]byte("7*000000000000000000000000000000000000000000") diff --git a/go-multiaddr/testdata/fuzz/FuzzNewMultiaddrBytes/511b72740453a863 b/go-multiaddr/testdata/fuzz/FuzzNewMultiaddrBytes/511b72740453a863 new file mode 100644 index 0000000..05d670c --- /dev/null +++ b/go-multiaddr/testdata/fuzz/FuzzNewMultiaddrBytes/511b72740453a863 @@ -0,0 +1,2 @@ +go test fuzz v1 +[]byte("\xd2\x03\x00") diff --git a/go-multiaddr/testdata/fuzz/FuzzNewMultiaddrBytes/69ba454c4217999e b/go-multiaddr/testdata/fuzz/FuzzNewMultiaddrBytes/69ba454c4217999e new file mode 100644 index 0000000..62c184c --- /dev/null +++ b/go-multiaddr/testdata/fuzz/FuzzNewMultiaddrBytes/69ba454c4217999e @@ -0,0 +1,2 @@ +go test 
fuzz v1 +[]byte("7\x00") diff --git a/go-multiaddr/testdata/fuzz/FuzzNewMultiaddrBytes/9f0d778549d2b28e b/go-multiaddr/testdata/fuzz/FuzzNewMultiaddrBytes/9f0d778549d2b28e new file mode 100644 index 0000000..7326a78 --- /dev/null +++ b/go-multiaddr/testdata/fuzz/FuzzNewMultiaddrBytes/9f0d778549d2b28e @@ -0,0 +1,2 @@ +go test fuzz v1 +[]byte("\xbc\x030000000000\x00\x00") diff --git a/go-multiaddr/testdata/fuzz/FuzzNewMultiaddrBytes/af9576bc28339a8d b/go-multiaddr/testdata/fuzz/FuzzNewMultiaddrBytes/af9576bc28339a8d new file mode 100644 index 0000000..f490395 --- /dev/null +++ b/go-multiaddr/testdata/fuzz/FuzzNewMultiaddrBytes/af9576bc28339a8d @@ -0,0 +1,2 @@ +go test fuzz v1 +[]byte("\x84\xe0\xff\x00") diff --git a/go-multiaddr/testdata/fuzz/FuzzNewMultiaddrBytes/e9317f1a3c43de50 b/go-multiaddr/testdata/fuzz/FuzzNewMultiaddrBytes/e9317f1a3c43de50 new file mode 100644 index 0000000..6633245 --- /dev/null +++ b/go-multiaddr/testdata/fuzz/FuzzNewMultiaddrBytes/e9317f1a3c43de50 @@ -0,0 +1,2 @@ +go test fuzz v1 +[]byte("\x90\x03\x06/0000/") diff --git a/go-multiaddr/testdata/fuzz/FuzzNewMultiaddrBytes/f1ebd17c93085805 b/go-multiaddr/testdata/fuzz/FuzzNewMultiaddrBytes/f1ebd17c93085805 new file mode 100644 index 0000000..9a3e0dd --- /dev/null +++ b/go-multiaddr/testdata/fuzz/FuzzNewMultiaddrBytes/f1ebd17c93085805 @@ -0,0 +1,2 @@ +go test fuzz v1 +[]byte("\xa5\x03\x030\x010") diff --git a/go-multiaddr/testdata/fuzz/FuzzNewMultiaddrString/382a5bb1eff47833 b/go-multiaddr/testdata/fuzz/FuzzNewMultiaddrString/382a5bb1eff47833 new file mode 100644 index 0000000..976f505 --- /dev/null +++ b/go-multiaddr/testdata/fuzz/FuzzNewMultiaddrString/382a5bb1eff47833 @@ -0,0 +1,2 @@ +go test fuzz v1 +string("/garlic32/2222222222222222222222222222222222222222222222222222222") diff --git a/go-multiaddr/testdata/fuzz/FuzzNewMultiaddrString/53eb3b6be337b1d7 b/go-multiaddr/testdata/fuzz/FuzzNewMultiaddrString/53eb3b6be337b1d7 new file mode 100644 index 0000000..f627d5d --- /dev/null +++ b/go-multiaddr/testdata/fuzz/FuzzNewMultiaddrString/53eb3b6be337b1d7 @@ -0,0 +1,2 @@ +go test fuzz v1 
+string("/onion/\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\xa0\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7fd\x00-\a\t\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\
x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f
\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\
v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\x04\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\x00\x01\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\n\xec\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\n\xff\v\v\v\v\v\v\v\v\v\v\v\v\v\x80\x00\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\x87نŁ\xe2\xec\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v
\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\r\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\xf2\n\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\x1d\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v2222=:") diff --git a/go-multiaddr/testdata/fuzz/FuzzNewMultiaddrString/63891c9534054d61 b/go-multiaddr/testdata/fuzz/FuzzNewMultiaddrString/63891c9534054d61 new file mode 100644 index 0000000..df7624a --- /dev/null +++ b/go-multiaddr/testdata/fuzz/FuzzNewMultiaddrString/63891c9534054d61 @@ -0,0 +1,2 @@ +go test fuzz v1 +string("/p2p/BAfZA2jA222222222222222222222222222222222222222222222222222222222") diff --git a/go-multiaddr/testdata/fuzz/FuzzNewMultiaddrString/95a479f85dc92117 b/go-multiaddr/testdata/fuzz/FuzzNewMultiaddrString/95a479f85dc92117 new file mode 100644 index 0000000..3b6089e --- /dev/null +++ b/go-multiaddr/testdata/fuzz/FuzzNewMultiaddrString/95a479f85dc92117 @@ -0,0 +1,2 @@ +go test fuzz v1 +string("/p2p/BAfZBAjA222222222222222222222222222222222222222222222222222222222") diff --git a/go-multiaddr/testdata/fuzz/FuzzNewMultiaddrString/9dba3b166a74fc47 b/go-multiaddr/testdata/fuzz/FuzzNewMultiaddrString/9dba3b166a74fc47 new file mode 100644 index 0000000..805c039 --- /dev/null +++ b/go-multiaddr/testdata/fuzz/FuzzNewMultiaddrString/9dba3b166a74fc47 @@ -0,0 +1,2 @@ +go test fuzz v1 +string("/garlic64/000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000\r\r\r\r\r\r\r\r0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000") diff --git a/go-multiaddr/testdata/fuzz/FuzzNewMultiaddrString/a2b937de623ded67 b/go-multiaddr/testdata/fuzz/FuzzNewMultiaddrString/a2b937de623ded67 new file mode 100644 index 0000000..6c48422 --- /dev/null +++ b/go-multiaddr/testdata/fuzz/FuzzNewMultiaddrString/a2b937de623ded67 @@ -0,0 +1,2 @@ +go test fuzz v1 +string("/garlic32/222222222222222222222222222222222222222222222\r\r\r\r\r\r\r") diff --git a/go-multiaddr/testdata/fuzz/FuzzNewMultiaddrString/bc05ef53a41e422a b/go-multiaddr/testdata/fuzz/FuzzNewMultiaddrString/bc05ef53a41e422a new file mode 100644 index 0000000..a542e41 --- /dev/null +++ b/go-multiaddr/testdata/fuzz/FuzzNewMultiaddrString/bc05ef53a41e422a @@ -0,0 +1,2 @@ +go test fuzz v1 +string("/p2p/Qm11ps1111111111111111111111111111111") diff --git 
a/go-multiaddr/testdata/fuzz/FuzzNewMultiaddrString/d857c283ff1b2f2a b/go-multiaddr/testdata/fuzz/FuzzNewMultiaddrString/d857c283ff1b2f2a
new file mode 100644
index 0000000..ec2b8bb
--- /dev/null
+++ b/go-multiaddr/testdata/fuzz/FuzzNewMultiaddrString/d857c283ff1b2f2a
@@ -0,0 +1,2 @@
+go test fuzz v1
+string("/sni//sni/0")
diff --git a/go-multiaddr/testdata/fuzz/FuzzNewMultiaddrString/dffb2baac63c66ae b/go-multiaddr/testdata/fuzz/FuzzNewMultiaddrString/dffb2baac63c66ae
new file mode 100644
index 0000000..829d05f
--- /dev/null
+++ b/go-multiaddr/testdata/fuzz/FuzzNewMultiaddrString/dffb2baac63c66ae
@@ -0,0 +1,2 @@
+go test fuzz v1
+string("/onion/222222222222222=:1")
diff --git a/go-multiaddr/transcoders.go b/go-multiaddr/transcoders.go
new file mode 100644
index 0000000..9a36fc4
--- /dev/null
+++ b/go-multiaddr/transcoders.go
@@ -0,0 +1,491 @@
+package multiaddr
+
+import (
+	"bytes"
+	"encoding/base32"
+	"encoding/base64"
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"net"
+	"net/url"
+	"strconv"
+	"strings"
+
+	"github.com/ipfs/go-cid"
+	"github.com/multiformats/go-multibase"
+	mh "github.com/multiformats/go-multihash"
+)
+
+type Transcoder interface {
+	// StringToBytes validates a component value given in its string
+	// representation and encodes it to bytes.
+	StringToBytes(string) ([]byte, error)
+	// BytesToString validates a component value given in its byte
+	// representation and decodes it to a string.
+	BytesToString([]byte) (string, error)
+	// ValidateBytes validates a component value when parsing a multiaddr
+	// that is already in its byte representation.
+	ValidateBytes([]byte) error
+}
+
+func NewTranscoderFromFunctions(
+	s2b func(string) ([]byte, error),
+	b2s func([]byte) (string, error),
+	val func([]byte) error,
+) Transcoder {
+	return twrp{s2b, b2s, val}
+}
+
+type twrp struct {
+	strtobyte func(string) ([]byte, error)
+	bytetostr func([]byte) (string, error)
+	validbyte func([]byte) error
+}
+
+func (t twrp) StringToBytes(s string) ([]byte, error) {
+	return t.strtobyte(s)
+}
+func (t twrp) BytesToString(b []byte) (string, error) {
+	return t.bytetostr(b)
+}
+
+func (t twrp) ValidateBytes(b []byte) error {
+	if t.validbyte == nil {
+		return nil
+	}
+	return t.validbyte(b)
+}
+
+var TranscoderIP4 = NewTranscoderFromFunctions(ip4StB, ip4BtS, nil)
+var TranscoderIP6 = NewTranscoderFromFunctions(ip6StB, ip6BtS, nil)
+var TranscoderIP6Zone = NewTranscoderFromFunctions(ip6zoneStB, ip6zoneBtS, ip6zoneVal)
+var TranscoderIPCIDR = NewTranscoderFromFunctions(ipcidrStB, ipcidrBtS, ipcidrValidate)
+
+func ipcidrBtS(b []byte) (string, error) {
+	if err := ipcidrValidate(b); err != nil {
+		return "", err
+	}
+	return strconv.Itoa(int(b[0])), nil
+}
+
+func ipcidrStB(s string) ([]byte, error) {
+	ipMask, err := strconv.ParseUint(s, 10, 8)
+	if err != nil {
+		return nil, err
+	}
+	return []byte{byte(ipMask)}, nil
+}
+
+func ipcidrValidate(b []byte) error {
+	if len(b) != 1 {
+		return fmt.Errorf("invalid length (should be == 1)")
+	}
+	return nil
+}
+
+func ip4StB(s string) ([]byte, error) {
+	i := net.ParseIP(s).To4()
+	if i == nil {
+		return nil, fmt.Errorf("failed to parse ip4 addr: %s", s)
+	}
+	return i, nil
+}
+
+func ip6zoneStB(s string) ([]byte, error) {
+	if len(s) == 0 {
+		return nil, fmt.Errorf("empty ip6zone")
+	}
+	if strings.Contains(s, "/") {
+		return nil, fmt.Errorf("IPv6 zone ID contains '/': %s", s)
+	}
+	return []byte(s), nil
+}
+
+func ip6zoneBtS(b []byte) (string, error) {
+	if len(b) == 0 {
+		return "", fmt.Errorf("invalid length (should be > 0)")
+	}
+	return string(b), nil
+}
+
+func ip6zoneVal(b []byte) error {
+	if len(b) == 0 {
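+		// an empty zone ID is unrepresentable in the string form, so
+		// reject it outright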
return fmt.Errorf("invalid length (should be > 0)") + } + // Not supported as this would break multiaddrs. + if bytes.IndexByte(b, '/') >= 0 { + return fmt.Errorf("IPv6 zone ID contains '/': %s", string(b)) + } + return nil +} + +func ip6StB(s string) ([]byte, error) { + i := net.ParseIP(s).To16() + if i == nil { + return nil, fmt.Errorf("failed to parse ip6 addr: %s", s) + } + return i, nil +} + +func ip6BtS(b []byte) (string, error) { + ip := net.IP(b) + if ip4 := ip.To4(); ip4 != nil { + // Go fails to prepend the `::ffff:` part. + return "::ffff:" + ip4.String(), nil + } + return ip.String(), nil +} + +func ip4BtS(b []byte) (string, error) { + return net.IP(b).String(), nil +} + +var TranscoderPort = NewTranscoderFromFunctions(portStB, portBtS, nil) + +func portStB(s string) ([]byte, error) { + i, err := strconv.ParseUint(s, 10, 16) + if err != nil { + return nil, fmt.Errorf("failed to parse port addr: %s", err) + } + b := make([]byte, 2) + binary.BigEndian.PutUint16(b, uint16(i)) + return b, nil +} + +func portBtS(b []byte) (string, error) { + i := binary.BigEndian.Uint16(b) + return strconv.FormatUint(uint64(i), 10), nil +} + +var TranscoderOnion = NewTranscoderFromFunctions(onionStB, onionBtS, onionValidate) + +func onionStB(s string) ([]byte, error) { + addr := strings.Split(s, ":") + if len(addr) != 2 { + return nil, fmt.Errorf("failed to parse onion addr: %s does not contain a port number", s) + } + + onionHostBytes, err := base32.StdEncoding.DecodeString(strings.ToUpper(addr[0])) + if err != nil { + return nil, fmt.Errorf("failed to decode base32 onion addr: %s %s", s, err) + } + + // onion address without the ".onion" substring are 10 bytes long + if len(onionHostBytes) != 10 { + return nil, fmt.Errorf("failed to parse onion addr: %s not a Tor onion address", s) + } + + // onion port number + i, err := strconv.ParseUint(addr[1], 10, 16) + if err != nil { + return nil, fmt.Errorf("failed to parse onion addr: %s", err) + } + if i == 0 { + return nil, fmt.Errorf("failed to parse onion addr: %s", "non-zero port") + } + + onionPortBytes := make([]byte, 2) + binary.BigEndian.PutUint16(onionPortBytes, uint16(i)) + bytes := []byte{} + bytes = append(bytes, onionHostBytes...) + bytes = append(bytes, onionPortBytes...) + return bytes, nil +} + +func onionBtS(b []byte) (string, error) { + addr := strings.ToLower(base32.StdEncoding.EncodeToString(b[0:10])) + port := binary.BigEndian.Uint16(b[10:12]) + if port == 0 { + return "", fmt.Errorf("failed to parse onion addr: %s", "non-zero port") + } + return addr + ":" + strconv.FormatUint(uint64(port), 10), nil +} + +func onionValidate(b []byte) error { + if len(b) != 12 { + return fmt.Errorf("invalid len for onion addr: got %d expected 12", len(b)) + } + port := binary.BigEndian.Uint16(b[10:12]) + if port == 0 { + return fmt.Errorf("invalid port 0 for onion addr") + } + return nil +} + +var TranscoderOnion3 = NewTranscoderFromFunctions(onion3StB, onion3BtS, onion3Validate) + +func onion3StB(s string) ([]byte, error) { + addr := strings.Split(s, ":") + if len(addr) != 2 { + return nil, fmt.Errorf("failed to parse onion addr: %s does not contain a port number", s) + } + + // onion address without the ".onion" substring + if len(addr[0]) != 56 { + return nil, fmt.Errorf("failed to parse onion addr: %s not a Tor onionv3 address. 
len == %d", s, len(addr[0])) + } + onionHostBytes, err := base32.StdEncoding.DecodeString(strings.ToUpper(addr[0])) + if err != nil { + return nil, fmt.Errorf("failed to decode base32 onion addr: %s %s", s, err) + } + + // onion port number + i, err := strconv.ParseUint(addr[1], 10, 16) + if err != nil { + return nil, fmt.Errorf("failed to parse onion addr: %s", err) + } + if i == 0 { + return nil, fmt.Errorf("failed to parse onion addr: %s", "non-zero port") + } + + onionPortBytes := make([]byte, 2) + binary.BigEndian.PutUint16(onionPortBytes, uint16(i)) + bytes := []byte{} + bytes = append(bytes, onionHostBytes[0:35]...) + bytes = append(bytes, onionPortBytes...) + return bytes, nil +} + +func onion3BtS(b []byte) (string, error) { + addr := strings.ToLower(base32.StdEncoding.EncodeToString(b[0:35])) + port := binary.BigEndian.Uint16(b[35:37]) + if port < 1 { + return "", fmt.Errorf("failed to parse onion addr: %s", "port less than 1") + } + str := addr + ":" + strconv.FormatUint(uint64(port), 10) + return str, nil +} + +func onion3Validate(b []byte) error { + if len(b) != 37 { + return fmt.Errorf("invalid len for onion addr: got %d expected 37", len(b)) + } + port := binary.BigEndian.Uint16(b[35:37]) + if port == 0 { + return fmt.Errorf("invalid port 0 for onion addr") + } + return nil +} + +var TranscoderGarlic64 = NewTranscoderFromFunctions(garlic64StB, garlic64BtS, garlic64Validate) + +// i2p uses an alternate character set for base64 addresses. This returns an appropriate encoder. +var garlicBase64Encoding = base64.NewEncoding("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-~") + +func garlic64StB(s string) ([]byte, error) { + garlicHostBytes, err := garlicBase64Encoding.DecodeString(s) + if err != nil { + return nil, fmt.Errorf("failed to decode base64 i2p addr: %s %s", s, err) + } + + if err := garlic64Validate(garlicHostBytes); err != nil { + return nil, err + } + return garlicHostBytes, nil +} + +func garlic64BtS(b []byte) (string, error) { + if err := garlic64Validate(b); err != nil { + return "", err + } + addr := garlicBase64Encoding.EncodeToString(b) + return addr, nil +} + +func garlic64Validate(b []byte) error { + // A garlic64 address will always be greater than 386 bytes long when encoded. + if len(b) < 386 { + return fmt.Errorf("failed to validate garlic addr: %s not an i2p base64 address. len: %d", b, len(b)) + } + return nil +} + +var TranscoderGarlic32 = NewTranscoderFromFunctions(garlic32StB, garlic32BtS, garlic32Validate) + +var garlicBase32Encoding = base32.NewEncoding("abcdefghijklmnopqrstuvwxyz234567") + +func garlic32StB(s string) ([]byte, error) { + for len(s)%8 != 0 { + s += "=" + } + garlicHostBytes, err := garlicBase32Encoding.DecodeString(s) + if err != nil { + return nil, fmt.Errorf("failed to decode base32 garlic addr: %s, err: %v len: %v", s, err, len(s)) + } + + if err := garlic32Validate(garlicHostBytes); err != nil { + return nil, err + } + return garlicHostBytes, nil +} + +func garlic32BtS(b []byte) (string, error) { + if err := garlic32Validate(b); err != nil { + return "", err + } + return strings.TrimRight(garlicBase32Encoding.EncodeToString(b), "="), nil +} + +func garlic32Validate(b []byte) error { + // an i2p address with encrypted leaseset has len >= 35 bytes + // all other addresses will always be exactly 32 bytes + // https://geti2p.net/spec/b32encrypted + if len(b) < 35 && len(b) != 32 { + return fmt.Errorf("failed to validate garlic addr: %s not an i2p base32 address. 
len: %d", b, len(b)) + } + return nil +} + +var TranscoderP2P = NewTranscoderFromFunctions(p2pStB, p2pBtS, p2pVal) + +// The encoded peer ID can either be a CID of a key or a raw multihash (identity +// or sha256-256). +func p2pStB(s string) ([]byte, error) { + // check if the address is a base58 encoded sha256 or identity multihash + if strings.HasPrefix(s, "Qm") || strings.HasPrefix(s, "1") { + m, err := mh.FromB58String(s) + if err != nil { + return nil, fmt.Errorf("failed to parse p2p addr: %s %s", s, err) + } + if err := p2pVal(m); err != nil { + return nil, err + } + return m, nil + } + + // check if the address is a CID + c, err := cid.Decode(s) + if err != nil { + return nil, fmt.Errorf("failed to parse p2p addr: %s %s", s, err) + } + + if ty := c.Type(); ty == cid.Libp2pKey { + if err := p2pVal(c.Hash()); err != nil { + return nil, err + } + return c.Hash(), nil + } else { + return nil, fmt.Errorf("failed to parse p2p addr: %s has the invalid codec %d", s, ty) + } +} + +func p2pVal(b []byte) error { + h, err := mh.Decode([]byte(b)) + if err != nil { + return fmt.Errorf("invalid multihash: %s", err) + } + // Peer IDs require either sha256 or identity multihash + // https://github.com/libp2p/specs/blob/master/peer-ids/peer-ids.md#peer-ids + if h.Code != mh.SHA2_256 && h.Code != mh.IDENTITY { + return fmt.Errorf("invalid multihash code %d expected sha-256 or identity", h.Code) + } + // This check should ideally be in multihash. sha256 digest lengths MUST be 32 + if h.Code == mh.SHA2_256 && h.Length != 32 { + return fmt.Errorf("invalid digest length %d for sha256 addr: expected 32", h.Length) + } + return nil +} + +func p2pBtS(b []byte) (string, error) { + m, err := mh.Cast(b) + if err != nil { + return "", err + } + return m.B58String(), nil +} + +var TranscoderUnix = NewTranscoderFromFunctions(unixStB, unixBtS, unixValidate) + +func unixStB(s string) ([]byte, error) { + return []byte(s), nil +} + +func unixBtS(b []byte) (string, error) { + return string(b), nil +} + +func unixValidate(b []byte) error { + // The string to bytes parser requires that all Path protocols begin with a '/' + // file://./codec.go#L49 + if len(b) < 2 { + return fmt.Errorf("byte slice too short: %d", len(b)) + } + if b[0] != '/' { + return errors.New("path protocol must begin with '/'") + } + if b[len(b)-1] == '/' { + return errors.New("unix socket path must not end in '/'") + } + return nil +} + +var TranscoderDns = NewTranscoderFromFunctions(dnsStB, dnsBtS, dnsVal) + +func dnsVal(b []byte) error { + if len(b) == 0 { + return fmt.Errorf("empty dns addr") + } + if bytes.IndexByte(b, '/') >= 0 { + return fmt.Errorf("domain name %q contains a slash", string(b)) + } + return nil +} + +func dnsStB(s string) ([]byte, error) { + b := []byte(s) + if err := dnsVal(b); err != nil { + return nil, err + } + return b, nil +} + +func dnsBtS(b []byte) (string, error) { + return string(b), nil +} + +var TranscoderCertHash = NewTranscoderFromFunctions(certHashStB, certHashBtS, validateCertHash) + +func certHashStB(s string) ([]byte, error) { + _, data, err := multibase.Decode(s) + if err != nil { + return nil, err + } + if _, err := mh.Decode(data); err != nil { + return nil, err + } + return data, nil +} + +func certHashBtS(b []byte) (string, error) { + return multibase.Encode(multibase.Base64url, b) +} + +func validateCertHash(b []byte) error { + _, err := mh.Decode(b) + return err +} + +var TranscoderHTTPPath = NewTranscoderFromFunctions(httpPathStB, httpPathBtS, validateHTTPPath) + +func httpPathStB(s string) ([]byte, 
+	unescaped, err := url.QueryUnescape(s)
+	if err != nil {
+		return nil, err
+	}
+	if len(unescaped) == 0 {
+		return nil, fmt.Errorf("empty http path is not allowed")
+	}
+	return []byte(unescaped), nil
+}
+
+func httpPathBtS(b []byte) (string, error) {
+	if len(b) == 0 {
+		return "", fmt.Errorf("empty http path is not allowed")
+	}
+	return url.QueryEscape(string(b)), nil
+}
+
+func validateHTTPPath(b []byte) error {
+	if len(b) == 0 {
+		return fmt.Errorf("empty http path is not allowed")
+	}
+	return nil // We can represent any byte slice when we escape it.
+}
diff --git a/go-multiaddr/util.go b/go-multiaddr/util.go
new file mode 100644
index 0000000..723d4a7
--- /dev/null
+++ b/go-multiaddr/util.go
@@ -0,0 +1,211 @@
+package multiaddr
+
+import "fmt"
+
+// Split returns the sub-address portions of a multiaddr.
+func Split(m Multiaddr) []Multiaddr {
+	if _, ok := m.(*Component); ok {
+		return []Multiaddr{m}
+	}
+	var addrs []Multiaddr
+	var err error
+	ForEach(m, func(c Component, e error) bool {
+		if e != nil {
+			err = e
+			return false
+		}
+		addrs = append(addrs, &c)
+		return true
+	})
+
+	if err != nil {
+		return []Multiaddr{}
+	}
+
+	return addrs
+}
+
+// Join returns a combination of addresses.
+func Join(ms ...Multiaddr) Multiaddr {
+	switch len(ms) {
+	case 0:
+		// empty multiaddr, unfortunately, we have callers that rely on
+		// this contract.
+		return &multiaddr{}
+	case 1:
+		return ms[0]
+	}
+
+	length := 0
+	for _, m := range ms {
+		if m == nil {
+			continue
+		}
+		length += len(m.Bytes())
+	}
+	if length == 0 {
+		return nil
+	}
+
+	bidx := 0
+	b := make([]byte, length)
+	for _, mb := range ms {
+		if mb == nil {
+			continue
+		}
+		bidx += copy(b[bidx:], mb.Bytes())
+	}
+	return &multiaddr{bytes: b}
+}
+
+// Cast re-casts a byte slice as a multiaddr.
+func Cast(b []byte) (Multiaddr, error) {
+	m, err := NewMultiaddrBytes(b)
+	if err != nil {
+		return nil, fmt.Errorf("multiaddr failed to parse: %s", err)
+	}
+	return m, nil
+}
+
+// StringCast is like Cast, but parses a string.
+func StringCast(s string) (Multiaddr, error) {
+	m, err := NewMultiaddr(s)
+	if err != nil {
+		return nil, fmt.Errorf("multiaddr failed to parse: %s", err)
+	}
+	return m, nil
+}
+
+// SplitFirst returns the first component and the rest of the multiaddr.
+func SplitFirst(m Multiaddr) (*Component, Multiaddr, error) {
+	if m == nil {
+		return nil, nil, nil
+	}
+	// Shortcut if we already have a component
+	if c, ok := m.(*Component); ok {
+		return c, nil, nil
+	}
+
+	b := m.Bytes()
+	if len(b) == 0 {
+		return nil, nil, nil
+	}
+	n, c, err := readComponent(b)
+	if err != nil {
+		return nil, nil, err
+	}
+	if len(b) == n {
+		return &c, nil, nil
+	}
+	return &c, &multiaddr{b[n:]}, nil
+}
+
+// SplitLast returns the rest of the multiaddr and the last component.
+func SplitLast(m Multiaddr) (Multiaddr, *Component, error) {
+	if m == nil {
+		return nil, nil, nil
+	}
+
+	// Shortcut if we already have a component
+	if c, ok := m.(*Component); ok {
+		return nil, c, nil
+	}
+
+	b := m.Bytes()
+	if len(b) == 0 {
+		return nil, nil, nil
+	}
+
+	var (
+		c      Component
+		err    error
+		offset int
+	)
+	for {
+		var n int
+		n, c, err = readComponent(b[offset:])
+		if err != nil {
+			return nil, nil, err
+		}
+		if len(b) == n+offset {
+			// Reached end
+			if offset == 0 {
+				// Only one component
+				return nil, &c, nil
+			}
+			return &multiaddr{b[:offset]}, &c, nil
+		}
+		offset += n
+	}
+}
+
+// SplitFunc splits the multiaddr when the callback first returns true. The
+// component on which the callback first returns will be included in the
+// *second* multiaddr.
+func SplitFunc(m Multiaddr, cb func(Component) bool) (Multiaddr, Multiaddr, error) {
+	if m == nil {
+		return nil, nil, nil
+	}
+	// Shortcut if we already have a component
+	if c, ok := m.(*Component); ok {
+		if cb(*c) {
+			return nil, m, nil
+		}
+		return m, nil, nil
+	}
+	b := m.Bytes()
+	if len(b) == 0 {
+		return nil, nil, nil
+	}
+	var (
+		c      Component
+		err    error
+		offset int
+	)
+	for offset < len(b) {
+		var n int
+		n, c, err = readComponent(b[offset:])
+		if err != nil {
+			return nil, nil, err
+		}
+		if cb(c) {
+			break
+		}
+		offset += n
+	}
+	switch offset {
+	case 0:
+		return nil, m, nil
+	case len(b):
+		return m, nil, nil
+	default:
+		return &multiaddr{b[:offset]}, &multiaddr{b[offset:]}, nil
+	}
+}
+
+// ForEach walks over the multiaddr, component by component.
+//
+// This function iterates over components *by value* to avoid allocating.
+// Return true to continue iteration, false to stop.
+func ForEach(m Multiaddr, cb func(c Component, err error) bool) {
+	if m == nil {
+		return
+	}
+	// Shortcut if we already have a component
+	if c, ok := m.(*Component); ok {
+		cb(*c, nil)
+		return
+	}
+
+	b := m.Bytes()
+	for len(b) > 0 {
+		n, c, err := readComponent(b)
+		if !cb(c, err) {
+			return
+		}
+		b = b[n:]
+	}
+}
diff --git a/go-multiaddr/util_test.go b/go-multiaddr/util_test.go
new file mode 100644
index 0000000..5544444
--- /dev/null
+++ b/go-multiaddr/util_test.go
@@ -0,0 +1,151 @@
+package multiaddr
+
+import (
+	"strings"
+	"testing"
+)
+
+func TestSplitFirstLast(t *testing.T) {
+	ipStr := "/ip4/0.0.0.0"
+	tcpStr := "/tcp/123"
+	quicStr := "/quic"
+	ipfsStr := "/ipfs/QmPSQnBKM9g7BaUcZCvswUJVscQ1ipjmwxN5PXCjkp9EQ7"
+
+	for _, x := range [][]string{
+		{ipStr, tcpStr, quicStr, ipfsStr},
+		{ipStr, tcpStr, ipfsStr},
+		{ipStr, tcpStr},
+		{ipStr},
+	} {
+		addr, _ := StringCast(strings.Join(x, ""))
+		head, tail, _ := SplitFirst(addr)
+		rest, last, _ := SplitLast(addr)
+		if len(x) == 0 {
+			if head != nil {
+				t.Error("expected head to be nil")
+			}
+			if tail != nil {
+				t.Error("expected tail to be nil")
+			}
+			if rest != nil {
+				t.Error("expected rest to be nil")
+			}
+			if last != nil {
+				t.Error("expected last to be nil")
+			}
+			continue
+		}
+		s, _ := StringCast(x[0])
+		if !head.Equal(s) {
+			t.Errorf("expected %s to be %s", head, x[0])
+		}
+		s, _ = StringCast(x[len(x)-1])
+		if !last.Equal(s) {
+			t.Errorf("expected %s to be %s", last, x[len(x)-1])
+		}
+		if len(x) == 1 {
+			if tail != nil {
+				t.Error("expected tail to be nil")
+			}
+			if rest != nil {
+				t.Error("expected rest to be nil")
+			}
+			continue
+		}
+		tailExp := strings.Join(x[1:], "")
+		s, _ = StringCast(tailExp)
+		if !tail.Equal(s) {
+			t.Errorf("expected %s to be %s", tail, tailExp)
+		}
+		restExp := strings.Join(x[:len(x)-1], "")
+		s, _ = StringCast(restExp)
+		if !rest.Equal(s) {
+			t.Errorf("expected %s to be %s", rest, restExp)
+		}
+	}
+
+	c, err := NewComponent("ip4", "127.0.0.1")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	ci, m, _ := SplitFirst(c)
+	if !ci.Equal(c) || m != nil {
+		t.Error("split first on component failed")
+	}
+	m, ci, _ = SplitLast(c)
+	if !ci.Equal(c) || m != nil {
+		t.Error("split last on component failed")
+	}
+	cis := Split(c)
+	if len(cis) != 1 || !cis[0].Equal(c) {
+		t.Error("split on component failed")
+	}
+	m1, m2, _ := SplitFunc(c, func(c Component) bool {
+		return true
+	})
+	if m1 != nil || !m2.Equal(c) {
+		t.Error("split func(true) on component failed")
+	}
+	m1, m2, _ = SplitFunc(c, func(c Component) bool {
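+		// never match, so the entire component should come back in the
+		// first return value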
return false + }) + if !m1.Equal(c) || m2 != nil { + t.Error("split func(false) on component failed") + } + + i := 0 + ForEach(c, func(ci Component, e error) bool { + if e != nil { + t.Error(e) + } + if i != 0 { + t.Error("expected exactly one component") + } + i++ + if !ci.Equal(c) { + t.Error("foreach on component failed") + } + return true + }) +} + +func TestSplitFunc(t *testing.T) { + ipStr := "/ip4/0.0.0.0" + tcpStr := "/tcp/123" + quicStr := "/quic" + ipfsStr := "/ipfs/QmPSQnBKM9g7BaUcZCvswUJVscQ1ipjmwxN5PXCjkp9EQ7" + + for _, x := range [][]string{ + {ipStr, tcpStr, quicStr, ipfsStr}, + {ipStr, tcpStr, ipfsStr}, + {ipStr, tcpStr}, + {ipStr}, + } { + addr, _ := StringCast(strings.Join(x, "")) + for i, cs := range x { + target, _ := StringCast(cs) + a, b, _ := SplitFunc(addr, func(c Component) bool { + return c.Equal(target) + }) + if i == 0 { + if a != nil { + t.Error("expected nil addr") + } + } else { + s, _ := StringCast(strings.Join(x[:i], "")) + if !a.Equal(s) { + t.Error("split failed") + } + s, _ = StringCast(strings.Join(x[i:], "")) + if !b.Equal(s) { + t.Error("split failed") + } + } + } + a, b, _ := SplitFunc(addr, func(_ Component) bool { return false }) + if !a.Equal(addr) || b != nil { + t.Error("should not have split") + } + } +} diff --git a/go-multiaddr/varint.go b/go-multiaddr/varint.go new file mode 100644 index 0000000..7e27be0 --- /dev/null +++ b/go-multiaddr/varint.go @@ -0,0 +1,28 @@ +package multiaddr + +import ( + "errors" + "math" + + "github.com/multiformats/go-varint" +) + +// CodeToVarint converts an integer to a varint-encoded []byte +func CodeToVarint(num int) ([]byte, error) { + if num < 0 || num > math.MaxInt32 { + return nil, errors.New("invalid code") + } + return varint.ToUvarint(uint64(num)), nil +} + +func ReadVarintCode(b []byte) (int, int, error) { + code, n, err := varint.FromUvarint(b) + if err != nil { + return 0, 0, err + } + if code > math.MaxInt32 { + // we only allow 32bit codes. 
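+		// anything larger would overflow the int conversion below on
+		// 32-bit platforms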
+ return 0, 0, varint.ErrOverflow + } + return int(code), n, err +} diff --git a/go-multiaddr/version.json b/go-multiaddr/version.json new file mode 100644 index 0000000..d3f7968 --- /dev/null +++ b/go-multiaddr/version.json @@ -0,0 +1,3 @@ +{ + "version": "v0.13.0" +} diff --git a/node/go.mod b/node/go.mod index 3283999..b82c41e 100644 --- a/node/go.mod +++ b/node/go.mod @@ -7,8 +7,14 @@ toolchain go1.22.1 // A necessary hack until source.quilibrium.com is open to all replace source.quilibrium.com/quilibrium/monorepo/nekryptology => ../nekryptology +replace github.com/multiformats/go-multiaddr => ../go-multiaddr + +replace github.com/multiformats/go-multiaddr-dns => ../go-multiaddr-dns + replace github.com/libp2p/go-libp2p => ../go-libp2p +replace github.com/libp2p/go-libp2p-kad-dht => ../go-libp2p-kad-dht + replace github.com/libp2p/go-libp2p-gostream => ../go-libp2p-gostream replace source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub => ../go-libp2p-blossomsub @@ -24,6 +30,7 @@ require ( require ( filippo.io/edwards25519 v1.0.0-rc.1 // indirect + github.com/libp2p/go-libp2p-routing-helpers v0.7.2 // indirect github.com/pion/datachannel v1.5.6 // indirect github.com/pion/dtls/v2 v2.2.11 // indirect github.com/pion/ice/v2 v2.3.25 // indirect @@ -40,8 +47,9 @@ require ( github.com/pion/transport/v2 v2.2.5 // indirect github.com/pion/turn/v2 v2.1.6 // indirect github.com/pion/webrtc/v3 v3.2.40 // indirect - go.opentelemetry.io/otel v1.14.0 // indirect - go.opentelemetry.io/otel/trace v1.14.0 // indirect + go.opentelemetry.io/otel v1.16.0 // indirect + go.opentelemetry.io/otel/metric v1.16.0 // indirect + go.opentelemetry.io/otel/trace v1.16.0 // indirect go.uber.org/mock v0.4.0 // indirect ) @@ -63,7 +71,7 @@ require ( github.com/benbjohnson/clock v1.3.5 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect - github.com/cloudflare/circl v1.3.3 + github.com/cloudflare/circl v1.3.9 github.com/containerd/cgroups v1.1.0 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect @@ -88,10 +96,9 @@ require ( github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect github.com/huin/goupnp v1.3.0 // indirect github.com/iden3/go-iden3-crypto v0.0.16 - github.com/ipfs/boxo v0.8.0 // indirect + github.com/ipfs/boxo v0.10.0 // indirect github.com/ipfs/go-cid v0.4.1 // indirect github.com/ipfs/go-datastore v0.6.0 // indirect - github.com/ipfs/go-ipfs-util v0.0.2 // indirect github.com/ipfs/go-log v1.0.5 // indirect github.com/ipfs/go-log/v2 v2.5.1 // indirect github.com/ipld/go-ipld-prime v0.20.0 // indirect @@ -105,7 +112,7 @@ require ( github.com/libp2p/go-cidranger v1.1.0 // indirect github.com/libp2p/go-flow-metrics v0.1.0 // indirect github.com/libp2p/go-libp2p-asn-util v0.4.1 // indirect - github.com/libp2p/go-libp2p-kbucket v0.5.0 // indirect + github.com/libp2p/go-libp2p-kbucket v0.6.3 // indirect github.com/libp2p/go-libp2p-record v0.2.0 // indirect github.com/libp2p/go-msgio v0.3.0 // indirect github.com/libp2p/go-nat v0.2.0 // indirect @@ -158,7 +165,7 @@ require ( golang.org/x/sys v0.21.0 // indirect golang.org/x/text v0.16.0 // indirect golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect - gonum.org/v1/gonum v0.11.0 // indirect + gonum.org/v1/gonum v0.13.0 // indirect google.golang.org/grpc v1.58.2 lukechampine.com/blake3 v1.2.1 // indirect ) diff --git a/node/go.sum b/node/go.sum index bb3cb2f..6c18a54 100644 --- a/node/go.sum +++ 
b/node/go.sum @@ -9,8 +9,6 @@ dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D filippo.io/edwards25519 v1.0.0-rc.1 h1:m0VOOB23frXZvAOK44usCgLWvtsxIoMCTBGJZlpmGfU= filippo.io/edwards25519 v1.0.0-rc.1/go.mod h1:N1IkdkCkiLB6tki+MYJoSx2JTY9NUlxZE7eHn5EwJns= git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= -github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96 h1:cTp8I5+VIoKjsnZuH8vjyaysT/ses3EvZeaV/1UkF2M= -github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= @@ -39,14 +37,12 @@ github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7 github.com/bwesterb/go-ristretto v1.2.3 h1:1w53tCkGhCQ5djbat3+MH0BAQ5Kfgbt56UZQ/JMzngw= github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cloudflare/circl v1.3.3 h1:fE/Qz0QdIGqeWfnwq0RE0R7MI51s0M2E4Ga9kq5AEMs= -github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA= +github.com/cloudflare/circl v1.3.9 h1:QFrlgFYf2Qpi8bSpVPK1HBvWpx16v/1TZivyo7pGuBE= +github.com/cloudflare/circl v1.3.9/go.mod h1:PDRU+oXvdD7KCtgKxW95M5Z8BpSCJXQORiZFnBQS5QU= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/consensys/bavard v0.1.8-0.20210915155054-088da2f7f54a/go.mod h1:9ItSMtA/dXMAiL7BG6bqW2m3NdSEObYWoH223nGHukI= github.com/consensys/gnark-crypto v0.5.3 h1:4xLFGZR3NWEH2zy+YzvzHicpToQR8FXFbfLNvpGB+rE= @@ -71,14 +67,9 @@ github.com/decred/dcrd/crypto/blake256 v1.0.1/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPc github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 h1:rpfIENRNNilwHwZeG5+P150SMrnNEcHYvcCuK6dPZSg= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0= github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218= -github.com/dgraph-io/badger v1.6.2 h1:mNw0qs90GVgGGWylh0umH5iag1j6n/PeJtNvL6KY/x8= -github.com/dgraph-io/badger v1.6.2/go.mod h1:JW2yswe3V058sS0kZ2h/AXeDSqFjxnZcRrVH//y2UQE= -github.com/dgraph-io/ristretto v0.0.2 h1:a5WaUrDa0qm0YrAAS1tUykT5El3kt62KNZZeMxQn3po= -github.com/dgraph-io/ristretto v0.0.2/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= 
-github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/elastic/gosigar v0.12.0/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= github.com/elastic/gosigar v0.14.2 h1:Dg80n8cr90OZ7x+bAax/QjoW/XqTI11RmA79ZwIm9/4= @@ -136,8 +127,6 @@ github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= -github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -147,6 +136,7 @@ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= @@ -183,8 +173,6 @@ github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+l github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= -github.com/hashicorp/golang-lru/arc/v2 v2.0.7 h1:QxkVTxwColcduO+LP7eJO56r2hFiG8zEbfAAzRv52KQ= -github.com/hashicorp/golang-lru/arc/v2 v2.0.7/go.mod h1:Pe7gBlGdc8clY5LJ0LpJXMt5AmgmWNH1g+oFFVUHOEc= github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= @@ -192,18 +180,15 @@ github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc= github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8= github.com/iden3/go-iden3-crypto v0.0.16 h1:zN867xiz6HgErXVIV/6WyteGcOukE9gybYTorBMEdsk= github.com/iden3/go-iden3-crypto v0.0.16/go.mod h1:dLpM4vEPJ3nDHzhWFXDjzkn1qHoBeOT/3UEhXsEsP3E= -github.com/ipfs/boxo v0.8.0 h1:UdjAJmHzQHo/j3g3b1bAcAXCj/GM6iTwvSlBDvPBNBs= -github.com/ipfs/boxo v0.8.0/go.mod h1:RIsi4CnTyQ7AUsNn5gXljJYZlQrHBMnJp94p73liFiA= +github.com/ipfs/boxo v0.10.0 h1:tdDAxq8jrsbRkYoF+5Rcqyeb91hgWe2hp7iLu7ORZLY= +github.com/ipfs/boxo v0.10.0/go.mod h1:Fg+BnfxZ0RPzR0nOodzdIq3A7KgoWAOWsEIImrIQdBM= +github.com/ipfs/go-cid v0.0.7/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I= github.com/ipfs/go-cid v0.4.1 
h1:A/T3qGvxi4kpKWWcPC/PgbvDA2bjVLO7n4UeVwnbs/s= github.com/ipfs/go-cid v0.4.1/go.mod h1:uQHwDeX4c6CtyrFwdqyhpNcxVewur1M7l7fNU7LKwZk= github.com/ipfs/go-datastore v0.6.0 h1:JKyz+Gvz1QEZw0LsX1IBn+JFCJQH4SJVFtM4uWU0Myk= github.com/ipfs/go-datastore v0.6.0/go.mod h1:rt5M3nNbSO/8q1t4LNkLyUwRs8HupMeN/8O4Vn9YAT8= github.com/ipfs/go-detect-race v0.0.1 h1:qX/xay2W3E4Q1U7d9lNs1sU9nvguX0a7319XbyQ6cOk= github.com/ipfs/go-detect-race v0.0.1/go.mod h1:8BNT7shDZPo99Q74BpGMK+4D8Mn4j46UU0LZ723meps= -github.com/ipfs/go-ds-badger v0.3.0 h1:xREL3V0EH9S219kFFueOYJJTcjgNSZ2HY1iSvN7U1Ro= -github.com/ipfs/go-ds-badger v0.3.0/go.mod h1:1ke6mXNqeV8K3y5Ak2bAA0osoTfmxUdupVCGm4QUIek= -github.com/ipfs/go-ds-leveldb v0.5.0 h1:s++MEBbD3ZKc9/8/njrn4flZLnCuY9I79v94gBUNumo= -github.com/ipfs/go-ds-leveldb v0.5.0/go.mod h1:d3XG9RUDzQ6V4SHi8+Xgj9j1XuEk1z82lquxrVbml/Q= github.com/ipfs/go-ipfs-util v0.0.2 h1:59Sswnk1MFaiq+VcaknX7aYEyGyGDAA73ilhEK2POp8= github.com/ipfs/go-ipfs-util v0.0.2/go.mod h1:CbPtkWJzjLdEcezDns2XYaehFVNXG9zrdrtMecczcsQ= github.com/ipfs/go-log v1.0.5 h1:2dOuUCB1Z7uoczMWgAyDck5JLb72zHzrMnGnCNNbvY8= @@ -234,6 +219,10 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU= github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= +github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= +github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= +github.com/klauspost/cpuid/v2 v2.2.3/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= +github.com/klauspost/cpuid/v2 v2.2.6/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= github.com/klauspost/cpuid/v2 v2.2.7 h1:ZWSB3igEs+d0qvnxR/ZBzXVmxkgt8DdzP6m9pfuVLDM= github.com/klauspost/cpuid/v2 v2.2.7/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= github.com/koron/go-ssdp v0.0.4 h1:1IDwrghSKYM7yLf7XCzbByg2sJ/JcNOZRXS2jczTwz0= @@ -257,12 +246,12 @@ github.com/libp2p/go-flow-metrics v0.1.0 h1:0iPhMI8PskQwzh57jB9WxIuIOQ0r+15PChFG github.com/libp2p/go-flow-metrics v0.1.0/go.mod h1:4Xi8MX8wj5aWNDAZttg6UPmc0ZrnFNsMtpsYUClFtro= github.com/libp2p/go-libp2p-asn-util v0.4.1 h1:xqL7++IKD9TBFMgnLPZR6/6iYhawHKHl950SO9L6n94= github.com/libp2p/go-libp2p-asn-util v0.4.1/go.mod h1:d/NI6XZ9qxw67b4e+NgpQexCIiFYJjErASrYW4PFDN8= -github.com/libp2p/go-libp2p-kad-dht v0.23.0 h1:sxE6LxLopp79eLeV695n7+c77V/Vn4AMF28AdM/XFqM= -github.com/libp2p/go-libp2p-kad-dht v0.23.0/go.mod h1:oO5N308VT2msnQI6qi5M61wzPmJYg7Tr9e16m5n7uDU= -github.com/libp2p/go-libp2p-kbucket v0.5.0 h1:g/7tVm8ACHDxH29BGrpsQlnNeu+6OF1A9bno/4/U1oA= -github.com/libp2p/go-libp2p-kbucket v0.5.0/go.mod h1:zGzGCpQd78b5BNTDGHNDLaTt9aDK/A02xeZp9QeFC4U= +github.com/libp2p/go-libp2p-kbucket v0.6.3 h1:p507271wWzpy2f1XxPzCQG9NiN6R6lHL9GiSErbQQo0= +github.com/libp2p/go-libp2p-kbucket v0.6.3/go.mod h1:RCseT7AH6eJWxxk2ol03xtP9pEHetYSPXOaJnOiD8i0= github.com/libp2p/go-libp2p-record v0.2.0 h1:oiNUOCWno2BFuxt3my4i1frNrt7PerzB3queqa1NkQ0= github.com/libp2p/go-libp2p-record v0.2.0/go.mod h1:I+3zMkvvg5m2OcSdoL0KPljyJyvNDFGKX7QdlpYUcwk= +github.com/libp2p/go-libp2p-routing-helpers v0.7.2 h1:xJMFyhQ3Iuqnk9Q2dYE1eUTzsah7NLw3Qs2zjUV78T0= +github.com/libp2p/go-libp2p-routing-helpers v0.7.2/go.mod h1:cN4mJAD/7zfPKXBcs9ze31JGYAZgzdABEm+q/hkswb8= github.com/libp2p/go-libp2p-testing v0.12.0 
h1:EPvBb4kKMWO29qP4mZGyhVzUyR25dvfUIK5WDu6iPUA= github.com/libp2p/go-libp2p-testing v0.12.0/go.mod h1:KcGDRXyN7sQCllucn1cOOS+Dmm7ujhfEyXQL5lvkcPg= github.com/libp2p/go-msgio v0.3.0 h1:mf3Z8B1xcFN314sWX+2vOTShIE0Mmn2TXn3YCUQGNj0= @@ -284,7 +273,6 @@ github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWE github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4= -github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= github.com/miekg/dns v1.1.58 h1:ca2Hdkz+cDg/7eNF6V56jjzuZ4aCAE+DbVkILdQWG/4= github.com/miekg/dns v1.1.58/go.mod h1:Ypv+3b/KadlvW9vJfXOTf300O4UqaHFzFCuHz+rPkBY= github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c h1:bzE/A84HN25pxAuk9Eej1Kz9OUelF97nAc82bDquQI8= @@ -295,38 +283,35 @@ github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc h1:PTfri+PuQmWDqERdn github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc/go.mod h1:cGKTAVKx4SxOuR/czcZ/E2RSJ3sfHs8FpHhQ5CWMf9s= github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ= github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= +github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM= github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM= github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= +github.com/mr-tron/base58 v1.1.0/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= github.com/mr-tron/base58 v1.1.3/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o= github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= +github.com/multiformats/go-base32 v0.0.3/go.mod h1:pLiuGC8y0QR3Ue4Zug5UzK9LjgbkL8NSQj0zQ5Nz/AA= github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE= github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI= +github.com/multiformats/go-base36 v0.1.0/go.mod h1:kFGE83c6s80PklsHO9sRn2NCoffoRdUUOENyW/Vv6sM= github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0= github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4= -github.com/multiformats/go-multiaddr v0.1.1/go.mod h1:aMKBKNEYmzmDmxfX88/vz+J5IU55txyt0p4aiWVohjo= -github.com/multiformats/go-multiaddr v0.2.0/go.mod h1:0nO36NvPpyV4QzvTLi/lafl2y95ncPj0vFwVF6k6wJ4= -github.com/multiformats/go-multiaddr v0.12.4 h1:rrKqpY9h+n80EwhhC/kkcunCZZ7URIF8yN1WEUt2Hvc= -github.com/multiformats/go-multiaddr v0.12.4/go.mod h1:sBXrNzucqkFJhvKOiwwLyqamGa/P5EIXNPLovyhQCII= -github.com/multiformats/go-multiaddr-dns v0.3.1 h1:QgQgR+LQVt3NPTjbrLLpsaT2ufAA2y0Mkk+QRVJbW3A= -github.com/multiformats/go-multiaddr-dns v0.3.1/go.mod h1:G/245BRQ6FJGmryJCrOuTdB37AMA5AMOVuO6NY3JwTk= github.com/multiformats/go-multiaddr-fmt 
v0.1.0 h1:WLEFClPycPkp4fnIzoFoV9FVd49/eQsuaL3/CWe167E= github.com/multiformats/go-multiaddr-fmt v0.1.0/go.mod h1:hGtDIW4PU4BqJ50gW2quDuPVjyWNZxToGUh/HwTZYJo= +github.com/multiformats/go-multibase v0.0.3/go.mod h1:5+1R4eQrT3PkYZ24C3W2Ue2tPwIdYQD509ZjSb5y9Oc= github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g= github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk= github.com/multiformats/go-multicodec v0.9.0 h1:pb/dlPnzee/Sxv/j4PmkDRxCOi3hXTz3IbPKOXWJkmg= github.com/multiformats/go-multicodec v0.9.0/go.mod h1:L3QTQvMIaVBkXOXXtVmYE+LI16i14xuaojr/H7Ai54k= -github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= github.com/multiformats/go-multihash v0.0.13/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U= github.com/multiformats/go-multihash v0.2.3/go.mod h1:dXgKXCXjBzdscBLk9JkjINiEsCKRVch90MdaGiKsvSM= github.com/multiformats/go-multistream v0.5.0 h1:5htLSLl7lvJk3xx3qT/8Zm9J4K8vEOf/QGkvOGQAyiE= github.com/multiformats/go-multistream v0.5.0/go.mod h1:n6tMZiwiP2wUsR8DgfDWw1dydlEqV3l6N3/GBsX6ILA= -github.com/multiformats/go-varint v0.0.1/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= github.com/multiformats/go-varint v0.0.5/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= +github.com/multiformats/go-varint v0.0.6/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8= github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU= github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= @@ -473,8 +458,6 @@ github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXl github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY= -github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli v1.22.10/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= @@ -491,10 +474,12 @@ github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5t go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/otel v1.14.0 h1:/79Huy8wbf5DnIPhemGB+zEPVwnN6fuQybr/SRXa6hM= -go.opentelemetry.io/otel v1.14.0/go.mod h1:o4buv+dJzx8rohcUeRmWUZhqupFvzWis188WlggnNeU= -go.opentelemetry.io/otel/trace v1.14.0 h1:wp2Mmvj41tDsyAJXiWDWpfNsOiIyd38fy85pyKcFq/M= -go.opentelemetry.io/otel/trace v1.14.0/go.mod h1:8avnQLK+CG77yNLUae4ea2JDQ6iT+gozhnZjy/rw9G8= +go.opentelemetry.io/otel v1.16.0 h1:Z7GVAX/UkAXPKsy94IU+i6thsQS4nb7LviLpnaNeW8s= +go.opentelemetry.io/otel v1.16.0/go.mod 
h1:vl0h9NUa1D5s1nv3A5vZOYWn8av4K8Ml6JDeHrT/bx4= +go.opentelemetry.io/otel/metric v1.16.0 h1:RbrpwVG1Hfv85LgnZ7+txXioPDoh6EdbZHo26Q3hqOo= +go.opentelemetry.io/otel/metric v1.16.0/go.mod h1:QE47cpOmkwipPiefDwo2wDzwJrlfxxNYodqc4xnGCo4= +go.opentelemetry.io/otel/trace v1.16.0 h1:8JRpaObFoW0pxuVPapkgH8UhHQj+bJW8jJsCZEu5MQs= +go.opentelemetry.io/otel/trace v1.16.0/go.mod h1:Yt9vYq1SdNz3xdjZZK7wcXv1qv2pwLkqr2QVwea0ef0= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/dig v1.17.1 h1:Tga8Lz8PcYNsWsyHMZ1Vm0OQOUaJNDyvPImgbAu9YSc= @@ -530,6 +515,8 @@ golang.org/x/crypto v0.0.0-20200602180216-279210d13fed/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE= golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= @@ -539,6 +526,7 @@ golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOM golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI= golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20230725012225-302865e7556b/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc= golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 h1:vr/HnozRka3pE4EsMEg1lgkXJkTFJCVUX+S/ZT6wYzM= golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842/go.mod h1:XtvwrStGgqGPLc4cjQfWqZHG1YFdYs6swckp8vpsjnc= golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -553,7 +541,9 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.11.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -574,7 +564,9 @@ golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net 
v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= @@ -614,14 +606,16 @@ golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210420205809-ac73e9fd8988/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -636,6 +630,7 @@ golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= @@ -647,7 +642,9 @@ golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod 
h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= @@ -678,6 +675,7 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg= golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= @@ -685,8 +683,8 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -gonum.org/v1/gonum v0.11.0 h1:f1IJhK4Km5tBJmaiJXtk/PkL4cdVX6J+tGiM187uT5E= -gonum.org/v1/gonum v0.11.0/go.mod h1:fSG4YDCxxUZQJ7rKsQrj0gMOg00Il0Z96/qMA4bVQhA= +gonum.org/v1/gonum v0.13.0 h1:a0T3bh+7fhRyqeNbiC3qVHYmkiQgit3wnNan/2c0HMM= +gonum.org/v1/gonum v0.13.0/go.mod h1:/WPYRckkfWrhWefxyYTfrTtQR0KH4iyHNuzxqXAKyAU= google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y= @@ -746,6 +744,7 @@ gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= @@ -754,6 +753,7 @@ honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +lukechampine.com/blake3 v1.1.6/go.mod h1:tkKEOtDkNtklkXtLNEOGNq5tcV90tJiA1vAA12R78LA= lukechampine.com/blake3 v1.2.1 
h1:YuqqRuaqsGV71BV/nm9xlI0MKUv4QC54jQnBChWbGnI= lukechampine.com/blake3 v1.2.1/go.mod h1:0OFRp7fBtAylGVCO40o87sbupkyIGgbpv1+M1k1LM6k= rsc.io/tmplfunc v0.0.3/go.mod h1:AG3sTPzElb1Io3Yg4voV9AGZJuleGAwaVRxL9M49PhA=
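
Note on the varint hunk that opens this section: the change ends a decode helper that returns varint.ErrOverflow instead of panicking when a decoded code cannot be safely narrowed to int, consistent with this commit's goal of removing unnecessary panics. The following standalone sketch is not the patched go-multiaddr source; readVarintCode and the math.MaxInt bound are illustrative assumptions, but varint.FromUvarint and varint.ErrOverflow are the real public API of github.com/multiformats/go-varint:

package main

import (
	"fmt"
	"math"

	"github.com/multiformats/go-varint"
)

// readVarintCode is a hypothetical helper mirroring the contract visible in
// the diff: decode a uvarint-encoded code and return the code, the number of
// bytes consumed, and an error rather than panicking on bad input.
func readVarintCode(buf []byte) (int, int, error) {
	code, n, err := varint.FromUvarint(buf)
	if err != nil {
		return 0, 0, err
	}
	// Guard before narrowing uint64 -> int; this bound is an assumption,
	// the patched function may check differently.
	if code > math.MaxInt {
		return 0, 0, varint.ErrOverflow
	}
	return int(code), n, nil
}

func main() {
	code, n, err := readVarintCode([]byte{0x80, 0x01}) // uvarint for 128
	fmt.Println(code, n, err)                          // 128 2 <nil>
}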
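
For readers unfamiliar with the node/go.mod hunk earlier in this section: Go replace directives rewrite module resolution so builds use the forked trees vendored as sibling directories in this monorepo rather than the published upstream releases, which is how the patch can ship fixes to go-multiaddr, go-multiaddr-dns, and go-libp2p-kad-dht without cutting new upstream versions. A minimal go.mod illustrating the pattern (the consumer module path is a placeholder; v0.13.0 matches the version.json added above):

module example.com/consumer

go 1.22

require github.com/multiformats/go-multiaddr v0.13.0

// Resolve the dependency from a sibling directory in the same checkout;
// for a filesystem replacement the required version above is superseded.
replace github.com/multiformats/go-multiaddr => ../go-multiaddr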