Mirror of https://source.quilibrium.com/quilibrium/ceremonyclient.git (synced 2024-11-14 20:25:17 +00:00)
fix: remove unnecessary panics, recovers, and defers; also fix some weird things that have been tragically broken in libp2p because nobody ever knew what was wrong
parent 6b6cb1852f
commit a007452f3e
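To make the commit message concrete, here is a generic, hedged sketch of the panic/recover/defer pattern being removed in favor of plain error returns; the function names are hypothetical and not taken from the diff below.

```go
package example

import "strconv"

// Before: a defer/recover pair plus a panic hides the failure path and
// silently falls back to the zero value when parsing fails.
func parsePeerCount(s string) int {
	defer func() {
		if r := recover(); r != nil {
			// swallow the panic; the function returns 0
		}
	}()
	n, err := strconv.Atoi(s)
	if err != nil {
		panic(err)
	}
	return n
}

// After: the error is simply returned to the caller.
func parsePeerCountFixed(s string) (int, error) {
	return strconv.Atoi(s)
}
```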
@@ -4,17 +4,19 @@ go 1.21

toolchain go1.22.4

replace github.com/multiformats/go-multiaddr => ../go-multiaddr

replace github.com/multiformats/go-multiaddr-dns => ../go-multiaddr-dns

replace github.com/libp2p/go-libp2p => ../go-libp2p

replace github.com/libp2p/go-libp2p-gostream => ../go-libp2p-gostream

require (
	github.com/benbjohnson/clock v1.3.5
	github.com/gogo/protobuf v1.3.2
	github.com/ipfs/go-log/v2 v2.5.1
	github.com/libp2p/go-buffer-pool v0.1.0
	github.com/libp2p/go-libp2p v0.35.4
	github.com/libp2p/go-libp2p-gostream v0.6.0
	github.com/libp2p/go-libp2p-testing v0.12.0
	github.com/libp2p/go-msgio v0.3.0
	github.com/multiformats/go-multiaddr v0.12.4
@@ -25,7 +27,7 @@ require (
require (
	github.com/beorn7/perks v1.0.1 // indirect
	github.com/cespare/xxhash/v2 v2.2.0 // indirect
	github.com/cloudflare/circl v1.3.3 // indirect
	github.com/cloudflare/circl v1.3.9 // indirect
	github.com/containerd/cgroups v1.1.0 // indirect
	github.com/coreos/go-systemd/v22 v22.5.0 // indirect
	github.com/davecgh/go-spew v1.1.1 // indirect
@@ -37,22 +39,19 @@ require (
	github.com/francoispqt/gojay v1.2.13 // indirect
	github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect
	github.com/godbus/dbus/v5 v5.1.0 // indirect
	github.com/golang/mock v1.6.0 // indirect
	github.com/golang/protobuf v1.5.3 // indirect
	github.com/gogo/protobuf v1.3.2 // indirect
	github.com/google/gopacket v1.1.19 // indirect
	github.com/google/pprof v0.0.0-20240207164012-fb44976bdcd5 // indirect
	github.com/google/uuid v1.4.0 // indirect
	github.com/gorilla/websocket v1.5.3 // indirect
	github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
	github.com/huin/goupnp v1.3.0 // indirect
	github.com/iden3/go-iden3-crypto v0.0.16 // indirect
	github.com/ipfs/go-cid v0.4.1 // indirect
	github.com/jackpal/go-nat-pmp v1.0.2 // indirect
	github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect
	github.com/klauspost/compress v1.17.8 // indirect
	github.com/klauspost/cpuid/v2 v2.2.7 // indirect
	github.com/koron/go-ssdp v0.0.4 // indirect
	github.com/kr/pretty v0.3.1 // indirect
	github.com/libp2p/go-flow-metrics v0.1.0 // indirect
	github.com/libp2p/go-libp2p-asn-util v0.4.1 // indirect
	github.com/libp2p/go-nat v0.2.0 // indirect
@@ -61,7 +60,6 @@ require (
	github.com/libp2p/go-yamux/v4 v4.0.1 // indirect
	github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect
	github.com/mattn/go-isatty v0.0.20 // indirect
	github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
	github.com/miekg/dns v1.1.58 // indirect
	github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect
	github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect
@@ -106,7 +104,6 @@ require (
	github.com/raulk/go-watchdog v1.3.0 // indirect
	github.com/spaolacci/murmur3 v1.1.0 // indirect
	github.com/stretchr/testify v1.9.0 // indirect
	go.uber.org/atomic v1.10.0 // indirect
	go.uber.org/dig v1.17.1 // indirect
	go.uber.org/fx v1.22.1 // indirect
	go.uber.org/mock v0.4.0 // indirect
@ -10,7 +10,6 @@ git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGy
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
|
||||
github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
|
||||
github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A=
|
||||
github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
|
||||
github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o=
|
||||
github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
|
||||
@ -23,11 +22,9 @@ github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj
|
||||
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/cloudflare/circl v1.3.3 h1:fE/Qz0QdIGqeWfnwq0RE0R7MI51s0M2E4Ga9kq5AEMs=
|
||||
github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA=
|
||||
github.com/cloudflare/circl v1.3.9 h1:QFrlgFYf2Qpi8bSpVPK1HBvWpx16v/1TZivyo7pGuBE=
|
||||
github.com/cloudflare/circl v1.3.9/go.mod h1:PDRU+oXvdD7KCtgKxW95M5Z8BpSCJXQORiZFnBQS5QU=
|
||||
github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE=
|
||||
github.com/containerd/cgroups v1.0.4 h1:jN/mbWBEaz+T1pi5OFtnkQ+8qnmEbAr1Oo1FRm5B0dA=
|
||||
github.com/containerd/cgroups v1.0.4/go.mod h1:nLNQtsF7Sl2HxNebu77i1R0oDlhiTG+kO4JTrUzo6IA=
|
||||
github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM=
|
||||
github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw=
|
||||
github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
||||
@ -36,15 +33,13 @@ github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8
|
||||
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
|
||||
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c h1:pFUpOrbxDR6AkioZ1ySsx5yxlDQZ8stG2b88gTPxgJU=
|
||||
github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c/go.mod h1:6UhI8N9EjYm1c2odKpFpAYeR8dsBeM7PtzQhRgxRr9U=
|
||||
github.com/decred/dcrd/crypto/blake256 v1.0.0 h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK09Y2A4Xv7EE0=
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 h1:HbphB4TFFXpv7MNrT52FGrrgVXF1owhMVTHFZIlnvd4=
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0/go.mod h1:DZGJHZMqrU4JJqFAWUS2UO1+lbSKsdiOoYi9Zzey7Fc=
|
||||
github.com/decred/dcrd/crypto/blake256 v1.0.1 h1:7PltbUIQB7u/FfZ39+DGa/ShuMyJ5ilcvdfma9wOH6Y=
|
||||
github.com/decred/dcrd/crypto/blake256 v1.0.1/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo=
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 h1:rpfIENRNNilwHwZeG5+P150SMrnNEcHYvcCuK6dPZSg=
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0=
|
||||
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
|
||||
@ -55,7 +50,6 @@ github.com/elastic/gosigar v0.12.0/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0
|
||||
github.com/elastic/gosigar v0.14.2 h1:Dg80n8cr90OZ7x+bAax/QjoW/XqTI11RmA79ZwIm9/4=
|
||||
github.com/elastic/gosigar v0.14.2/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs=
|
||||
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
|
||||
github.com/flynn/noise v1.0.0 h1:DlTHqmzmvcEiKj+4RYo/imoswx/4r6iBlCMfVtrMXpQ=
|
||||
github.com/flynn/noise v1.1.0 h1:KjPQoQCEFdZDiP03phOvGi11+SVVhBG2wOWAorLsstg=
|
||||
github.com/flynn/noise v1.1.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag=
|
||||
github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk=
|
||||
@ -64,9 +58,8 @@ github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMo
|
||||
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
||||
github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
|
||||
github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
|
||||
github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0=
|
||||
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I=
|
||||
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
|
||||
github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY=
|
||||
github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
|
||||
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
|
||||
github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
|
||||
@ -81,28 +74,22 @@ github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfU
|
||||
github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E=
|
||||
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc=
|
||||
github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
|
||||
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
|
||||
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
|
||||
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
||||
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
|
||||
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
||||
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
|
||||
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
|
||||
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
|
||||
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
|
||||
github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8=
|
||||
github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo=
|
||||
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
|
||||
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||
github.com/google/pprof v0.0.0-20221203041831-ce31453925ec h1:fR20TYVVwhK4O7r7y+McjRYyaTH6/vjwJOajE+XhlzM=
|
||||
github.com/google/pprof v0.0.0-20221203041831-ce31453925ec/go.mod h1:dDKJzRmX4S37WGHujM7tX//fmj1uioxKzKxz3lo4HJo=
|
||||
github.com/google/pprof v0.0.0-20240207164012-fb44976bdcd5 h1:E/LAvt58di64hlYjx7AsNS6C/ysHWYo+2qPCZKTQhRo=
|
||||
github.com/google/pprof v0.0.0-20240207164012-fb44976bdcd5/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik=
|
||||
github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
@ -115,16 +102,11 @@ github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aN
|
||||
github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
|
||||
github.com/hashicorp/golang-lru/v2 v2.0.1 h1:5pv5N1lT1fjLg2VQ5KWc7kmucp2x/kvFOnxuVTqZ6x4=
|
||||
github.com/hashicorp/golang-lru/v2 v2.0.1/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
|
||||
github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
|
||||
github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
|
||||
github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc=
|
||||
github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8=
|
||||
github.com/iden3/go-iden3-crypto v0.0.16 h1:zN867xiz6HgErXVIV/6WyteGcOukE9gybYTorBMEdsk=
|
||||
github.com/iden3/go-iden3-crypto v0.0.16/go.mod h1:dLpM4vEPJ3nDHzhWFXDjzkn1qHoBeOT/3UEhXsEsP3E=
|
||||
github.com/ipfs/go-cid v0.3.2 h1:OGgOd+JCFM+y1DjWPmVH+2/4POtpDzwcr7VgnB7mZXc=
|
||||
github.com/ipfs/go-cid v0.3.2/go.mod h1:gQ8pKqT/sUxGY+tIwy1RPpAojYu7jAyCp5Tz1svoupw=
|
||||
github.com/ipfs/go-cid v0.0.7/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I=
|
||||
github.com/ipfs/go-cid v0.4.1 h1:A/T3qGvxi4kpKWWcPC/PgbvDA2bjVLO7n4UeVwnbs/s=
|
||||
github.com/ipfs/go-cid v0.4.1/go.mod h1:uQHwDeX4c6CtyrFwdqyhpNcxVewur1M7l7fNU7LKwZk=
|
||||
github.com/ipfs/go-log/v2 v2.5.1 h1:1XdUzF7048prq4aBjDQQ4SL5RxftpRGdXhNRwKSAlcY=
|
||||
@ -139,14 +121,12 @@ github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1
|
||||
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
|
||||
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/klauspost/compress v1.15.15 h1:EF27CXIuDsYJ6mmvtBRlEuB2UVOqHG1tAXgZ7yIO+lw=
|
||||
github.com/klauspost/compress v1.15.15/go.mod h1:ZcK2JAFqKOpnBlxcLsJzYfrS9X1akm9fHZNnD9+Vo/4=
|
||||
github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU=
|
||||
github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
|
||||
github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
|
||||
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
|
||||
github.com/klauspost/cpuid/v2 v2.2.1 h1:U33DW0aiEj633gHYw3LoDNfkDiYnE5Q8M/TKJn2f2jI=
|
||||
github.com/klauspost/cpuid/v2 v2.2.1/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY=
|
||||
github.com/klauspost/cpuid/v2 v2.2.3/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY=
|
||||
github.com/klauspost/cpuid/v2 v2.2.6/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
|
||||
github.com/klauspost/cpuid/v2 v2.2.7 h1:ZWSB3igEs+d0qvnxR/ZBzXVmxkgt8DdzP6m9pfuVLDM=
|
||||
github.com/klauspost/cpuid/v2 v2.2.7/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
|
||||
github.com/koron/go-ssdp v0.0.4 h1:1IDwrghSKYM7yLf7XCzbByg2sJ/JcNOZRXS2jczTwz0=
|
||||
@ -164,10 +144,6 @@ github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6
|
||||
github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg=
|
||||
github.com/libp2p/go-flow-metrics v0.1.0 h1:0iPhMI8PskQwzh57jB9WxIuIOQ0r+15PChFGkx3Q3WM=
|
||||
github.com/libp2p/go-flow-metrics v0.1.0/go.mod h1:4Xi8MX8wj5aWNDAZttg6UPmc0ZrnFNsMtpsYUClFtro=
|
||||
github.com/libp2p/go-libp2p v0.25.0 h1:ND6Hc6ZYCzC8S++C4mOD7LdPnLXRkNbr12/8FXgUfIo=
|
||||
github.com/libp2p/go-libp2p v0.25.0/go.mod h1:vXHmFpcfl+xIGN4qW58Bw3a0/SKGAesr5/T4IuJHE3o=
|
||||
github.com/libp2p/go-libp2p v0.35.4 h1:FDiBUYLkueFwsuNJUZaxKRdpKvBOWU64qQPL768bSeg=
|
||||
github.com/libp2p/go-libp2p v0.35.4/go.mod h1:RKCDNt30IkFipGL0tl8wQW/3zVWEGFUZo8g2gAKxwjU=
|
||||
github.com/libp2p/go-libp2p-asn-util v0.4.1 h1:xqL7++IKD9TBFMgnLPZR6/6iYhawHKHl950SO9L6n94=
|
||||
github.com/libp2p/go-libp2p-asn-util v0.4.1/go.mod h1:d/NI6XZ9qxw67b4e+NgpQexCIiFYJjErASrYW4PFDN8=
|
||||
github.com/libp2p/go-libp2p-testing v0.12.0 h1:EPvBb4kKMWO29qP4mZGyhVzUyR25dvfUIK5WDu6iPUA=
|
||||
@ -178,12 +154,8 @@ github.com/libp2p/go-nat v0.2.0 h1:Tyz+bUFAYqGyJ/ppPPymMGbIgNRH+WqC5QrT5fKrrGk=
|
||||
github.com/libp2p/go-nat v0.2.0/go.mod h1:3MJr+GRpRkyT65EpVPBstXLvOlAPzUVlG6Pwg9ohLJk=
|
||||
github.com/libp2p/go-netroute v0.2.1 h1:V8kVrpD8GK0Riv15/7VN6RbUQ3URNZVosw7H2v9tksU=
|
||||
github.com/libp2p/go-netroute v0.2.1/go.mod h1:hraioZr0fhBjG0ZRXJJ6Zj2IVEVNx6tDTFQfSmcq7mQ=
|
||||
github.com/libp2p/go-reuseport v0.2.0 h1:18PRvIMlpY6ZK85nIAicSBuXXvrYoSw3dsBAR7zc560=
|
||||
github.com/libp2p/go-reuseport v0.2.0/go.mod h1:bvVho6eLMm6Bz5hmU0LYN3ixd3nPPvtIlaURZZgOY4k=
|
||||
github.com/libp2p/go-reuseport v0.4.0 h1:nR5KU7hD0WxXCJbmw7r2rhRYruNRl2koHw8fQscQm2s=
|
||||
github.com/libp2p/go-reuseport v0.4.0/go.mod h1:ZtI03j/wO5hZVDFo2jKywN6bYKWLOy8Se6DrI2E1cLU=
|
||||
github.com/libp2p/go-yamux/v4 v4.0.0 h1:+Y80dV2Yx/kv7Y7JKu0LECyVdMXm1VUoko+VQ9rBfZQ=
|
||||
github.com/libp2p/go-yamux/v4 v4.0.0/go.mod h1:NWjl8ZTLOGlozrXSOZ/HlfG++39iKNnM5wwmtQP1YB4=
|
||||
github.com/libp2p/go-yamux/v4 v4.0.1 h1:FfDR4S1wj6Bw2Pqbc8Uz7pCxeRBPbwsBbEdfwiCypkQ=
|
||||
github.com/libp2p/go-yamux/v4 v4.0.1/go.mod h1:NWjl8ZTLOGlozrXSOZ/HlfG++39iKNnM5wwmtQP1YB4=
|
||||
github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI=
|
||||
@ -191,17 +163,11 @@ github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN
|
||||
github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8u83wA0rVZ8ttrq5CpaPZdvrK0LP2lOk=
|
||||
github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU=
|
||||
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
|
||||
github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ=
|
||||
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
|
||||
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
|
||||
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
|
||||
github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4=
|
||||
github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI=
|
||||
github.com/miekg/dns v1.1.50 h1:DQUfb9uc6smULcREF09Uc+/Gd46YWqJd5DbpPE9xkcA=
|
||||
github.com/miekg/dns v1.1.50/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME=
|
||||
github.com/miekg/dns v1.1.58 h1:ca2Hdkz+cDg/7eNF6V56jjzuZ4aCAE+DbVkILdQWG/4=
|
||||
github.com/miekg/dns v1.1.58/go.mod h1:Ypv+3b/KadlvW9vJfXOTf300O4UqaHFzFCuHz+rPkBY=
|
||||
github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c h1:bzE/A84HN25pxAuk9Eej1Kz9OUelF97nAc82bDquQI8=
|
||||
@ -212,57 +178,45 @@ github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc h1:PTfri+PuQmWDqERdn
|
||||
github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc/go.mod h1:cGKTAVKx4SxOuR/czcZ/E2RSJ3sfHs8FpHhQ5CWMf9s=
|
||||
github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ=
|
||||
github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM=
|
||||
github.com/minio/sha256-simd v1.0.0 h1:v1ta+49hkWZyvaKwrQB8elexRqm6Y0aMLjCNsrYxo6g=
|
||||
github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM=
|
||||
github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM=
|
||||
github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
|
||||
github.com/mr-tron/base58 v1.1.0/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8=
|
||||
github.com/mr-tron/base58 v1.1.3/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
|
||||
github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o=
|
||||
github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
|
||||
github.com/multiformats/go-base32 v0.0.3/go.mod h1:pLiuGC8y0QR3Ue4Zug5UzK9LjgbkL8NSQj0zQ5Nz/AA=
|
||||
github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE=
|
||||
github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI=
|
||||
github.com/multiformats/go-base36 v0.1.0/go.mod h1:kFGE83c6s80PklsHO9sRn2NCoffoRdUUOENyW/Vv6sM=
|
||||
github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0=
|
||||
github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4=
|
||||
github.com/multiformats/go-multiaddr v0.1.1/go.mod h1:aMKBKNEYmzmDmxfX88/vz+J5IU55txyt0p4aiWVohjo=
|
||||
github.com/multiformats/go-multiaddr v0.2.0/go.mod h1:0nO36NvPpyV4QzvTLi/lafl2y95ncPj0vFwVF6k6wJ4=
|
||||
github.com/multiformats/go-multiaddr v0.8.0 h1:aqjksEcqK+iD/Foe1RRFsGZh8+XFiGo7FgUCZlpv3LU=
|
||||
github.com/multiformats/go-multiaddr v0.8.0/go.mod h1:Fs50eBDWvZu+l3/9S6xAE7ZYj6yhxlvaVZjakWN7xRs=
|
||||
github.com/multiformats/go-multiaddr v0.12.4 h1:rrKqpY9h+n80EwhhC/kkcunCZZ7URIF8yN1WEUt2Hvc=
|
||||
github.com/multiformats/go-multiaddr v0.12.4/go.mod h1:sBXrNzucqkFJhvKOiwwLyqamGa/P5EIXNPLovyhQCII=
|
||||
github.com/multiformats/go-multiaddr-dns v0.3.1 h1:QgQgR+LQVt3NPTjbrLLpsaT2ufAA2y0Mkk+QRVJbW3A=
|
||||
github.com/multiformats/go-multiaddr-dns v0.3.1/go.mod h1:G/245BRQ6FJGmryJCrOuTdB37AMA5AMOVuO6NY3JwTk=
|
||||
github.com/multiformats/go-multiaddr-fmt v0.1.0 h1:WLEFClPycPkp4fnIzoFoV9FVd49/eQsuaL3/CWe167E=
|
||||
github.com/multiformats/go-multiaddr-fmt v0.1.0/go.mod h1:hGtDIW4PU4BqJ50gW2quDuPVjyWNZxToGUh/HwTZYJo=
|
||||
github.com/multiformats/go-multibase v0.1.1 h1:3ASCDsuLX8+j4kx58qnJ4YFq/JWTJpCyDW27ztsVTOI=
|
||||
github.com/multiformats/go-multibase v0.1.1/go.mod h1:ZEjHE+IsUrgp5mhlEAYjMtZwK1k4haNkcaPg9aoe1a8=
|
||||
github.com/multiformats/go-multibase v0.0.3/go.mod h1:5+1R4eQrT3PkYZ24C3W2Ue2tPwIdYQD509ZjSb5y9Oc=
|
||||
github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g=
|
||||
github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk=
|
||||
github.com/multiformats/go-multicodec v0.7.0 h1:rTUjGOwjlhGHbEMbPoSUJowG1spZTVsITRANCjKTUAQ=
|
||||
github.com/multiformats/go-multicodec v0.7.0/go.mod h1:GUC8upxSBE4oG+q3kWZRw/+6yC1BqO550bjhWsJbZlw=
|
||||
github.com/multiformats/go-multicodec v0.9.0 h1:pb/dlPnzee/Sxv/j4PmkDRxCOi3hXTz3IbPKOXWJkmg=
|
||||
github.com/multiformats/go-multicodec v0.9.0/go.mod h1:L3QTQvMIaVBkXOXXtVmYE+LI16i14xuaojr/H7Ai54k=
|
||||
github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew=
|
||||
github.com/multiformats/go-multihash v0.2.1 h1:aem8ZT0VA2nCHHk7bPJ1BjUbHNciqZC/d16Vve9l108=
|
||||
github.com/multiformats/go-multihash v0.2.1/go.mod h1:WxoMcYG85AZVQUyRyo9s4wULvW5qrI9vb2Lt6evduFc=
|
||||
github.com/multiformats/go-multihash v0.0.13/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc=
|
||||
github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U=
|
||||
github.com/multiformats/go-multihash v0.2.3/go.mod h1:dXgKXCXjBzdscBLk9JkjINiEsCKRVch90MdaGiKsvSM=
|
||||
github.com/multiformats/go-multistream v0.4.0 h1:5i4JbawClkbuaX+mIVXiHQYVPxUW+zjv6w7jtSRukxc=
|
||||
github.com/multiformats/go-multistream v0.4.0/go.mod h1:BS6ZSYcA4NwYEaIMeCtpJydp2Dc+fNRA6uJMSu/m8+4=
|
||||
github.com/multiformats/go-multistream v0.5.0 h1:5htLSLl7lvJk3xx3qT/8Zm9J4K8vEOf/QGkvOGQAyiE=
|
||||
github.com/multiformats/go-multistream v0.5.0/go.mod h1:n6tMZiwiP2wUsR8DgfDWw1dydlEqV3l6N3/GBsX6ILA=
|
||||
github.com/multiformats/go-varint v0.0.1/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE=
|
||||
github.com/multiformats/go-varint v0.0.5/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE=
|
||||
github.com/multiformats/go-varint v0.0.6/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE=
|
||||
github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8=
|
||||
github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU=
|
||||
github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo=
|
||||
github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM=
|
||||
github.com/onsi/ginkgo/v2 v2.5.1 h1:auzK7OI497k6x4OvWq+TKAcpcSAlod0doAH72oIN0Jw=
|
||||
github.com/onsi/ginkgo/v2 v2.5.1/go.mod h1:63DOGlLAH8+REH8jUGdL3YpCpu7JODesutUjdENfUAc=
|
||||
github.com/onsi/ginkgo/v2 v2.15.0 h1:79HwNRBAZHOEwrczrgSOPy+eFTTlIGELKy5as+ClttY=
|
||||
github.com/onsi/ginkgo/v2 v2.15.0/go.mod h1:HlxMHtYF57y6Dpf+mc5529KKmSq9h2FpCF+/ZkwUxKM=
|
||||
github.com/onsi/gomega v1.24.0 h1:+0glovB9Jd6z3VR+ScSwQqXVTIfJcGA9UBM8yzQxhqg=
|
||||
github.com/opencontainers/runtime-spec v1.0.2 h1:UfAcuLBJB9Coz72x1hgl8O5RVzTdNiaglX6v2DM6FI0=
|
||||
github.com/onsi/gomega v1.30.0 h1:hvMK7xYz4D3HapigLTeGdId/NcfQx1VHMJc60ew99+8=
|
||||
github.com/onsi/gomega v1.30.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ=
|
||||
github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
|
||||
github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE7dzrbT927iTk=
|
||||
github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
|
||||
@ -306,57 +260,40 @@ github.com/pion/transport/v2 v2.2.4/go.mod h1:q2U/tf9FEfnSBGSW6w5Qp5PFWRLRj3NjLh
|
||||
github.com/pion/transport/v2 v2.2.5 h1:iyi25i/21gQck4hfRhomF6SktmUQjRsRW4WJdhfc3Kc=
|
||||
github.com/pion/transport/v2 v2.2.5/go.mod h1:q2U/tf9FEfnSBGSW6w5Qp5PFWRLRj3NjLhCCgpRK4p0=
|
||||
github.com/pion/transport/v3 v3.0.1/go.mod h1:UY7kiITrlMv7/IKgd5eTUcaahZx5oUN3l9SzK5f5xE0=
|
||||
github.com/pion/transport/v3 v3.0.2 h1:r+40RJR25S9w3jbA6/5uEPTzcdn7ncyU44RWCbHkLg4=
|
||||
github.com/pion/transport/v3 v3.0.2/go.mod h1:nIToODoOlb5If2jF9y2Igfx3PFYWfuXi37m0IlWa/D0=
|
||||
github.com/pion/turn/v2 v2.1.3/go.mod h1:huEpByKKHix2/b9kmTAM3YoX6MKP+/D//0ClgUYR2fY=
|
||||
github.com/pion/turn/v2 v2.1.6 h1:Xr2niVsiPTB0FPtt+yAWKFUkU1eotQbGgpTIld4x1Gc=
|
||||
github.com/pion/turn/v2 v2.1.6/go.mod h1:huEpByKKHix2/b9kmTAM3YoX6MKP+/D//0ClgUYR2fY=
|
||||
github.com/pion/webrtc/v3 v3.2.40 h1:Wtfi6AZMQg+624cvCXUuSmrKWepSB7zfgYDOYqsSOVU=
|
||||
github.com/pion/webrtc/v3 v3.2.40/go.mod h1:M1RAe3TNTD1tzyvqHrbVODfwdPGSXOUo/OgpoGGJqFY=
|
||||
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||
github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw=
|
||||
github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y=
|
||||
github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE=
|
||||
github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho=
|
||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4=
|
||||
github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
|
||||
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
|
||||
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
|
||||
github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
|
||||
github.com/prometheus/common v0.39.0 h1:oOyhkDq05hPZKItWVBkJ6g6AtGxi+fy7F4JvUV8uhsI=
|
||||
github.com/prometheus/common v0.39.0/go.mod h1:6XBZ7lYdLCbkAVhwRsWTZn+IN5AB9F/NXd5w0BbEX0Y=
|
||||
github.com/prometheus/common v0.48.0 h1:QO8U2CdOzSn1BBsmXJXduaaW+dY/5QLjfB8svtSzKKE=
|
||||
github.com/prometheus/common v0.48.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc=
|
||||
github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI=
|
||||
github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY=
|
||||
github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
|
||||
github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
|
||||
github.com/quic-go/qpack v0.4.0 h1:Cr9BXA1sQS2SmDUWjSofMPNKmvF6IiIfDRmgU0w1ZCo=
|
||||
github.com/quic-go/qpack v0.4.0/go.mod h1:UZVnYIfi5GRk+zI9UMaCPsmZ2xKJP7XBUvVyT1Knj9A=
|
||||
github.com/quic-go/qtls-go1-18 v0.2.0 h1:5ViXqBZ90wpUcZS0ge79rf029yx0dYB0McyPJwqqj7U=
|
||||
github.com/quic-go/qtls-go1-18 v0.2.0/go.mod h1:moGulGHK7o6O8lSPSZNoOwcLvJKJ85vVNc7oJFD65bc=
|
||||
github.com/quic-go/qtls-go1-19 v0.2.0 h1:Cvn2WdhyViFUHoOqK52i51k4nDX8EwIh5VJiVM4nttk=
|
||||
github.com/quic-go/qtls-go1-19 v0.2.0/go.mod h1:ySOI96ew8lnoKPtSqx2BlI5wCpUVPT05RMAlajtnyOI=
|
||||
github.com/quic-go/qtls-go1-20 v0.1.0 h1:d1PK3ErFy9t7zxKsG3NXBJXZjp/kMLoIb3y/kV54oAI=
|
||||
github.com/quic-go/qtls-go1-20 v0.1.0/go.mod h1:JKtK6mjbAVcUTN/9jZpvLbGxvdWIKS8uT7EiStoU1SM=
|
||||
github.com/quic-go/quic-go v0.32.0 h1:lY02md31s1JgPiiyfqJijpu/UX/Iun304FI3yUqX7tA=
|
||||
github.com/quic-go/quic-go v0.32.0/go.mod h1:/fCsKANhQIeD5l76c2JFU+07gVE3KaA0FP+0zMWwfwo=
|
||||
github.com/quic-go/quic-go v0.44.0 h1:So5wOr7jyO4vzL2sd8/pD9Kesciv91zSk8BoFngItQ0=
|
||||
github.com/quic-go/quic-go v0.44.0/go.mod h1:z4cx/9Ny9UtGITIPzmPTXh1ULfOyWh4qGQlpnPcWmek=
|
||||
github.com/quic-go/webtransport-go v0.5.1 h1:1eVb7WDWCRoaeTtFHpFBJ6WDN1bSrPrRoW6tZgSw0Ow=
|
||||
github.com/quic-go/webtransport-go v0.8.0 h1:HxSrwun11U+LlmwpgM1kEqIqH90IT4N8auv/cD7QFJg=
|
||||
github.com/quic-go/webtransport-go v0.8.0/go.mod h1:N99tjprW432Ut5ONql/aUhSLT0YVSlwHohQsuac9WaM=
|
||||
github.com/raulk/go-watchdog v1.3.0 h1:oUmdlHxdkXRJlwfG0O9omj8ukerm8MEQavSiDTEtBsk=
|
||||
github.com/raulk/go-watchdog v1.3.0/go.mod h1:fIvOnLbF0b0ZwkB9YU4mOW9Did//4vPZtDqv66NfsMU=
|
||||
github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
|
||||
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
|
||||
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
|
||||
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
|
||||
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
|
||||
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
|
||||
@ -395,13 +332,10 @@ github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
|
||||
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
|
||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
||||
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
||||
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
|
||||
@ -416,24 +350,19 @@ github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1
|
||||
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
|
||||
go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA=
|
||||
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
|
||||
go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ=
|
||||
go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
|
||||
go.uber.org/dig v1.17.1 h1:Tga8Lz8PcYNsWsyHMZ1Vm0OQOUaJNDyvPImgbAu9YSc=
|
||||
go.uber.org/dig v1.17.1/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE=
|
||||
go.uber.org/fx v1.22.1 h1:nvvln7mwyT5s1q201YE29V/BFrGor6vMiDNpU/78Mys=
|
||||
go.uber.org/fx v1.22.1/go.mod h1:HT2M7d7RHo+ebKGh9NRcrsrHHfpZ60nW3QRubMRfv48=
|
||||
go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
|
||||
go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA=
|
||||
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
|
||||
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
|
||||
go.uber.org/mock v0.4.0 h1:VcM4ZOtdbR4f6VXfiOpwpVJDL6lCReaZ6mw31wqh7KU=
|
||||
go.uber.org/mock v0.4.0/go.mod h1:a6FSlNadKUHUa9IP5Vyt1zh4fC7uAwxMutEAscFbkZc=
|
||||
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
|
||||
go.uber.org/multierr v1.8.0 h1:dg6GjLku4EH+249NNmoIciG9N/jURbDG+pFlTkhzIC8=
|
||||
go.uber.org/multierr v1.8.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak=
|
||||
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
|
||||
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
|
||||
go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI=
|
||||
go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60=
|
||||
go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg=
|
||||
go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
|
||||
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
|
||||
go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE=
|
||||
@ -447,8 +376,8 @@ golang.org/x/crypto v0.0.0-20200602180216-279210d13fed/go.mod h1:LzIPMQfyMNhhGPh
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
|
||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.4.0 h1:UVQgzMY87xqpKNgb+kDsll2Igd33HszWHFLmpaRMq/8=
|
||||
golang.org/x/crypto v0.4.0/go.mod h1:3quD/ATkf6oY+rnes5c3ExXTbLc8mueNue5/DoinL80=
|
||||
golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
|
||||
golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE=
|
||||
golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio=
|
||||
golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw=
|
||||
@ -458,8 +387,7 @@ golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOM
|
||||
golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI=
|
||||
golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20230206171751-46f607a40771 h1:xP7rWLUr1e1n2xkK5YB4LI0hPEy3LJC6Wk+D4pGlOJg=
|
||||
golang.org/x/exp v0.0.0-20230206171751-46f607a40771/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
|
||||
golang.org/x/exp v0.0.0-20230725012225-302865e7556b/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc=
|
||||
golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 h1:vr/HnozRka3pE4EsMEg1lgkXJkTFJCVUX+S/ZT6wYzM=
|
||||
golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842/go.mod h1:XtvwrStGgqGPLc4cjQfWqZHG1YFdYs6swckp8vpsjnc=
|
||||
golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
@ -472,9 +400,9 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
||||
golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI=
|
||||
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
golang.org/x/mod v0.9.0 h1:KENHtAZL2y3NLMYZeHY9DW8HW8V+kQyJsY/V9JlKvCs=
|
||||
golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
golang.org/x/mod v0.11.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA=
|
||||
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
@ -493,11 +421,10 @@ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY
|
||||
golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
|
||||
golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
|
||||
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
|
||||
golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ=
|
||||
golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
|
||||
golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
|
||||
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
|
||||
golang.org/x/net v0.13.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA=
|
||||
@ -521,7 +448,6 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
|
||||
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
|
||||
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
@ -546,9 +472,8 @@ golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBc
|
||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
@ -562,6 +487,7 @@ golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y=
|
||||
golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
|
||||
golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY=
|
||||
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
|
||||
@ -575,8 +501,8 @@ golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68=
|
||||
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
|
||||
golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
|
||||
golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
|
||||
@ -585,6 +511,8 @@ golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk=
|
||||
golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
|
||||
golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
|
||||
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
@ -596,13 +524,10 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn
|
||||
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
||||
golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA=
|
||||
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
|
||||
golang.org/x/tools v0.7.0 h1:W4OVu8VVOaIO0yzWMNdepAulS7YfoS3Zabrm8DOXXU4=
|
||||
golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s=
|
||||
golang.org/x/tools v0.21.0 h1:qc0xYgIbsSDt9EyWz05J5wfa7LOVW0YTLOXrqdLAWIw=
|
||||
golang.org/x/tools v0.21.0/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
@ -625,10 +550,6 @@ google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmE
|
||||
google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio=
|
||||
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
||||
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w=
|
||||
google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
|
||||
google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg=
|
||||
google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
@ -642,16 +563,15 @@ gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o=
|
||||
honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
lukechampine.com/blake3 v1.1.7 h1:GgRMhmdsuK8+ii6UZFDL8Nb+VyMwadAgcJyfYHxG6n0=
|
||||
lukechampine.com/blake3 v1.1.7/go.mod h1:tkKEOtDkNtklkXtLNEOGNq5tcV90tJiA1vAA12R78LA=
|
||||
lukechampine.com/blake3 v1.1.6/go.mod h1:tkKEOtDkNtklkXtLNEOGNq5tcV90tJiA1vAA12R78LA=
|
||||
lukechampine.com/blake3 v1.2.1 h1:YuqqRuaqsGV71BV/nm9xlI0MKUv4QC54jQnBChWbGnI=
|
||||
lukechampine.com/blake3 v1.2.1/go.mod h1:0OFRp7fBtAylGVCO40o87sbupkyIGgbpv1+M1k1LM6k=
|
||||
nhooyr.io/websocket v1.8.7 h1:usjR2uOr/zjjkVMy0lW+PPohFok7PCow5sDjLgX4P4g=
|
||||
sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck=
|
||||
sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0=
|
||||
|
15 go-libp2p-kad-dht/CODEOWNERS Normal file
@@ -0,0 +1,15 @@
# CODEOWNERS

# default owner is the libp2p team
*.go @libp2p/go-libp2p-maintainers @guillaumemichel
/pb/ @libp2p/go-libp2p-maintainers @guillaumemichel

# dual is an application for IPFS
/dual/ @libp2p/kubo-maintainers @guillaumemichel
# fullrt is IPFS specific
/fullrt/ @libp2p/kubo-maintainers @guillaumemichel
# providers describe the IPFS specific providers
/providers/ @libp2p/kubo-maintainers @guillaumemichel
# records are IPFS specific
/records.go @libp2p/kubo-maintainers @guillaumemichel
/records_test.go @libp2p/kubo-maintainers @guillaumemichel
21 go-libp2p-kad-dht/LICENSE Normal file
@@ -0,0 +1,21 @@
The MIT License (MIT)

Copyright (c) 2016 Protocol Labs, Inc.

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
51 go-libp2p-kad-dht/README.md Normal file
@@ -0,0 +1,51 @@
# go-libp2p-kad-dht

[![](https://img.shields.io/badge/made%20by-Protocol%20Labs-blue.svg?style=flat-square)](https://protocol.ai)
[![](https://img.shields.io/badge/project-libp2p-yellow.svg?style=flat-square)](https://libp2p.io)
[![GoDoc](https://godoc.org/github.com/libp2p/go-libp2p-kad-dht?status.svg)](https://godoc.org/github.com/libp2p/go-libp2p-kad-dht)
[![Discourse posts](https://img.shields.io/discourse/https/discuss.libp2p.io/posts.svg)](https://discuss.libp2p.io)

> A Go implementation of [libp2p Kademlia DHT specification](https://github.com/libp2p/specs/tree/master/kad-dht)

## Table of Contents

- [Install](#install)
- [Usage](#usage)
- [Optimizations](#optimizations)
- [Contribute](#contribute)
- [Maintainers](#maintainers)
- [License](#license)

## Install

```sh
go get github.com/libp2p/go-libp2p-kad-dht
```

## Optimizations

Client-side optimizations are described in [optimizations.md](./optimizations.md)

## Usage

Go to https://godoc.org/github.com/libp2p/go-libp2p-kad-dht.
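For a quick sense of what that godoc covers, here is a minimal, hedged sketch of attaching the DHT to a libp2p host; the `dht.Mode(dht.ModeClient)` option is an assumption about the current options API, so defer to the godoc above for the authoritative surface.

```go
package main

import (
	"context"
	"fmt"

	"github.com/libp2p/go-libp2p"
	dht "github.com/libp2p/go-libp2p-kad-dht"
)

func main() {
	ctx := context.Background()

	// A plain libp2p host; transport and security options omitted.
	h, err := libp2p.New()
	if err != nil {
		panic(err)
	}
	defer h.Close()

	// Attach the Kademlia DHT to the host. dht.Mode(dht.ModeClient) is an
	// assumption about the options API; see the godoc for details.
	kadDHT, err := dht.New(ctx, h, dht.Mode(dht.ModeClient))
	if err != nil {
		panic(err)
	}

	// Bootstrap asks the DHT to populate its routing table in the background.
	if err := kadDHT.Bootstrap(ctx); err != nil {
		panic(err)
	}

	fmt.Println("DHT ready on peer", h.ID())
}
```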
## Contribute

Contributions welcome. Please check out [the issues](https://github.com/libp2p/go-libp2p-kad-dht/issues).

Check out our [contributing document](https://github.com/libp2p/community/blob/master/CONTRIBUTE.md) for more information on how we work, and about contributing in general. Please be aware that all interactions related to libp2p are subject to the IPFS [Code of Conduct](https://github.com/ipfs/community/blob/master/code-of-conduct.md).

<!-- Small note: If editing the README, please conform to the [standard-readme](https://github.com/RichardLitt/standard-readme) specification. -->

## Maintainers

- [@ipfs/kubo-maintainers](https://github.com/orgs/ipfs/teams/kubo-maintainers)
- [@libp2p/go-libp2p-maintainers](https://github.com/orgs/libp2p/teams/go-libp2p-maintainers)
- [@guillaumemichel](https://github.com/guillaumemichel)

See [CODEOWNERS](./CODEOWNERS).

## License

[MIT](LICENSE) © Protocol Labs Inc.
266 go-libp2p-kad-dht/crawler/crawler.go Normal file
@@ -0,0 +1,266 @@
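The new crawler/crawler.go that follows defines the crawler API: NewDefaultCrawler, Run, and the HandleQueryResult/HandleQueryFail callbacks. As a hedged sketch of how a caller might drive it — the host construction and the empty seed list are illustrative assumptions, not part of this diff:

```go
package main

import (
	"context"
	"fmt"

	"github.com/libp2p/go-libp2p"
	"github.com/libp2p/go-libp2p/core/peer"

	"github.com/libp2p/go-libp2p-kad-dht/crawler"
)

func main() {
	ctx := context.Background()

	// A bare host; a real crawl also needs reachable bootstrap peers.
	h, err := libp2p.New()
	if err != nil {
		panic(err)
	}
	defer h.Close()

	c, err := crawler.NewDefaultCrawler(h)
	if err != nil {
		panic(err)
	}

	// seeds would normally hold bootstrap peers; left empty for brevity.
	var seeds []*peer.AddrInfo

	// Run blocks until the crawl completes; the callbacks fire once per peer.
	c.Run(ctx, seeds,
		func(p peer.ID, rtPeers []*peer.AddrInfo) {
			fmt.Printf("%s: %d routing-table peers\n", p, len(rtPeers))
		},
		func(p peer.ID, err error) {
			fmt.Printf("%s: query failed: %v\n", p, err)
		},
	)
}
```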
package crawler

import (
	"context"
	"sync"
	"time"

	"github.com/libp2p/go-libp2p/core/host"
	"github.com/libp2p/go-libp2p/core/network"
	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/libp2p/go-libp2p/core/protocol"

	logging "github.com/ipfs/go-log/v2"
	//lint:ignore SA1019 TODO migrate away from gogo pb
	"github.com/libp2p/go-msgio/protoio"

	pb "github.com/libp2p/go-libp2p-kad-dht/pb"
	kbucket "github.com/libp2p/go-libp2p-kbucket"
)

var (
	logger = logging.Logger("dht-crawler")

	_ Crawler = (*DefaultCrawler)(nil)
)

type (
	// Crawler connects to hosts in the DHT to track routing tables of peers.
	Crawler interface {
		// Run crawls the DHT starting from the startingPeers, and calls either handleSuccess or handleFail depending on whether a peer was successfully contacted or not.
		Run(ctx context.Context, startingPeers []*peer.AddrInfo, handleSuccess HandleQueryResult, handleFail HandleQueryFail)
	}
	// DefaultCrawler provides a default implementation of Crawler.
	DefaultCrawler struct {
		parallelism          int
		connectTimeout       time.Duration
		host                 host.Host
		dhtRPC               *pb.ProtocolMessenger
		dialAddressExtendDur time.Duration
	}
)

// NewDefaultCrawler creates a new DefaultCrawler
|
||||
func NewDefaultCrawler(host host.Host, opts ...Option) (*DefaultCrawler, error) {
|
||||
o := new(options)
|
||||
if err := defaults(o); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, opt := range opts {
|
||||
if err := opt(o); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
pm, err := pb.NewProtocolMessenger(&messageSender{h: host, protocols: o.protocols, timeout: o.perMsgTimeout})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &DefaultCrawler{
|
||||
parallelism: o.parallelism,
|
||||
connectTimeout: o.connectTimeout,
|
||||
host: host,
|
||||
dhtRPC: pm,
|
||||
dialAddressExtendDur: o.dialAddressExtendDur,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// MessageSender handles sending wire protocol messages to a given peer
|
||||
type messageSender struct {
|
||||
h host.Host
|
||||
protocols []protocol.ID
|
||||
timeout time.Duration
|
||||
}
|
||||
|
||||
// SendRequest sends a peer a message and waits for its response
|
||||
func (ms *messageSender) SendRequest(ctx context.Context, p peer.ID, pmes *pb.Message) (*pb.Message, error) {
|
||||
s, err := ms.h.NewStream(ctx, p, ms.protocols...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
w := protoio.NewDelimitedWriter(s)
|
||||
if err := w.WriteMsg(pmes); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
r := protoio.NewDelimitedReader(s, network.MessageSizeMax)
|
||||
tctx, cancel := context.WithTimeout(ctx, ms.timeout)
|
||||
defer cancel()
|
||||
defer func() { _ = s.Close() }()
|
||||
|
||||
msg := new(pb.Message)
|
||||
if err := ctxReadMsg(tctx, r, msg); err != nil {
|
||||
_ = s.Reset()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return msg, nil
|
||||
}
|
||||
|
||||
func ctxReadMsg(ctx context.Context, rc protoio.ReadCloser, mes *pb.Message) error {
|
||||
errc := make(chan error, 1)
|
||||
go func(r protoio.ReadCloser) {
|
||||
defer close(errc)
|
||||
err := r.ReadMsg(mes)
|
||||
errc <- err
|
||||
}(rc)
|
||||
|
||||
select {
|
||||
case err := <-errc:
|
||||
return err
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
}
|
||||
}
|
||||
|
||||
// SendMessage sends a peer a message without waiting on a response
|
||||
func (ms *messageSender) SendMessage(ctx context.Context, p peer.ID, pmes *pb.Message) error {
|
||||
s, err := ms.h.NewStream(ctx, p, ms.protocols...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer func() { _ = s.Close() }()
|
||||
|
||||
w := protoio.NewDelimitedWriter(s)
|
||||
return w.WriteMsg(pmes)
|
||||
}
|
||||
|
||||
// HandleQueryResult is a callback on successful peer query
|
||||
type HandleQueryResult func(p peer.ID, rtPeers []*peer.AddrInfo)
|
||||
|
||||
// HandleQueryFail is a callback on failed peer query
|
||||
type HandleQueryFail func(p peer.ID, err error)
|
||||
|
||||
// Run crawls dht peers from an initial seed of `startingPeers`
|
||||
func (c *DefaultCrawler) Run(ctx context.Context, startingPeers []*peer.AddrInfo, handleSuccess HandleQueryResult, handleFail HandleQueryFail) {
|
||||
jobs := make(chan peer.ID, 1)
|
||||
results := make(chan *queryResult, 1)
|
||||
|
||||
// Start worker goroutines
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(c.parallelism)
|
||||
for i := 0; i < c.parallelism; i++ {
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
for p := range jobs {
|
||||
res := c.queryPeer(ctx, p)
|
||||
results <- res
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
defer wg.Wait()
|
||||
defer close(jobs)
|
||||
|
||||
var toDial []*peer.AddrInfo
|
||||
peersSeen := make(map[peer.ID]struct{})
|
||||
|
||||
numSkipped := 0
|
||||
for _, ai := range startingPeers {
|
||||
extendAddrs := c.host.Peerstore().Addrs(ai.ID)
|
||||
if len(ai.Addrs) > 0 {
|
||||
extendAddrs = append(extendAddrs, ai.Addrs...)
|
||||
c.host.Peerstore().AddAddrs(ai.ID, extendAddrs, c.dialAddressExtendDur)
|
||||
}
|
||||
if len(extendAddrs) == 0 {
|
||||
numSkipped++
|
||||
continue
|
||||
}
|
||||
|
||||
toDial = append(toDial, ai)
|
||||
peersSeen[ai.ID] = struct{}{}
|
||||
}
|
||||
|
||||
if numSkipped > 0 {
|
||||
logger.Infof("%d starting peers were skipped due to lack of addresses. Starting crawl with %d peers", numSkipped, len(toDial))
|
||||
}
|
||||
|
||||
numQueried := 0
|
||||
outstanding := 0
|
||||
|
||||
for len(toDial) > 0 || outstanding > 0 {
|
||||
var jobCh chan peer.ID
|
||||
var nextPeerID peer.ID
|
||||
if len(toDial) > 0 {
|
||||
jobCh = jobs
|
||||
nextPeerID = toDial[0].ID
|
||||
}
|
||||
|
||||
select {
|
||||
case res := <-results:
|
||||
if len(res.data) > 0 {
|
||||
logger.Debugf("peer %v had %d peers", res.peer, len(res.data))
|
||||
rtPeers := make([]*peer.AddrInfo, 0, len(res.data))
|
||||
for p, ai := range res.data {
|
||||
c.host.Peerstore().AddAddrs(p, ai.Addrs, c.dialAddressExtendDur)
|
||||
if _, ok := peersSeen[p]; !ok {
|
||||
peersSeen[p] = struct{}{}
|
||||
toDial = append(toDial, ai)
|
||||
}
|
||||
rtPeers = append(rtPeers, ai)
|
||||
}
|
||||
if handleSuccess != nil {
|
||||
handleSuccess(res.peer, rtPeers)
|
||||
}
|
||||
} else if handleFail != nil {
|
||||
handleFail(res.peer, res.err)
|
||||
}
|
||||
outstanding--
|
||||
case jobCh <- nextPeerID:
|
||||
outstanding++
|
||||
numQueried++
|
||||
toDial = toDial[1:]
|
||||
logger.Debugf("starting %d out of %d", numQueried, len(peersSeen))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type queryResult struct {
|
||||
peer peer.ID
|
||||
data map[peer.ID]*peer.AddrInfo
|
||||
err error
|
||||
}
|
||||
|
||||
func (c *DefaultCrawler) queryPeer(ctx context.Context, nextPeer peer.ID) *queryResult {
|
||||
tmpRT, err := kbucket.NewRoutingTable(20, kbucket.ConvertPeerID(nextPeer), time.Hour, c.host.Peerstore(), time.Hour, nil)
|
||||
if err != nil {
|
||||
logger.Errorf("error creating rt for peer %v : %v", nextPeer, err)
|
||||
return &queryResult{nextPeer, nil, err}
|
||||
}
|
||||
|
||||
connCtx, cancel := context.WithTimeout(ctx, c.connectTimeout)
|
||||
defer cancel()
|
||||
err = c.host.Connect(connCtx, peer.AddrInfo{ID: nextPeer})
|
||||
if err != nil {
|
||||
logger.Debugf("could not connect to peer %v: %v", nextPeer, err)
|
||||
return &queryResult{nextPeer, nil, err}
|
||||
}
|
||||
|
||||
localPeers := make(map[peer.ID]*peer.AddrInfo)
|
||||
var retErr error
|
||||
for cpl := 0; cpl <= 15; cpl++ {
|
||||
generatePeer, err := tmpRT.GenRandPeerID(uint(cpl))
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
peers, err := c.dhtRPC.GetClosestPeers(ctx, nextPeer, generatePeer)
|
||||
if err != nil {
|
||||
logger.Debugf("error finding data on peer %v with cpl %d : %v", nextPeer, cpl, err)
|
||||
retErr = err
|
||||
break
|
||||
}
|
||||
for _, ai := range peers {
|
||||
if _, ok := localPeers[ai.ID]; !ok {
|
||||
localPeers[ai.ID] = ai
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if retErr != nil {
|
||||
return &queryResult{nextPeer, nil, retErr}
|
||||
}
|
||||
|
||||
return &queryResult{nextPeer, localPeers, retErr}
|
||||
}
|
72 go-libp2p-kad-dht/crawler/options.go Normal file
@ -0,0 +1,72 @@
package crawler

import (
	"time"

	"github.com/libp2p/go-libp2p/core/protocol"
)

// Option DHT Crawler option type.
type Option func(*options) error

type options struct {
	protocols            []protocol.ID
	parallelism          int
	connectTimeout       time.Duration
	perMsgTimeout        time.Duration
	dialAddressExtendDur time.Duration
}

// defaults are the default crawler options. This option will be automatically
// prepended to any options you pass to the crawler constructor.
var defaults = func(o *options) error {
	o.protocols = []protocol.ID{"/ipfs/kad/1.0.0"}
	o.parallelism = 1000
	o.connectTimeout = time.Second * 5
	o.perMsgTimeout = time.Second * 5
	o.dialAddressExtendDur = time.Minute * 30

	return nil
}

// WithProtocols defines the ordered set of protocols the crawler will use to talk to other nodes
func WithProtocols(protocols []protocol.ID) Option {
	return func(o *options) error {
		o.protocols = append([]protocol.ID{}, protocols...)
		return nil
	}
}

// WithParallelism defines the number of queries that can be issued in parallel
func WithParallelism(parallelism int) Option {
	return func(o *options) error {
		o.parallelism = parallelism
		return nil
	}
}

// WithMsgTimeout defines the amount of time a single DHT message is allowed to take before it's deemed failed
func WithMsgTimeout(timeout time.Duration) Option {
	return func(o *options) error {
		o.perMsgTimeout = timeout
		return nil
	}
}

// WithConnectTimeout defines the time for peer connection before timing out
func WithConnectTimeout(timeout time.Duration) Option {
	return func(o *options) error {
		o.connectTimeout = timeout
		return nil
	}
}

// WithDialAddrExtendDuration sets the duration by which the TTL of dialed addresses in the peer store is
// extended.
// Defaults to 30 minutes if unset.
func WithDialAddrExtendDuration(ext time.Duration) Option {
	return func(o *options) error {
		o.dialAddressExtendDur = ext
		return nil
	}
}
942 go-libp2p-kad-dht/dht.go Normal file
@ -0,0 +1,942 @@
|
||||
package dht
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math"
|
||||
"math/rand"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/libp2p/go-libp2p-routing-helpers/tracing"
|
||||
"github.com/libp2p/go-libp2p/core/host"
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/libp2p/go-libp2p/core/peerstore"
|
||||
"github.com/libp2p/go-libp2p/core/protocol"
|
||||
"github.com/libp2p/go-libp2p/core/routing"
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/trace"
|
||||
|
||||
"github.com/libp2p/go-libp2p-kad-dht/internal"
|
||||
dhtcfg "github.com/libp2p/go-libp2p-kad-dht/internal/config"
|
||||
"github.com/libp2p/go-libp2p-kad-dht/internal/net"
|
||||
"github.com/libp2p/go-libp2p-kad-dht/metrics"
|
||||
"github.com/libp2p/go-libp2p-kad-dht/netsize"
|
||||
pb "github.com/libp2p/go-libp2p-kad-dht/pb"
|
||||
"github.com/libp2p/go-libp2p-kad-dht/providers"
|
||||
"github.com/libp2p/go-libp2p-kad-dht/rtrefresh"
|
||||
kb "github.com/libp2p/go-libp2p-kbucket"
|
||||
"github.com/libp2p/go-libp2p-kbucket/peerdiversity"
|
||||
record "github.com/libp2p/go-libp2p-record"
|
||||
recpb "github.com/libp2p/go-libp2p-record/pb"
|
||||
|
||||
"github.com/gogo/protobuf/proto"
|
||||
ds "github.com/ipfs/go-datastore"
|
||||
logging "github.com/ipfs/go-log/v2"
|
||||
"github.com/multiformats/go-base32"
|
||||
ma "github.com/multiformats/go-multiaddr"
|
||||
"go.opencensus.io/tag"
|
||||
"go.uber.org/multierr"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
const tracer = tracing.Tracer("go-libp2p-kad-dht")
|
||||
const dhtName = "IpfsDHT"
|
||||
|
||||
var (
|
||||
logger = logging.Logger("dht")
|
||||
baseLogger = logger.Desugar()
|
||||
|
||||
rtFreezeTimeout = 1 * time.Minute
|
||||
)
|
||||
|
||||
const (
|
||||
// BaseConnMgrScore is the base of the score set on the connection
|
||||
// manager "kbucket" tag. It is added with the common prefix length
|
||||
// between two peer IDs.
|
||||
baseConnMgrScore = 5
|
||||
)
|
||||
|
||||
type mode int
|
||||
|
||||
const (
|
||||
modeServer mode = iota + 1
|
||||
modeClient
|
||||
)
|
||||
|
||||
const (
|
||||
kad1 protocol.ID = "/kad/1.0.0"
|
||||
)
|
||||
|
||||
const (
|
||||
kbucketTag = "kbucket"
|
||||
protectedBuckets = 2
|
||||
)
|
||||
|
||||
// IpfsDHT is an implementation of Kademlia with S/Kademlia modifications.
|
||||
// It is used to implement the base Routing module.
|
||||
type IpfsDHT struct {
|
||||
host host.Host // the network services we need
|
||||
self peer.ID // Local peer (yourself)
|
||||
selfKey kb.ID
|
||||
peerstore peerstore.Peerstore // Peer Registry
|
||||
|
||||
datastore ds.Datastore // Local data
|
||||
|
||||
routingTable *kb.RoutingTable // Array of routing tables for differently distanced nodes
|
||||
// providerStore stores & manages the provider records for this Dht peer.
|
||||
providerStore providers.ProviderStore
|
||||
|
||||
// manages Routing Table refresh
|
||||
rtRefreshManager *rtrefresh.RtRefreshManager
|
||||
|
||||
birth time.Time // When this peer started up
|
||||
|
||||
Validator record.Validator
|
||||
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
wg sync.WaitGroup
|
||||
|
||||
protoMessenger *pb.ProtocolMessenger
|
||||
msgSender pb.MessageSenderWithDisconnect
|
||||
|
||||
stripedPutLocks [256]sync.Mutex
|
||||
|
||||
// DHT protocols we query with. We'll only add peers to our routing
|
||||
// table if they speak these protocols.
|
||||
protocols []protocol.ID
|
||||
|
||||
// DHT protocols we can respond to.
|
||||
serverProtocols []protocol.ID
|
||||
|
||||
auto ModeOpt
|
||||
mode mode
|
||||
modeLk sync.Mutex
|
||||
|
||||
bucketSize int
|
||||
alpha int // The concurrency parameter per path
|
||||
beta int // The number of peers closest to a target that must have responded for a query path to terminate
|
||||
|
||||
queryPeerFilter QueryFilterFunc
|
||||
routingTablePeerFilter RouteTableFilterFunc
|
||||
rtPeerDiversityFilter peerdiversity.PeerIPGroupFilter
|
||||
|
||||
autoRefresh bool
|
||||
|
||||
// timeout for the lookupCheck operation
|
||||
lookupCheckTimeout time.Duration
|
||||
// number of concurrent lookupCheck operations
|
||||
lookupCheckCapacity int
|
||||
lookupChecksLk sync.Mutex
|
||||
|
||||
// A function returning a set of bootstrap peers to fallback on if all other attempts to fix
|
||||
// the routing table fail (or, e.g., this is the first time this node is
|
||||
// connecting to the network).
|
||||
bootstrapPeers func() []peer.AddrInfo
|
||||
|
||||
maxRecordAge time.Duration
|
||||
|
||||
// Allows disabling dht subsystems. These should _only_ be set on
|
||||
// "forked" DHTs (e.g., DHTs with custom protocols and/or private
|
||||
// networks).
|
||||
enableProviders, enableValues bool
|
||||
|
||||
disableFixLowPeers bool
|
||||
fixLowPeersChan chan struct{}
|
||||
|
||||
addPeerToRTChan chan peer.ID
|
||||
refreshFinishedCh chan struct{}
|
||||
|
||||
rtFreezeTimeout time.Duration
|
||||
|
||||
// network size estimator
|
||||
nsEstimator *netsize.Estimator
|
||||
enableOptProv bool
|
||||
|
||||
// a bound channel to limit asynchronicity of in-flight ADD_PROVIDER RPCs
|
||||
optProvJobsPool chan struct{}
|
||||
|
||||
// configuration variables for tests
|
||||
testAddressUpdateProcessing bool
|
||||
|
||||
// addrFilter is used to filter the addresses we put into the peer store.
|
||||
// Mostly used to filter out localhost and local addresses.
|
||||
addrFilter func([]ma.Multiaddr) []ma.Multiaddr
|
||||
}
|
||||
|
||||
// Assert that IPFS assumptions about interfaces aren't broken. These aren't a
|
||||
// guarantee, but we can use them to aid refactoring.
|
||||
var (
|
||||
_ routing.ContentRouting = (*IpfsDHT)(nil)
|
||||
_ routing.Routing = (*IpfsDHT)(nil)
|
||||
_ routing.PeerRouting = (*IpfsDHT)(nil)
|
||||
_ routing.PubKeyFetcher = (*IpfsDHT)(nil)
|
||||
_ routing.ValueStore = (*IpfsDHT)(nil)
|
||||
)
|
||||
|
||||
// New creates a new DHT with the specified host and options.
|
||||
// Please note that being connected to a DHT peer does not necessarily imply that it's also in the DHT Routing Table.
|
||||
// If the Routing Table has more than "minRTRefreshThreshold" peers, we consider a peer as a Routing Table candidate ONLY when
|
||||
// we successfully get a query response from it OR if it sends us a query.
|
||||
func New(ctx context.Context, h host.Host, options ...Option) (*IpfsDHT, error) {
|
||||
var cfg dhtcfg.Config
|
||||
if err := cfg.Apply(append([]Option{dhtcfg.Defaults}, options...)...); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := cfg.ApplyFallbacks(h); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := cfg.Validate(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
dht, err := makeDHT(h, cfg)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create DHT, err=%s", err)
|
||||
}
|
||||
|
||||
dht.autoRefresh = cfg.RoutingTable.AutoRefresh
|
||||
|
||||
dht.maxRecordAge = cfg.MaxRecordAge
|
||||
dht.enableProviders = cfg.EnableProviders
|
||||
dht.enableValues = cfg.EnableValues
|
||||
dht.disableFixLowPeers = cfg.DisableFixLowPeers
|
||||
|
||||
dht.Validator = cfg.Validator
|
||||
dht.msgSender = net.NewMessageSenderImpl(h, dht.protocols)
|
||||
dht.protoMessenger, err = pb.NewProtocolMessenger(dht.msgSender)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
dht.testAddressUpdateProcessing = cfg.TestAddressUpdateProcessing
|
||||
|
||||
dht.auto = cfg.Mode
|
||||
switch cfg.Mode {
|
||||
case ModeAuto, ModeClient:
|
||||
dht.mode = modeClient
|
||||
case ModeAutoServer, ModeServer:
|
||||
dht.mode = modeServer
|
||||
default:
|
||||
return nil, fmt.Errorf("invalid dht mode %d", cfg.Mode)
|
||||
}
|
||||
|
||||
if dht.mode == modeServer {
|
||||
if err := dht.moveToServerMode(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// register for event bus and network notifications
|
||||
if err := dht.startNetworkSubscriber(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// go-routine to make sure we ALWAYS have RT peer addresses in the peerstore
|
||||
// since RT membership is decoupled from connectivity
|
||||
go dht.persistRTPeersInPeerStore()
|
||||
|
||||
dht.rtPeerLoop()
|
||||
|
||||
// Fill routing table with currently connected peers that are DHT servers
|
||||
for _, p := range dht.host.Network().Peers() {
|
||||
dht.peerFound(p)
|
||||
}
|
||||
|
||||
dht.rtRefreshManager.Start()
|
||||
|
||||
// listens to the fix low peers chan and tries to fix the Routing Table
|
||||
if !dht.disableFixLowPeers {
|
||||
dht.runFixLowPeersLoop()
|
||||
}
|
||||
|
||||
return dht, nil
|
||||
}
|
||||
|
||||
// NewDHT creates a new DHT object with the given peer as the 'local' host.
|
||||
// IpfsDHT's initialized with this function will respond to DHT requests,
|
||||
// whereas IpfsDHT's initialized with NewDHTClient will not.
|
||||
func NewDHT(ctx context.Context, h host.Host, dstore ds.Batching) *IpfsDHT {
|
||||
dht, err := New(ctx, h, Datastore(dstore))
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return dht
|
||||
}
|
||||
|
||||
// NewDHTClient creates a new DHT object with the given peer as the 'local'
|
||||
// host. IpfsDHT clients initialized with this function will not respond to DHT
|
||||
// requests. If you need a peer to respond to DHT requests, use NewDHT instead.
|
||||
func NewDHTClient(ctx context.Context, h host.Host, dstore ds.Batching) *IpfsDHT {
|
||||
dht, err := New(ctx, h, Datastore(dstore), Mode(ModeClient))
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return dht
|
||||
}
|
||||
|
||||
func makeDHT(h host.Host, cfg dhtcfg.Config) (*IpfsDHT, error) {
|
||||
var protocols, serverProtocols []protocol.ID
|
||||
|
||||
v1proto := cfg.ProtocolPrefix + kad1
|
||||
|
||||
if cfg.V1ProtocolOverride != "" {
|
||||
v1proto = cfg.V1ProtocolOverride
|
||||
}
|
||||
|
||||
protocols = []protocol.ID{v1proto}
|
||||
serverProtocols = []protocol.ID{v1proto}
|
||||
|
||||
dht := &IpfsDHT{
|
||||
datastore: cfg.Datastore,
|
||||
self: h.ID(),
|
||||
selfKey: kb.ConvertPeerID(h.ID()),
|
||||
peerstore: h.Peerstore(),
|
||||
host: h,
|
||||
birth: time.Now(),
|
||||
protocols: protocols,
|
||||
serverProtocols: serverProtocols,
|
||||
bucketSize: cfg.BucketSize,
|
||||
alpha: cfg.Concurrency,
|
||||
beta: cfg.Resiliency,
|
||||
lookupCheckCapacity: cfg.LookupCheckConcurrency,
|
||||
queryPeerFilter: cfg.QueryPeerFilter,
|
||||
routingTablePeerFilter: cfg.RoutingTable.PeerFilter,
|
||||
rtPeerDiversityFilter: cfg.RoutingTable.DiversityFilter,
|
||||
addrFilter: cfg.AddressFilter,
|
||||
|
||||
fixLowPeersChan: make(chan struct{}, 1),
|
||||
|
||||
addPeerToRTChan: make(chan peer.ID),
|
||||
refreshFinishedCh: make(chan struct{}),
|
||||
|
||||
enableOptProv: cfg.EnableOptimisticProvide,
|
||||
optProvJobsPool: nil,
|
||||
}
|
||||
|
||||
var maxLastSuccessfulOutboundThreshold time.Duration
|
||||
|
||||
// The threshold is calculated based on the expected amount of time that should pass before we
|
||||
// query a peer as part of our refresh cycle.
|
||||
// To grok the Math Wizardy that produced these exact equations, please be patient as a document explaining it will
|
||||
// be published soon.
|
||||
if cfg.Concurrency < cfg.BucketSize { // (alpha < K)
|
||||
l1 := math.Log(float64(1) / float64(cfg.BucketSize)) // (Log(1/K))
|
||||
l2 := math.Log(float64(1) - (float64(cfg.Concurrency) / float64(cfg.BucketSize))) // Log(1 - (alpha / K))
|
||||
maxLastSuccessfulOutboundThreshold = time.Duration(l1 / l2 * float64(cfg.RoutingTable.RefreshInterval))
|
||||
} else {
|
||||
maxLastSuccessfulOutboundThreshold = cfg.RoutingTable.RefreshInterval
|
||||
}
|
||||
|
||||
// construct routing table
|
||||
// use twice the theoretical usefulness threshold to keep older peers around longer
|
||||
rt, err := makeRoutingTable(dht, cfg, 2*maxLastSuccessfulOutboundThreshold)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to construct routing table,err=%s", err)
|
||||
}
|
||||
dht.routingTable = rt
|
||||
dht.bootstrapPeers = cfg.BootstrapPeers
|
||||
|
||||
dht.lookupCheckTimeout = cfg.RoutingTable.RefreshQueryTimeout
|
||||
|
||||
// init network size estimator
|
||||
dht.nsEstimator = netsize.NewEstimator(h.ID(), rt, cfg.BucketSize)
|
||||
|
||||
if dht.enableOptProv {
|
||||
dht.optProvJobsPool = make(chan struct{}, cfg.OptimisticProvideJobsPoolSize)
|
||||
}
|
||||
|
||||
// rt refresh manager
|
||||
dht.rtRefreshManager, err = makeRtRefreshManager(dht, cfg, maxLastSuccessfulOutboundThreshold)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to construct RT Refresh Manager,err=%s", err)
|
||||
}
|
||||
|
||||
// create a tagged context derived from the original context
|
||||
// the DHT context should be done when the process is closed
|
||||
dht.ctx, dht.cancel = context.WithCancel(dht.newContextWithLocalTags(context.Background()))
|
||||
|
||||
if cfg.ProviderStore != nil {
|
||||
dht.providerStore = cfg.ProviderStore
|
||||
} else {
|
||||
dht.providerStore, err = providers.NewProviderManager(h.ID(), dht.peerstore, cfg.Datastore)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("initializing default provider manager (%v)", err)
|
||||
}
|
||||
}
|
||||
|
||||
dht.rtFreezeTimeout = rtFreezeTimeout
|
||||
|
||||
return dht, nil
|
||||
}
|
||||
|
||||
// lookupCheck performs a lookup request to a remote peer.ID, verifying that it is able to
|
||||
// answer it correctly
|
||||
func (dht *IpfsDHT) lookupCheck(ctx context.Context, p peer.ID) error {
|
||||
// lookup request to p requesting for its own peer.ID
|
||||
peerids, err := dht.protoMessenger.GetClosestPeers(ctx, p, p)
|
||||
// p is expected to return at least 1 peer id, unless our routing table has
|
||||
// less than bucketSize peers, in which case we aren't picky about who we
|
||||
// add to the routing table.
|
||||
if err == nil && len(peerids) == 0 && dht.routingTable.Size() >= dht.bucketSize {
|
||||
return fmt.Errorf("peer %s failed to return its closest peers, got %d", p, len(peerids))
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func makeRtRefreshManager(dht *IpfsDHT, cfg dhtcfg.Config, maxLastSuccessfulOutboundThreshold time.Duration) (*rtrefresh.RtRefreshManager, error) {
|
||||
keyGenFnc := func(cpl uint) (string, error) {
|
||||
p, err := dht.routingTable.GenRandPeerID(cpl)
|
||||
return string(p), err
|
||||
}
|
||||
|
||||
queryFnc := func(ctx context.Context, key string) error {
|
||||
_, err := dht.GetClosestPeers(ctx, key)
|
||||
return err
|
||||
}
|
||||
|
||||
r, err := rtrefresh.NewRtRefreshManager(
|
||||
dht.host, dht.routingTable, cfg.RoutingTable.AutoRefresh,
|
||||
keyGenFnc,
|
||||
queryFnc,
|
||||
dht.lookupCheck,
|
||||
cfg.RoutingTable.RefreshQueryTimeout,
|
||||
cfg.RoutingTable.RefreshInterval,
|
||||
maxLastSuccessfulOutboundThreshold,
|
||||
dht.refreshFinishedCh)
|
||||
|
||||
return r, err
|
||||
}
|
||||
|
||||
func makeRoutingTable(dht *IpfsDHT, cfg dhtcfg.Config, maxLastSuccessfulOutboundThreshold time.Duration) (*kb.RoutingTable, error) {
|
||||
// make a Routing Table Diversity Filter
|
||||
var filter *peerdiversity.Filter
|
||||
if dht.rtPeerDiversityFilter != nil {
|
||||
df, err := peerdiversity.NewFilter(dht.rtPeerDiversityFilter, "rt/diversity", func(p peer.ID) int {
|
||||
return kb.CommonPrefixLen(dht.selfKey, kb.ConvertPeerID(p))
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to construct peer diversity filter: %w", err)
|
||||
}
|
||||
|
||||
filter = df
|
||||
}
|
||||
|
||||
rt, err := kb.NewRoutingTable(cfg.BucketSize, dht.selfKey, time.Minute, dht.host.Peerstore(), maxLastSuccessfulOutboundThreshold, filter)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cmgr := dht.host.ConnManager()
|
||||
|
||||
rt.PeerAdded = func(p peer.ID) {
|
||||
commonPrefixLen := kb.CommonPrefixLen(dht.selfKey, kb.ConvertPeerID(p))
|
||||
if commonPrefixLen < protectedBuckets {
|
||||
cmgr.Protect(p, kbucketTag)
|
||||
} else {
|
||||
cmgr.TagPeer(p, kbucketTag, baseConnMgrScore)
|
||||
}
|
||||
}
|
||||
rt.PeerRemoved = func(p peer.ID) {
|
||||
cmgr.Unprotect(p, kbucketTag)
|
||||
cmgr.UntagPeer(p, kbucketTag)
|
||||
|
||||
// try to fix the RT
|
||||
dht.fixRTIfNeeded()
|
||||
}
|
||||
|
||||
return rt, err
|
||||
}
|
||||
|
||||
// ProviderStore returns the provider storage object for storing and retrieving provider records.
|
||||
func (dht *IpfsDHT) ProviderStore() providers.ProviderStore {
|
||||
return dht.providerStore
|
||||
}
|
||||
|
||||
// GetRoutingTableDiversityStats returns the diversity stats for the Routing Table.
|
||||
func (dht *IpfsDHT) GetRoutingTableDiversityStats() []peerdiversity.CplDiversityStats {
|
||||
return dht.routingTable.GetDiversityStats()
|
||||
}
|
||||
|
||||
// Mode allows introspection of the operation mode of the DHT
|
||||
func (dht *IpfsDHT) Mode() ModeOpt {
|
||||
return dht.auto
|
||||
}
|
||||
|
||||
// runFixLowPeersLoop manages simultaneous requests to fixLowPeers
|
||||
func (dht *IpfsDHT) runFixLowPeersLoop() {
|
||||
dht.wg.Add(1)
|
||||
go func() {
|
||||
defer dht.wg.Done()
|
||||
|
||||
dht.fixLowPeers()
|
||||
|
||||
ticker := time.NewTicker(periodicBootstrapInterval)
|
||||
defer ticker.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-dht.fixLowPeersChan:
|
||||
case <-ticker.C:
|
||||
case <-dht.ctx.Done():
|
||||
return
|
||||
}
|
||||
|
||||
dht.fixLowPeers()
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// fixLowPeers tries to get more peers into the routing table if we're below the threshold
|
||||
func (dht *IpfsDHT) fixLowPeers() {
|
||||
if dht.routingTable.Size() > minRTRefreshThreshold {
|
||||
return
|
||||
}
|
||||
|
||||
// we try to add all peers we are connected to to the Routing Table
|
||||
// in case they aren't already there.
|
||||
for _, p := range dht.host.Network().Peers() {
|
||||
dht.peerFound(p)
|
||||
}
|
||||
|
||||
// TODO Active Bootstrapping
|
||||
// We should first use non-bootstrap peers we knew of from previous
|
||||
// snapshots of the Routing Table before we connect to the bootstrappers.
|
||||
// See https://github.com/libp2p/go-libp2p-kad-dht/issues/387.
|
||||
if dht.routingTable.Size() == 0 && dht.bootstrapPeers != nil {
|
||||
bootstrapPeers := dht.bootstrapPeers()
|
||||
if len(bootstrapPeers) == 0 {
|
||||
// No point in continuing, we have no peers!
|
||||
return
|
||||
}
|
||||
|
||||
found := 0
|
||||
for _, i := range rand.Perm(len(bootstrapPeers)) {
|
||||
ai := bootstrapPeers[i]
|
||||
err := dht.Host().Connect(dht.ctx, ai)
|
||||
if err == nil {
|
||||
found++
|
||||
} else {
|
||||
logger.Warnw("failed to bootstrap", "peer", ai.ID, "error", err)
|
||||
}
|
||||
|
||||
// Wait for two bootstrap peers, or try them all.
|
||||
//
|
||||
// Why two? In theory, one should be enough
|
||||
// normally. However, if the network were to
|
||||
// restart and everyone connected to just one
|
||||
// bootstrapper, we'll end up with a mostly
|
||||
// partitioned network.
|
||||
//
|
||||
// So we always bootstrap with two random peers.
|
||||
if found == maxNBoostrappers {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// if we still don't have peers in our routing table (probably because Identify hasn't completed),
|
||||
// there is no point in triggering a Refresh.
|
||||
if dht.routingTable.Size() == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
if dht.autoRefresh {
|
||||
dht.rtRefreshManager.RefreshNoWait()
|
||||
}
|
||||
}
|
||||
|
||||
// TODO This is hacky, horrible and the programmer needs to have his mother called a hamster.
|
||||
// SHOULD be removed once https://github.com/libp2p/go-libp2p/issues/800 goes in.
|
||||
func (dht *IpfsDHT) persistRTPeersInPeerStore() {
|
||||
tickr := time.NewTicker(peerstore.RecentlyConnectedAddrTTL / 3)
|
||||
defer tickr.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-tickr.C:
|
||||
ps := dht.routingTable.ListPeers()
|
||||
for _, p := range ps {
|
||||
dht.peerstore.UpdateAddrs(p, peerstore.RecentlyConnectedAddrTTL, peerstore.RecentlyConnectedAddrTTL)
|
||||
}
|
||||
case <-dht.ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// getLocal attempts to retrieve the value from the datastore.
|
||||
//
|
||||
// returns nil, nil when either nothing is found or the value found doesn't properly validate.
|
||||
// returns nil, some_error when there's a *datastore* error (i.e., something goes very wrong)
|
||||
func (dht *IpfsDHT) getLocal(ctx context.Context, key string) (*recpb.Record, error) {
|
||||
logger.Debugw("finding value in datastore", "key", internal.LoggableRecordKeyString(key))
|
||||
|
||||
rec, err := dht.getRecordFromDatastore(ctx, mkDsKey(key))
|
||||
if err != nil {
|
||||
logger.Warnw("get local failed", "key", internal.LoggableRecordKeyString(key), "error", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Double check the key. Can't hurt.
|
||||
if rec != nil && string(rec.GetKey()) != key {
|
||||
logger.Errorw("BUG: found a DHT record that didn't match it's key", "expected", internal.LoggableRecordKeyString(key), "got", rec.GetKey())
|
||||
return nil, nil
|
||||
|
||||
}
|
||||
return rec, nil
|
||||
}
|
||||
|
||||
// putLocal stores the key value pair in the datastore
|
||||
func (dht *IpfsDHT) putLocal(ctx context.Context, key string, rec *recpb.Record) error {
|
||||
data, err := proto.Marshal(rec)
|
||||
if err != nil {
|
||||
logger.Warnw("failed to put marshal record for local put", "error", err, "key", internal.LoggableRecordKeyString(key))
|
||||
return err
|
||||
}
|
||||
|
||||
return dht.datastore.Put(ctx, mkDsKey(key), data)
|
||||
}
|
||||
|
||||
func (dht *IpfsDHT) rtPeerLoop() {
|
||||
dht.wg.Add(1)
|
||||
go func() {
|
||||
defer dht.wg.Done()
|
||||
|
||||
var bootstrapCount uint
|
||||
var isBootsrapping bool
|
||||
var timerCh <-chan time.Time
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-timerCh:
|
||||
dht.routingTable.MarkAllPeersIrreplaceable()
|
||||
case p := <-dht.addPeerToRTChan:
|
||||
if dht.routingTable.Size() == 0 {
|
||||
isBootsrapping = true
|
||||
bootstrapCount = 0
|
||||
timerCh = nil
|
||||
}
|
||||
// queryPeer set to true as we only try to add queried peers to the RT
|
||||
newlyAdded, err := dht.routingTable.TryAddPeer(p, true, isBootsrapping)
|
||||
if err != nil {
|
||||
// peer not added.
|
||||
continue
|
||||
}
|
||||
if newlyAdded {
|
||||
// peer was added to the RT, it can now be fixed if needed.
|
||||
dht.fixRTIfNeeded()
|
||||
} else {
|
||||
// the peer is already in our RT, but we just successfully queried it and so let's give it a
|
||||
// bump on the query time so we don't ping it too soon for a liveliness check.
|
||||
dht.routingTable.UpdateLastSuccessfulOutboundQueryAt(p, time.Now())
|
||||
}
|
||||
case <-dht.refreshFinishedCh:
|
||||
bootstrapCount = bootstrapCount + 1
|
||||
if bootstrapCount == 2 {
|
||||
timerCh = time.NewTimer(dht.rtFreezeTimeout).C
|
||||
}
|
||||
|
||||
old := isBootsrapping
|
||||
isBootsrapping = false
|
||||
if old {
|
||||
dht.rtRefreshManager.RefreshNoWait()
|
||||
}
|
||||
|
||||
case <-dht.ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// peerFound verifies whether the found peer advertises DHT protocols
|
||||
// and probes it to make sure it answers DHT queries as expected. If
|
||||
// it fails to answer, it isn't added to the routingTable.
|
||||
func (dht *IpfsDHT) peerFound(p peer.ID) {
|
||||
// if the peer is already in the routing table or the appropriate bucket is
|
||||
// already full, don't try to add the new peer.ID
|
||||
if !dht.routingTable.UsefulNewPeer(p) {
|
||||
return
|
||||
}
|
||||
|
||||
// verify whether the remote peer advertises the right dht protocol
|
||||
b, err := dht.validRTPeer(p)
|
||||
if err != nil {
|
||||
logger.Errorw("failed to validate if peer is a DHT peer", "peer", p, "error", err)
|
||||
} else if b {
|
||||
|
||||
// check if the maximal number of concurrent lookup checks is reached
|
||||
dht.lookupChecksLk.Lock()
|
||||
if dht.lookupCheckCapacity == 0 {
|
||||
dht.lookupChecksLk.Unlock()
|
||||
// drop the new peer.ID if the maximal number of concurrent lookup
|
||||
// checks is reached
|
||||
return
|
||||
}
|
||||
dht.lookupCheckCapacity--
|
||||
dht.lookupChecksLk.Unlock()
|
||||
|
||||
go func() {
|
||||
livelinessCtx, cancel := context.WithTimeout(dht.ctx, dht.lookupCheckTimeout)
|
||||
defer cancel()
|
||||
|
||||
// performing a FIND_NODE query
|
||||
err := dht.lookupCheck(livelinessCtx, p)
|
||||
|
||||
dht.lookupChecksLk.Lock()
|
||||
dht.lookupCheckCapacity++
|
||||
dht.lookupChecksLk.Unlock()
|
||||
|
||||
if err != nil {
|
||||
logger.Debugw("connected peer not answering DHT request as expected", "peer", p, "error", err)
|
||||
return
|
||||
}
|
||||
|
||||
// if the FIND_NODE succeeded, the peer is considered as valid
|
||||
dht.validPeerFound(p)
|
||||
}()
|
||||
}
|
||||
}
|
||||
|
||||
// validPeerFound signals the routingTable that we've found a peer that
|
||||
// supports the DHT protocol, and just answered correctly to a DHT FindPeers
|
||||
func (dht *IpfsDHT) validPeerFound(p peer.ID) {
|
||||
if c := baseLogger.Check(zap.DebugLevel, "peer found"); c != nil {
|
||||
c.Write(zap.String("peer", p.String()))
|
||||
}
|
||||
|
||||
select {
|
||||
case dht.addPeerToRTChan <- p:
|
||||
case <-dht.ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// peerStoppedDHT signals the routing table that a peer is unable to respond to DHT queries anymore.
|
||||
func (dht *IpfsDHT) peerStoppedDHT(p peer.ID) {
|
||||
logger.Debugw("peer stopped dht", "peer", p)
|
||||
// A peer that does not support the DHT protocol is dead for us.
|
||||
// There's no point in talking to it anymore till it starts supporting the DHT protocol again.
|
||||
dht.routingTable.RemovePeer(p)
|
||||
}
|
||||
|
||||
func (dht *IpfsDHT) fixRTIfNeeded() {
|
||||
select {
|
||||
case dht.fixLowPeersChan <- struct{}{}:
|
||||
default:
|
||||
}
|
||||
}
|
||||
|
||||
// FindLocal looks for a peer with a given ID connected to this dht and returns the peer and the table it was found in.
|
||||
func (dht *IpfsDHT) FindLocal(ctx context.Context, id peer.ID) peer.AddrInfo {
|
||||
_, span := internal.StartSpan(ctx, "IpfsDHT.FindLocal", trace.WithAttributes(attribute.Stringer("PeerID", id)))
|
||||
defer span.End()
|
||||
|
||||
switch dht.host.Network().Connectedness(id) {
|
||||
case network.Connected, network.CanConnect:
|
||||
return dht.peerstore.PeerInfo(id)
|
||||
default:
|
||||
return peer.AddrInfo{}
|
||||
}
|
||||
}
|
||||
|
||||
// nearestPeersToQuery returns the routing tables closest peers.
|
||||
func (dht *IpfsDHT) nearestPeersToQuery(pmes *pb.Message, count int) []peer.ID {
|
||||
closer := dht.routingTable.NearestPeers(kb.ConvertKey(string(pmes.GetKey())), count)
|
||||
return closer
|
||||
}
|
||||
|
||||
// betterPeersToQuery returns nearestPeersToQuery with some additional filtering
|
||||
func (dht *IpfsDHT) betterPeersToQuery(pmes *pb.Message, from peer.ID, count int) []peer.ID {
|
||||
closer := dht.nearestPeersToQuery(pmes, count)
|
||||
|
||||
// no node? nil
|
||||
if closer == nil {
|
||||
logger.Infow("no closer peers to send", from)
|
||||
return nil
|
||||
}
|
||||
|
||||
filtered := make([]peer.ID, 0, len(closer))
|
||||
for _, clp := range closer {
|
||||
|
||||
// == to self? that's bad
|
||||
if clp == dht.self {
|
||||
logger.Error("BUG betterPeersToQuery: attempted to return self! this shouldn't happen...")
|
||||
return nil
|
||||
}
|
||||
// Don't send a peer back to themselves
|
||||
if clp == from {
|
||||
continue
|
||||
}
|
||||
|
||||
filtered = append(filtered, clp)
|
||||
}
|
||||
|
||||
// ok seems like closer nodes
|
||||
return filtered
|
||||
}
|
||||
|
||||
func (dht *IpfsDHT) setMode(m mode) error {
|
||||
dht.modeLk.Lock()
|
||||
defer dht.modeLk.Unlock()
|
||||
|
||||
if m == dht.mode {
|
||||
return nil
|
||||
}
|
||||
|
||||
switch m {
|
||||
case modeServer:
|
||||
return dht.moveToServerMode()
|
||||
case modeClient:
|
||||
return dht.moveToClientMode()
|
||||
default:
|
||||
return fmt.Errorf("unrecognized dht mode: %d", m)
|
||||
}
|
||||
}
|
||||
|
||||
// moveToServerMode advertises (via libp2p identify updates) that we are able to respond to DHT queries and sets the appropriate stream handlers.
|
||||
// Note: We may support responding to queries with protocols aside from our primary ones in order to support
|
||||
// interoperability with older versions of the DHT protocol.
|
||||
func (dht *IpfsDHT) moveToServerMode() error {
|
||||
dht.mode = modeServer
|
||||
for _, p := range dht.serverProtocols {
|
||||
dht.host.SetStreamHandler(p, dht.handleNewStream)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// moveToClientMode stops advertising (and rescinds advertisements via libp2p identify updates) that we are able to
|
||||
// respond to DHT queries and removes the appropriate stream handlers. We also kill all inbound streams that were
|
||||
// utilizing the handled protocols.
|
||||
// Note: We may support responding to queries with protocols aside from our primary ones in order to support
|
||||
// interoperability with older versions of the DHT protocol.
|
||||
func (dht *IpfsDHT) moveToClientMode() error {
|
||||
dht.mode = modeClient
|
||||
for _, p := range dht.serverProtocols {
|
||||
dht.host.RemoveStreamHandler(p)
|
||||
}
|
||||
|
||||
pset := make(map[protocol.ID]bool)
|
||||
for _, p := range dht.serverProtocols {
|
||||
pset[p] = true
|
||||
}
|
||||
|
||||
for _, c := range dht.host.Network().Conns() {
|
||||
for _, s := range c.GetStreams() {
|
||||
if pset[s.Protocol()] {
|
||||
if s.Stat().Direction == network.DirInbound {
|
||||
_ = s.Reset()
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (dht *IpfsDHT) getMode() mode {
|
||||
dht.modeLk.Lock()
|
||||
defer dht.modeLk.Unlock()
|
||||
return dht.mode
|
||||
}
|
||||
|
||||
// Context returns the DHT's context.
|
||||
func (dht *IpfsDHT) Context() context.Context {
|
||||
return dht.ctx
|
||||
}
|
||||
|
||||
// RoutingTable returns the DHT's routingTable.
|
||||
func (dht *IpfsDHT) RoutingTable() *kb.RoutingTable {
|
||||
return dht.routingTable
|
||||
}
|
||||
|
||||
// Close calls Process Close.
|
||||
func (dht *IpfsDHT) Close() error {
|
||||
dht.cancel()
|
||||
dht.wg.Wait()
|
||||
|
||||
var wg sync.WaitGroup
|
||||
closes := [...]func() error{
|
||||
dht.rtRefreshManager.Close,
|
||||
dht.providerStore.Close,
|
||||
}
|
||||
var errors [len(closes)]error
|
||||
wg.Add(len(errors))
|
||||
for i, c := range closes {
|
||||
go func(i int, c func() error) {
|
||||
defer wg.Done()
|
||||
errors[i] = c()
|
||||
}(i, c)
|
||||
}
|
||||
wg.Wait()
|
||||
|
||||
return multierr.Combine(errors[:]...)
|
||||
}
|
||||
|
||||
func mkDsKey(s string) ds.Key {
|
||||
return ds.NewKey(base32.RawStdEncoding.EncodeToString([]byte(s)))
|
||||
}
|
||||
|
||||
// PeerID returns the DHT node's Peer ID.
|
||||
func (dht *IpfsDHT) PeerID() peer.ID {
|
||||
return dht.self
|
||||
}
|
||||
|
||||
// PeerKey returns a DHT key, converted from the DHT node's Peer ID.
|
||||
func (dht *IpfsDHT) PeerKey() []byte {
|
||||
return kb.ConvertPeerID(dht.self)
|
||||
}
|
||||
|
||||
// Host returns the libp2p host this DHT is operating with.
|
||||
func (dht *IpfsDHT) Host() host.Host {
|
||||
return dht.host
|
||||
}
|
||||
|
||||
// Ping sends a ping message to the passed peer and waits for a response.
|
||||
func (dht *IpfsDHT) Ping(ctx context.Context, p peer.ID) error {
|
||||
ctx, span := internal.StartSpan(ctx, "IpfsDHT.Ping", trace.WithAttributes(attribute.Stringer("PeerID", p)))
|
||||
defer span.End()
|
||||
return dht.protoMessenger.Ping(ctx, p)
|
||||
}
|
||||
|
||||
// NetworkSize returns the most recent estimation of the DHT network size.
|
||||
// EXPERIMENTAL: We do not provide any guarantees that this method will
|
||||
// continue to exist in the codebase. Use it at your own risk.
|
||||
func (dht *IpfsDHT) NetworkSize() (int32, error) {
|
||||
return dht.nsEstimator.NetworkSize()
|
||||
}
|
||||
|
||||
// newContextWithLocalTags returns a new context.Context with the InstanceID and
|
||||
// PeerID keys populated. It will also take any extra tags that need adding to
|
||||
// the context as tag.Mutators.
|
||||
func (dht *IpfsDHT) newContextWithLocalTags(ctx context.Context, extraTags ...tag.Mutator) context.Context {
|
||||
extraTags = append(
|
||||
extraTags,
|
||||
tag.Upsert(metrics.KeyPeerID, dht.self.String()),
|
||||
tag.Upsert(metrics.KeyInstanceID, fmt.Sprintf("%p", dht)),
|
||||
)
|
||||
ctx, _ = tag.New(
|
||||
ctx,
|
||||
extraTags...,
|
||||
) // ignoring error as it is unrelated to the actual function of this code.
|
||||
return ctx
|
||||
}
|
||||
|
||||
func (dht *IpfsDHT) maybeAddAddrs(p peer.ID, addrs []ma.Multiaddr, ttl time.Duration) {
|
||||
// Don't add addresses for self or our connected peers. We have better ones.
|
||||
if p == dht.self || dht.host.Network().Connectedness(p) == network.Connected {
|
||||
return
|
||||
}
|
||||
dht.peerstore.AddAddrs(p, dht.filterAddrs(addrs), ttl)
|
||||
}
|
||||
|
||||
func (dht *IpfsDHT) filterAddrs(addrs []ma.Multiaddr) []ma.Multiaddr {
|
||||
if f := dht.addrFilter; f != nil {
|
||||
return f(addrs)
|
||||
}
|
||||
return addrs
|
||||
}
|
84 go-libp2p-kad-dht/dht_bootstrap.go Normal file
@ -0,0 +1,84 @@
package dht

import (
	"context"
	"time"

	"github.com/libp2p/go-libp2p/core/peer"

	"github.com/multiformats/go-multiaddr"
)

// DefaultBootstrapPeers is a set of public DHT bootstrap peers provided by libp2p.
var DefaultBootstrapPeers []multiaddr.Multiaddr

// Minimum number of peers in the routing table. If we drop below this and we
// see a new peer, we trigger a bootstrap round.
var minRTRefreshThreshold = 10

const (
	periodicBootstrapInterval = 2 * time.Minute
	maxNBoostrappers          = 2
)

func init() {
	for _, s := range []string{
		"/dnsaddr/bootstrap.libp2p.io/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN",
		"/dnsaddr/bootstrap.libp2p.io/p2p/QmQCU2EcMqAqQPR2i9bChDtGNJchTbq5TbXJJ16u19uLTa",
		"/dnsaddr/bootstrap.libp2p.io/p2p/QmbLHAnMoJPWSCR5Zhtx6BHJX9KiKNN6tpvbUcqanj75Nb",
		"/dnsaddr/bootstrap.libp2p.io/p2p/QmcZf59bWwK5XFi76CZX8cbJ4BhTzzA3gU1ZjYZcYW3dwt",
		"/ip4/104.131.131.82/tcp/4001/p2p/QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ", // mars.i.ipfs.io
	} {
		ma, err := multiaddr.NewMultiaddr(s)
		if err != nil {
			panic(err)
		}
		DefaultBootstrapPeers = append(DefaultBootstrapPeers, ma)
	}
}

// GetDefaultBootstrapPeerAddrInfos returns the peer.AddrInfos for the default
// bootstrap peers so we can use these for initializing the DHT by passing these to the
// BootstrapPeers(...) option.
func GetDefaultBootstrapPeerAddrInfos() []peer.AddrInfo {
	ds := make([]peer.AddrInfo, 0, len(DefaultBootstrapPeers))

	for i := range DefaultBootstrapPeers {
		info, err := peer.AddrInfoFromP2pAddr(DefaultBootstrapPeers[i])
		if err != nil {
			logger.Errorw("failed to convert bootstrapper address to peer addr info", "address",
				DefaultBootstrapPeers[i].String(), err, "err")
			continue
		}
		ds = append(ds, *info)
	}
	return ds
}

// Bootstrap tells the DHT to get into a bootstrapped state satisfying the
// IpfsRouter interface.
func (dht *IpfsDHT) Bootstrap(ctx context.Context) (err error) {
	_, end := tracer.Bootstrap(dhtName, ctx)
	defer func() { end(err) }()

	dht.fixRTIfNeeded()
	dht.rtRefreshManager.RefreshNoWait()
	return nil
}

// RefreshRoutingTable tells the DHT to refresh its routing tables.
//
// The returned channel will block until the refresh finishes, then yield the
// error and close. The channel is buffered and safe to ignore.
func (dht *IpfsDHT) RefreshRoutingTable() <-chan error {
	return dht.rtRefreshManager.Refresh(false)
}

// ForceRefresh acts like RefreshRoutingTable but forces the DHT to refresh all
// buckets in the Routing Table irrespective of when they were last refreshed.
//
// The returned channel will block until the refresh finishes, then yield the
// error and close. The channel is buffered and safe to ignore.
func (dht *IpfsDHT) ForceRefresh() <-chan error {
	return dht.rtRefreshManager.Refresh(true)
}
201 go-libp2p-kad-dht/dht_bootstrap_test.go Normal file
@ -0,0 +1,201 @@
|
||||
package dht
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
kb "github.com/libp2p/go-libp2p-kbucket"
|
||||
"github.com/libp2p/go-libp2p/core/event"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestSelfWalkOnAddressChange(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
// create three DHT instances with auto refresh disabled
|
||||
d1 := setupDHT(ctx, t, false, DisableAutoRefresh(), forceAddressUpdateProcessing(t))
|
||||
d2 := setupDHT(ctx, t, false, DisableAutoRefresh())
|
||||
d3 := setupDHT(ctx, t, false, DisableAutoRefresh())
|
||||
|
||||
var connectedTo *IpfsDHT
|
||||
// connect d1 to whoever is "further"
|
||||
if kb.CommonPrefixLen(kb.ConvertPeerID(d1.self), kb.ConvertPeerID(d2.self)) <=
|
||||
kb.CommonPrefixLen(kb.ConvertPeerID(d1.self), kb.ConvertPeerID(d3.self)) {
|
||||
connect(t, ctx, d1, d3)
|
||||
connectedTo = d3
|
||||
} else {
|
||||
connect(t, ctx, d1, d2)
|
||||
connectedTo = d2
|
||||
}
|
||||
|
||||
// then connect d2 AND d3
|
||||
connect(t, ctx, d2, d3)
|
||||
|
||||
// d1 should have ONLY 1 peer in its RT
|
||||
waitForWellFormedTables(t, []*IpfsDHT{d1}, 1, 1, 2*time.Second)
|
||||
require.Equal(t, connectedTo.self, d1.routingTable.ListPeers()[0])
|
||||
|
||||
// now emit the address change event
|
||||
em, err := d1.host.EventBus().Emitter(&event.EvtLocalAddressesUpdated{})
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, em.Emit(event.EvtLocalAddressesUpdated{}))
|
||||
waitForWellFormedTables(t, []*IpfsDHT{d1}, 2, 2, 2*time.Second)
|
||||
// it should now have both peers in the RT
|
||||
ps := d1.routingTable.ListPeers()
|
||||
require.Contains(t, ps, d2.self)
|
||||
require.Contains(t, ps, d3.self)
|
||||
}
|
||||
|
||||
func TestDefaultBootstrappers(t *testing.T) {
|
||||
ds := GetDefaultBootstrapPeerAddrInfos()
|
||||
require.NotEmpty(t, ds)
|
||||
require.Len(t, ds, len(DefaultBootstrapPeers))
|
||||
|
||||
dfmap := make(map[peer.ID]peer.AddrInfo)
|
||||
for _, p := range DefaultBootstrapPeers {
|
||||
info, err := peer.AddrInfoFromP2pAddr(p)
|
||||
require.NoError(t, err)
|
||||
dfmap[info.ID] = *info
|
||||
}
|
||||
|
||||
for _, p := range ds {
|
||||
inf, ok := dfmap[p.ID]
|
||||
require.True(t, ok)
|
||||
require.ElementsMatch(t, p.Addrs, inf.Addrs)
|
||||
delete(dfmap, p.ID)
|
||||
}
|
||||
require.Empty(t, dfmap)
|
||||
}
|
||||
|
||||
func TestBootstrappersReplacable(t *testing.T) {
|
||||
old := rtFreezeTimeout
|
||||
rtFreezeTimeout = 100 * time.Millisecond
|
||||
defer func() {
|
||||
rtFreezeTimeout = old
|
||||
}()
|
||||
ctx := context.Background()
|
||||
d := setupDHT(ctx, t, false, disableFixLowPeersRoutine(t), BucketSize(2))
|
||||
defer d.host.Close()
|
||||
defer d.Close()
|
||||
|
||||
var d1 *IpfsDHT
|
||||
var d2 *IpfsDHT
|
||||
|
||||
// d1 & d2 have a cpl of 0
|
||||
for {
|
||||
d1 = setupDHT(ctx, t, false, disableFixLowPeersRoutine(t))
|
||||
if kb.CommonPrefixLen(d.selfKey, d1.selfKey) == 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
for {
|
||||
d2 = setupDHT(ctx, t, false, disableFixLowPeersRoutine(t))
|
||||
if kb.CommonPrefixLen(d.selfKey, d2.selfKey) == 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
defer d1.host.Close()
|
||||
defer d1.Close()
|
||||
|
||||
defer d2.host.Close()
|
||||
defer d2.Close()
|
||||
|
||||
connect(t, ctx, d, d1)
|
||||
connect(t, ctx, d, d2)
|
||||
require.Len(t, d.routingTable.ListPeers(), 2)
|
||||
|
||||
// d3 & d4 with cpl=0 will go in as d1 & d2 are replacable.
|
||||
var d3 *IpfsDHT
|
||||
var d4 *IpfsDHT
|
||||
|
||||
for {
|
||||
d3 = setupDHT(ctx, t, false, disableFixLowPeersRoutine(t))
|
||||
if kb.CommonPrefixLen(d.selfKey, d3.selfKey) == 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
for {
|
||||
d4 = setupDHT(ctx, t, false, disableFixLowPeersRoutine(t))
|
||||
if kb.CommonPrefixLen(d.selfKey, d4.selfKey) == 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
defer d3.host.Close()
|
||||
defer d3.Close()
|
||||
defer d4.host.Close()
|
||||
defer d4.Close()
|
||||
|
||||
connect(t, ctx, d, d3)
|
||||
connect(t, ctx, d, d4)
|
||||
require.Len(t, d.routingTable.ListPeers(), 2)
|
||||
require.Contains(t, d.routingTable.ListPeers(), d3.self)
|
||||
require.Contains(t, d.routingTable.ListPeers(), d4.self)
|
||||
|
||||
// do couple of refreshes and wait for the Routing Table to be "frozen".
|
||||
<-d.RefreshRoutingTable()
|
||||
<-d.RefreshRoutingTable()
|
||||
time.Sleep(1 * time.Second)
|
||||
|
||||
// adding d5 fails because RT is frozen
|
||||
var d5 *IpfsDHT
|
||||
for {
|
||||
d5 = setupDHT(ctx, t, false, disableFixLowPeersRoutine(t))
|
||||
if kb.CommonPrefixLen(d.selfKey, d5.selfKey) == 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
defer d5.host.Close()
|
||||
defer d5.Close()
|
||||
|
||||
connectNoSync(t, ctx, d, d5)
|
||||
time.Sleep(500 * time.Millisecond)
|
||||
require.Len(t, d.routingTable.ListPeers(), 2)
|
||||
require.Contains(t, d.routingTable.ListPeers(), d3.self)
|
||||
require.Contains(t, d.routingTable.ListPeers(), d4.self)
|
||||
|
||||
// Let's empty the routing table
|
||||
for _, p := range d.routingTable.ListPeers() {
|
||||
d.routingTable.RemovePeer(p)
|
||||
}
|
||||
require.Len(t, d.routingTable.ListPeers(), 0)
|
||||
|
||||
// adding d1 & d2 works now because there is space in the Routing Table
|
||||
require.NoError(t, d.host.Network().ClosePeer(d1.self))
|
||||
require.NoError(t, d.host.Network().ClosePeer(d2.self))
|
||||
connect(t, ctx, d, d1)
|
||||
connect(t, ctx, d, d2)
|
||||
require.Len(t, d.routingTable.ListPeers(), 2)
|
||||
require.Contains(t, d.routingTable.ListPeers(), d1.self)
|
||||
require.Contains(t, d.routingTable.ListPeers(), d2.self)
|
||||
|
||||
// adding d3 & d4 also works because the RT is not frozen.
|
||||
require.NoError(t, d.host.Network().ClosePeer(d3.self))
|
||||
require.NoError(t, d.host.Network().ClosePeer(d4.self))
|
||||
connect(t, ctx, d, d3)
|
||||
connect(t, ctx, d, d4)
|
||||
require.Len(t, d.routingTable.ListPeers(), 2)
|
||||
require.Contains(t, d.routingTable.ListPeers(), d3.self)
|
||||
require.Contains(t, d.routingTable.ListPeers(), d4.self)
|
||||
|
||||
// run refreshes and freeze the RT
|
||||
<-d.RefreshRoutingTable()
|
||||
<-d.RefreshRoutingTable()
|
||||
time.Sleep(1 * time.Second)
|
||||
// cant add d1 & d5 because RT is frozen.
|
||||
require.NoError(t, d.host.Network().ClosePeer(d1.self))
|
||||
require.NoError(t, d.host.Network().ClosePeer(d5.self))
|
||||
connectNoSync(t, ctx, d, d1)
|
||||
connectNoSync(t, ctx, d, d5)
|
||||
d.peerFound(d5.self)
|
||||
d.peerFound(d1.self)
|
||||
time.Sleep(1 * time.Second)
|
||||
|
||||
require.Len(t, d.routingTable.ListPeers(), 2)
|
||||
require.Contains(t, d.routingTable.ListPeers(), d3.self)
|
||||
require.Contains(t, d.routingTable.ListPeers(), d4.self)
|
||||
}
|
243 go-libp2p-kad-dht/dht_filters.go Normal file
@ -0,0 +1,243 @@
|
||||
package dht
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"net"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/host"
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
|
||||
"github.com/google/gopacket/routing"
|
||||
netroute "github.com/libp2p/go-netroute"
|
||||
|
||||
ma "github.com/multiformats/go-multiaddr"
|
||||
manet "github.com/multiformats/go-multiaddr/net"
|
||||
|
||||
dhtcfg "github.com/libp2p/go-libp2p-kad-dht/internal/config"
|
||||
)
|
||||
|
||||
// QueryFilterFunc is a filter applied when considering peers to dial when querying
|
||||
type QueryFilterFunc = dhtcfg.QueryFilterFunc
|
||||
|
||||
// RouteTableFilterFunc is a filter applied when considering connections to keep in
|
||||
// the local route table.
|
||||
type RouteTableFilterFunc = dhtcfg.RouteTableFilterFunc
|
||||
|
||||
var publicCIDR6 = "2000::/3"
|
||||
var public6 *net.IPNet
|
||||
|
||||
func init() {
|
||||
_, public6, _ = net.ParseCIDR(publicCIDR6)
|
||||
}
|
||||
|
||||
// isPublicAddr follows the logic of manet.IsPublicAddr, except it uses
|
||||
// a stricter definition of "public" for ipv6: namely "is it in 2000::/3"?
|
||||
func isPublicAddr(a ma.Multiaddr) bool {
|
||||
ip, err := manet.ToIP(a)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
if ip.To4() != nil {
|
||||
return !inAddrRange(ip, manet.Private4) && !inAddrRange(ip, manet.Unroutable4)
|
||||
}
|
||||
|
||||
return public6.Contains(ip)
|
||||
}
|
||||
|
||||
// isPrivateAddr follows the logic of manet.IsPrivateAddr, except that
|
||||
// it uses a stricter definition of "public" for ipv6
|
||||
func isPrivateAddr(a ma.Multiaddr) bool {
|
||||
ip, err := manet.ToIP(a)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
if ip.To4() != nil {
|
||||
return inAddrRange(ip, manet.Private4)
|
||||
}
|
||||
|
||||
return !public6.Contains(ip) && !inAddrRange(ip, manet.Unroutable6)
|
||||
}
|
||||
|
||||
// PublicQueryFilter returns true if the peer is suspected of being publicly accessible
|
||||
func PublicQueryFilter(_ interface{}, ai peer.AddrInfo) bool {
|
||||
if len(ai.Addrs) == 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
var hasPublicAddr bool
|
||||
for _, a := range ai.Addrs {
|
||||
if !isRelayAddr(a) && isPublicAddr(a) {
|
||||
hasPublicAddr = true
|
||||
}
|
||||
}
|
||||
return hasPublicAddr
|
||||
}
|
||||
|
||||
type hasHost interface {
|
||||
Host() host.Host
|
||||
}
|
||||
|
||||
var _ QueryFilterFunc = PublicQueryFilter
|
||||
|
||||
// PublicRoutingTableFilter allows a peer to be added to the routing table if the connections to that peer indicate
|
||||
// that it is on a public network
|
||||
func PublicRoutingTableFilter(dht interface{}, p peer.ID) bool {
|
||||
d := dht.(hasHost)
|
||||
|
||||
conns := d.Host().Network().ConnsToPeer(p)
|
||||
if len(conns) == 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
// Do we have a public address for this peer?
|
||||
id := conns[0].RemotePeer()
|
||||
known := d.Host().Peerstore().PeerInfo(id)
|
||||
for _, a := range known.Addrs {
|
||||
if !isRelayAddr(a) && isPublicAddr(a) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
var _ RouteTableFilterFunc = PublicRoutingTableFilter
|
||||
|
||||
// PrivateQueryFilter doesn't currently restrict which peers we are willing to query from the local DHT.
|
||||
func PrivateQueryFilter(_ interface{}, ai peer.AddrInfo) bool {
|
||||
return len(ai.Addrs) > 0
|
||||
}
|
||||
|
||||
var _ QueryFilterFunc = PrivateQueryFilter
|
||||
|
||||
// We call this very frequently but routes can technically change at runtime.
|
||||
// Cache it for two minutes.
|
||||
const routerCacheTime = 2 * time.Minute
|
||||
|
||||
var routerCache struct {
|
||||
sync.RWMutex
|
||||
router routing.Router
|
||||
expires time.Time
|
||||
}
|
||||
|
||||
func getCachedRouter() routing.Router {
|
||||
routerCache.RLock()
|
||||
router := routerCache.router
|
||||
expires := routerCache.expires
|
||||
routerCache.RUnlock()
|
||||
|
||||
if time.Now().Before(expires) {
|
||||
return router
|
||||
}
|
||||
|
||||
routerCache.Lock()
|
||||
defer routerCache.Unlock()
|
||||
|
||||
now := time.Now()
|
||||
if now.Before(routerCache.expires) {
|
||||
return router
|
||||
}
|
||||
routerCache.router, _ = netroute.New()
|
||||
routerCache.expires = now.Add(routerCacheTime)
|
||||
return router
|
||||
}
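getCachedRouter hands back a shared netroute handle that is refreshed at most every two minutes. A minimal usage sketch (the destination IP is illustrative), mirroring the gateway check performed in privRTFilter below:

if r := getCachedRouter(); r != nil {
	// A nil gateway with a nil error means the OS routing table reaches the
	// destination directly, i.e. without going through a gateway.
	if _, gw, _, err := r.Route(net.ParseIP("203.0.113.7")); err == nil && gw == nil {
		// treat the destination as directly reachable / "local"
	}
}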
|
||||
|
||||
// PrivateRoutingTableFilter allows a peer to be added to the routing table if the connections to that peer indicate
|
||||
// that it is on a private network
|
||||
func PrivateRoutingTableFilter(dht interface{}, p peer.ID) bool {
|
||||
d := dht.(hasHost)
|
||||
conns := d.Host().Network().ConnsToPeer(p)
|
||||
return privRTFilter(d, conns)
|
||||
}
|
||||
|
||||
func privRTFilter(dht interface{}, conns []network.Conn) bool {
|
||||
d := dht.(hasHost)
|
||||
h := d.Host()
|
||||
|
||||
router := getCachedRouter()
|
||||
myAdvertisedIPs := make([]net.IP, 0)
|
||||
for _, a := range h.Addrs() {
|
||||
if isPublicAddr(a) && !isRelayAddr(a) {
|
||||
ip, err := manet.ToIP(a)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
myAdvertisedIPs = append(myAdvertisedIPs, ip)
|
||||
}
|
||||
}
|
||||
|
||||
for _, c := range conns {
|
||||
ra := c.RemoteMultiaddr()
|
||||
if isPrivateAddr(ra) && !isRelayAddr(ra) {
|
||||
return true
|
||||
}
|
||||
|
||||
if isPublicAddr(ra) {
|
||||
ip, err := manet.ToIP(ra)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// if the ip is the same as one of the local host's public advertised IPs - then consider it local
|
||||
for _, i := range myAdvertisedIPs {
|
||||
if i.Equal(ip) {
|
||||
return true
|
||||
}
|
||||
if ip.To4() == nil {
|
||||
if i.To4() == nil && isEUI(ip) && sameV6Net(i, ip) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// if there's no gateway - a direct host in the OS routing table - then consider it local
|
||||
// This is relevant in particular to ipv6 networks where the addresses may all be public,
|
||||
// but the nodes are aware of direct links between each other.
|
||||
if router != nil {
|
||||
_, gw, _, err := router.Route(ip)
|
||||
if gw == nil && err == nil {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
var _ RouteTableFilterFunc = PrivateRoutingTableFilter
|
||||
|
||||
func isEUI(ip net.IP) bool {
|
||||
// per rfc 2373
|
||||
return len(ip) == net.IPv6len && ip[11] == 0xff && ip[12] == 0xfe
|
||||
}
|
||||
|
||||
func sameV6Net(a, b net.IP) bool {
|
||||
//lint:ignore SA1021 We're comparing only parts of the IP address here.
|
||||
return len(a) == net.IPv6len && len(b) == net.IPv6len && bytes.Equal(a[0:8], b[0:8]) //nolint
|
||||
}
|
||||
|
||||
func isRelayAddr(a ma.Multiaddr) bool {
|
||||
found := false
|
||||
ma.ForEach(a, func(c ma.Component, e error) bool {
|
||||
if e != nil {
|
||||
return false
|
||||
}
|
||||
found = c.Protocol().Code == ma.P_CIRCUIT
|
||||
return !found
|
||||
})
|
||||
return found
|
||||
}
|
||||
|
||||
func inAddrRange(ip net.IP, ipnets []*net.IPNet) bool {
|
||||
for _, ipnet := range ipnets {
|
||||
if ipnet.Contains(ip) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
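These filters are consumed through the QueryFilter and RoutingTableFilter options defined in dht_options.go rather than called directly. A hedged sketch of wiring them up from application code (the host and context are assumed to exist; newPublicOnlyDHT is a hypothetical helper, not part of this change):

import (
	"context"

	dht "github.com/libp2p/go-libp2p-kad-dht"
	"github.com/libp2p/go-libp2p/core/host"
)

// newPublicOnlyDHT builds a DHT that only queries and retains publicly
// reachable peers, using the filters defined above.
func newPublicOnlyDHT(ctx context.Context, h host.Host) (*dht.IpfsDHT, error) {
	return dht.New(ctx, h,
		dht.QueryFilter(dht.PublicQueryFilter),
		dht.RoutingTableFilter(dht.PublicRoutingTableFilter),
	)
}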
|
go-libp2p-kad-dht/dht_filters_test.go (new file, 79 lines)
@@ -0,0 +1,79 @@
|
||||
package dht
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
|
||||
ic "github.com/libp2p/go-libp2p/core/crypto"
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
ma "github.com/multiformats/go-multiaddr"
|
||||
manet "github.com/multiformats/go-multiaddr/net"
|
||||
)
|
||||
|
||||
func TestIsRelay(t *testing.T) {
|
||||
a, _ := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/5002/p2p/QmdPU7PfRyKehdrP5A3WqmjyD6bhVpU1mLGKppa2FjGDjZ/p2p-circuit/p2p/QmVT6GYwjeeAF5TR485Yc58S3xRF5EFsZ5YAF4VcP3URHt")
|
||||
if !isRelayAddr(a) {
|
||||
t.Fatalf("thought %s was not a relay", a)
|
||||
}
|
||||
a, _ = ma.NewMultiaddr("/p2p-circuit/p2p/QmVT6GYwjeeAF5TR485Yc58S3xRF5EFsZ5YAF4VcP3URHt")
|
||||
if !isRelayAddr(a) {
|
||||
t.Fatalf("thought %s was not a relay", a)
|
||||
}
|
||||
a, _ = ma.NewMultiaddr("/ip4/127.0.0.1/tcp/5002/p2p/QmdPU7PfRyKehdrP5A3WqmjyD6bhVpU1mLGKppa2FjGDjZ")
|
||||
if isRelayAddr(a) {
|
||||
t.Fatalf("thought %s was a relay", a)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
type mockConn struct {
|
||||
local peer.AddrInfo
|
||||
remote peer.AddrInfo
|
||||
|
||||
isClosed atomic.Bool
|
||||
}
|
||||
|
||||
var _ network.Conn = (*mockConn)(nil)
|
||||
|
||||
func (m *mockConn) ID() string { return "0" }
|
||||
func (m *mockConn) Close() error {
|
||||
m.isClosed.Store(true)
|
||||
return nil
|
||||
}
|
||||
func (m *mockConn) NewStream(context.Context) (network.Stream, error) { return nil, nil }
|
||||
func (m *mockConn) GetStreams() []network.Stream { return []network.Stream{} }
|
||||
func (m *mockConn) Stat() network.ConnStats {
|
||||
return network.ConnStats{Stats: network.Stats{Direction: network.DirOutbound}}
|
||||
}
|
||||
func (m *mockConn) Scope() network.ConnScope { return &network.NullScope{} }
|
||||
func (m *mockConn) LocalMultiaddr() ma.Multiaddr { return m.local.Addrs[0] }
|
||||
func (m *mockConn) RemoteMultiaddr() ma.Multiaddr { return m.remote.Addrs[0] }
|
||||
func (m *mockConn) LocalPeer() peer.ID { return m.local.ID }
|
||||
func (m *mockConn) LocalPrivateKey() ic.PrivKey { return nil }
|
||||
func (m *mockConn) RemotePeer() peer.ID { return m.remote.ID }
|
||||
func (m *mockConn) RemotePublicKey() ic.PubKey { return nil }
|
||||
func (m *mockConn) ConnState() network.ConnectionState { return network.ConnectionState{} }
|
||||
func (m *mockConn) IsClosed() bool { return m.isClosed.Load() }
|
||||
|
||||
func TestFilterCaching(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
d := setupDHT(ctx, t, true)
|
||||
|
||||
remote, _ := manet.FromIP(net.IPv4(8, 8, 8, 8))
|
||||
if privRTFilter(d, []network.Conn{&mockConn{
|
||||
local: d.Host().Peerstore().PeerInfo(d.Host().ID()),
|
||||
remote: peer.AddrInfo{ID: "", Addrs: []ma.Multiaddr{remote}},
|
||||
}}) {
|
||||
t.Fatal("filter should prevent public remote peers.")
|
||||
}
|
||||
|
||||
r1 := getCachedRouter()
|
||||
r2 := getCachedRouter()
|
||||
if r1 != r2 {
|
||||
t.Fatal("router should be returned multiple times.")
|
||||
}
|
||||
}
|
go-libp2p-kad-dht/dht_net.go (new file, 166 lines)
@@ -0,0 +1,166 @@
|
||||
package dht
|
||||
|
||||
import (
|
||||
"io"
|
||||
"time"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
|
||||
"github.com/libp2p/go-libp2p-kad-dht/internal/net"
|
||||
"github.com/libp2p/go-libp2p-kad-dht/metrics"
|
||||
pb "github.com/libp2p/go-libp2p-kad-dht/pb"
|
||||
|
||||
"github.com/libp2p/go-msgio"
|
||||
"go.opencensus.io/stats"
|
||||
"go.opencensus.io/tag"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
var dhtStreamIdleTimeout = 1 * time.Minute
|
||||
|
||||
// ErrReadTimeout is an error that occurs when no message is read within the timeout period.
|
||||
var ErrReadTimeout = net.ErrReadTimeout
|
||||
|
||||
// handleNewStream implements the network.StreamHandler
|
||||
func (dht *IpfsDHT) handleNewStream(s network.Stream) {
|
||||
if dht.handleNewMessage(s) {
|
||||
// If we exited without error, close gracefully.
|
||||
_ = s.Close()
|
||||
} else {
|
||||
// otherwise, send an error.
|
||||
_ = s.Reset()
|
||||
}
|
||||
}
|
||||
|
||||
// Returns true on orderly completion of writes (so we can Close the stream).
|
||||
func (dht *IpfsDHT) handleNewMessage(s network.Stream) bool {
|
||||
ctx := dht.ctx
|
||||
r := msgio.NewVarintReaderSize(s, network.MessageSizeMax)
|
||||
|
||||
mPeer := s.Conn().RemotePeer()
|
||||
|
||||
timer := time.AfterFunc(dhtStreamIdleTimeout, func() { _ = s.Reset() })
|
||||
defer timer.Stop()
|
||||
|
||||
for {
|
||||
if dht.getMode() != modeServer {
|
||||
logger.Debugf("ignoring incoming dht message while not in server mode")
|
||||
return false
|
||||
}
|
||||
|
||||
var req pb.Message
|
||||
msgbytes, err := r.ReadMsg()
|
||||
msgLen := len(msgbytes)
|
||||
if err != nil {
|
||||
r.ReleaseMsg(msgbytes)
|
||||
if err == io.EOF {
|
||||
return true
|
||||
}
|
||||
// This string test is necessary because there isn't a single stream reset error
|
||||
// instance in use.
|
||||
if c := baseLogger.Check(zap.DebugLevel, "error reading message"); c != nil && err.Error() != "stream reset" {
|
||||
c.Write(zap.String("from", mPeer.String()),
|
||||
zap.Error(err))
|
||||
}
|
||||
if msgLen > 0 {
|
||||
_ = stats.RecordWithTags(ctx,
|
||||
[]tag.Mutator{tag.Upsert(metrics.KeyMessageType, "UNKNOWN")},
|
||||
metrics.ReceivedMessages.M(1),
|
||||
metrics.ReceivedMessageErrors.M(1),
|
||||
metrics.ReceivedBytes.M(int64(msgLen)),
|
||||
)
|
||||
}
|
||||
return false
|
||||
}
|
||||
err = req.Unmarshal(msgbytes)
|
||||
r.ReleaseMsg(msgbytes)
|
||||
if err != nil {
|
||||
if c := baseLogger.Check(zap.DebugLevel, "error unmarshaling message"); c != nil {
|
||||
c.Write(zap.String("from", mPeer.String()),
|
||||
zap.Error(err))
|
||||
}
|
||||
_ = stats.RecordWithTags(ctx,
|
||||
[]tag.Mutator{tag.Upsert(metrics.KeyMessageType, "UNKNOWN")},
|
||||
metrics.ReceivedMessages.M(1),
|
||||
metrics.ReceivedMessageErrors.M(1),
|
||||
metrics.ReceivedBytes.M(int64(msgLen)),
|
||||
)
|
||||
return false
|
||||
}
|
||||
|
||||
timer.Reset(dhtStreamIdleTimeout)
|
||||
|
||||
startTime := time.Now()
|
||||
ctx, _ := tag.New(ctx,
|
||||
tag.Upsert(metrics.KeyMessageType, req.GetType().String()),
|
||||
)
|
||||
|
||||
stats.Record(ctx,
|
||||
metrics.ReceivedMessages.M(1),
|
||||
metrics.ReceivedBytes.M(int64(msgLen)),
|
||||
)
|
||||
|
||||
handler := dht.handlerForMsgType(req.GetType())
|
||||
if handler == nil {
|
||||
stats.Record(ctx, metrics.ReceivedMessageErrors.M(1))
|
||||
if c := baseLogger.Check(zap.DebugLevel, "can't handle received message"); c != nil {
|
||||
c.Write(zap.String("from", mPeer.String()),
|
||||
zap.Int32("type", int32(req.GetType())))
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
if c := baseLogger.Check(zap.DebugLevel, "handling message"); c != nil {
|
||||
c.Write(zap.String("from", mPeer.String()),
|
||||
zap.Int32("type", int32(req.GetType())),
|
||||
zap.Binary("key", req.GetKey()))
|
||||
}
|
||||
resp, err := handler(ctx, mPeer, &req)
|
||||
if err != nil {
|
||||
stats.Record(ctx, metrics.ReceivedMessageErrors.M(1))
|
||||
if c := baseLogger.Check(zap.DebugLevel, "error handling message"); c != nil {
|
||||
c.Write(zap.String("from", mPeer.String()),
|
||||
zap.Int32("type", int32(req.GetType())),
|
||||
zap.Binary("key", req.GetKey()),
|
||||
zap.Error(err))
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
if c := baseLogger.Check(zap.DebugLevel, "handled message"); c != nil {
|
||||
c.Write(zap.String("from", mPeer.String()),
|
||||
zap.Int32("type", int32(req.GetType())),
|
||||
zap.Binary("key", req.GetKey()),
|
||||
zap.Duration("time", time.Since(startTime)))
|
||||
}
|
||||
|
||||
if resp == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// send out response msg
|
||||
err = net.WriteMsg(s, resp)
|
||||
if err != nil {
|
||||
stats.Record(ctx, metrics.ReceivedMessageErrors.M(1))
|
||||
if c := baseLogger.Check(zap.DebugLevel, "error writing response"); c != nil {
|
||||
c.Write(zap.String("from", mPeer.String()),
|
||||
zap.Int32("type", int32(req.GetType())),
|
||||
zap.Binary("key", req.GetKey()),
|
||||
zap.Error(err))
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
elapsedTime := time.Since(startTime)
|
||||
|
||||
if c := baseLogger.Check(zap.DebugLevel, "responded to message"); c != nil {
|
||||
c.Write(zap.String("from", mPeer.String()),
|
||||
zap.Int32("type", int32(req.GetType())),
|
||||
zap.Binary("key", req.GetKey()),
|
||||
zap.Duration("time", elapsedTime))
|
||||
}
|
||||
|
||||
latencyMillis := float64(elapsedTime) / float64(time.Millisecond)
|
||||
stats.Record(ctx, metrics.InboundRequestLatency.M(latencyMillis))
|
||||
}
|
||||
}
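handleNewMessage above consumes varint-delimited protobuf messages via msgio and replies with net.WriteMsg. For context, a minimal sketch of the sending side of the same framing, assuming the internal net and pb helpers are in scope (they are internal to this module, so this is illustrative only):

// sendFindNode is a hypothetical helper: it opens a stream to p, writes one
// varint-delimited FIND_NODE request, and closes the stream.
func sendFindNode(ctx context.Context, h host.Host, p peer.ID, proto protocol.ID, key []byte) error {
	s, err := h.NewStream(ctx, p, proto)
	if err != nil {
		return err
	}
	defer s.Close()

	req := pb.NewMessage(pb.Message_FIND_NODE, key, 0)
	return net.WriteMsg(s, req)
}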
|
go-libp2p-kad-dht/dht_options.go (new file, 358 lines)
@@ -0,0 +1,358 @@
|
||||
package dht
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
dhtcfg "github.com/libp2p/go-libp2p-kad-dht/internal/config"
|
||||
"github.com/libp2p/go-libp2p-kad-dht/providers"
|
||||
"github.com/libp2p/go-libp2p-kbucket/peerdiversity"
|
||||
record "github.com/libp2p/go-libp2p-record"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/libp2p/go-libp2p/core/protocol"
|
||||
|
||||
ds "github.com/ipfs/go-datastore"
|
||||
ma "github.com/multiformats/go-multiaddr"
|
||||
)
|
||||
|
||||
// ModeOpt describes what mode the dht should operate in
|
||||
type ModeOpt = dhtcfg.ModeOpt
|
||||
|
||||
const (
|
||||
// ModeAuto utilizes EvtLocalReachabilityChanged events sent over the event bus to dynamically switch the DHT
|
||||
// between Client and Server modes based on network conditions
|
||||
ModeAuto ModeOpt = iota
|
||||
// ModeClient operates the DHT as a client only, it cannot respond to incoming queries
|
||||
ModeClient
|
||||
// ModeServer operates the DHT as a server, it can both send and respond to queries
|
||||
ModeServer
|
||||
// ModeAutoServer operates in the same way as ModeAuto, but acts as a server when reachability is unknown
|
||||
ModeAutoServer
|
||||
)
|
||||
|
||||
// DefaultPrefix is the application specific prefix attached to all DHT protocols by default.
|
||||
const DefaultPrefix protocol.ID = "/ipfs"
|
||||
|
||||
type Option = dhtcfg.Option
|
||||
|
||||
// ProviderStore sets the provider storage manager.
|
||||
func ProviderStore(ps providers.ProviderStore) Option {
|
||||
return func(c *dhtcfg.Config) error {
|
||||
c.ProviderStore = ps
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// RoutingTableLatencyTolerance sets the maximum acceptable latency for peers
|
||||
// in the routing table's cluster.
|
||||
func RoutingTableLatencyTolerance(latency time.Duration) Option {
|
||||
return func(c *dhtcfg.Config) error {
|
||||
c.RoutingTable.LatencyTolerance = latency
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// RoutingTableRefreshQueryTimeout sets the timeout for routing table refresh
|
||||
// queries.
|
||||
func RoutingTableRefreshQueryTimeout(timeout time.Duration) Option {
|
||||
return func(c *dhtcfg.Config) error {
|
||||
c.RoutingTable.RefreshQueryTimeout = timeout
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// RoutingTableRefreshPeriod sets the period for refreshing buckets in the
|
||||
// routing table. The DHT will refresh buckets every period by:
|
||||
//
|
||||
// 1. First searching for nearby peers to figure out how many buckets we should try to fill.
|
||||
// 2. Then searching for a random key in each bucket that hasn't been queried in
|
||||
// the last refresh period.
|
||||
func RoutingTableRefreshPeriod(period time.Duration) Option {
|
||||
return func(c *dhtcfg.Config) error {
|
||||
c.RoutingTable.RefreshInterval = period
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// Datastore configures the DHT to use the specified datastore.
|
||||
//
|
||||
// Defaults to an in-memory (temporary) map.
|
||||
func Datastore(ds ds.Batching) Option {
|
||||
return func(c *dhtcfg.Config) error {
|
||||
c.Datastore = ds
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// Mode configures which mode the DHT operates in (Client, Server, Auto).
|
||||
//
|
||||
// Defaults to ModeAuto.
|
||||
func Mode(m ModeOpt) Option {
|
||||
return func(c *dhtcfg.Config) error {
|
||||
c.Mode = m
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// Validator configures the DHT to use the specified validator.
|
||||
//
|
||||
// Defaults to a namespaced validator that can validate both public key (under the "pk"
|
||||
// namespace) and IPNS records (under the "ipns" namespace). Setting the validator
|
||||
// implies that the user wants to control the validators and therefore the default
|
||||
// public key and IPNS validators will not be added.
|
||||
func Validator(v record.Validator) Option {
|
||||
return func(c *dhtcfg.Config) error {
|
||||
c.Validator = v
|
||||
c.ValidatorChanged = true
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// NamespacedValidator adds a validator namespaced under `ns`. This option fails
|
||||
// if the DHT is not using a `record.NamespacedValidator` as its validator (it
|
||||
// uses one by default but this can be overridden with the `Validator` option).
|
||||
// Adding a namespaced validator without changing the `Validator` will result in
|
||||
// adding a new validator in addition to the default public key and IPNS validators.
|
||||
// The "pk" and "ipns" namespaces cannot be overridden here unless a new `Validator`
|
||||
// has been set first.
|
||||
//
|
||||
// Example: Given a validator registered as `NamespacedValidator("ipns",
|
||||
// myValidator)`, all records with keys starting with `/ipns/` will be validated
|
||||
// with `myValidator`.
|
||||
func NamespacedValidator(ns string, v record.Validator) Option {
|
||||
return func(c *dhtcfg.Config) error {
|
||||
nsval, ok := c.Validator.(record.NamespacedValidator)
|
||||
if !ok {
|
||||
return fmt.Errorf("can only add namespaced validators to a NamespacedValidator")
|
||||
}
|
||||
nsval[ns] = v
|
||||
return nil
|
||||
}
|
||||
}
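A hedged sketch of using this option from application code, matching the pattern in the tests below; myValidator stands in for any record.Validator implementation and is an assumption, not part of this change:

d, err := dht.New(ctx, h,
	// Adds a validator for the "/v/..." keyspace alongside the default
	// "pk" and "ipns" validators.
	dht.NamespacedValidator("v", myValidator),
)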
|
||||
|
||||
// ProtocolPrefix sets an application specific prefix to be attached to all DHT protocols. For example,
|
||||
// /myapp/kad/1.0.0 instead of /ipfs/kad/1.0.0. Prefix should be of the form /myapp.
|
||||
//
|
||||
// Defaults to dht.DefaultPrefix
|
||||
func ProtocolPrefix(prefix protocol.ID) Option {
|
||||
return func(c *dhtcfg.Config) error {
|
||||
c.ProtocolPrefix = prefix
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// ProtocolExtension adds an application specific protocol to the DHT protocol. For example,
|
||||
// /ipfs/lan/kad/1.0.0 instead of /ipfs/kad/1.0.0. extension should be of the form /lan.
|
||||
func ProtocolExtension(ext protocol.ID) Option {
|
||||
return func(c *dhtcfg.Config) error {
|
||||
c.ProtocolPrefix += ext
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// V1ProtocolOverride overrides the protocolID used for /kad/1.0.0 with another. This is an
|
||||
// advanced feature, and should only be used to handle legacy networks that have not been
|
||||
// using protocolIDs of the form /app/kad/1.0.0.
|
||||
//
|
||||
// This option will override and ignore the ProtocolPrefix and ProtocolExtension options
|
||||
func V1ProtocolOverride(proto protocol.ID) Option {
|
||||
return func(c *dhtcfg.Config) error {
|
||||
c.V1ProtocolOverride = proto
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// BucketSize configures the bucket size (k in the Kademlia paper) of the routing table.
|
||||
//
|
||||
// The default value is 20.
|
||||
func BucketSize(bucketSize int) Option {
|
||||
return func(c *dhtcfg.Config) error {
|
||||
c.BucketSize = bucketSize
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// Concurrency configures the number of concurrent requests (alpha in the Kademlia paper) for a given query path.
|
||||
//
|
||||
// The default value is 10.
|
||||
func Concurrency(alpha int) Option {
|
||||
return func(c *dhtcfg.Config) error {
|
||||
c.Concurrency = alpha
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// Resiliency configures the number of peers closest to a target that must have responded in order for a given query
|
||||
// path to complete.
|
||||
//
|
||||
// The default value is 3.
|
||||
func Resiliency(beta int) Option {
|
||||
return func(c *dhtcfg.Config) error {
|
||||
c.Resiliency = beta
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// LookupCheckConcurrency configures the maximum number of goroutines that can be used to
// perform a lookup check operation, before adding a new node to the routing table.
|
||||
func LookupCheckConcurrency(n int) Option {
|
||||
return func(c *dhtcfg.Config) error {
|
||||
c.LookupCheckConcurrency = n
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// MaxRecordAge specifies the maximum time that any node will hold onto a record ("PutValue record")
// from the time it's received. This does not apply to any other forms of validity that
// the record may contain.
// For example, a record may contain an ipns entry with an EOL saying it's valid
// until the year 2020 (a great time in the future). For that record to stick around
// it must be rebroadcast more frequently than once every 'MaxRecordAge'.
|
||||
func MaxRecordAge(maxAge time.Duration) Option {
|
||||
return func(c *dhtcfg.Config) error {
|
||||
c.MaxRecordAge = maxAge
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// DisableAutoRefresh completely disables 'auto-refresh' on the DHT routing
|
||||
// table. This means that we will neither refresh the routing table periodically
|
||||
// nor when the routing table size goes below the minimum threshold.
|
||||
func DisableAutoRefresh() Option {
|
||||
return func(c *dhtcfg.Config) error {
|
||||
c.RoutingTable.AutoRefresh = false
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// DisableProviders disables storing and retrieving provider records.
|
||||
//
|
||||
// Defaults to enabled.
|
||||
//
|
||||
// WARNING: do not change this unless you're using a forked DHT (i.e., a private
|
||||
// network and/or distinct DHT protocols with the `Protocols` option).
|
||||
func DisableProviders() Option {
|
||||
return func(c *dhtcfg.Config) error {
|
||||
c.EnableProviders = false
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// DisableValues disables storing and retrieving value records (including
|
||||
// public keys).
|
||||
//
|
||||
// Defaults to enabled.
|
||||
//
|
||||
// WARNING: do not change this unless you're using a forked DHT (i.e., a private
|
||||
// network and/or distinct DHT protocols with the `Protocols` option).
|
||||
func DisableValues() Option {
|
||||
return func(c *dhtcfg.Config) error {
|
||||
c.EnableValues = false
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// QueryFilter sets a function that approves which peers may be dialed in a query
|
||||
func QueryFilter(filter QueryFilterFunc) Option {
|
||||
return func(c *dhtcfg.Config) error {
|
||||
c.QueryPeerFilter = filter
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// RoutingTableFilter sets a function that approves which peers may be added to the routing table. The host should
|
||||
// already have at least one connection to the peer under consideration.
|
||||
func RoutingTableFilter(filter RouteTableFilterFunc) Option {
|
||||
return func(c *dhtcfg.Config) error {
|
||||
c.RoutingTable.PeerFilter = filter
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// BootstrapPeers configures the bootstrapping nodes that we will connect to in order to seed
|
||||
// and refresh our Routing Table if it becomes empty.
|
||||
func BootstrapPeers(bootstrappers ...peer.AddrInfo) Option {
|
||||
return func(c *dhtcfg.Config) error {
|
||||
c.BootstrapPeers = func() []peer.AddrInfo {
|
||||
return bootstrappers
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// BootstrapPeersFunc configures the function that returns the bootstrapping nodes that we will
|
||||
// connect to in order to seed and refresh our Routing Table if it becomes empty.
|
||||
func BootstrapPeersFunc(getBootstrapPeers func() []peer.AddrInfo) Option {
|
||||
return func(c *dhtcfg.Config) error {
|
||||
c.BootstrapPeers = getBootstrapPeers
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// RoutingTablePeerDiversityFilter configures the implementation of the `PeerIPGroupFilter` that will be used
|
||||
// to construct the diversity filter for the Routing Table.
|
||||
// Please see the docs for `peerdiversity.PeerIPGroupFilter` AND `peerdiversity.Filter` for more details.
|
||||
func RoutingTablePeerDiversityFilter(pg peerdiversity.PeerIPGroupFilter) Option {
|
||||
return func(c *dhtcfg.Config) error {
|
||||
c.RoutingTable.DiversityFilter = pg
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// disableFixLowPeersRoutine disables the "fixLowPeers" routine in the DHT.
|
||||
// This is ONLY for tests.
|
||||
func disableFixLowPeersRoutine(t *testing.T) Option {
|
||||
return func(c *dhtcfg.Config) error {
|
||||
c.DisableFixLowPeers = true
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// forceAddressUpdateProcessing forces the DHT to handle changes to the host's addresses.
|
||||
// This occurs even when AutoRefresh has been disabled.
|
||||
// This is ONLY for tests.
|
||||
func forceAddressUpdateProcessing(t *testing.T) Option {
|
||||
return func(c *dhtcfg.Config) error {
|
||||
c.TestAddressUpdateProcessing = true
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// EnableOptimisticProvide enables an optimization that skips the last hops of the provide process.
|
||||
// This works by using the network size estimator (which uses the keyspace density of queries)
|
||||
// to optimistically send ADD_PROVIDER requests when we most likely have found the last hop.
|
||||
// It will also run some ADD_PROVIDER requests asynchronously in the background after returning;
// this allows it to optimistically return earlier if some threshold number of RPCs have succeeded.
|
||||
// The number of background/in-flight queries can be configured with the OptimisticProvideJobsPoolSize
|
||||
// option.
|
||||
//
|
||||
// EXPERIMENTAL: This is an experimental option and might be removed in the future. Use at your own risk.
|
||||
func EnableOptimisticProvide() Option {
|
||||
return func(c *dhtcfg.Config) error {
|
||||
c.EnableOptimisticProvide = true
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// OptimisticProvideJobsPoolSize allows configuring the asynchronicity limit for in-flight ADD_PROVIDER RPCs.
|
||||
// It makes sense to set it to a multiple of optProvReturnRatio * BucketSize. Check the description of
|
||||
// EnableOptimisticProvide for more details.
|
||||
//
|
||||
// EXPERIMENTAL: This is an experimental option and might be removed in the future. Use at your own risk.
|
||||
func OptimisticProvideJobsPoolSize(size int) Option {
|
||||
return func(c *dhtcfg.Config) error {
|
||||
c.OptimisticProvideJobsPoolSize = size
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// AddressFilter allows configuring the address filtering function.
|
||||
// This function is run before addresses are added to the peerstore.
|
||||
// It is most useful to avoid adding localhost / local addresses.
|
||||
func AddressFilter(f func([]ma.Multiaddr) []ma.Multiaddr) Option {
|
||||
return func(c *dhtcfg.Config) error {
|
||||
c.AddressFilter = f
|
||||
return nil
|
||||
}
|
||||
}
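Taken together these options compose in a single dht.New call. A hedged sketch of a server-mode configuration (the concrete values are illustrative, not recommendations made by this change):

func newServerDHT(ctx context.Context, h host.Host) (*dht.IpfsDHT, error) {
	return dht.New(ctx, h,
		dht.Mode(dht.ModeServer),
		dht.ProtocolPrefix("/myapp"),
		dht.BucketSize(20),
		dht.Concurrency(10),
		dht.Resiliency(3),
		dht.MaxRecordAge(36*time.Hour),
	)
}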
|
go-libp2p-kad-dht/dht_test.go (new file, 2478 lines; diff suppressed because it is too large)
go-libp2p-kad-dht/doc.go (new file, 3 lines)
@@ -0,0 +1,3 @@
|
||||
// Package dht implements a distributed hash table that satisfies the ipfs routing
|
||||
// interface. This DHT is modeled after kademlia with S/Kademlia modifications.
|
||||
package dht
|
go-libp2p-kad-dht/dual/dual.go (new file, 394 lines)
@@ -0,0 +1,394 @@
|
||||
// Package dual provides an implementation of a split or "dual" dht, where two parallel instances
|
||||
// are maintained for the global internet and the local LAN respectively.
|
||||
package dual
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
dht "github.com/libp2p/go-libp2p-kad-dht"
|
||||
"github.com/libp2p/go-libp2p-kad-dht/internal"
|
||||
"github.com/libp2p/go-libp2p-routing-helpers/tracing"
|
||||
|
||||
"github.com/ipfs/go-cid"
|
||||
kb "github.com/libp2p/go-libp2p-kbucket"
|
||||
"github.com/libp2p/go-libp2p-kbucket/peerdiversity"
|
||||
helper "github.com/libp2p/go-libp2p-routing-helpers"
|
||||
ci "github.com/libp2p/go-libp2p/core/crypto"
|
||||
"github.com/libp2p/go-libp2p/core/host"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/libp2p/go-libp2p/core/protocol"
|
||||
"github.com/libp2p/go-libp2p/core/routing"
|
||||
ma "github.com/multiformats/go-multiaddr"
|
||||
manet "github.com/multiformats/go-multiaddr/net"
|
||||
|
||||
"github.com/hashicorp/go-multierror"
|
||||
)
|
||||
|
||||
const tracer = tracing.Tracer("go-libp2p-kad-dht/dual")
|
||||
const dualName = "Dual"
|
||||
|
||||
// DHT implements the routing interface to provide two concrete DHT implementations for use
// in IPFS that are used to support both global network users and disjoint LAN use cases.
|
||||
type DHT struct {
|
||||
WAN *dht.IpfsDHT
|
||||
LAN *dht.IpfsDHT
|
||||
}
|
||||
|
||||
// LanExtension is used to differentiate local protocol requests from those on the WAN DHT.
|
||||
const LanExtension protocol.ID = "/lan"
|
||||
|
||||
// Assert that IPFS assumptions about interfaces aren't broken. These aren't a
|
||||
// guarantee, but we can use them to aid refactoring.
|
||||
var (
|
||||
_ routing.ContentRouting = (*DHT)(nil)
|
||||
_ routing.Routing = (*DHT)(nil)
|
||||
_ routing.PeerRouting = (*DHT)(nil)
|
||||
_ routing.PubKeyFetcher = (*DHT)(nil)
|
||||
_ routing.ValueStore = (*DHT)(nil)
|
||||
)
|
||||
|
||||
var (
|
||||
maxPrefixCountPerCpl = 2
|
||||
maxPrefixCount = 3
|
||||
)
|
||||
|
||||
type config struct {
|
||||
wan, lan []dht.Option
|
||||
}
|
||||
|
||||
func (cfg *config) apply(opts ...Option) error {
|
||||
for i, o := range opts {
|
||||
if err := o(cfg); err != nil {
|
||||
return fmt.Errorf("dual dht option %d failed: %w", i, err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Option is an option used to configure the Dual DHT.
|
||||
type Option func(*config) error
|
||||
|
||||
// WanDHTOption applies the given DHT options to the WAN DHT.
|
||||
func WanDHTOption(opts ...dht.Option) Option {
|
||||
return func(c *config) error {
|
||||
c.wan = append(c.wan, opts...)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// LanDHTOption applies the given DHT options to the LAN DHT.
|
||||
func LanDHTOption(opts ...dht.Option) Option {
|
||||
return func(c *config) error {
|
||||
c.lan = append(c.lan, opts...)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// DHTOption applies the given DHT options to both the WAN and the LAN DHTs.
|
||||
func DHTOption(opts ...dht.Option) Option {
|
||||
return func(c *config) error {
|
||||
c.lan = append(c.lan, opts...)
|
||||
c.wan = append(c.wan, opts...)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// New creates a new DualDHT instance. Options provided are forwarded on to the two concrete
|
||||
// IpfsDHT internal constructions, modulo additional options used by the Dual DHT to enforce
|
||||
// the LAN-vs-WAN distinction.
|
||||
// Note: query or routing table functional options provided as arguments to this function
|
||||
// will be overridden by this constructor.
|
||||
func New(ctx context.Context, h host.Host, options ...Option) (*DHT, error) {
|
||||
var cfg config
|
||||
err := cfg.apply(
|
||||
WanDHTOption(
|
||||
dht.QueryFilter(dht.PublicQueryFilter),
|
||||
dht.RoutingTableFilter(dht.PublicRoutingTableFilter),
|
||||
dht.RoutingTablePeerDiversityFilter(dht.NewRTPeerDiversityFilter(h, maxPrefixCountPerCpl, maxPrefixCount)),
|
||||
// filter out all private addresses
|
||||
dht.AddressFilter(func(addrs []ma.Multiaddr) []ma.Multiaddr {
|
||||
return ma.FilterAddrs(addrs, func(a ma.Multiaddr) bool { is, err := manet.IsPublicAddr(a); return is && err == nil })
|
||||
}),
|
||||
),
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = cfg.apply(
|
||||
LanDHTOption(
|
||||
dht.ProtocolExtension(LanExtension),
|
||||
dht.QueryFilter(dht.PrivateQueryFilter),
|
||||
dht.RoutingTableFilter(dht.PrivateRoutingTableFilter),
|
||||
// filter out localhost IP addresses
|
||||
dht.AddressFilter(func(addrs []ma.Multiaddr) []ma.Multiaddr {
|
||||
return ma.FilterAddrs(addrs, func(a ma.Multiaddr) bool { return !manet.IsIPLoopback(a) })
|
||||
}),
|
||||
),
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = cfg.apply(options...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
wan, err := dht.New(ctx, h, cfg.wan...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Unless overridden by user supplied options, the LAN DHT should default
|
||||
// to 'AutoServer' mode.
|
||||
if wan.Mode() != dht.ModeClient {
|
||||
cfg.lan = append(cfg.lan, dht.Mode(dht.ModeServer))
|
||||
}
|
||||
lan, err := dht.New(ctx, h, cfg.lan...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
impl := DHT{wan, lan}
|
||||
return &impl, nil
|
||||
}
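A hedged sketch of constructing the dual DHT from application code: DHTOption applies to both instances, while WanDHTOption and LanDHTOption target one side. The host h, context, and bootstrapInfos slice are assumptions, not part of this change:

d, err := dual.New(ctx, h,
	dual.DHTOption(dht.ProtocolPrefix("/myapp")),
	dual.WanDHTOption(dht.BootstrapPeers(bootstrapInfos...)),
)
if err != nil {
	return err
}
defer d.Close()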
|
||||
|
||||
// Close closes the DHT context.
|
||||
func (dht *DHT) Close() error {
|
||||
return combineErrors(dht.WAN.Close(), dht.LAN.Close())
|
||||
}
|
||||
|
||||
// WANActive returns true when the WAN DHT is active (has peers).
|
||||
func (dht *DHT) WANActive() bool {
|
||||
return dht.WAN.RoutingTable().Size() > 0
|
||||
}
|
||||
|
||||
// Provide adds the given cid to the content routing system.
|
||||
func (dht *DHT) Provide(ctx context.Context, key cid.Cid, announce bool) (err error) {
|
||||
ctx, end := tracer.Provide(dualName, ctx, key, announce)
|
||||
defer func() { end(err) }()
|
||||
|
||||
if dht.WANActive() {
|
||||
return dht.WAN.Provide(ctx, key, announce)
|
||||
}
|
||||
return dht.LAN.Provide(ctx, key, announce)
|
||||
}
|
||||
|
||||
// GetRoutingTableDiversityStats fetches the Routing Table Diversity Stats.
|
||||
func (dht *DHT) GetRoutingTableDiversityStats() []peerdiversity.CplDiversityStats {
|
||||
if dht.WANActive() {
|
||||
return dht.WAN.GetRoutingTableDiversityStats()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// FindProvidersAsync searches for peers who are able to provide a given key
|
||||
func (dht *DHT) FindProvidersAsync(ctx context.Context, key cid.Cid, count int) (ch <-chan peer.AddrInfo) {
|
||||
ctx, end := tracer.FindProvidersAsync(dualName, ctx, key, count)
|
||||
defer func() { ch = end(ch, nil) }()
|
||||
|
||||
reqCtx, cancel := context.WithCancel(ctx)
|
||||
outCh := make(chan peer.AddrInfo)
|
||||
|
||||
// Register for and merge query events if we care about them.
|
||||
subCtx := reqCtx
|
||||
var evtCh <-chan *routing.QueryEvent
|
||||
if routing.SubscribesToQueryEvents(ctx) {
|
||||
subCtx, evtCh = routing.RegisterForQueryEvents(reqCtx)
|
||||
}
|
||||
|
||||
subCtx, span := internal.StartSpan(subCtx, "Dual.worker")
|
||||
wanCh := dht.WAN.FindProvidersAsync(subCtx, key, count)
|
||||
lanCh := dht.LAN.FindProvidersAsync(subCtx, key, count)
|
||||
zeroCount := (count == 0)
|
||||
go func() {
|
||||
defer span.End()
|
||||
|
||||
defer cancel()
|
||||
defer close(outCh)
|
||||
|
||||
found := make(map[peer.ID]struct{}, count)
|
||||
var pi peer.AddrInfo
|
||||
var qEv *routing.QueryEvent
|
||||
for (zeroCount || count > 0) && (wanCh != nil || lanCh != nil) {
|
||||
var ok bool
|
||||
select {
|
||||
case qEv, ok = <-evtCh:
|
||||
if !ok {
|
||||
evtCh = nil
|
||||
} else if qEv != nil && qEv.Type != routing.QueryError {
|
||||
routing.PublishQueryEvent(reqCtx, qEv)
|
||||
}
|
||||
continue
|
||||
case pi, ok = <-wanCh:
|
||||
if !ok {
|
||||
span.AddEvent("wan finished")
|
||||
wanCh = nil
|
||||
continue
|
||||
}
|
||||
case pi, ok = <-lanCh:
|
||||
if !ok {
|
||||
span.AddEvent("lan finished")
|
||||
lanCh = nil
|
||||
continue
|
||||
}
|
||||
}
|
||||
// already found
|
||||
if _, ok = found[pi.ID]; ok {
|
||||
continue
|
||||
}
|
||||
|
||||
select {
|
||||
case outCh <- pi:
|
||||
found[pi.ID] = struct{}{}
|
||||
count--
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
if qEv != nil && qEv.Type == routing.QueryError && len(found) == 0 {
|
||||
routing.PublishQueryEvent(reqCtx, qEv)
|
||||
}
|
||||
}()
|
||||
return outCh
|
||||
}
|
||||
|
||||
// FindPeer searches for a peer with given ID
|
||||
// Note: with signed peer records, we can change this to short circuit once either DHT returns.
|
||||
func (dht *DHT) FindPeer(ctx context.Context, pid peer.ID) (pi peer.AddrInfo, err error) {
|
||||
ctx, end := tracer.FindPeer(dualName, ctx, pid)
|
||||
defer func() { end(pi, err) }()
|
||||
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(2)
|
||||
var wanInfo, lanInfo peer.AddrInfo
|
||||
var wanErr, lanErr error
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
wanInfo, wanErr = dht.WAN.FindPeer(ctx, pid)
|
||||
}()
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
lanInfo, lanErr = dht.LAN.FindPeer(ctx, pid)
|
||||
}()
|
||||
|
||||
wg.Wait()
|
||||
|
||||
// Combine addresses. Try to avoid doing unnecessary work while we're at
|
||||
// it. Note: We're ignoring the errors for now as many of our DHT
|
||||
// commands can return both a result and an error.
|
||||
ai := peer.AddrInfo{ID: pid}
|
||||
if len(wanInfo.Addrs) == 0 {
|
||||
ai.Addrs = lanInfo.Addrs
|
||||
} else if len(lanInfo.Addrs) == 0 {
|
||||
ai.Addrs = wanInfo.Addrs
|
||||
} else {
|
||||
// combine addresses
|
||||
deduped := make(map[string]ma.Multiaddr, len(wanInfo.Addrs)+len(lanInfo.Addrs))
|
||||
for _, addr := range wanInfo.Addrs {
|
||||
deduped[string(addr.Bytes())] = addr
|
||||
}
|
||||
for _, addr := range lanInfo.Addrs {
|
||||
deduped[string(addr.Bytes())] = addr
|
||||
}
|
||||
ai.Addrs = make([]ma.Multiaddr, 0, len(deduped))
|
||||
for _, addr := range deduped {
|
||||
ai.Addrs = append(ai.Addrs, addr)
|
||||
}
|
||||
}
|
||||
|
||||
// If one of the commands succeeded, don't return an error.
|
||||
if wanErr == nil || lanErr == nil {
|
||||
return ai, nil
|
||||
}
|
||||
|
||||
// Otherwise, return what we have _and_ return the error.
|
||||
return ai, combineErrors(wanErr, lanErr)
|
||||
}
|
||||
|
||||
func combineErrors(erra, errb error) error {
|
||||
// if the errors are the same, just return one.
|
||||
if erra == errb {
|
||||
return erra
|
||||
}
|
||||
|
||||
// If one of the errors is a kb lookup failure (no peers in routing
|
||||
// table), return the other.
|
||||
if erra == kb.ErrLookupFailure {
|
||||
return errb
|
||||
} else if errb == kb.ErrLookupFailure {
|
||||
return erra
|
||||
}
|
||||
return multierror.Append(erra, errb).ErrorOrNil()
|
||||
}
|
||||
|
||||
// Bootstrap allows callers to hint to the routing system to get into a
|
||||
// Bootstrapped state and remain there.
|
||||
func (dht *DHT) Bootstrap(ctx context.Context) (err error) {
|
||||
ctx, end := tracer.Bootstrap(dualName, ctx)
|
||||
defer func() { end(err) }()
|
||||
|
||||
erra := dht.WAN.Bootstrap(ctx)
|
||||
errb := dht.LAN.Bootstrap(ctx)
|
||||
return combineErrors(erra, errb)
|
||||
}
|
||||
|
||||
// PutValue adds value corresponding to given Key.
|
||||
func (dht *DHT) PutValue(ctx context.Context, key string, val []byte, opts ...routing.Option) (err error) {
|
||||
ctx, end := tracer.PutValue(dualName, ctx, key, val, opts...)
|
||||
defer func() { end(err) }()
|
||||
|
||||
if dht.WANActive() {
|
||||
return dht.WAN.PutValue(ctx, key, val, opts...)
|
||||
}
|
||||
return dht.LAN.PutValue(ctx, key, val, opts...)
|
||||
}
|
||||
|
||||
// GetValue searches for the value corresponding to given Key.
|
||||
func (d *DHT) GetValue(ctx context.Context, key string, opts ...routing.Option) (result []byte, err error) {
|
||||
ctx, end := tracer.GetValue(dualName, ctx, key, opts...)
|
||||
defer func() { end(result, err) }()
|
||||
|
||||
lanCtx, cancelLan := context.WithCancel(ctx)
|
||||
defer cancelLan()
|
||||
|
||||
var (
|
||||
lanVal []byte
|
||||
lanErr error
|
||||
lanWaiter sync.WaitGroup
|
||||
)
|
||||
lanWaiter.Add(1)
|
||||
go func() {
|
||||
defer lanWaiter.Done()
|
||||
lanVal, lanErr = d.LAN.GetValue(lanCtx, key, opts...)
|
||||
}()
|
||||
|
||||
wanVal, wanErr := d.WAN.GetValue(ctx, key, opts...)
|
||||
if wanErr == nil {
|
||||
cancelLan()
|
||||
}
|
||||
lanWaiter.Wait()
|
||||
if wanErr == nil {
|
||||
return wanVal, nil
|
||||
}
|
||||
if lanErr == nil {
|
||||
return lanVal, nil
|
||||
}
|
||||
return nil, combineErrors(wanErr, lanErr)
|
||||
}
|
||||
|
||||
// SearchValue searches for better values from this value
|
||||
func (dht *DHT) SearchValue(ctx context.Context, key string, opts ...routing.Option) (ch <-chan []byte, err error) {
|
||||
ctx, end := tracer.SearchValue(dualName, ctx, key, opts...)
|
||||
defer func() { ch, err = end(ch, err) }()
|
||||
|
||||
p := helper.Parallel{Routers: []routing.Routing{dht.WAN, dht.LAN}, Validator: dht.WAN.Validator}
|
||||
return p.SearchValue(ctx, key, opts...)
|
||||
}
|
||||
|
||||
// GetPublicKey returns the public key for the given peer.
|
||||
func (dht *DHT) GetPublicKey(ctx context.Context, pid peer.ID) (ci.PubKey, error) {
|
||||
p := helper.Parallel{Routers: []routing.Routing{dht.WAN, dht.LAN}, Validator: dht.WAN.Validator}
|
||||
return p.GetPublicKey(ctx, pid)
|
||||
}
|
go-libp2p-kad-dht/dual/dual_test.go (new file, 399 lines)
@@ -0,0 +1,399 @@
|
||||
package dual
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
u "github.com/ipfs/boxo/util"
|
||||
"github.com/ipfs/go-cid"
|
||||
dht "github.com/libp2p/go-libp2p-kad-dht"
|
||||
test "github.com/libp2p/go-libp2p-kad-dht/internal/testing"
|
||||
record "github.com/libp2p/go-libp2p-record"
|
||||
"github.com/libp2p/go-libp2p/core/host"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
peerstore "github.com/libp2p/go-libp2p/core/peerstore"
|
||||
bhost "github.com/libp2p/go-libp2p/p2p/host/basic"
|
||||
swarmt "github.com/libp2p/go-libp2p/p2p/net/swarm/testing"
|
||||
"github.com/multiformats/go-multiaddr"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
var wancid, lancid cid.Cid
|
||||
|
||||
func init() {
|
||||
wancid = cid.NewCidV1(cid.DagCBOR, u.Hash([]byte("wan cid -- value")))
|
||||
lancid = cid.NewCidV1(cid.DagCBOR, u.Hash([]byte("lan cid -- value")))
|
||||
}
|
||||
|
||||
type blankValidator struct{}
|
||||
|
||||
func (blankValidator) Validate(_ string, _ []byte) error { return nil }
|
||||
func (blankValidator) Select(_ string, _ [][]byte) (int, error) { return 0, nil }
|
||||
|
||||
type customRtHelper struct {
|
||||
allow peer.ID
|
||||
}
|
||||
|
||||
func MkFilterForPeer() (func(_ interface{}, p peer.ID) bool, *customRtHelper) {
|
||||
helper := customRtHelper{}
|
||||
|
||||
type hasHost interface {
|
||||
Host() host.Host
|
||||
}
|
||||
|
||||
f := func(dht interface{}, p peer.ID) bool {
|
||||
d := dht.(hasHost)
|
||||
conns := d.Host().Network().ConnsToPeer(p)
|
||||
|
||||
for _, c := range conns {
|
||||
if c.RemotePeer() == helper.allow {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
return f, &helper
|
||||
}
|
||||
|
||||
func setupDHTWithFilters(ctx context.Context, t *testing.T, options ...dht.Option) (*DHT, []*customRtHelper) {
|
||||
h, err := bhost.NewHost(swarmt.GenSwarm(t, swarmt.OptDisableReuseport), new(bhost.HostOpts))
|
||||
require.NoError(t, err)
|
||||
h.Start()
|
||||
t.Cleanup(func() { h.Close() })
|
||||
|
||||
wanFilter, wanRef := MkFilterForPeer()
|
||||
wanOpts := []dht.Option{
|
||||
dht.NamespacedValidator("v", blankValidator{}),
|
||||
dht.ProtocolPrefix("/test"),
|
||||
dht.DisableAutoRefresh(),
|
||||
dht.RoutingTableFilter(wanFilter),
|
||||
}
|
||||
wan, err := dht.New(ctx, h, wanOpts...)
|
||||
require.NoError(t, err)
|
||||
|
||||
lanFilter, lanRef := MkFilterForPeer()
|
||||
lanOpts := []dht.Option{
|
||||
dht.NamespacedValidator("v", blankValidator{}),
|
||||
dht.ProtocolPrefix("/test"),
|
||||
dht.ProtocolExtension(LanExtension),
|
||||
dht.DisableAutoRefresh(),
|
||||
dht.RoutingTableFilter(lanFilter),
|
||||
dht.Mode(dht.ModeServer),
|
||||
}
|
||||
lan, err := dht.New(ctx, h, lanOpts...)
|
||||
require.NoError(t, err)
|
||||
|
||||
impl := DHT{wan, lan}
|
||||
return &impl, []*customRtHelper{wanRef, lanRef}
|
||||
}
|
||||
|
||||
func setupDHT(ctx context.Context, t *testing.T, options ...dht.Option) *DHT {
|
||||
t.Helper()
|
||||
|
||||
host, err := bhost.NewHost(swarmt.GenSwarm(t, swarmt.OptDisableReuseport), new(bhost.HostOpts))
|
||||
require.NoError(t, err)
|
||||
host.Start()
|
||||
t.Cleanup(func() { host.Close() })
|
||||
|
||||
baseOpts := []dht.Option{
|
||||
dht.NamespacedValidator("v", blankValidator{}),
|
||||
dht.ProtocolPrefix("/test"),
|
||||
dht.DisableAutoRefresh(),
|
||||
}
|
||||
|
||||
d, err := New(
|
||||
ctx,
|
||||
host,
|
||||
append([]Option{DHTOption(baseOpts...)}, DHTOption(options...))...,
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
return d
|
||||
}
|
||||
|
||||
func connect(ctx context.Context, t *testing.T, a, b *dht.IpfsDHT) {
|
||||
t.Helper()
|
||||
bid := b.PeerID()
|
||||
baddr := b.Host().Peerstore().Addrs(bid)
|
||||
if len(baddr) == 0 {
|
||||
t.Fatal("no addresses for connection.")
|
||||
}
|
||||
a.Host().Peerstore().AddAddrs(bid, baddr, peerstore.TempAddrTTL)
|
||||
if err := a.Host().Connect(ctx, peer.AddrInfo{ID: bid}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
wait(ctx, t, a, b)
|
||||
}
|
||||
|
||||
func wait(ctx context.Context, t *testing.T, a, b *dht.IpfsDHT) {
|
||||
t.Helper()
|
||||
for a.RoutingTable().Find(b.PeerID()) == "" {
|
||||
// fmt.Fprintf(os.Stderr, "%v\n", a.RoutingTable().GetPeerInfos())
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
t.Fatal(ctx.Err())
|
||||
case <-time.After(time.Millisecond * 5):
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func setupTier(ctx context.Context, t *testing.T) (*DHT, *dht.IpfsDHT, *dht.IpfsDHT) {
|
||||
t.Helper()
|
||||
baseOpts := []dht.Option{
|
||||
dht.NamespacedValidator("v", blankValidator{}),
|
||||
dht.ProtocolPrefix("/test"),
|
||||
dht.DisableAutoRefresh(),
|
||||
}
|
||||
|
||||
d, hlprs := setupDHTWithFilters(ctx, t)
|
||||
|
||||
whost, err := bhost.NewHost(swarmt.GenSwarm(t, swarmt.OptDisableReuseport), new(bhost.HostOpts))
|
||||
require.NoError(t, err)
|
||||
whost.Start()
|
||||
t.Cleanup(func() { whost.Close() })
|
||||
|
||||
wan, err := dht.New(
|
||||
ctx,
|
||||
whost,
|
||||
append(baseOpts, dht.Mode(dht.ModeServer))...,
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
hlprs[0].allow = wan.PeerID()
|
||||
connect(ctx, t, d.WAN, wan)
|
||||
|
||||
lhost, err := bhost.NewHost(swarmt.GenSwarm(t, swarmt.OptDisableReuseport), new(bhost.HostOpts))
|
||||
require.NoError(t, err)
|
||||
lhost.Start()
|
||||
t.Cleanup(func() { lhost.Close() })
|
||||
|
||||
lan, err := dht.New(
|
||||
ctx,
|
||||
lhost,
|
||||
append(baseOpts, dht.Mode(dht.ModeServer), dht.ProtocolExtension("/lan"))...,
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
hlprs[1].allow = lan.PeerID()
|
||||
connect(ctx, t, d.LAN, lan)
|
||||
|
||||
return d, wan, lan
|
||||
}
|
||||
|
||||
func TestDualModes(t *testing.T) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
|
||||
defer cancel()
|
||||
|
||||
d := setupDHT(ctx, t)
|
||||
defer d.Close()
|
||||
|
||||
if d.WAN.Mode() != dht.ModeAuto {
|
||||
t.Fatal("wrong default mode for wan")
|
||||
} else if d.LAN.Mode() != dht.ModeServer {
|
||||
t.Fatal("wrong default mode for lan")
|
||||
}
|
||||
|
||||
d2 := setupDHT(ctx, t, dht.Mode(dht.ModeClient))
|
||||
defer d2.Close()
|
||||
if d2.WAN.Mode() != dht.ModeClient ||
|
||||
d2.LAN.Mode() != dht.ModeClient {
|
||||
t.Fatal("wrong client mode operation")
|
||||
}
|
||||
}
|
||||
|
||||
func TestFindProviderAsync(t *testing.T) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
|
||||
defer cancel()
|
||||
|
||||
d, wan, lan := setupTier(ctx, t)
|
||||
defer d.Close()
|
||||
defer wan.Close()
|
||||
defer lan.Close()
|
||||
|
||||
time.Sleep(5 * time.Millisecond)
|
||||
|
||||
if err := wan.Provide(ctx, wancid, false); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := lan.Provide(ctx, lancid, true); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
wpc := d.FindProvidersAsync(ctx, wancid, 1)
|
||||
select {
|
||||
case p := <-wpc:
|
||||
if p.ID != wan.PeerID() {
|
||||
t.Fatal("wrong wan provider")
|
||||
}
|
||||
case <-ctx.Done():
|
||||
t.Fatal("find provider timeout.")
|
||||
}
|
||||
|
||||
lpc := d.FindProvidersAsync(ctx, lancid, 1)
|
||||
select {
|
||||
case p := <-lpc:
|
||||
if p.ID != lan.PeerID() {
|
||||
t.Fatal("wrong lan provider")
|
||||
}
|
||||
case <-ctx.Done():
|
||||
t.Fatal("find provider timeout.")
|
||||
}
|
||||
}
|
||||
|
||||
func TestValueGetSet(t *testing.T) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
|
||||
defer cancel()
|
||||
|
||||
d, wan, lan := setupTier(ctx, t)
|
||||
defer d.Close()
|
||||
defer wan.Close()
|
||||
defer lan.Close()
|
||||
|
||||
time.Sleep(5 * time.Millisecond)
|
||||
|
||||
err := d.PutValue(ctx, "/v/hello", []byte("valid"))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
val, err := wan.GetValue(ctx, "/v/hello")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if string(val) != "valid" {
|
||||
t.Fatal("failed to get expected string.")
|
||||
}
|
||||
|
||||
_, err = lan.GetValue(ctx, "/v/hello")
|
||||
if err == nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSearchValue(t *testing.T) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
|
||||
defer cancel()
|
||||
|
||||
d, wan, lan := setupTier(ctx, t)
|
||||
defer d.Close()
|
||||
defer wan.Close()
|
||||
defer lan.Close()
|
||||
|
||||
d.WAN.Validator.(record.NamespacedValidator)["v"] = test.TestValidator{}
|
||||
d.LAN.Validator.(record.NamespacedValidator)["v"] = test.TestValidator{}
|
||||
|
||||
_ = wan.PutValue(ctx, "/v/hello", []byte("valid"))
|
||||
|
||||
valCh, err := d.SearchValue(ctx, "/v/hello", dht.Quorum(0))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
select {
|
||||
case v := <-valCh:
|
||||
if string(v) != "valid" {
|
||||
t.Errorf("expected 'valid', got '%s'", string(v))
|
||||
}
|
||||
case <-ctx.Done():
|
||||
t.Fatal(ctx.Err())
|
||||
}
|
||||
|
||||
select {
|
||||
case _, ok := <-valCh:
|
||||
if ok {
|
||||
t.Errorf("chan should close")
|
||||
}
|
||||
case <-ctx.Done():
|
||||
t.Fatal(ctx.Err())
|
||||
}
|
||||
|
||||
err = lan.PutValue(ctx, "/v/hello", []byte("newer"))
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
valCh, err = d.SearchValue(ctx, "/v/hello", dht.Quorum(0))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
var lastVal []byte
|
||||
for c := range valCh {
|
||||
lastVal = c
|
||||
}
|
||||
if string(lastVal) != "newer" {
|
||||
t.Fatal("incorrect best search value")
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetPublicKey(t *testing.T) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
|
||||
defer cancel()
|
||||
|
||||
d, wan, lan := setupTier(ctx, t)
|
||||
defer d.Close()
|
||||
defer wan.Close()
|
||||
defer lan.Close()
|
||||
|
||||
time.Sleep(5 * time.Millisecond)
|
||||
|
||||
pk, err := d.GetPublicKey(ctx, wan.PeerID())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
id, err := peer.IDFromPublicKey(pk)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if id != wan.PeerID() {
|
||||
t.Fatal("incorrect PK")
|
||||
}
|
||||
|
||||
pk, err = d.GetPublicKey(ctx, lan.PeerID())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
id, err = peer.IDFromPublicKey(pk)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if id != lan.PeerID() {
|
||||
t.Fatal("incorrect PK")
|
||||
}
|
||||
}
|
||||
|
||||
func TestFindPeer(t *testing.T) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
|
||||
defer cancel()
|
||||
|
||||
d, wan, lan := setupTier(ctx, t)
|
||||
defer d.Close()
|
||||
defer wan.Close()
|
||||
defer lan.Close()
|
||||
|
||||
time.Sleep(5 * time.Millisecond)
|
||||
|
||||
p, err := d.FindPeer(ctx, lan.PeerID())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
assertUniqueMultiaddrs(t, p.Addrs)
|
||||
p, err = d.FindPeer(ctx, wan.PeerID())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
assertUniqueMultiaddrs(t, p.Addrs)
|
||||
}
|
||||
|
||||
func assertUniqueMultiaddrs(t *testing.T, addrs []multiaddr.Multiaddr) {
|
||||
set := make(map[string]bool)
|
||||
for _, addr := range addrs {
|
||||
if set[string(addr.Bytes())] {
|
||||
t.Errorf("duplicate address %s", addr)
|
||||
}
|
||||
set[string(addr.Bytes())] = true
|
||||
}
|
||||
}
|
go-libp2p-kad-dht/events.go (new file, 247 lines)
@@ -0,0 +1,247 @@
|
||||
package dht
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"sync"
|
||||
|
||||
"github.com/google/uuid"
|
||||
|
||||
kbucket "github.com/libp2p/go-libp2p-kbucket"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
)
|
||||
|
||||
// KeyKadID contains the Kademlia key in string and binary form.
|
||||
type KeyKadID struct {
|
||||
Key string
|
||||
Kad kbucket.ID
|
||||
}
|
||||
|
||||
// NewKeyKadID creates a KeyKadID from a string Kademlia ID.
|
||||
func NewKeyKadID(k string) *KeyKadID {
|
||||
return &KeyKadID{
|
||||
Key: k,
|
||||
Kad: kbucket.ConvertKey(k),
|
||||
}
|
||||
}
|
||||
|
||||
// PeerKadID contains a libp2p Peer ID and a binary Kademlia ID.
|
||||
type PeerKadID struct {
|
||||
Peer peer.ID
|
||||
Kad kbucket.ID
|
||||
}
|
||||
|
||||
// NewPeerKadID creates a PeerKadID from a libp2p Peer ID.
|
||||
func NewPeerKadID(p peer.ID) *PeerKadID {
|
||||
return &PeerKadID{
|
||||
Peer: p,
|
||||
Kad: kbucket.ConvertPeerID(p),
|
||||
}
|
||||
}
|
||||
|
||||
// NewPeerKadIDSlice creates a slice of PeerKadID from the passed slice of libp2p Peer IDs.
|
||||
func NewPeerKadIDSlice(p []peer.ID) []*PeerKadID {
|
||||
r := make([]*PeerKadID, len(p))
|
||||
for i := range p {
|
||||
r[i] = NewPeerKadID(p[i])
|
||||
}
|
||||
return r
|
||||
}
|
||||
|
||||
// OptPeerKadID returns a pointer to a PeerKadID, or nil if the passed Peer ID is its default value.
|
||||
func OptPeerKadID(p peer.ID) *PeerKadID {
|
||||
if p == "" {
|
||||
return nil
|
||||
}
|
||||
return NewPeerKadID(p)
|
||||
}
|
||||
|
||||
// NewLookupEvent creates a LookupEvent automatically converting the node
|
||||
// libp2p Peer ID to a PeerKadID and the string Kademlia key to a KeyKadID.
|
||||
func NewLookupEvent(
|
||||
node peer.ID,
|
||||
id uuid.UUID,
|
||||
key string,
|
||||
request *LookupUpdateEvent,
|
||||
response *LookupUpdateEvent,
|
||||
terminate *LookupTerminateEvent,
|
||||
) *LookupEvent {
|
||||
return &LookupEvent{
|
||||
Node: NewPeerKadID(node),
|
||||
ID: id,
|
||||
Key: NewKeyKadID(key),
|
||||
Request: request,
|
||||
Response: response,
|
||||
Terminate: terminate,
|
||||
}
|
||||
}
|
||||
|
||||
// LookupEvent is emitted for every notable event that happens during a DHT lookup.
|
||||
// LookupEvent supports JSON marshalling because all of its fields do, recursively.
|
||||
type LookupEvent struct {
|
||||
// Node is the ID of the node performing the lookup.
|
||||
Node *PeerKadID
|
||||
// ID is a unique identifier for the lookup instance.
|
||||
ID uuid.UUID
|
||||
// Key is the Kademlia key used as a lookup target.
|
||||
Key *KeyKadID
|
||||
// Request, if not nil, describes a state update event, associated with an outgoing query request.
|
||||
Request *LookupUpdateEvent
|
||||
// Response, if not nil, describes a state update event, associated with an outgoing query response.
|
||||
Response *LookupUpdateEvent
|
||||
// Terminate, if not nil, describes a termination event.
|
||||
Terminate *LookupTerminateEvent
|
||||
}
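Because LookupEvent and its fields all marshal to JSON, lookup instrumentation can be serialized directly. A minimal sketch, assuming a node peer.ID is in scope and the encoding/json and fmt imports are available:

ev := NewLookupEvent(node, uuid.New(), "target-key",
	nil, nil, NewLookupTerminateEvent(LookupCompleted))
if b, err := json.Marshal(ev); err == nil {
	fmt.Println(string(b))
}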
|
||||
|
||||
// NewLookupUpdateEvent creates a new lookup update event, automatically converting the passed peer IDs to peer Kad IDs.
|
||||
func NewLookupUpdateEvent(
|
||||
cause peer.ID,
|
||||
source peer.ID,
|
||||
heard []peer.ID,
|
||||
waiting []peer.ID,
|
||||
queried []peer.ID,
|
||||
unreachable []peer.ID,
|
||||
) *LookupUpdateEvent {
|
||||
return &LookupUpdateEvent{
|
||||
Cause: OptPeerKadID(cause),
|
||||
Source: OptPeerKadID(source),
|
||||
Heard: NewPeerKadIDSlice(heard),
|
||||
Waiting: NewPeerKadIDSlice(waiting),
|
||||
Queried: NewPeerKadIDSlice(queried),
|
||||
Unreachable: NewPeerKadIDSlice(unreachable),
|
||||
}
|
||||
}
|
||||
|
||||
// LookupUpdateEvent describes a lookup state update event.
|
||||
type LookupUpdateEvent struct {
|
||||
// Cause is the peer whose response (or lack of response) caused the update event.
|
||||
// If Cause is nil, this is the first update event in the lookup, caused by the seeding.
|
||||
Cause *PeerKadID
|
||||
// Source is the peer who informed us about the peer IDs in this update (below).
|
||||
Source *PeerKadID
|
||||
// Heard is a set of peers whose state in the lookup's peerset is being set to "heard".
|
||||
Heard []*PeerKadID
|
||||
// Waiting is a set of peers whose state in the lookup's peerset is being set to "waiting".
|
||||
Waiting []*PeerKadID
|
||||
// Queried is a set of peers whose state in the lookup's peerset is being set to "queried".
|
||||
Queried []*PeerKadID
|
||||
// Unreachable is a set of peers whose state in the lookup's peerset is being set to "unreachable".
|
||||
Unreachable []*PeerKadID
|
||||
}
|
||||
|
||||
// LookupTerminateEvent describes a lookup termination event.
|
||||
type LookupTerminateEvent struct {
|
||||
// Reason is the reason for lookup termination.
|
||||
Reason LookupTerminationReason
|
||||
}
|
||||
|
||||
// NewLookupTerminateEvent creates a new lookup termination event with a given reason.
|
||||
func NewLookupTerminateEvent(reason LookupTerminationReason) *LookupTerminateEvent {
|
||||
return &LookupTerminateEvent{Reason: reason}
|
||||
}
|
||||
|
||||
// LookupTerminationReason captures reasons for terminating a lookup.
|
||||
type LookupTerminationReason int
|
||||
|
||||
// MarshalJSON returns the JSON encoding of the passed lookup termination reason.
|
||||
func (r LookupTerminationReason) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(r.String())
|
||||
}
|
||||
|
||||
func (r LookupTerminationReason) String() string {
|
||||
switch r {
|
||||
case LookupStopped:
|
||||
return "stopped"
|
||||
case LookupCancelled:
|
||||
return "cancelled"
|
||||
case LookupStarvation:
|
||||
return "starvation"
|
||||
case LookupCompleted:
|
||||
return "completed"
|
||||
}
|
||||
panic("unreachable")
|
||||
}
|
||||
|
||||
const (
|
||||
// LookupStopped indicates that the lookup was aborted by the user's stopFn.
|
||||
LookupStopped LookupTerminationReason = iota
|
||||
// LookupCancelled indicates that the lookup was aborted by the context.
|
||||
LookupCancelled
|
||||
// LookupStarvation indicates that the lookup terminated due to lack of unqueried peers.
|
||||
LookupStarvation
|
||||
// LookupCompleted indicates that the lookup terminated successfully, reaching the Kademlia end condition.
|
||||
LookupCompleted
|
||||
)
|
||||
|
||||
type routingLookupKey struct{}
|
||||
|
||||
// TODO: lookupEventChannel copies the implementation of eventChanel.
|
||||
// The two should be refactored to use a common event channel implementation.
|
||||
// A common implementation needs to rethink the signature of RegisterForEvents,
|
||||
// because returning a typed channel cannot be made polymorphic without creating
|
||||
// additional "adapter" channels. This will be easier to handle when Go
|
||||
// introduces generics.
|
||||
type lookupEventChannel struct {
|
||||
mu sync.Mutex
|
||||
ctx context.Context
|
||||
ch chan<- *LookupEvent
|
||||
}
|
||||
|
||||
// waitThenClose is spawned in a goroutine when the channel is registered. This
|
||||
// safely cleans up the channel when the context has been canceled.
|
||||
func (e *lookupEventChannel) waitThenClose() {
|
||||
<-e.ctx.Done()
|
||||
e.mu.Lock()
|
||||
close(e.ch)
|
||||
// 1. Signals that we're done.
|
||||
// 2. Frees memory (in case we end up hanging on to this for a while).
|
||||
e.ch = nil
|
||||
e.mu.Unlock()
|
||||
}
|
||||
|
||||
// send sends an event on the event channel, aborting if either the passed or
|
||||
// the internal context expire.
|
||||
func (e *lookupEventChannel) send(ctx context.Context, ev *LookupEvent) {
|
||||
e.mu.Lock()
|
||||
// Closed.
|
||||
if e.ch == nil {
|
||||
e.mu.Unlock()
|
||||
return
|
||||
}
|
||||
// in case the passed context is unrelated, wait on both.
|
||||
select {
|
||||
case e.ch <- ev:
|
||||
case <-e.ctx.Done():
|
||||
case <-ctx.Done():
|
||||
}
|
||||
e.mu.Unlock()
|
||||
}
|
||||
|
||||
// RegisterForLookupEvents registers a lookup event channel with the given context.
|
||||
// The returned context can be passed to DHT queries to receive lookup events on
|
||||
// the returned channels.
|
||||
//
|
||||
// The passed context MUST be canceled when the caller is no longer interested
|
||||
// in query events.
|
||||
func RegisterForLookupEvents(ctx context.Context) (context.Context, <-chan *LookupEvent) {
|
||||
ch := make(chan *LookupEvent, LookupEventBufferSize)
|
||||
ech := &lookupEventChannel{ch: ch, ctx: ctx}
|
||||
go ech.waitThenClose()
|
||||
return context.WithValue(ctx, routingLookupKey{}, ech), ch
|
||||
}
|
||||
|
||||
// LookupEventBufferSize is the number of events to buffer.
|
||||
var LookupEventBufferSize = 16
|
||||
|
||||
// PublishLookupEvent publishes a query event to the query event channel
|
||||
// associated with the given context, if any.
|
||||
func PublishLookupEvent(ctx context.Context, ev *LookupEvent) {
|
||||
ich := ctx.Value(routingLookupKey{})
|
||||
if ich == nil {
|
||||
return
|
||||
}
|
||||
|
||||
// We *want* to panic here.
|
||||
ech := ich.(*lookupEventChannel)
|
||||
ech.send(ctx, ev)
|
||||
}
|
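A minimal sketch of how the lookup-events API above might be consumed, assuming an existing *dht.IpfsDHT value d; the GetClosestPeers call stands in for any DHT query and is an assumption about the surrounding package, not part of this diff:

package main

import (
	"context"
	"encoding/json"
	"log"

	dht "github.com/libp2p/go-libp2p-kad-dht"
)

func watchLookup(ctx context.Context, d *dht.IpfsDHT, key string) {
	// Queries run with lookupCtx publish *LookupEvent values on the returned
	// channel; the channel is closed once ctx is canceled.
	lookupCtx, events := dht.RegisterForLookupEvents(ctx)
	go func() {
		for ev := range events {
			b, _ := json.Marshal(ev) // LookupEvent marshals to JSON, recursively
			log.Printf("lookup event: %s", b)
		}
	}()
	// Hypothetical query; any lookup run with lookupCtx reports its progress.
	_, _ = d.GetClosestPeers(lookupCtx, key)
}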
48
go-libp2p-kad-dht/ext_test.go
Normal file
@ -0,0 +1,48 @@
package dht

import (
	"context"
	"testing"
	"time"

	"github.com/libp2p/go-libp2p/core/network"
	"github.com/stretchr/testify/require"

	mocknet "github.com/libp2p/go-libp2p/p2p/net/mock"
)

func TestInvalidRemotePeers(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	mn, err := mocknet.FullMeshLinked(5)
	if err != nil {
		t.Fatal(err)
	}
	defer mn.Close()
	hosts := mn.Hosts()

	os := []Option{testPrefix, DisableAutoRefresh(), Mode(ModeServer)}
	d, err := New(ctx, hosts[0], os...)
	if err != nil {
		t.Fatal(err)
	}
	for _, proto := range d.serverProtocols {
		// Hang on every request.
		hosts[1].SetStreamHandler(proto, func(s network.Stream) {
			defer s.Reset() // nolint
			<-ctx.Done()
		})
	}

	err = mn.ConnectAllButSelf()
	if err != nil {
		t.Fatal("failed to connect peers", err)
	}

	time.Sleep(100 * time.Millisecond)

	// hosts[1] isn't added to the routing table because it isn't responding to
	// the DHT request
	require.Equal(t, 0, d.routingTable.Size())
}
1555
go-libp2p-kad-dht/fullrt/dht.go
Normal file
File diff suppressed because it is too large
86
go-libp2p-kad-dht/fullrt/dht_test.go
Normal file
@ -0,0 +1,86 @@
package fullrt

import (
	"strconv"
	"testing"

	"github.com/libp2p/go-libp2p/core/peer"
)

func TestDivideByChunkSize(t *testing.T) {
	var keys []peer.ID
	for i := 0; i < 10; i++ {
		keys = append(keys, peer.ID(strconv.Itoa(i)))
	}

	convertToStrings := func(peers []peer.ID) []string {
		var out []string
		for _, p := range peers {
			out = append(out, string(p))
		}
		return out
	}

	pidsEquals := func(a, b []string) bool {
		if len(a) != len(b) {
			return false
		}
		for i, v := range a {
			if v != b[i] {
				return false
			}
		}
		return true
	}

	t.Run("Divides", func(t *testing.T) {
		gr := divideByChunkSize(keys, 5)
		if len(gr) != 2 {
			t.Fatal("incorrect number of groups")
		}
		if g1, expected := convertToStrings(gr[0]), []string{"0", "1", "2", "3", "4"}; !pidsEquals(g1, expected) {
			t.Fatalf("expected %v, got %v", expected, g1)
		}
		if g2, expected := convertToStrings(gr[1]), []string{"5", "6", "7", "8", "9"}; !pidsEquals(g2, expected) {
			t.Fatalf("expected %v, got %v", expected, g2)
		}
	})
	t.Run("Remainder", func(t *testing.T) {
		gr := divideByChunkSize(keys, 3)
		if len(gr) != 4 {
			t.Fatal("incorrect number of groups")
		}
		if g, expected := convertToStrings(gr[0]), []string{"0", "1", "2"}; !pidsEquals(g, expected) {
			t.Fatalf("expected %v, got %v", expected, g)
		}
		if g, expected := convertToStrings(gr[1]), []string{"3", "4", "5"}; !pidsEquals(g, expected) {
			t.Fatalf("expected %v, got %v", expected, g)
		}
		if g, expected := convertToStrings(gr[2]), []string{"6", "7", "8"}; !pidsEquals(g, expected) {
			t.Fatalf("expected %v, got %v", expected, g)
		}
		if g, expected := convertToStrings(gr[3]), []string{"9"}; !pidsEquals(g, expected) {
			t.Fatalf("expected %v, got %v", expected, g)
		}
	})
	t.Run("OneEach", func(t *testing.T) {
		gr := divideByChunkSize(keys, 1)
		if len(gr) != 10 {
			t.Fatal("incorrect number of groups")
		}
		for i := 0; i < 10; i++ {
			if g, expected := convertToStrings(gr[i]), []string{strconv.Itoa(i)}; !pidsEquals(g, expected) {
				t.Fatalf("expected %v, got %v", expected, g)
			}
		}
	})
	t.Run("ChunkSizeLargerThanKeys", func(t *testing.T) {
		gr := divideByChunkSize(keys, 11)
		if len(gr) != 1 {
			t.Fatal("incorrect number of groups")
		}
		if g, expected := convertToStrings(gr[0]), convertToStrings(keys); !pidsEquals(g, expected) {
			t.Fatalf("expected %v, got %v", expected, g)
		}
	})
}
98
go-libp2p-kad-dht/fullrt/options.go
Normal file
@ -0,0 +1,98 @@
package fullrt

import (
	"fmt"
	"time"

	kaddht "github.com/libp2p/go-libp2p-kad-dht"
	"github.com/libp2p/go-libp2p-kad-dht/crawler"
	"github.com/libp2p/go-libp2p-kad-dht/providers"
)

type config struct {
	dhtOpts []kaddht.Option

	crawlInterval       time.Duration
	waitFrac            float64
	bulkSendParallelism int
	timeoutPerOp        time.Duration
	crawler             crawler.Crawler
	pmOpts              []providers.Option
}

func (cfg *config) apply(opts ...Option) error {
	for i, o := range opts {
		if err := o(cfg); err != nil {
			return fmt.Errorf("fullrt dht option %d failed: %w", i, err)
		}
	}
	return nil
}

type Option func(opt *config) error

func DHTOption(opts ...kaddht.Option) Option {
	return func(c *config) error {
		c.dhtOpts = append(c.dhtOpts, opts...)
		return nil
	}
}

// WithCrawler sets the crawler.Crawler to use in order to crawl the DHT network.
// Defaults to crawler.DefaultCrawler with a parallelism of 200.
func WithCrawler(c crawler.Crawler) Option {
	return func(opt *config) error {
		opt.crawler = c
		return nil
	}
}

// WithCrawlInterval sets the interval at which the DHT is crawled to refresh the peer store.
// Defaults to 1 hour if unspecified.
func WithCrawlInterval(i time.Duration) Option {
	return func(opt *config) error {
		opt.crawlInterval = i
		return nil
	}
}

// WithSuccessWaitFraction sets the fraction of peers to wait for before considering an operation a success, defined as a number in (0, 1].
// Defaults to 30% if unspecified.
func WithSuccessWaitFraction(f float64) Option {
	return func(opt *config) error {
		if f <= 0 || f > 1 {
			return fmt.Errorf("success wait fraction must be larger than 0 and smaller or equal to 1; got: %f", f)
		}
		opt.waitFrac = f
		return nil
	}
}

// WithBulkSendParallelism sets the maximum degree of parallelism at which messages are sent to other peers. It must be at least 1.
// Defaults to 20 if unspecified.
func WithBulkSendParallelism(b int) Option {
	return func(opt *config) error {
		if b < 1 {
			return fmt.Errorf("bulk send parallelism must be at least 1; got: %d", b)
		}
		opt.bulkSendParallelism = b
		return nil
	}
}

// WithTimeoutPerOperation sets the timeout per operation, where operations include putting providers and querying the DHT.
// Defaults to 5 seconds if unspecified.
func WithTimeoutPerOperation(t time.Duration) Option {
	return func(opt *config) error {
		opt.timeoutPerOp = t
		return nil
	}
}

// WithProviderManagerOptions sets the options to use when instantiating providers.ProviderManager.
func WithProviderManagerOptions(pmOpts ...providers.Option) Option {
	return func(opt *config) error {
		opt.pmOpts = pmOpts
		return nil
	}
}
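A minimal sketch of how these options might be combined when constructing an accelerated client; the fullrt.NewFullRT constructor, the "/ipfs" protocol prefix, and the libp2p host h are assumptions about the surrounding package, not part of this file:

package main

import (
	"time"

	kaddht "github.com/libp2p/go-libp2p-kad-dht"
	"github.com/libp2p/go-libp2p-kad-dht/fullrt"
	"github.com/libp2p/go-libp2p/core/host"
)

// newAcceleratedClient wires the functional options above into one constructor call.
func newAcceleratedClient(h host.Host) (*fullrt.FullRT, error) {
	return fullrt.NewFullRT(h, "/ipfs",
		fullrt.DHTOption(kaddht.Mode(kaddht.ModeClient)), // forwarded to the underlying DHT config
		fullrt.WithCrawlInterval(time.Hour),              // matches the documented default
		fullrt.WithSuccessWaitFraction(0.3),              // must be in (0, 1]
		fullrt.WithBulkSendParallelism(20),               // must be at least 1
	)
}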
144
go-libp2p-kad-dht/go.mod
Normal file
@ -0,0 +1,144 @@
module github.com/libp2p/go-libp2p-kad-dht

go 1.21

retract v0.24.3 // this includes a breaking change and should have been released as v0.25.0

replace github.com/multiformats/go-multiaddr => ../go-multiaddr

replace github.com/multiformats/go-multiaddr-dns => ../go-multiaddr-dns

replace github.com/libp2p/go-libp2p => ../go-libp2p

require (
	github.com/gogo/protobuf v1.3.2
	github.com/google/gopacket v1.1.19
	github.com/google/uuid v1.4.0
	github.com/hashicorp/go-multierror v1.1.1
	github.com/hashicorp/golang-lru v0.5.4
	github.com/ipfs/boxo v0.10.0
	github.com/ipfs/go-cid v0.4.1
	github.com/ipfs/go-datastore v0.6.0
	github.com/ipfs/go-detect-race v0.0.1
	github.com/ipfs/go-log/v2 v2.5.1
	github.com/libp2p/go-libp2p v0.30.0
	github.com/libp2p/go-libp2p-kbucket v0.6.3
	github.com/libp2p/go-libp2p-record v0.2.0
	github.com/libp2p/go-libp2p-routing-helpers v0.7.2
	github.com/libp2p/go-libp2p-testing v0.12.0
	github.com/libp2p/go-libp2p-xor v0.1.0
	github.com/libp2p/go-msgio v0.3.0
	github.com/libp2p/go-netroute v0.2.1
	github.com/multiformats/go-base32 v0.1.0
	github.com/multiformats/go-multiaddr v0.12.4
	github.com/multiformats/go-multibase v0.2.0
	github.com/multiformats/go-multihash v0.2.3
	github.com/multiformats/go-multistream v0.5.0
	github.com/stretchr/testify v1.9.0
	github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1
	go.opencensus.io v0.24.0
	go.opentelemetry.io/otel v1.16.0
	go.opentelemetry.io/otel/trace v1.16.0
	go.uber.org/multierr v1.11.0
	go.uber.org/zap v1.27.0
	gonum.org/v1/gonum v0.13.0
)

require (
	github.com/Jorropo/jsync v1.0.1 // indirect
	github.com/benbjohnson/clock v1.3.5 // indirect
	github.com/beorn7/perks v1.0.1 // indirect
	github.com/cespare/xxhash/v2 v2.2.0 // indirect
	github.com/cloudflare/circl v1.3.9 // indirect
	github.com/containerd/cgroups v1.1.0 // indirect
	github.com/coreos/go-systemd/v22 v22.5.0 // indirect
	github.com/davecgh/go-spew v1.1.1 // indirect
	github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect
	github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 // indirect
	github.com/docker/go-units v0.5.0 // indirect
	github.com/elastic/gosigar v0.14.2 // indirect
	github.com/flynn/noise v1.1.0 // indirect
	github.com/francoispqt/gojay v1.2.13 // indirect
	github.com/go-logr/logr v1.3.0 // indirect
	github.com/go-logr/stdr v1.2.2 // indirect
	github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect
	github.com/godbus/dbus/v5 v5.1.0 // indirect
	github.com/google/pprof v0.0.0-20240207164012-fb44976bdcd5 // indirect
	github.com/gorilla/websocket v1.5.1 // indirect
	github.com/hashicorp/errwrap v1.1.0 // indirect
	github.com/huin/goupnp v1.3.0 // indirect
	github.com/ipfs/go-log v1.0.5 // indirect
	github.com/ipld/go-ipld-prime v0.20.0 // indirect
	github.com/jackpal/go-nat-pmp v1.0.2 // indirect
	github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect
	github.com/jbenet/goprocess v0.1.4 // indirect
	github.com/klauspost/compress v1.17.8 // indirect
	github.com/klauspost/cpuid/v2 v2.2.7 // indirect
	github.com/koron/go-ssdp v0.0.4 // indirect
	github.com/libp2p/go-buffer-pool v0.1.0 // indirect
	github.com/libp2p/go-cidranger v1.1.0 // indirect
	github.com/libp2p/go-flow-metrics v0.1.0 // indirect
	github.com/libp2p/go-libp2p-asn-util v0.4.1 // indirect
	github.com/libp2p/go-nat v0.2.0 // indirect
	github.com/libp2p/go-reuseport v0.4.0 // indirect
	github.com/libp2p/go-yamux/v4 v4.0.1 // indirect
	github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect
	github.com/mattn/go-isatty v0.0.20 // indirect
	github.com/miekg/dns v1.1.58 // indirect
	github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect
	github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect
	github.com/minio/sha256-simd v1.0.1 // indirect
	github.com/mr-tron/base58 v1.2.0 // indirect
	github.com/multiformats/go-base36 v0.2.0 // indirect
	github.com/multiformats/go-multiaddr-dns v0.3.1 // indirect
	github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect
	github.com/multiformats/go-multicodec v0.9.0 // indirect
	github.com/multiformats/go-varint v0.0.7 // indirect
	github.com/onsi/ginkgo/v2 v2.15.0 // indirect
	github.com/opencontainers/runtime-spec v1.2.0 // indirect
	github.com/opentracing/opentracing-go v1.2.0 // indirect
	github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect
	github.com/pion/datachannel v1.5.6 // indirect
	github.com/pion/dtls/v2 v2.2.11 // indirect
	github.com/pion/ice/v2 v2.3.24 // indirect
	github.com/pion/interceptor v0.1.29 // indirect
	github.com/pion/logging v0.2.2 // indirect
	github.com/pion/mdns v0.0.12 // indirect
	github.com/pion/randutil v0.1.0 // indirect
	github.com/pion/rtcp v1.2.14 // indirect
	github.com/pion/rtp v1.8.6 // indirect
	github.com/pion/sctp v1.8.16 // indirect
	github.com/pion/sdp/v3 v3.0.9 // indirect
	github.com/pion/srtp/v2 v2.0.18 // indirect
	github.com/pion/stun v0.6.1 // indirect
	github.com/pion/transport/v2 v2.2.5 // indirect
	github.com/pion/turn/v2 v2.1.6 // indirect
	github.com/pion/webrtc/v3 v3.2.40 // indirect
	github.com/pkg/errors v0.9.1 // indirect
	github.com/pmezard/go-difflib v1.0.0 // indirect
	github.com/polydawn/refmt v0.89.0 // indirect
	github.com/prometheus/client_golang v1.19.1 // indirect
	github.com/prometheus/client_model v0.6.1 // indirect
	github.com/prometheus/common v0.48.0 // indirect
	github.com/prometheus/procfs v0.12.0 // indirect
	github.com/quic-go/qpack v0.4.0 // indirect
	github.com/quic-go/quic-go v0.44.0 // indirect
	github.com/quic-go/webtransport-go v0.8.0 // indirect
	github.com/raulk/go-watchdog v1.3.0 // indirect
	github.com/spaolacci/murmur3 v1.1.0 // indirect
	go.opentelemetry.io/otel/metric v1.16.0 // indirect
	go.uber.org/dig v1.17.1 // indirect
	go.uber.org/fx v1.21.1 // indirect
	go.uber.org/mock v0.4.0 // indirect
	golang.org/x/crypto v0.23.0 // indirect
	golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 // indirect
	golang.org/x/mod v0.17.0 // indirect
	golang.org/x/net v0.25.0 // indirect
	golang.org/x/sync v0.7.0 // indirect
	golang.org/x/sys v0.20.0 // indirect
	golang.org/x/text v0.15.0 // indirect
	golang.org/x/tools v0.21.0 // indirect
	google.golang.org/protobuf v1.34.1 // indirect
	gopkg.in/yaml.v3 v3.0.1 // indirect
	lukechampine.com/blake3 v1.2.1 // indirect
)
820
go-libp2p-kad-dht/go.sum
Normal file
@ -0,0 +1,820 @@
|
||||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.37.0/go.mod h1:TS1dMSSfndXH133OKGwekG838Om/cQT0BUHV3HcBgoo=
|
||||
dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU=
|
||||
dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU=
|
||||
dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4=
|
||||
dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU=
|
||||
git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg=
|
||||
github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/Jorropo/jsync v1.0.1 h1:6HgRolFZnsdfzRUj+ImB9og1JYOxQoReSywkHOGSaUU=
|
||||
github.com/Jorropo/jsync v1.0.1/go.mod h1:jCOZj3vrBCri3bSU3ErUYvevKlnbssrXeCivybS5ABQ=
|
||||
github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
|
||||
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
|
||||
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
|
||||
github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
|
||||
github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
|
||||
github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o=
|
||||
github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g=
|
||||
github.com/btcsuite/btcd v0.0.0-20190824003749-130ea5bddde3/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI=
|
||||
github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
|
||||
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA=
|
||||
github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg=
|
||||
github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg=
|
||||
github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY=
|
||||
github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc=
|
||||
github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY=
|
||||
github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs=
|
||||
github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
|
||||
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/cloudflare/circl v1.3.9 h1:QFrlgFYf2Qpi8bSpVPK1HBvWpx16v/1TZivyo7pGuBE=
|
||||
github.com/cloudflare/circl v1.3.9/go.mod h1:PDRU+oXvdD7KCtgKxW95M5Z8BpSCJXQORiZFnBQS5QU=
|
||||
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
|
||||
github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE=
|
||||
github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM=
|
||||
github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw=
|
||||
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
|
||||
github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
|
||||
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
|
||||
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
|
||||
github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
||||
github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
|
||||
github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
|
||||
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
|
||||
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
|
||||
github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c h1:pFUpOrbxDR6AkioZ1ySsx5yxlDQZ8stG2b88gTPxgJU=
|
||||
github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c/go.mod h1:6UhI8N9EjYm1c2odKpFpAYeR8dsBeM7PtzQhRgxRr9U=
|
||||
github.com/decred/dcrd/crypto/blake256 v1.0.1 h1:7PltbUIQB7u/FfZ39+DGa/ShuMyJ5ilcvdfma9wOH6Y=
|
||||
github.com/decred/dcrd/crypto/blake256 v1.0.1/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo=
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 h1:rpfIENRNNilwHwZeG5+P150SMrnNEcHYvcCuK6dPZSg=
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0=
|
||||
github.com/dgraph-io/badger v1.6.0/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4=
|
||||
github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
|
||||
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
|
||||
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
|
||||
github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
|
||||
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
|
||||
github.com/elastic/gosigar v0.12.0/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs=
|
||||
github.com/elastic/gosigar v0.14.2 h1:Dg80n8cr90OZ7x+bAax/QjoW/XqTI11RmA79ZwIm9/4=
|
||||
github.com/elastic/gosigar v0.14.2/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs=
|
||||
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
|
||||
github.com/flynn/noise v1.1.0 h1:KjPQoQCEFdZDiP03phOvGi11+SVVhBG2wOWAorLsstg=
|
||||
github.com/flynn/noise v1.1.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag=
|
||||
github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk=
|
||||
github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY=
|
||||
github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY=
|
||||
github.com/frankban/quicktest v1.14.4/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
||||
github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
|
||||
github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98=
|
||||
github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
|
||||
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||
github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY=
|
||||
github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
|
||||
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
|
||||
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
|
||||
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
|
||||
github.com/go-yaml/yaml v2.1.0+incompatible/go.mod h1:w2MrLa16VYP0jy6N7M5kHaCkaLENm+P+Tv+MfurjSw0=
|
||||
github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
|
||||
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
|
||||
github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk=
|
||||
github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
|
||||
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
|
||||
github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
|
||||
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
|
||||
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
|
||||
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E=
|
||||
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
|
||||
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
|
||||
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
|
||||
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
|
||||
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
|
||||
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
|
||||
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
|
||||
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
||||
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
|
||||
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
|
||||
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
|
||||
github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8=
|
||||
github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo=
|
||||
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
|
||||
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||
github.com/google/pprof v0.0.0-20240207164012-fb44976bdcd5 h1:E/LAvt58di64hlYjx7AsNS6C/ysHWYo+2qPCZKTQhRo=
|
||||
github.com/google/pprof v0.0.0-20240207164012-fb44976bdcd5/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik=
|
||||
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
|
||||
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4=
|
||||
github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY=
|
||||
github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20190430165422-3e4dfb77656c h1:7lF+Vz0LqiRidnzC1Oq86fpX1q/iEv2KJdrCtttYjT4=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20190430165422-3e4dfb77656c/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||
github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY=
|
||||
github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY=
|
||||
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
|
||||
github.com/gxed/hashland/keccakpg v0.0.1/go.mod h1:kRzw3HkwxFU1mpmPP8v1WyQzwdGfmKFJ6tItnhQ67kU=
|
||||
github.com/gxed/hashland/murmur3 v0.0.1/go.mod h1:KjXop02n4/ckmZSnY2+HKcLud/tcmvhST0bie/0lS48=
|
||||
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
||||
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
|
||||
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
||||
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
|
||||
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
|
||||
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc=
|
||||
github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
|
||||
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
|
||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||
github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc=
|
||||
github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8=
|
||||
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
|
||||
github.com/ipfs/boxo v0.10.0 h1:tdDAxq8jrsbRkYoF+5Rcqyeb91hgWe2hp7iLu7ORZLY=
|
||||
github.com/ipfs/boxo v0.10.0/go.mod h1:Fg+BnfxZ0RPzR0nOodzdIq3A7KgoWAOWsEIImrIQdBM=
|
||||
github.com/ipfs/go-cid v0.0.3/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM=
|
||||
github.com/ipfs/go-cid v0.0.7/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I=
|
||||
github.com/ipfs/go-cid v0.4.1 h1:A/T3qGvxi4kpKWWcPC/PgbvDA2bjVLO7n4UeVwnbs/s=
|
||||
github.com/ipfs/go-cid v0.4.1/go.mod h1:uQHwDeX4c6CtyrFwdqyhpNcxVewur1M7l7fNU7LKwZk=
|
||||
github.com/ipfs/go-datastore v0.1.0/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE=
|
||||
github.com/ipfs/go-datastore v0.1.1/go.mod h1:w38XXW9kVFNp57Zj5knbKWM2T+KOZCGDRVNdgPHtbHw=
|
||||
github.com/ipfs/go-datastore v0.6.0 h1:JKyz+Gvz1QEZw0LsX1IBn+JFCJQH4SJVFtM4uWU0Myk=
|
||||
github.com/ipfs/go-datastore v0.6.0/go.mod h1:rt5M3nNbSO/8q1t4LNkLyUwRs8HupMeN/8O4Vn9YAT8=
|
||||
github.com/ipfs/go-detect-race v0.0.1 h1:qX/xay2W3E4Q1U7d9lNs1sU9nvguX0a7319XbyQ6cOk=
|
||||
github.com/ipfs/go-detect-race v0.0.1/go.mod h1:8BNT7shDZPo99Q74BpGMK+4D8Mn4j46UU0LZ723meps=
|
||||
github.com/ipfs/go-ds-badger v0.0.7/go.mod h1:qt0/fWzZDoPW6jpQeqUjR5kBfhDNB65jd9YlmAvpQBk=
|
||||
github.com/ipfs/go-ds-leveldb v0.1.0/go.mod h1:hqAW8y4bwX5LWcCtku2rFNX3vjDZCy5LZCg+cSZvYb8=
|
||||
github.com/ipfs/go-ipfs-delay v0.0.0-20181109222059-70721b86a9a8/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw=
|
||||
github.com/ipfs/go-ipfs-util v0.0.1/go.mod h1:spsl5z8KUnrve+73pOhSVZND1SIxPW5RyBCNzQxlJBc=
|
||||
github.com/ipfs/go-ipfs-util v0.0.2 h1:59Sswnk1MFaiq+VcaknX7aYEyGyGDAA73ilhEK2POp8=
|
||||
github.com/ipfs/go-ipfs-util v0.0.2/go.mod h1:CbPtkWJzjLdEcezDns2XYaehFVNXG9zrdrtMecczcsQ=
|
||||
github.com/ipfs/go-log v0.0.1/go.mod h1:kL1d2/hzSpI0thNYjiKfjanbVNU+IIGA/WnNESY9leM=
|
||||
github.com/ipfs/go-log v1.0.5 h1:2dOuUCB1Z7uoczMWgAyDck5JLb72zHzrMnGnCNNbvY8=
|
||||
github.com/ipfs/go-log v1.0.5/go.mod h1:j0b8ZoR+7+R99LD9jZ6+AJsrzkPbSXbZfGakb5JPtIo=
|
||||
github.com/ipfs/go-log/v2 v2.1.3/go.mod h1:/8d0SH3Su5Ooc31QlL1WysJhvyOTDCjcCZ9Axpmri6g=
|
||||
github.com/ipfs/go-log/v2 v2.5.1 h1:1XdUzF7048prq4aBjDQQ4SL5RxftpRGdXhNRwKSAlcY=
|
||||
github.com/ipfs/go-log/v2 v2.5.1/go.mod h1:prSpmC1Gpllc9UYWxDiZDreBYw7zp4Iqp1kOLU9U5UI=
|
||||
github.com/ipld/go-ipld-prime v0.20.0 h1:Ud3VwE9ClxpO2LkCYP7vWPc0Fo+dYdYzgxUJZ3uRG4g=
|
||||
github.com/ipld/go-ipld-prime v0.20.0/go.mod h1:PzqZ/ZR981eKbgdr3y2DJYeD/8bgMawdGVlJDE8kK+M=
|
||||
github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus=
|
||||
github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
|
||||
github.com/jbenet/go-cienv v0.1.0/go.mod h1:TqNnHUmJgXau0nCzC7kXWeotg3J9W34CUv5Djy1+FlA=
|
||||
github.com/jbenet/go-temp-err-catcher v0.1.0 h1:zpb3ZH6wIE8Shj2sKS+khgRvf7T7RABoLk/+KKHggpk=
|
||||
github.com/jbenet/go-temp-err-catcher v0.1.0/go.mod h1:0kJRvmDZXNMIiJirNPEYfhpPwbGVtZVWC34vc5WLsDk=
|
||||
github.com/jbenet/goprocess v0.0.0-20160826012719-b497e2f366b8/go.mod h1:Ly/wlsjFq/qrU3Rar62tu1gASgGw6chQbSh/XgIIXCY=
|
||||
github.com/jbenet/goprocess v0.1.3/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4=
|
||||
github.com/jbenet/goprocess v0.1.4 h1:DRGOFReOMqqDNXwW70QkacFW0YN9QnwLV0Vqk+3oU0o=
|
||||
github.com/jbenet/goprocess v0.1.4/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4=
|
||||
github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU=
|
||||
github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
|
||||
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
|
||||
github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ=
|
||||
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
||||
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
|
||||
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
|
||||
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
|
||||
github.com/kami-zh/go-capturer v0.0.0-20171211120116-e492ea43421d/go.mod h1:P2viExyCEfeWGU259JnaQ34Inuec4R38JCyBx2edgD0=
|
||||
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
|
||||
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
|
||||
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
|
||||
github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU=
|
||||
github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
|
||||
github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
|
||||
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
|
||||
github.com/klauspost/cpuid/v2 v2.2.3/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY=
|
||||
github.com/klauspost/cpuid/v2 v2.2.6/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
|
||||
github.com/klauspost/cpuid/v2 v2.2.7 h1:ZWSB3igEs+d0qvnxR/ZBzXVmxkgt8DdzP6m9pfuVLDM=
|
||||
github.com/klauspost/cpuid/v2 v2.2.7/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/koron/go-ssdp v0.0.4 h1:1IDwrghSKYM7yLf7XCzbByg2sJ/JcNOZRXS2jczTwz0=
|
||||
github.com/koron/go-ssdp v0.0.4/go.mod h1:oDXq+E5IL5q0U8uSBcoAXzTzInwy5lEgC91HoKtbmZk=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/libp2p/go-buffer-pool v0.0.1/go.mod h1:xtyIz9PMobb13WaxR6Zo1Pd1zXJKYg0a8KiIvDp3TzQ=
|
||||
github.com/libp2p/go-buffer-pool v0.0.2/go.mod h1:MvaB6xw5vOrDl8rYZGLFdKAuk/hRoRZd1Vi32+RXyFM=
|
||||
github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8=
|
||||
github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg=
|
||||
github.com/libp2p/go-cidranger v1.1.0 h1:ewPN8EZ0dd1LSnrtuwd4709PXVcITVeuwbag38yPW7c=
|
||||
github.com/libp2p/go-cidranger v1.1.0/go.mod h1:KWZTfSr+r9qEo9OkI9/SIEeAtw+NNoU0dXIXt15Okic=
|
||||
github.com/libp2p/go-flow-metrics v0.0.1/go.mod h1:Iv1GH0sG8DtYN3SVJ2eG221wMiNpZxBdp967ls1g+k8=
|
||||
github.com/libp2p/go-flow-metrics v0.0.3/go.mod h1:HeoSNUrOJVK1jEpDqVEiUOIXqhbnS27omG0uWU5slZs=
|
||||
github.com/libp2p/go-flow-metrics v0.1.0 h1:0iPhMI8PskQwzh57jB9WxIuIOQ0r+15PChFGkx3Q3WM=
|
||||
github.com/libp2p/go-flow-metrics v0.1.0/go.mod h1:4Xi8MX8wj5aWNDAZttg6UPmc0ZrnFNsMtpsYUClFtro=
|
||||
github.com/libp2p/go-libp2p-asn-util v0.4.1 h1:xqL7++IKD9TBFMgnLPZR6/6iYhawHKHl950SO9L6n94=
|
||||
github.com/libp2p/go-libp2p-asn-util v0.4.1/go.mod h1:d/NI6XZ9qxw67b4e+NgpQexCIiFYJjErASrYW4PFDN8=
|
||||
github.com/libp2p/go-libp2p-core v0.2.4/go.mod h1:STh4fdfa5vDYr0/SzYYeqnt+E6KfEV5VxfIrm0bcI0g=
|
||||
github.com/libp2p/go-libp2p-core v0.3.0/go.mod h1:ACp3DmS3/N64c2jDzcV429ukDpicbL6+TrrxANBjPGw=
|
||||
github.com/libp2p/go-libp2p-kbucket v0.3.1/go.mod h1:oyjT5O7tS9CQurok++ERgc46YLwEpuGoFq9ubvoUOio=
|
||||
github.com/libp2p/go-libp2p-kbucket v0.6.3 h1:p507271wWzpy2f1XxPzCQG9NiN6R6lHL9GiSErbQQo0=
|
||||
github.com/libp2p/go-libp2p-kbucket v0.6.3/go.mod h1:RCseT7AH6eJWxxk2ol03xtP9pEHetYSPXOaJnOiD8i0=
|
||||
github.com/libp2p/go-libp2p-peerstore v0.1.4/go.mod h1:+4BDbDiiKf4PzpANZDAT+knVdLxvqh7hXOujessqdzs=
|
||||
github.com/libp2p/go-libp2p-record v0.2.0 h1:oiNUOCWno2BFuxt3my4i1frNrt7PerzB3queqa1NkQ0=
|
||||
github.com/libp2p/go-libp2p-record v0.2.0/go.mod h1:I+3zMkvvg5m2OcSdoL0KPljyJyvNDFGKX7QdlpYUcwk=
|
||||
github.com/libp2p/go-libp2p-routing-helpers v0.7.2 h1:xJMFyhQ3Iuqnk9Q2dYE1eUTzsah7NLw3Qs2zjUV78T0=
|
||||
github.com/libp2p/go-libp2p-routing-helpers v0.7.2/go.mod h1:cN4mJAD/7zfPKXBcs9ze31JGYAZgzdABEm+q/hkswb8=
|
||||
github.com/libp2p/go-libp2p-testing v0.12.0 h1:EPvBb4kKMWO29qP4mZGyhVzUyR25dvfUIK5WDu6iPUA=
|
||||
github.com/libp2p/go-libp2p-testing v0.12.0/go.mod h1:KcGDRXyN7sQCllucn1cOOS+Dmm7ujhfEyXQL5lvkcPg=
|
||||
github.com/libp2p/go-libp2p-xor v0.1.0 h1:hhQwT4uGrBcuAkUGXADuPltalOdpf9aag9kaYNT2tLA=
|
||||
github.com/libp2p/go-libp2p-xor v0.1.0/go.mod h1:LSTM5yRnjGZbWNTA/hRwq2gGFrvRIbQJscoIL/u6InY=
|
||||
github.com/libp2p/go-msgio v0.0.4/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ=
|
||||
github.com/libp2p/go-msgio v0.3.0 h1:mf3Z8B1xcFN314sWX+2vOTShIE0Mmn2TXn3YCUQGNj0=
|
||||
github.com/libp2p/go-msgio v0.3.0/go.mod h1:nyRM819GmVaF9LX3l03RMh10QdOroF++NBbxAb0mmDM=
|
||||
github.com/libp2p/go-nat v0.2.0 h1:Tyz+bUFAYqGyJ/ppPPymMGbIgNRH+WqC5QrT5fKrrGk=
|
||||
github.com/libp2p/go-nat v0.2.0/go.mod h1:3MJr+GRpRkyT65EpVPBstXLvOlAPzUVlG6Pwg9ohLJk=
|
||||
github.com/libp2p/go-netroute v0.2.1 h1:V8kVrpD8GK0Riv15/7VN6RbUQ3URNZVosw7H2v9tksU=
|
||||
github.com/libp2p/go-netroute v0.2.1/go.mod h1:hraioZr0fhBjG0ZRXJJ6Zj2IVEVNx6tDTFQfSmcq7mQ=
|
||||
github.com/libp2p/go-openssl v0.0.3/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc=
|
||||
github.com/libp2p/go-openssl v0.0.4/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc=
|
||||
github.com/libp2p/go-reuseport v0.4.0 h1:nR5KU7hD0WxXCJbmw7r2rhRYruNRl2koHw8fQscQm2s=
|
||||
github.com/libp2p/go-reuseport v0.4.0/go.mod h1:ZtI03j/wO5hZVDFo2jKywN6bYKWLOy8Se6DrI2E1cLU=
|
||||
github.com/libp2p/go-yamux/v4 v4.0.1 h1:FfDR4S1wj6Bw2Pqbc8Uz7pCxeRBPbwsBbEdfwiCypkQ=
|
||||
github.com/libp2p/go-yamux/v4 v4.0.1/go.mod h1:NWjl8ZTLOGlozrXSOZ/HlfG++39iKNnM5wwmtQP1YB4=
|
||||
github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI=
|
||||
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
|
||||
github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||
github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||
github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8u83wA0rVZ8ttrq5CpaPZdvrK0LP2lOk=
|
||||
github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU=
|
||||
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
|
||||
github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ=
|
||||
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
|
||||
github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
|
||||
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
|
||||
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
|
||||
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE=
|
||||
github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4=
|
||||
github.com/miekg/dns v1.1.58 h1:ca2Hdkz+cDg/7eNF6V56jjzuZ4aCAE+DbVkILdQWG/4=
|
||||
github.com/miekg/dns v1.1.58/go.mod h1:Ypv+3b/KadlvW9vJfXOTf300O4UqaHFzFCuHz+rPkBY=
|
||||
github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c h1:bzE/A84HN25pxAuk9Eej1Kz9OUelF97nAc82bDquQI8=
|
||||
github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c/go.mod h1:0SQS9kMwD2VsyFEB++InYyBJroV/FRmBgcydeSUcJms=
|
||||
github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b h1:z78hV3sbSMAUoyUMM0I83AUIT6Hu17AWfgjzIbtrYFc=
|
||||
github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b/go.mod h1:lxPUiZwKoFL8DUUmalo2yJJUCxbPKtm8OKfqr2/FTNU=
|
||||
github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc h1:PTfri+PuQmWDqERdnNMiD9ZejrlswWrCpBEZgWOiTrc=
|
||||
github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc/go.mod h1:cGKTAVKx4SxOuR/czcZ/E2RSJ3sfHs8FpHhQ5CWMf9s=
|
||||
github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ=
|
||||
github.com/minio/sha256-simd v0.0.0-20190131020904-2d45a736cd16/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U=
|
||||
github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM=
|
||||
github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM=
|
||||
github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM=
|
||||
github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM=
|
||||
github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8=
|
||||
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
||||
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/mr-tron/base58 v1.1.0/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8=
|
||||
github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
|
||||
github.com/mr-tron/base58 v1.1.3/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
|
||||
github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o=
|
||||
github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
|
||||
github.com/multiformats/go-base32 v0.0.3/go.mod h1:pLiuGC8y0QR3Ue4Zug5UzK9LjgbkL8NSQj0zQ5Nz/AA=
|
||||
github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE=
|
||||
github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI=
|
||||
github.com/multiformats/go-base36 v0.1.0/go.mod h1:kFGE83c6s80PklsHO9sRn2NCoffoRdUUOENyW/Vv6sM=
|
||||
github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0=
|
||||
github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4=
|
||||
github.com/multiformats/go-multiaddr-fmt v0.1.0 h1:WLEFClPycPkp4fnIzoFoV9FVd49/eQsuaL3/CWe167E=
|
||||
github.com/multiformats/go-multiaddr-fmt v0.1.0/go.mod h1:hGtDIW4PU4BqJ50gW2quDuPVjyWNZxToGUh/HwTZYJo=
|
||||
github.com/multiformats/go-multiaddr-net v0.1.1/go.mod h1:5JNbcfBOP4dnhoZOv10JJVkJO0pCCEf8mTnipAo2UZQ=
|
||||
github.com/multiformats/go-multibase v0.0.1/go.mod h1:bja2MqRZ3ggyXtZSEDKpl0uO/gviWFaSteVbWT51qgs=
|
||||
github.com/multiformats/go-multibase v0.0.3/go.mod h1:5+1R4eQrT3PkYZ24C3W2Ue2tPwIdYQD509ZjSb5y9Oc=
|
||||
github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g=
|
||||
github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk=
|
||||
github.com/multiformats/go-multicodec v0.9.0 h1:pb/dlPnzee/Sxv/j4PmkDRxCOi3hXTz3IbPKOXWJkmg=
|
||||
github.com/multiformats/go-multicodec v0.9.0/go.mod h1:L3QTQvMIaVBkXOXXtVmYE+LI16i14xuaojr/H7Ai54k=
|
||||
github.com/multiformats/go-multihash v0.0.1/go.mod h1:w/5tugSrLEbWqlcgJabL3oHFKTwfvkofsjW2Qa1ct4U=
|
||||
github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew=
|
||||
github.com/multiformats/go-multihash v0.0.10/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew=
|
||||
github.com/multiformats/go-multihash v0.0.13/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc=
|
||||
github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U=
|
||||
github.com/multiformats/go-multihash v0.2.3/go.mod h1:dXgKXCXjBzdscBLk9JkjINiEsCKRVch90MdaGiKsvSM=
|
||||
github.com/multiformats/go-multistream v0.5.0 h1:5htLSLl7lvJk3xx3qT/8Zm9J4K8vEOf/QGkvOGQAyiE=
|
||||
github.com/multiformats/go-multistream v0.5.0/go.mod h1:n6tMZiwiP2wUsR8DgfDWw1dydlEqV3l6N3/GBsX6ILA=
|
||||
github.com/multiformats/go-varint v0.0.5/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE=
|
||||
github.com/multiformats/go-varint v0.0.6/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE=
|
||||
github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8=
|
||||
github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU=
|
||||
github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo=
|
||||
github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM=
|
||||
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo/v2 v2.15.0 h1:79HwNRBAZHOEwrczrgSOPy+eFTTlIGELKy5as+ClttY=
|
||||
github.com/onsi/ginkgo/v2 v2.15.0/go.mod h1:HlxMHtYF57y6Dpf+mc5529KKmSq9h2FpCF+/ZkwUxKM=
|
||||
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
||||
github.com/onsi/gomega v1.30.0 h1:hvMK7xYz4D3HapigLTeGdId/NcfQx1VHMJc60ew99+8=
|
||||
github.com/onsi/gomega v1.30.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ=
|
||||
github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
|
||||
github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE7dzrbT927iTk=
|
||||
github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
|
||||
github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
|
||||
github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs=
|
||||
github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
|
||||
github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8=
|
||||
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0=
|
||||
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y=
|
||||
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
|
||||
github.com/pion/datachannel v1.5.6 h1:1IxKJntfSlYkpUj8LlYRSWpYiTTC02nUrOE8T3DqGeg=
|
||||
github.com/pion/datachannel v1.5.6/go.mod h1:1eKT6Q85pRnr2mHiWHxJwO50SfZRtWHTsNIVb/NfGW4=
|
||||
github.com/pion/dtls/v2 v2.2.7/go.mod h1:8WiMkebSHFD0T+dIU+UeBaoV7kDhOW5oDCzZ7WZ/F9s=
|
||||
github.com/pion/dtls/v2 v2.2.11 h1:9U/dpCYl1ySttROPWJgqWKEylUdT0fXp/xst6JwY5Ks=
|
||||
github.com/pion/dtls/v2 v2.2.11/go.mod h1:d9SYc9fch0CqK90mRk1dC7AkzzpwJj6u2GU3u+9pqFE=
|
||||
github.com/pion/ice/v2 v2.3.24 h1:RYgzhH/u5lH0XO+ABatVKCtRd+4U1GEaCXSMjNr13tI=
|
||||
github.com/pion/ice/v2 v2.3.24/go.mod h1:KXJJcZK7E8WzrBEYnV4UtqEZsGeWfHxsNqhVcVvgjxw=
|
||||
github.com/pion/interceptor v0.1.29 h1:39fsnlP1U8gw2JzOFWdfCU82vHvhW9o0rZnZF56wF+M=
|
||||
github.com/pion/interceptor v0.1.29/go.mod h1:ri+LGNjRUc5xUNtDEPzfdkmSqISixVTBF/z/Zms/6T4=
|
||||
github.com/pion/logging v0.2.2 h1:M9+AIj/+pxNsDfAT64+MAVgJO0rsyLnoJKCqf//DoeY=
|
||||
github.com/pion/logging v0.2.2/go.mod h1:k0/tDVsRCX2Mb2ZEmTqNa7CWsQPc+YYCB7Q+5pahoms=
|
||||
github.com/pion/mdns v0.0.12 h1:CiMYlY+O0azojWDmxdNr7ADGrnZ+V6Ilfner+6mSVK8=
|
||||
github.com/pion/mdns v0.0.12/go.mod h1:VExJjv8to/6Wqm1FXK+Ii/Z9tsVk/F5sD/N70cnYFbk=
|
||||
github.com/pion/randutil v0.1.0 h1:CFG1UdESneORglEsnimhUjf33Rwjubwj6xfiOXBa3mA=
|
||||
github.com/pion/randutil v0.1.0/go.mod h1:XcJrSMMbbMRhASFVOlj/5hQial/Y8oH/HVo7TBZq+j8=
|
||||
github.com/pion/rtcp v1.2.12/go.mod h1:sn6qjxvnwyAkkPzPULIbVqSKI5Dv54Rv7VG0kNxh9L4=
|
||||
github.com/pion/rtcp v1.2.14 h1:KCkGV3vJ+4DAJmvP0vaQShsb0xkRfWkO540Gy102KyE=
|
||||
github.com/pion/rtcp v1.2.14/go.mod h1:sn6qjxvnwyAkkPzPULIbVqSKI5Dv54Rv7VG0kNxh9L4=
|
||||
github.com/pion/rtp v1.8.3/go.mod h1:pBGHaFt/yW7bf1jjWAoUjpSNoDnw98KTMg+jWWvziqU=
|
||||
github.com/pion/rtp v1.8.6 h1:MTmn/b0aWWsAzux2AmP8WGllusBVw4NPYPVFFd7jUPw=
|
||||
github.com/pion/rtp v1.8.6/go.mod h1:pBGHaFt/yW7bf1jjWAoUjpSNoDnw98KTMg+jWWvziqU=
|
||||
github.com/pion/sctp v1.8.13/go.mod h1:YKSgO/bO/6aOMP9LCie1DuD7m+GamiK2yIiPM6vH+GA=
|
||||
github.com/pion/sctp v1.8.16 h1:PKrMs+o9EMLRvFfXq59WFsC+V8mN1wnKzqrv+3D/gYY=
|
||||
github.com/pion/sctp v1.8.16/go.mod h1:P6PbDVA++OJMrVNg2AL3XtYHV4uD6dvfyOovCgMs0PE=
|
||||
github.com/pion/sdp/v3 v3.0.9 h1:pX++dCHoHUwq43kuwf3PyJfHlwIj4hXA7Vrifiq0IJY=
|
||||
github.com/pion/sdp/v3 v3.0.9/go.mod h1:B5xmvENq5IXJimIO4zfp6LAe1fD9N+kFv+V/1lOdz8M=
|
||||
github.com/pion/srtp/v2 v2.0.18 h1:vKpAXfawO9RtTRKZJbG4y0v1b11NZxQnxRl85kGuUlo=
|
||||
github.com/pion/srtp/v2 v2.0.18/go.mod h1:0KJQjA99A6/a0DOVTu1PhDSw0CXF2jTkqOoMg3ODqdA=
|
||||
github.com/pion/stun v0.6.1 h1:8lp6YejULeHBF8NmV8e2787BogQhduZugh5PdhDyyN4=
|
||||
github.com/pion/stun v0.6.1/go.mod h1:/hO7APkX4hZKu/D0f2lHzNyvdkTGtIy3NDmLR7kSz/8=
|
||||
github.com/pion/transport/v2 v2.2.1/go.mod h1:cXXWavvCnFF6McHTft3DWS9iic2Mftcz1Aq29pGcU5g=
|
||||
github.com/pion/transport/v2 v2.2.2/go.mod h1:OJg3ojoBJopjEeECq2yJdXH9YVrUJ1uQ++NjXLOUorc=
|
||||
github.com/pion/transport/v2 v2.2.3/go.mod h1:q2U/tf9FEfnSBGSW6w5Qp5PFWRLRj3NjLhCCgpRK4p0=
|
||||
github.com/pion/transport/v2 v2.2.4/go.mod h1:q2U/tf9FEfnSBGSW6w5Qp5PFWRLRj3NjLhCCgpRK4p0=
|
||||
github.com/pion/transport/v2 v2.2.5 h1:iyi25i/21gQck4hfRhomF6SktmUQjRsRW4WJdhfc3Kc=
|
||||
github.com/pion/transport/v2 v2.2.5/go.mod h1:q2U/tf9FEfnSBGSW6w5Qp5PFWRLRj3NjLhCCgpRK4p0=
|
||||
github.com/pion/transport/v3 v3.0.1/go.mod h1:UY7kiITrlMv7/IKgd5eTUcaahZx5oUN3l9SzK5f5xE0=
|
||||
github.com/pion/transport/v3 v3.0.2 h1:r+40RJR25S9w3jbA6/5uEPTzcdn7ncyU44RWCbHkLg4=
|
||||
github.com/pion/transport/v3 v3.0.2/go.mod h1:nIToODoOlb5If2jF9y2Igfx3PFYWfuXi37m0IlWa/D0=
|
||||
github.com/pion/turn/v2 v2.1.3/go.mod h1:huEpByKKHix2/b9kmTAM3YoX6MKP+/D//0ClgUYR2fY=
|
||||
github.com/pion/turn/v2 v2.1.6 h1:Xr2niVsiPTB0FPtt+yAWKFUkU1eotQbGgpTIld4x1Gc=
|
||||
github.com/pion/turn/v2 v2.1.6/go.mod h1:huEpByKKHix2/b9kmTAM3YoX6MKP+/D//0ClgUYR2fY=
|
||||
github.com/pion/webrtc/v3 v3.2.40 h1:Wtfi6AZMQg+624cvCXUuSmrKWepSB7zfgYDOYqsSOVU=
|
||||
github.com/pion/webrtc/v3 v3.2.40/go.mod h1:M1RAe3TNTD1tzyvqHrbVODfwdPGSXOUo/OgpoGGJqFY=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/polydawn/refmt v0.89.0 h1:ADJTApkvkeBZsN0tBTx8QjpD9JkmxbKp0cxfr9qszm4=
|
||||
github.com/polydawn/refmt v0.89.0/go.mod h1:/zvteZs/GwLtCgZ4BL6CBsk9IKIlexP43ObX9AxTqTw=
|
||||
github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||
github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE=
|
||||
github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho=
|
||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
|
||||
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
|
||||
github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
|
||||
github.com/prometheus/common v0.48.0 h1:QO8U2CdOzSn1BBsmXJXduaaW+dY/5QLjfB8svtSzKKE=
|
||||
github.com/prometheus/common v0.48.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc=
|
||||
github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
|
||||
github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
|
||||
github.com/quic-go/qpack v0.4.0 h1:Cr9BXA1sQS2SmDUWjSofMPNKmvF6IiIfDRmgU0w1ZCo=
|
||||
github.com/quic-go/qpack v0.4.0/go.mod h1:UZVnYIfi5GRk+zI9UMaCPsmZ2xKJP7XBUvVyT1Knj9A=
|
||||
github.com/quic-go/quic-go v0.44.0 h1:So5wOr7jyO4vzL2sd8/pD9Kesciv91zSk8BoFngItQ0=
|
||||
github.com/quic-go/quic-go v0.44.0/go.mod h1:z4cx/9Ny9UtGITIPzmPTXh1ULfOyWh4qGQlpnPcWmek=
|
||||
github.com/quic-go/webtransport-go v0.8.0 h1:HxSrwun11U+LlmwpgM1kEqIqH90IT4N8auv/cD7QFJg=
|
||||
github.com/quic-go/webtransport-go v0.8.0/go.mod h1:N99tjprW432Ut5ONql/aUhSLT0YVSlwHohQsuac9WaM=
|
||||
github.com/raulk/go-watchdog v1.3.0 h1:oUmdlHxdkXRJlwfG0O9omj8ukerm8MEQavSiDTEtBsk=
|
||||
github.com/raulk/go-watchdog v1.3.0/go.mod h1:fIvOnLbF0b0ZwkB9YU4mOW9Did//4vPZtDqv66NfsMU=
|
||||
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
|
||||
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
|
||||
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
|
||||
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
|
||||
github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY=
|
||||
github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM=
|
||||
github.com/shurcooL/github_flavored_markdown v0.0.0-20181002035957-2122de532470/go.mod h1:2dOwnU2uBioM+SGy2aZoq1f/Sd1l9OkAeAUvjSyvgU0=
|
||||
github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk=
|
||||
github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ=
|
||||
github.com/shurcooL/gofontwoff v0.0.0-20180329035133-29b52fc0a18d/go.mod h1:05UtEgK5zq39gLST6uB0cf3NEHjETfB4Fgr3Gx5R9Vw=
|
||||
github.com/shurcooL/gopherjslib v0.0.0-20160914041154-feb6d3990c2c/go.mod h1:8d3azKNyqcHP1GaQE/c6dDgjkgSx2BZ4IoEi4F1reUI=
|
||||
github.com/shurcooL/highlight_diff v0.0.0-20170515013008-09bb4053de1b/go.mod h1:ZpfEhSmds4ytuByIcDnOLkTHGUI6KNqRNPDLHDk+mUU=
|
||||
github.com/shurcooL/highlight_go v0.0.0-20181028180052-98c3abbbae20/go.mod h1:UDKB5a1T23gOMUJrI+uSuH0VRDStOiUVSjBTRDVBVag=
|
||||
github.com/shurcooL/home v0.0.0-20181020052607-80b7ffcb30f9/go.mod h1:+rgNQw2P9ARFAs37qieuu7ohDNQ3gds9msbT2yn85sg=
|
||||
github.com/shurcooL/htmlg v0.0.0-20170918183704-d01228ac9e50/go.mod h1:zPn1wHpTIePGnXSHpsVPWEktKXHr6+SS6x/IKRb7cpw=
|
||||
github.com/shurcooL/httperror v0.0.0-20170206035902-86b7830d14cc/go.mod h1:aYMfkZ6DWSJPJ6c4Wwz3QtW22G7mf/PEgaB9k/ik5+Y=
|
||||
github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg=
|
||||
github.com/shurcooL/httpgzip v0.0.0-20180522190206-b1c53ac65af9/go.mod h1:919LwcH0M7/W4fcZ0/jy0qGght1GIhqyS/EgWGH2j5Q=
|
||||
github.com/shurcooL/issues v0.0.0-20181008053335-6292fdc1e191/go.mod h1:e2qWDig5bLteJ4fwvDAc2NHzqFEthkqn7aOZAOpj+PQ=
|
||||
github.com/shurcooL/issuesapp v0.0.0-20180602232740-048589ce2241/go.mod h1:NPpHK2TI7iSaM0buivtFUc9offApnI0Alt/K8hcHy0I=
|
||||
github.com/shurcooL/notifications v0.0.0-20181007000457-627ab5aea122/go.mod h1:b5uSkrEVM1jQUspwbixRBhaIjIzL2xazXp6kntxYle0=
|
||||
github.com/shurcooL/octicon v0.0.0-20181028054416-fa4f57f9efb2/go.mod h1:eWdoE5JD4R5UVWDucdOPg1g2fqQRq78IQa9zlOV1vpQ=
|
||||
github.com/shurcooL/reactions v0.0.0-20181006231557-f2e0b4ca5b82/go.mod h1:TCR1lToEk4d2s07G3XGfz2QrgHXg4RJBvjrOozvoWfk=
|
||||
github.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
|
||||
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
|
||||
github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4=
|
||||
github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw=
|
||||
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
|
||||
github.com/smartystreets/assertions v1.2.0 h1:42S6lae5dvLc7BrLu/0ugRtcFVjoJNMC/N3yZFZkDFs=
|
||||
github.com/smartystreets/assertions v1.2.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo=
|
||||
github.com/smartystreets/goconvey v1.7.2 h1:9RBaZCeXEQ3UselpuwUQHltGVXvdwm6cv1hgR6gDIPg=
|
||||
github.com/smartystreets/goconvey v1.7.2/go.mod h1:Vw0tHAZW6lzCRk3xgdin6fKYcG+G3Pg9vgXWeJpQFMM=
|
||||
github.com/smola/gocompat v0.2.0/go.mod h1:1B0MlxbmoZNo3h8guHp8HztB3BSYR5itql9qtVc0ypY=
|
||||
github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE=
|
||||
github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA=
|
||||
github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572/go.mod h1:w0SWMsp6j9O/dk4/ZpIhL+3CkG8ofA2vuv7k+ltqUMc=
|
||||
github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
|
||||
github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
|
||||
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
|
||||
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
|
||||
github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
|
||||
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
|
||||
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
|
||||
github.com/src-d/envconfig v1.0.0/go.mod h1:Q9YQZ7BKITldTBnoxsE5gOeB5y66RyPXeue/R4aaNBc=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
||||
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
||||
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
|
||||
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ=
|
||||
github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA=
|
||||
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
|
||||
github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
|
||||
github.com/urfave/cli v1.22.10/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
|
||||
github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU=
|
||||
github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM=
|
||||
github.com/wangjia184/sortedset v0.0.0-20160527075905-f5d03557ba30/go.mod h1:YkocrP2K2tcw938x9gCOmT5G5eCD6jsTz0SZuyAqwIE=
|
||||
github.com/warpfork/go-wish v0.0.0-20220906213052-39a1cc7a02d0 h1:GDDkbFiaK8jsSDJfjId/PEGEShv6ugrt4kYsC5UIDaQ=
|
||||
github.com/warpfork/go-wish v0.0.0-20220906213052-39a1cc7a02d0/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw=
|
||||
github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 h1:EKhdznlJHPMoKr0XTrX+IlJs1LH3lyx2nfr1dOlZ79k=
|
||||
github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1/go.mod h1:8UvriyWtv5Q5EOgjHaSseUEdkQfvwFv1I/In/O2M9gc=
|
||||
github.com/whyrusleeping/go-logging v0.0.0-20170515211332-0457bb6b88fc/go.mod h1:bopw91TMyo8J3tvftk8xmU2kPmlrt4nScJQZU2hE5EM=
|
||||
github.com/x-cray/logrus-prefixed-formatter v0.5.2/go.mod h1:2duySbKsL6M18s5GU7VPsoEPHyzalCE06qoARUCeBBE=
|
||||
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
|
||||
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
|
||||
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
|
||||
go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA=
|
||||
go.opencensus.io v0.22.1/go.mod h1:Ap50jQcDJrx6rB6VgeeFPtuPIf3wMRvRfrfYDO6+BmA=
|
||||
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
|
||||
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
|
||||
go.opentelemetry.io/otel v1.16.0 h1:Z7GVAX/UkAXPKsy94IU+i6thsQS4nb7LviLpnaNeW8s=
|
||||
go.opentelemetry.io/otel v1.16.0/go.mod h1:vl0h9NUa1D5s1nv3A5vZOYWn8av4K8Ml6JDeHrT/bx4=
|
||||
go.opentelemetry.io/otel/metric v1.16.0 h1:RbrpwVG1Hfv85LgnZ7+txXioPDoh6EdbZHo26Q3hqOo=
|
||||
go.opentelemetry.io/otel/metric v1.16.0/go.mod h1:QE47cpOmkwipPiefDwo2wDzwJrlfxxNYodqc4xnGCo4=
|
||||
go.opentelemetry.io/otel/trace v1.16.0 h1:8JRpaObFoW0pxuVPapkgH8UhHQj+bJW8jJsCZEu5MQs=
|
||||
go.opentelemetry.io/otel/trace v1.16.0/go.mod h1:Yt9vYq1SdNz3xdjZZK7wcXv1qv2pwLkqr2QVwea0ef0=
|
||||
go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
|
||||
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
|
||||
go.uber.org/dig v1.17.1 h1:Tga8Lz8PcYNsWsyHMZ1Vm0OQOUaJNDyvPImgbAu9YSc=
|
||||
go.uber.org/dig v1.17.1/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE=
|
||||
go.uber.org/fx v1.21.1 h1:RqBh3cYdzZS0uqwVeEjOX2p73dddLpym315myy/Bpb0=
|
||||
go.uber.org/fx v1.21.1/go.mod h1:HT2M7d7RHo+ebKGh9NRcrsrHHfpZ60nW3QRubMRfv48=
|
||||
go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
|
||||
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
|
||||
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
|
||||
go.uber.org/mock v0.4.0 h1:VcM4ZOtdbR4f6VXfiOpwpVJDL6lCReaZ6mw31wqh7KU=
|
||||
go.uber.org/mock v0.4.0/go.mod h1:a6FSlNadKUHUa9IP5Vyt1zh4fC7uAwxMutEAscFbkZc=
|
||||
go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
|
||||
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
|
||||
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
|
||||
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
|
||||
go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
|
||||
go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ=
|
||||
go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI=
|
||||
go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
|
||||
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
|
||||
go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE=
|
||||
golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw=
|
||||
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200602180216-279210d13fed/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
|
||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
|
||||
golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE=
|
||||
golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio=
|
||||
golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw=
|
||||
golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg=
|
||||
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
|
||||
golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
|
||||
golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI=
|
||||
golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20230725012225-302865e7556b/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc=
|
||||
golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 h1:vr/HnozRka3pE4EsMEg1lgkXJkTFJCVUX+S/ZT6wYzM=
|
||||
golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842/go.mod h1:XtvwrStGgqGPLc4cjQfWqZHG1YFdYs6swckp8vpsjnc=
|
||||
golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
|
||||
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
||||
golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI=
|
||||
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
golang.org/x/mod v0.11.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA=
|
||||
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190227160552-c95aed5357e7/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190313220215-9f648a60d977/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
|
||||
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
|
||||
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
|
||||
golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
|
||||
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
|
||||
golang.org/x/net v0.13.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA=
|
||||
golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI=
|
||||
golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
|
||||
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
|
||||
golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
|
||||
golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac=
|
||||
golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
|
||||
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190219092855-153ac476189d/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190316082340-a2f829d7f35f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y=
|
||||
golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
|
||||
golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY=
|
||||
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
|
||||
golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o=
|
||||
golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU=
|
||||
golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY=
|
||||
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
|
||||
golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
|
||||
golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
|
||||
golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
|
||||
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||
golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk=
|
||||
golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
|
||||
golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
|
||||
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20181130052023-1c3d964395ce/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
||||
golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA=
|
||||
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
|
||||
golang.org/x/tools v0.21.0 h1:qc0xYgIbsSDt9EyWz05J5wfa7LOVW0YTLOXrqdLAWIw=
|
||||
golang.org/x/tools v0.21.0/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
gonum.org/v1/gonum v0.13.0 h1:a0T3bh+7fhRyqeNbiC3qVHYmkiQgit3wnNan/2c0HMM=
|
||||
gonum.org/v1/gonum v0.13.0/go.mod h1:/WPYRckkfWrhWefxyYTfrTtQR0KH4iyHNuzxqXAKyAU=
|
||||
google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
|
||||
google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
|
||||
google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg=
|
||||
google.golang.org/genproto v0.0.0-20190306203927-b5d61aea6440/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
|
||||
google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
|
||||
google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio=
|
||||
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
||||
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
|
||||
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
|
||||
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
|
||||
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
|
||||
google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg=
|
||||
google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
|
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
|
||||
gopkg.in/src-d/go-cli.v0 v0.0.0-20181105080154-d492247bbc0d/go.mod h1:z+K8VcOYVYcSwSjGebuDL6176A1XskgbtNl64NSg+n8=
|
||||
gopkg.in/src-d/go-log.v1 v1.0.1/go.mod h1:GN34hKP0g305ysm2/hctJ0Y8nWP3zxXXJ8GFabTyABE=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
||||
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o=
|
||||
honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
|
||||
lukechampine.com/blake3 v1.1.6/go.mod h1:tkKEOtDkNtklkXtLNEOGNq5tcV90tJiA1vAA12R78LA=
|
||||
lukechampine.com/blake3 v1.2.1 h1:YuqqRuaqsGV71BV/nm9xlI0MKUv4QC54jQnBChWbGnI=
|
||||
lukechampine.com/blake3 v1.2.1/go.mod h1:0OFRp7fBtAylGVCO40o87sbupkyIGgbpv1+M1k1LM6k=
|
||||
sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck=
|
||||
sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0=
378 go-libp2p-kad-dht/handlers.go Normal file
@ -0,0 +1,378 @@
package dht

import (
	"bytes"
	"context"
	"errors"
	"fmt"
	"time"

	"github.com/libp2p/go-libp2p/core/peer"
	pstore "github.com/libp2p/go-libp2p/p2p/host/peerstore"

	"github.com/gogo/protobuf/proto"
	u "github.com/ipfs/boxo/util"
	ds "github.com/ipfs/go-datastore"
	"github.com/libp2p/go-libp2p-kad-dht/internal"
	pb "github.com/libp2p/go-libp2p-kad-dht/pb"
	recpb "github.com/libp2p/go-libp2p-record/pb"
	"github.com/multiformats/go-base32"
)

// dhtHandler specifies the signature of functions that handle DHT messages.
type dhtHandler func(context.Context, peer.ID, *pb.Message) (*pb.Message, error)

// handlerForMsgType returns the handler for the given message type, or nil if
// the type is unknown or disabled by configuration.
func (dht *IpfsDHT) handlerForMsgType(t pb.Message_MessageType) dhtHandler {
	switch t {
	case pb.Message_FIND_NODE:
		return dht.handleFindPeer
	case pb.Message_PING:
		return dht.handlePing
	}

	if dht.enableValues {
		switch t {
		case pb.Message_GET_VALUE:
			return dht.handleGetValue
		case pb.Message_PUT_VALUE:
			return dht.handlePutValue
		}
	}

	if dht.enableProviders {
		switch t {
		case pb.Message_ADD_PROVIDER:
			return dht.handleAddProvider
		case pb.Message_GET_PROVIDERS:
			return dht.handleGetProviders
		}
	}

	return nil
}

func (dht *IpfsDHT) handleGetValue(ctx context.Context, p peer.ID, pmes *pb.Message) (_ *pb.Message, err error) {
	// first, is there even a key?
	k := pmes.GetKey()
	if len(k) == 0 {
		return nil, errors.New("handleGetValue but no key was provided")
	}

	// setup response
	resp := pb.NewMessage(pmes.GetType(), pmes.GetKey(), pmes.GetClusterLevel())

	rec, err := dht.checkLocalDatastore(ctx, k)
	if err != nil {
		return nil, err
	}
	resp.Record = rec

	// Find closest peer on given cluster to desired key and reply with that info
	closer := dht.betterPeersToQuery(pmes, p, dht.bucketSize)
	if len(closer) > 0 {
		// TODO: pstore.PeerInfos should move to core (=> peerstore.AddrInfos).
		closerinfos := pstore.PeerInfos(dht.peerstore, closer)
		for _, pi := range closerinfos {
			logger.Debugf("handleGetValue returning closer peer: '%s'", pi.ID)
			if len(pi.Addrs) < 1 {
				logger.Warnw("no addresses on peer being sent",
					"local", dht.self,
					"to", p,
					"sending", pi.ID,
				)
			}
		}

		resp.CloserPeers = pb.PeerInfosToPBPeers(dht.host.Network(), closerinfos)
	}

	return resp, nil
}

// checkLocalDatastore looks up the record for key k in the local datastore,
// discarding it if it is malformed or older than the configured maxRecordAge.
func (dht *IpfsDHT) checkLocalDatastore(ctx context.Context, k []byte) (*recpb.Record, error) {
	logger.Debugf("%s handleGetValue looking into ds", dht.self)
	dskey := convertToDsKey(k)
	buf, err := dht.datastore.Get(ctx, dskey)
	logger.Debugf("%s handleGetValue looking into ds GOT %v", dht.self, buf)

	if err == ds.ErrNotFound {
		return nil, nil
	}

	// if we got an unexpected error, bail.
	if err != nil {
		return nil, err
	}

	// if we have the value, send it back
	logger.Debugf("%s handleGetValue success!", dht.self)

	rec := new(recpb.Record)
	err = proto.Unmarshal(buf, rec)
	if err != nil {
		logger.Debug("failed to unmarshal DHT record from datastore")
		return nil, err
	}

	var recordIsBad bool
	recvtime, err := u.ParseRFC3339(rec.GetTimeReceived())
	if err != nil {
		logger.Info("either no receive time set on record, or it was invalid: ", err)
		recordIsBad = true
	}

	if time.Since(recvtime) > dht.maxRecordAge {
		logger.Debug("old record found, tossing.")
		recordIsBad = true
	}

	// NOTE: We do not verify the record here beyond checking these timestamps.
	// We put the burden of checking the records on the requester, as checking
	// a record may be computationally expensive.

	if recordIsBad {
		err := dht.datastore.Delete(ctx, dskey)
		if err != nil {
			logger.Error("Failed to delete bad record from datastore: ", err)
		}

		return nil, nil // can treat this as not having the record at all
	}

	return rec, nil
}

// cleanRecord cleans the record (to avoid storing arbitrary data).
func cleanRecord(rec *recpb.Record) {
	rec.TimeReceived = ""
}

// handlePutValue stores a value in this peer's local storage.
func (dht *IpfsDHT) handlePutValue(ctx context.Context, p peer.ID, pmes *pb.Message) (_ *pb.Message, err error) {
	if len(pmes.GetKey()) == 0 {
		return nil, errors.New("handlePutValue but no key was provided")
	}

	rec := pmes.GetRecord()
	if rec == nil {
		logger.Debugw("got nil record from", "from", p)
		return nil, errors.New("nil record")
	}

	if !bytes.Equal(pmes.GetKey(), rec.GetKey()) {
		return nil, errors.New("put key doesn't match record key")
	}

	cleanRecord(rec)

	// Make sure the record is valid (not expired, valid signature etc)
	if err = dht.Validator.Validate(string(rec.GetKey()), rec.GetValue()); err != nil {
		logger.Infow("bad dht record in PUT", "from", p, "key", internal.LoggableRecordKeyBytes(rec.GetKey()), "error", err)
		return nil, err
	}

	dskey := convertToDsKey(rec.GetKey())

	// fetch the striped lock for this key
	var indexForLock byte
	if len(rec.GetKey()) == 0 {
		indexForLock = 0
	} else {
		indexForLock = rec.GetKey()[len(rec.GetKey())-1]
	}
	lk := &dht.stripedPutLocks[indexForLock]
	lk.Lock()
	defer lk.Unlock()

	// Make sure the new record is "better" than the record we have locally.
	// This prevents a record with for example a lower sequence number from
	// overwriting a record with a higher sequence number.
	existing, err := dht.getRecordFromDatastore(ctx, dskey)
	if err != nil {
		return nil, err
	}

	if existing != nil {
		recs := [][]byte{rec.GetValue(), existing.GetValue()}
		i, err := dht.Validator.Select(string(rec.GetKey()), recs)
		if err != nil {
			logger.Warnw("dht record passed validation but failed select", "from", p, "key", internal.LoggableRecordKeyBytes(rec.GetKey()), "error", err)
			return nil, err
		}
		if i != 0 {
			logger.Infow("DHT record in PUT older than existing record (ignoring)", "peer", p, "key", internal.LoggableRecordKeyBytes(rec.GetKey()))
			return nil, errors.New("old record")
		}
	}

	// record the time we receive every record
	rec.TimeReceived = u.FormatRFC3339(time.Now())

	data, err := proto.Marshal(rec)
	if err != nil {
		return nil, err
	}

	err = dht.datastore.Put(ctx, dskey, data)
	return pmes, err
}

// returns nil, nil when either nothing is found or the value found doesn't properly validate.
// returns nil, some_error when there's a *datastore* error (i.e., something goes very wrong)
func (dht *IpfsDHT) getRecordFromDatastore(ctx context.Context, dskey ds.Key) (*recpb.Record, error) {
	buf, err := dht.datastore.Get(ctx, dskey)
	if err == ds.ErrNotFound {
		return nil, nil
	}
	if err != nil {
		logger.Errorw("error retrieving record from datastore", "key", dskey, "error", err)
		return nil, err
	}
	rec := new(recpb.Record)
	err = proto.Unmarshal(buf, rec)
	if err != nil {
		// Bad data in datastore, log it but don't return an error, we'll just overwrite it
		logger.Errorw("failed to unmarshal record from datastore", "key", dskey, "error", err)
		return nil, nil
	}

	err = dht.Validator.Validate(string(rec.GetKey()), rec.GetValue())
	if err != nil {
		// Invalid record in datastore, probably expired but don't return an error,
		// we'll just overwrite it
		logger.Debugw("local record verify failed", "key", rec.GetKey(), "error", err)
		return nil, nil
	}

	return rec, nil
}

func (dht *IpfsDHT) handlePing(_ context.Context, p peer.ID, pmes *pb.Message) (*pb.Message, error) {
	logger.Debugf("%s Responding to ping from %s!\n", dht.self, p)
	return pmes, nil
}
|
||||
|
||||
func (dht *IpfsDHT) handleFindPeer(ctx context.Context, from peer.ID, pmes *pb.Message) (_ *pb.Message, _err error) {
|
||||
resp := pb.NewMessage(pmes.GetType(), nil, pmes.GetClusterLevel())
|
||||
var closest []peer.ID
|
||||
|
||||
if len(pmes.GetKey()) == 0 {
|
||||
return nil, fmt.Errorf("handleFindPeer with empty key")
|
||||
}
|
||||
|
||||
// if looking for self... special case where we send it on CloserPeers.
|
||||
targetPid := peer.ID(pmes.GetKey())
|
||||
closest = dht.betterPeersToQuery(pmes, from, dht.bucketSize)
|
||||
|
||||
// Never tell a peer about itself.
|
||||
if targetPid != from {
|
||||
// Add the target peer to the set of closest peers if
|
||||
// not already present in our routing table.
|
||||
//
|
||||
// Later, when we lookup known addresses for all peers
|
||||
// in this set, we'll prune this peer if we don't
|
||||
// _actually_ know where it is.
|
||||
found := false
|
||||
for _, p := range closest {
|
||||
if targetPid == p {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
closest = append(closest, targetPid)
|
||||
}
|
||||
}
|
||||
|
||||
if closest == nil {
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// TODO: pstore.PeerInfos should move to core (=> peerstore.AddrInfos).
|
||||
closestinfos := pstore.PeerInfos(dht.peerstore, closest)
|
||||
// possibly an over-allocation but this array is temporary anyways.
|
||||
withAddresses := make([]peer.AddrInfo, 0, len(closestinfos))
|
||||
for _, pi := range closestinfos {
|
||||
if len(pi.Addrs) > 0 {
|
||||
withAddresses = append(withAddresses, pi)
|
||||
}
|
||||
}
|
||||
|
||||
resp.CloserPeers = pb.PeerInfosToPBPeers(dht.host.Network(), withAddresses)
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (dht *IpfsDHT) handleGetProviders(ctx context.Context, p peer.ID, pmes *pb.Message) (_ *pb.Message, _err error) {
|
||||
key := pmes.GetKey()
|
||||
if len(key) > 80 {
|
||||
return nil, fmt.Errorf("handleGetProviders key size too large")
|
||||
} else if len(key) == 0 {
|
||||
return nil, fmt.Errorf("handleGetProviders key is empty")
|
||||
}
|
||||
|
||||
resp := pb.NewMessage(pmes.GetType(), pmes.GetKey(), pmes.GetClusterLevel())
|
||||
|
||||
// setup providers
|
||||
providers, err := dht.providerStore.GetProviders(ctx, key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
filtered := make([]peer.AddrInfo, len(providers))
|
||||
for i, provider := range providers {
|
||||
filtered[i] = peer.AddrInfo{
|
||||
ID: provider.ID,
|
||||
Addrs: dht.filterAddrs(provider.Addrs),
|
||||
}
|
||||
}
|
||||
|
||||
resp.ProviderPeers = pb.PeerInfosToPBPeers(dht.host.Network(), filtered)
|
||||
|
||||
// Also send closer peers.
|
||||
closer := dht.betterPeersToQuery(pmes, p, dht.bucketSize)
|
||||
if closer != nil {
|
||||
// TODO: pstore.PeerInfos should move to core (=> peerstore.AddrInfos).
|
||||
infos := pstore.PeerInfos(dht.peerstore, closer)
|
||||
resp.CloserPeers = pb.PeerInfosToPBPeers(dht.host.Network(), infos)
|
||||
}
|
||||
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (dht *IpfsDHT) handleAddProvider(ctx context.Context, p peer.ID, pmes *pb.Message) (_ *pb.Message, _err error) {
|
||||
key := pmes.GetKey()
|
||||
if len(key) > 80 {
|
||||
return nil, fmt.Errorf("handleAddProvider key size too large")
|
||||
} else if len(key) == 0 {
|
||||
return nil, fmt.Errorf("handleAddProvider key is empty")
|
||||
}
|
||||
|
||||
logger.Debugw("adding provider", "from", p, "key", internal.LoggableProviderRecordBytes(key))
|
||||
|
||||
// add provider should use the address given in the message
|
||||
pinfos := pb.PBPeersToPeerInfos(pmes.GetProviderPeers())
|
||||
for _, pi := range pinfos {
|
||||
if pi.ID != p {
|
||||
// we should ignore this provider record! not from originator.
|
||||
// (we should sign them and check signature later...)
|
||||
logger.Debugw("received provider from wrong peer", "from", p, "peer", pi.ID)
|
||||
continue
|
||||
}
|
||||
|
||||
if len(pi.Addrs) < 1 {
|
||||
logger.Debugw("no valid addresses for provider", "from", p)
|
||||
continue
|
||||
}
|
||||
|
||||
// We run the addrs filter after checking for the length,
|
||||
// this allows transient nodes with varying /p2p-circuit addresses to still have their announcement go through.
|
||||
addrs := dht.filterAddrs(pi.Addrs)
|
||||
dht.providerStore.AddProvider(ctx, key, peer.AddrInfo{ID: pi.ID, Addrs: addrs})
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func convertToDsKey(s []byte) ds.Key {
|
||||
return ds.NewKey(base32.RawStdEncoding.EncodeToString(s))
|
||||
}
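Not part of the change: a minimal, standalone sketch of the key-to-datastore-key mapping that convertToDsKey performs above. The record key is invented.

package main

import (
	"encoding/base32"
	"fmt"

	ds "github.com/ipfs/go-datastore"
)

func main() {
	recordKey := []byte("/pk/example") // hypothetical record key
	dskey := ds.NewKey(base32.RawStdEncoding.EncodeToString(recordKey))
	// the datastore key is the unpadded base32 form of the record key
	fmt.Println(dskey)
}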
|
141
go-libp2p-kad-dht/handlers_test.go
Normal file
@ -0,0 +1,141 @@
|
||||
package dht
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
proto "github.com/gogo/protobuf/proto"
|
||||
"github.com/libp2p/go-libp2p"
|
||||
pb "github.com/libp2p/go-libp2p-kad-dht/pb"
|
||||
recpb "github.com/libp2p/go-libp2p-record/pb"
|
||||
crypto "github.com/libp2p/go-libp2p/core/crypto"
|
||||
peer "github.com/libp2p/go-libp2p/core/peer"
|
||||
ma "github.com/multiformats/go-multiaddr"
|
||||
)
|
||||
|
||||
func TestCleanRecordSigned(t *testing.T) {
|
||||
actual := new(recpb.Record)
|
||||
actual.TimeReceived = "time"
|
||||
actual.Value = []byte("value")
|
||||
actual.Key = []byte("key")
|
||||
|
||||
cleanRecord(actual)
|
||||
actualBytes, err := proto.Marshal(actual)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
expected := new(recpb.Record)
|
||||
expected.Value = []byte("value")
|
||||
expected.Key = []byte("key")
|
||||
expectedBytes, err := proto.Marshal(expected)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if !bytes.Equal(actualBytes, expectedBytes) {
|
||||
t.Error("failed to clean record")
|
||||
}
|
||||
}
|
||||
|
||||
func TestCleanRecord(t *testing.T) {
|
||||
actual := new(recpb.Record)
|
||||
actual.TimeReceived = "time"
|
||||
actual.Key = []byte("key")
|
||||
actual.Value = []byte("value")
|
||||
|
||||
cleanRecord(actual)
|
||||
actualBytes, err := proto.Marshal(actual)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
expected := new(recpb.Record)
|
||||
expected.Key = []byte("key")
|
||||
expected.Value = []byte("value")
|
||||
expectedBytes, err := proto.Marshal(expected)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if !bytes.Equal(actualBytes, expectedBytes) {
|
||||
t.Error("failed to clean record")
|
||||
}
|
||||
}
|
||||
|
||||
func TestBadMessage(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
dht := setupDHT(ctx, t, false)
|
||||
|
||||
for _, typ := range []pb.Message_MessageType{
|
||||
pb.Message_PUT_VALUE, pb.Message_GET_VALUE, pb.Message_ADD_PROVIDER,
|
||||
pb.Message_GET_PROVIDERS, pb.Message_FIND_NODE,
|
||||
} {
|
||||
msg := &pb.Message{
|
||||
Type: typ,
|
||||
// explicitly avoid the key.
|
||||
}
|
||||
_, err := dht.handlerForMsgType(typ)(ctx, dht.Host().ID(), msg)
|
||||
if err == nil {
|
||||
t.Fatalf("expected processing message to fail for type %s", pb.Message_FIND_NODE)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkHandleFindPeer(b *testing.B) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
h, err := libp2p.New()
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
defer h.Close()
|
||||
|
||||
d, err := New(ctx, h)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
||||
rng := rand.New(rand.NewSource(150))
|
||||
var peers []peer.ID
|
||||
for i := 0; i < 1000; i++ {
|
||||
_, pubk, _ := crypto.GenerateEd25519Key(rng)
|
||||
id, err := peer.IDFromPublicKey(pubk)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
d.peerFound(id)
|
||||
|
||||
peers = append(peers, id)
|
||||
a, err := ma.NewMultiaddr(fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", 2000+i))
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
d.host.Peerstore().AddAddr(id, a, time.Minute*50)
|
||||
}
|
||||
|
||||
var reqs []*pb.Message
|
||||
for i := 0; i < b.N; i++ {
|
||||
reqs = append(reqs, &pb.Message{
|
||||
Key: []byte("asdasdasd"),
|
||||
})
|
||||
}
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
_, err = d.handleFindPeer(ctx, peers[0], reqs[i])
|
||||
if err != nil {
|
||||
b.Error(err)
|
||||
}
|
||||
}
|
||||
|
||||
}
|
172
go-libp2p-kad-dht/internal/config/config.go
Normal file
@ -0,0 +1,172 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/ipfs/boxo/ipns"
|
||||
ds "github.com/ipfs/go-datastore"
|
||||
dssync "github.com/ipfs/go-datastore/sync"
|
||||
"github.com/libp2p/go-libp2p-kad-dht/providers"
|
||||
"github.com/libp2p/go-libp2p-kbucket/peerdiversity"
|
||||
record "github.com/libp2p/go-libp2p-record"
|
||||
"github.com/libp2p/go-libp2p/core/host"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/libp2p/go-libp2p/core/protocol"
|
||||
ma "github.com/multiformats/go-multiaddr"
|
||||
)
|
||||
|
||||
// DefaultPrefix is the application specific prefix attached to all DHT protocols by default.
|
||||
const DefaultPrefix protocol.ID = "/ipfs"
|
||||
|
||||
const defaultBucketSize = 20
|
||||
|
||||
// ModeOpt describes what mode the dht should operate in
|
||||
type ModeOpt int
|
||||
|
||||
// QueryFilterFunc is a filter applied when considering peers to dial when querying
|
||||
type QueryFilterFunc func(dht interface{}, ai peer.AddrInfo) bool
|
||||
|
||||
// RouteTableFilterFunc is a filter applied when considering connections to keep in
|
||||
// the local route table.
|
||||
type RouteTableFilterFunc func(dht interface{}, p peer.ID) bool
|
||||
|
||||
// Config is a structure containing all the options that can be used when constructing a DHT.
|
||||
type Config struct {
|
||||
Datastore ds.Batching
|
||||
Validator record.Validator
|
||||
ValidatorChanged bool // if true implies that the validator has been changed and that Defaults should not be used
|
||||
Mode ModeOpt
|
||||
ProtocolPrefix protocol.ID
|
||||
V1ProtocolOverride protocol.ID
|
||||
BucketSize int
|
||||
Concurrency int
|
||||
Resiliency int
|
||||
MaxRecordAge time.Duration
|
||||
EnableProviders bool
|
||||
EnableValues bool
|
||||
ProviderStore providers.ProviderStore
|
||||
QueryPeerFilter QueryFilterFunc
|
||||
LookupCheckConcurrency int
|
||||
|
||||
RoutingTable struct {
|
||||
RefreshQueryTimeout time.Duration
|
||||
RefreshInterval time.Duration
|
||||
AutoRefresh bool
|
||||
LatencyTolerance time.Duration
|
||||
CheckInterval time.Duration
|
||||
PeerFilter RouteTableFilterFunc
|
||||
DiversityFilter peerdiversity.PeerIPGroupFilter
|
||||
}
|
||||
|
||||
BootstrapPeers func() []peer.AddrInfo
|
||||
AddressFilter func([]ma.Multiaddr) []ma.Multiaddr
|
||||
|
||||
// test specific Config options
|
||||
DisableFixLowPeers bool
|
||||
TestAddressUpdateProcessing bool
|
||||
|
||||
EnableOptimisticProvide bool
|
||||
OptimisticProvideJobsPoolSize int
|
||||
}
|
||||
|
||||
func EmptyQueryFilter(_ interface{}, ai peer.AddrInfo) bool { return true }
|
||||
func EmptyRTFilter(_ interface{}, p peer.ID) bool { return true }
|
||||
|
||||
// Apply applies the given options to this Config
|
||||
func (c *Config) Apply(opts ...Option) error {
|
||||
for i, opt := range opts {
|
||||
if err := opt(c); err != nil {
|
||||
return fmt.Errorf("dht option %d failed: %s", i, err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ApplyFallbacks sets default values that could not be applied during config creation since they are dependent
|
||||
// on other configuration parameters (e.g. optA is by default 2x optB) and/or on the Host
|
||||
func (c *Config) ApplyFallbacks(h host.Host) error {
|
||||
if !c.ValidatorChanged {
|
||||
nsval, ok := c.Validator.(record.NamespacedValidator)
|
||||
if ok {
|
||||
if _, pkFound := nsval["pk"]; !pkFound {
|
||||
nsval["pk"] = record.PublicKeyValidator{}
|
||||
}
|
||||
if _, ipnsFound := nsval["ipns"]; !ipnsFound {
|
||||
nsval["ipns"] = ipns.Validator{KeyBook: h.Peerstore()}
|
||||
}
|
||||
} else {
|
||||
return fmt.Errorf("the default Validator was changed without being marked as changed")
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Option DHT option type.
|
||||
type Option func(*Config) error
|
||||
|
||||
// Defaults are the default DHT options. This option will be automatically
|
||||
// prepended to any options you pass to the DHT constructor.
|
||||
var Defaults = func(o *Config) error {
|
||||
o.Validator = record.NamespacedValidator{}
|
||||
o.Datastore = dssync.MutexWrap(ds.NewMapDatastore())
|
||||
o.ProtocolPrefix = DefaultPrefix
|
||||
o.EnableProviders = true
|
||||
o.EnableValues = true
|
||||
o.QueryPeerFilter = EmptyQueryFilter
|
||||
|
||||
o.RoutingTable.LatencyTolerance = 10 * time.Second
|
||||
o.RoutingTable.RefreshQueryTimeout = 10 * time.Second
|
||||
o.RoutingTable.RefreshInterval = 10 * time.Minute
|
||||
o.RoutingTable.AutoRefresh = true
|
||||
o.RoutingTable.PeerFilter = EmptyRTFilter
|
||||
|
||||
o.MaxRecordAge = providers.ProvideValidity
|
||||
|
||||
o.BucketSize = defaultBucketSize
|
||||
o.Concurrency = 10
|
||||
o.Resiliency = 3
|
||||
o.LookupCheckConcurrency = 256
|
||||
|
||||
// MAGIC: It makes sense to set it to a multiple of OptProvReturnRatio * BucketSize. We chose a multiple of 4.
|
||||
o.OptimisticProvideJobsPoolSize = 60
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Config) Validate() error {
|
||||
if c.ProtocolPrefix != DefaultPrefix {
|
||||
return nil
|
||||
}
|
||||
if c.BucketSize != defaultBucketSize {
|
||||
return fmt.Errorf("protocol prefix %s must use bucket size %d", DefaultPrefix, defaultBucketSize)
|
||||
}
|
||||
if !c.EnableProviders {
|
||||
return fmt.Errorf("protocol prefix %s must have providers enabled", DefaultPrefix)
|
||||
}
|
||||
if !c.EnableValues {
|
||||
return fmt.Errorf("protocol prefix %s must have values enabled", DefaultPrefix)
|
||||
}
|
||||
|
||||
nsval, isNSVal := c.Validator.(record.NamespacedValidator)
|
||||
if !isNSVal {
|
||||
return fmt.Errorf("protocol prefix %s must use a namespaced Validator", DefaultPrefix)
|
||||
}
|
||||
|
||||
if len(nsval) != 2 {
|
||||
return fmt.Errorf("protocol prefix %s must have exactly two namespaced validators - /pk and /ipns", DefaultPrefix)
|
||||
}
|
||||
|
||||
if pkVal, pkValFound := nsval["pk"]; !pkValFound {
|
||||
return fmt.Errorf("protocol prefix %s must support the /pk namespaced Validator", DefaultPrefix)
|
||||
} else if _, ok := pkVal.(record.PublicKeyValidator); !ok {
|
||||
return fmt.Errorf("protocol prefix %s must use the record.PublicKeyValidator for the /pk namespace", DefaultPrefix)
|
||||
}
|
||||
|
||||
if ipnsVal, ipnsValFound := nsval["ipns"]; !ipnsValFound {
|
||||
return fmt.Errorf("protocol prefix %s must support the /ipns namespaced Validator", DefaultPrefix)
|
||||
} else if _, ok := ipnsVal.(ipns.Validator); !ok {
|
||||
return fmt.Errorf("protocol prefix %s must use ipns.Validator for the /ipns namespace", DefaultPrefix)
|
||||
}
|
||||
return nil
|
||||
}
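Not part of the change: a sketch of the call sequence a DHT constructor is expected to follow with this Config type, assuming the code sits in this package. The helper name is illustrative.

// newConfig is a hypothetical helper illustrating the intended order:
// Defaults, then user options, then host-dependent fallbacks, then validation.
func newConfig(h host.Host, userOpts ...Option) (*Config, error) {
	var cfg Config
	if err := cfg.Apply(append([]Option{Defaults}, userOpts...)...); err != nil {
		return nil, err
	}
	// fills in host-dependent defaults such as the /ipns validator
	if err := cfg.ApplyFallbacks(h); err != nil {
		return nil, err
	}
	// extra constraints are only enforced for the default /ipfs prefix
	if err := cfg.Validate(); err != nil {
		return nil, err
	}
	return &cfg, nil
}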
|
16
go-libp2p-kad-dht/internal/config/quorum.go
Normal file
@ -0,0 +1,16 @@
|
||||
package config
|
||||
|
||||
import "github.com/libp2p/go-libp2p/core/routing"
|
||||
|
||||
type QuorumOptionKey struct{}
|
||||
|
||||
const defaultQuorum = 0
|
||||
|
||||
// GetQuorum defaults to 0 if no option is found
|
||||
func GetQuorum(opts *routing.Options) int {
|
||||
responsesNeeded, ok := opts.Other[QuorumOptionKey{}].(int)
|
||||
if !ok {
|
||||
responsesNeeded = defaultQuorum
|
||||
}
|
||||
return responsesNeeded
|
||||
}
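Not part of the change: a hypothetical caller-side option showing how a quorum value would be stored under QuorumOptionKey so that GetQuorum above can read it back; the option the package actually exports may differ.

func withQuorum(n int) routing.Option {
	return func(opts *routing.Options) error {
		if opts.Other == nil {
			opts.Other = make(map[interface{}]interface{}, 1)
		}
		opts.Other[QuorumOptionKey{}] = n
		return nil
	}
}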
|
28
go-libp2p-kad-dht/internal/ctx_mutex.go
Normal file
@ -0,0 +1,28 @@
|
||||
package internal
|
||||
|
||||
import (
|
||||
"context"
|
||||
)
|
||||
|
||||
type CtxMutex chan struct{}
|
||||
|
||||
func NewCtxMutex() CtxMutex {
|
||||
return make(CtxMutex, 1)
|
||||
}
|
||||
|
||||
func (m CtxMutex) Lock(ctx context.Context) error {
|
||||
select {
|
||||
case m <- struct{}{}:
|
||||
return nil
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
}
|
||||
}
|
||||
|
||||
func (m CtxMutex) Unlock() {
|
||||
select {
|
||||
case <-m:
|
||||
default:
|
||||
panic("not locked")
|
||||
}
|
||||
}
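Not part of the change: a usage sketch for CtxMutex, assuming it sits in the same package. Lock may fail when the context is cancelled, in which case Unlock must not be called.

func doLocked(ctx context.Context, mu CtxMutex, work func()) error {
	if err := mu.Lock(ctx); err != nil {
		return err // context cancelled or deadline exceeded while waiting
	}
	defer mu.Unlock()
	work()
	return nil
}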
|
5
go-libp2p-kad-dht/internal/errors.go
Normal file
@ -0,0 +1,5 @@
|
||||
package internal
|
||||
|
||||
import "errors"
|
||||
|
||||
var ErrIncorrectRecord = errors.New("received incorrect record")
|
92
go-libp2p-kad-dht/internal/logging.go
Normal file
@ -0,0 +1,92 @@
|
||||
package internal
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/ipfs/go-cid"
|
||||
"github.com/multiformats/go-multibase"
|
||||
"github.com/multiformats/go-multihash"
|
||||
)
|
||||
|
||||
func multibaseB32Encode(k []byte) string {
|
||||
res, err := multibase.Encode(multibase.Base32, k)
|
||||
if err != nil {
|
||||
// Should be unreachable
|
||||
panic(err)
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func tryFormatLoggableRecordKey(k string) (string, error) {
|
||||
if len(k) == 0 {
|
||||
return "", fmt.Errorf("LoggableRecordKey is empty")
|
||||
}
|
||||
var proto, cstr string
|
||||
if k[0] == '/' {
|
||||
// it's a path (probably)
|
||||
protoEnd := strings.IndexByte(k[1:], '/')
|
||||
if protoEnd < 0 {
|
||||
return "", fmt.Errorf("LoggableRecordKey starts with '/' but is not a path: %s", multibaseB32Encode([]byte(k)))
|
||||
}
|
||||
proto = k[1 : protoEnd+1]
|
||||
cstr = k[protoEnd+2:]
|
||||
|
||||
encStr := multibaseB32Encode([]byte(cstr))
|
||||
return fmt.Sprintf("/%s/%s", proto, encStr), nil
|
||||
}
|
||||
|
||||
return "", fmt.Errorf("LoggableRecordKey is not a path: %s", multibaseB32Encode([]byte(cstr)))
|
||||
}
|
||||
|
||||
type LoggableRecordKeyString string
|
||||
|
||||
func (lk LoggableRecordKeyString) String() string {
|
||||
k := string(lk)
|
||||
newKey, err := tryFormatLoggableRecordKey(k)
|
||||
if err == nil {
|
||||
return newKey
|
||||
}
|
||||
return err.Error()
|
||||
}
|
||||
|
||||
type LoggableRecordKeyBytes []byte
|
||||
|
||||
func (lk LoggableRecordKeyBytes) String() string {
|
||||
k := string(lk)
|
||||
newKey, err := tryFormatLoggableRecordKey(k)
|
||||
if err == nil {
|
||||
return newKey
|
||||
}
|
||||
return err.Error()
|
||||
}
|
||||
|
||||
type LoggableProviderRecordBytes []byte
|
||||
|
||||
func (lk LoggableProviderRecordBytes) String() string {
|
||||
newKey, err := tryFormatLoggableProviderKey(lk)
|
||||
if err == nil {
|
||||
return newKey
|
||||
}
|
||||
return err.Error()
|
||||
}
|
||||
|
||||
func tryFormatLoggableProviderKey(k []byte) (string, error) {
|
||||
if len(k) == 0 {
|
||||
return "", fmt.Errorf("LoggableProviderKey is empty")
|
||||
}
|
||||
|
||||
encodedKey := multibaseB32Encode(k)
|
||||
|
||||
// The DHT used to provide CIDs, but now provides multihashes
|
||||
// TODO: Drop this when enough of the network has upgraded
|
||||
if _, err := cid.Cast(k); err == nil {
|
||||
return encodedKey, nil
|
||||
}
|
||||
|
||||
if _, err := multihash.Cast(k); err == nil {
|
||||
return encodedKey, nil
|
||||
}
|
||||
|
||||
return "", fmt.Errorf("LoggableProviderKey is not a Multihash or CID: %s", encodedKey)
|
||||
}
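Not part of the change: a small sketch (the helper name and key are invented) showing the effect of the String() implementations above.

func exampleLoggableKey() string {
	// a path-style key with a binary component
	k := LoggableRecordKeyString("/ipns/" + string([]byte{0x01, 0x02}))
	// returns "/ipns/<multibase base32>" instead of leaking raw bytes into logs
	return k.String()
}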
|
76
go-libp2p-kad-dht/internal/logging_test.go
Normal file
@ -0,0 +1,76 @@
|
||||
package internal
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
cid "github.com/ipfs/go-cid"
|
||||
)
|
||||
|
||||
func TestLoggableRecordKey(t *testing.T) {
|
||||
c, err := cid.Decode("QmfUvYQhL2GinafMbPDYz7VFoZv4iiuLuR33aRsPurXGag")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
k, err := tryFormatLoggableRecordKey("/proto/" + string(c.Bytes()))
|
||||
if err != nil {
|
||||
t.Errorf("failed to format key: %s", err)
|
||||
}
|
||||
if k != "/proto/"+multibaseB32Encode(c.Bytes()) {
|
||||
t.Error("expected path to be preserved as a loggable key")
|
||||
}
|
||||
|
||||
for _, s := range []string{"/bla", "", "bla bla"} {
|
||||
if _, err := tryFormatLoggableRecordKey(s); err == nil {
|
||||
t.Errorf("expected to fail formatting: %s", s)
|
||||
}
|
||||
}
|
||||
|
||||
for _, s := range []string{"/bla/asdf", "/a/b/c"} {
|
||||
if _, err := tryFormatLoggableRecordKey(s); err != nil {
|
||||
t.Errorf("expected to be formatable: %s", s)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestLoggableProviderKey(t *testing.T) {
|
||||
c0, err := cid.Decode("QmfUvYQhL2GinafMbPDYz7VFoZv4iiuLuR33aRsPurXGag")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Test logging CIDv0 provider
|
||||
b32MH := multibaseB32Encode(c0.Hash())
|
||||
k, err := tryFormatLoggableProviderKey(c0.Bytes())
|
||||
if err != nil {
|
||||
t.Errorf("failed to format key: %s", err)
|
||||
}
|
||||
if k != b32MH {
|
||||
t.Error("expected cidv0 to be converted into base32 multihash")
|
||||
}
|
||||
|
||||
// Test logging CIDv1 provider (from older DHT implementations)
|
||||
c1 := cid.NewCidV1(cid.DagProtobuf, c0.Hash())
|
||||
k, err = tryFormatLoggableProviderKey(c1.Hash())
|
||||
if err != nil {
|
||||
t.Errorf("failed to format key: %s", err)
|
||||
}
|
||||
if k != b32MH {
|
||||
t.Error("expected cidv1 to be converted into base32 multihash")
|
||||
}
|
||||
|
||||
// Test logging multihash provider
|
||||
k, err = tryFormatLoggableProviderKey(c1.Hash())
|
||||
if err != nil {
|
||||
t.Errorf("failed to format key: %s", err)
|
||||
}
|
||||
if k != b32MH {
|
||||
t.Error("expected multihash to be displayed in base32")
|
||||
}
|
||||
|
||||
for _, s := range []string{"/bla", "", "bla bla", "/bla/asdf", "/a/b/c"} {
|
||||
if _, err := tryFormatLoggableProviderKey([]byte(s)); err == nil {
|
||||
t.Errorf("expected to fail formatting: %s", s)
|
||||
}
|
||||
}
|
||||
}
|
387
go-libp2p-kad-dht/internal/net/message_manager.go
Normal file
@ -0,0 +1,387 @@
|
||||
package net
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/host"
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/libp2p/go-libp2p/core/protocol"
|
||||
|
||||
logging "github.com/ipfs/go-log/v2"
|
||||
"github.com/libp2p/go-msgio"
|
||||
|
||||
//lint:ignore SA1019 TODO migrate away from gogo pb
|
||||
"github.com/libp2p/go-msgio/protoio"
|
||||
|
||||
"go.opencensus.io/stats"
|
||||
"go.opencensus.io/tag"
|
||||
|
||||
"github.com/libp2p/go-libp2p-kad-dht/internal"
|
||||
"github.com/libp2p/go-libp2p-kad-dht/metrics"
|
||||
pb "github.com/libp2p/go-libp2p-kad-dht/pb"
|
||||
)
|
||||
|
||||
var dhtReadMessageTimeout = 10 * time.Second
|
||||
|
||||
// ErrReadTimeout is an error that occurs when no message is read within the timeout period.
|
||||
var ErrReadTimeout = fmt.Errorf("timed out reading response")
|
||||
|
||||
var logger = logging.Logger("dht")
|
||||
|
||||
// messageSenderImpl is responsible for sending requests and messages to peers efficiently, including reuse of streams.
|
||||
// It also tracks metrics for sent requests and messages.
|
||||
type messageSenderImpl struct {
|
||||
host host.Host // the network services we need
|
||||
smlk sync.Mutex
|
||||
strmap map[peer.ID]*peerMessageSender
|
||||
protocols []protocol.ID
|
||||
}
|
||||
|
||||
func NewMessageSenderImpl(h host.Host, protos []protocol.ID) pb.MessageSenderWithDisconnect {
|
||||
return &messageSenderImpl{
|
||||
host: h,
|
||||
strmap: make(map[peer.ID]*peerMessageSender),
|
||||
protocols: protos,
|
||||
}
|
||||
}
|
||||
|
||||
func (m *messageSenderImpl) OnDisconnect(ctx context.Context, p peer.ID) {
|
||||
m.smlk.Lock()
|
||||
defer m.smlk.Unlock()
|
||||
ms, ok := m.strmap[p]
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
delete(m.strmap, p)
|
||||
|
||||
// Do this asynchronously as ms.lk can block for a while.
|
||||
go func() {
|
||||
if err := ms.lk.Lock(ctx); err != nil {
|
||||
return
|
||||
}
|
||||
defer ms.lk.Unlock()
|
||||
ms.invalidate()
|
||||
}()
|
||||
}
|
||||
|
||||
// SendRequest sends out a request, but also makes sure to
|
||||
// measure the RTT for latency measurements.
|
||||
func (m *messageSenderImpl) SendRequest(ctx context.Context, p peer.ID, pmes *pb.Message) (*pb.Message, error) {
|
||||
ctx, _ = tag.New(ctx, metrics.UpsertMessageType(pmes))
|
||||
|
||||
ms, err := m.messageSenderForPeer(ctx, p)
|
||||
if err != nil {
|
||||
stats.Record(ctx,
|
||||
metrics.SentRequests.M(1),
|
||||
metrics.SentRequestErrors.M(1),
|
||||
)
|
||||
logger.Debugw("request failed to open message sender", "error", err, "to", p)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
start := time.Now()
|
||||
|
||||
rpmes, err := ms.SendRequest(ctx, pmes)
|
||||
if err != nil {
|
||||
stats.Record(ctx,
|
||||
metrics.SentRequests.M(1),
|
||||
metrics.SentRequestErrors.M(1),
|
||||
)
|
||||
logger.Debugw("request failed", "error", err, "to", p)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
stats.Record(ctx,
|
||||
metrics.SentRequests.M(1),
|
||||
metrics.SentBytes.M(int64(pmes.Size())),
|
||||
metrics.OutboundRequestLatency.M(float64(time.Since(start))/float64(time.Millisecond)),
|
||||
)
|
||||
m.host.Peerstore().RecordLatency(p, time.Since(start))
|
||||
return rpmes, nil
|
||||
}
|
||||
|
||||
// SendMessage sends out a message
|
||||
func (m *messageSenderImpl) SendMessage(ctx context.Context, p peer.ID, pmes *pb.Message) error {
|
||||
ctx, _ = tag.New(ctx, metrics.UpsertMessageType(pmes))
|
||||
|
||||
ms, err := m.messageSenderForPeer(ctx, p)
|
||||
if err != nil {
|
||||
stats.Record(ctx,
|
||||
metrics.SentMessages.M(1),
|
||||
metrics.SentMessageErrors.M(1),
|
||||
)
|
||||
logger.Debugw("message failed to open message sender", "error", err, "to", p)
|
||||
return err
|
||||
}
|
||||
|
||||
if err := ms.SendMessage(ctx, pmes); err != nil {
|
||||
stats.Record(ctx,
|
||||
metrics.SentMessages.M(1),
|
||||
metrics.SentMessageErrors.M(1),
|
||||
)
|
||||
logger.Debugw("message failed", "error", err, "to", p)
|
||||
return err
|
||||
}
|
||||
|
||||
stats.Record(ctx,
|
||||
metrics.SentMessages.M(1),
|
||||
metrics.SentBytes.M(int64(pmes.Size())),
|
||||
)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *messageSenderImpl) messageSenderForPeer(ctx context.Context, p peer.ID) (*peerMessageSender, error) {
|
||||
m.smlk.Lock()
|
||||
ms, ok := m.strmap[p]
|
||||
if ok {
|
||||
m.smlk.Unlock()
|
||||
return ms, nil
|
||||
}
|
||||
ms = &peerMessageSender{p: p, m: m, lk: internal.NewCtxMutex()}
|
||||
m.strmap[p] = ms
|
||||
m.smlk.Unlock()
|
||||
|
||||
if err := ms.prepOrInvalidate(ctx); err != nil {
|
||||
m.smlk.Lock()
|
||||
defer m.smlk.Unlock()
|
||||
|
||||
if msCur, ok := m.strmap[p]; ok {
|
||||
// Changed. Use the new one, old one is invalid and
|
||||
// not in the map so we can just throw it away.
|
||||
if ms != msCur {
|
||||
return msCur, nil
|
||||
}
|
||||
// Not changed, remove the now invalid stream from the
|
||||
// map.
|
||||
delete(m.strmap, p)
|
||||
}
|
||||
// Invalid but not in map. Must have been removed by a disconnect.
|
||||
return nil, err
|
||||
}
|
||||
// All ready to go.
|
||||
return ms, nil
|
||||
}
|
||||
|
||||
// peerMessageSender is responsible for sending requests and messages to a particular peer
|
||||
type peerMessageSender struct {
|
||||
s network.Stream
|
||||
r msgio.ReadCloser
|
||||
lk internal.CtxMutex
|
||||
p peer.ID
|
||||
m *messageSenderImpl
|
||||
|
||||
invalid bool
|
||||
singleMes int
|
||||
}
|
||||
|
||||
// invalidate is called before this peerMessageSender is removed from the strmap.
|
||||
// It prevents the peerMessageSender from being reused/reinitialized and then
|
||||
// forgotten (leaving the stream open).
|
||||
func (ms *peerMessageSender) invalidate() {
|
||||
ms.invalid = true
|
||||
if ms.s != nil {
|
||||
_ = ms.s.Reset()
|
||||
ms.s = nil
|
||||
}
|
||||
}
|
||||
|
||||
func (ms *peerMessageSender) prepOrInvalidate(ctx context.Context) error {
|
||||
if err := ms.lk.Lock(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
defer ms.lk.Unlock()
|
||||
|
||||
if err := ms.prep(ctx); err != nil {
|
||||
ms.invalidate()
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ms *peerMessageSender) prep(ctx context.Context) error {
|
||||
if ms.invalid {
|
||||
return fmt.Errorf("message sender has been invalidated")
|
||||
}
|
||||
if ms.s != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// We only want to speak to peers using our primary protocols. We do not want to query any peer that only speaks
|
||||
// one of the secondary "server" protocols that we happen to support (e.g. older nodes that we can respond to for
|
||||
// backwards compatibility reasons).
|
||||
nstr, err := ms.m.host.NewStream(ctx, ms.p, ms.m.protocols...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ms.r = msgio.NewVarintReaderSize(nstr, network.MessageSizeMax)
|
||||
ms.s = nstr
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// streamReuseTries is the number of times we will try to reuse a stream to a
|
||||
// given peer before giving up and reverting to the old one-message-per-stream
|
||||
// behaviour.
|
||||
const streamReuseTries = 3
|
||||
|
||||
func (ms *peerMessageSender) SendMessage(ctx context.Context, pmes *pb.Message) error {
|
||||
if err := ms.lk.Lock(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
defer ms.lk.Unlock()
|
||||
|
||||
retry := false
|
||||
for {
|
||||
if err := ms.prep(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := ms.writeMsg(pmes); err != nil {
|
||||
_ = ms.s.Reset()
|
||||
ms.s = nil
|
||||
|
||||
if retry {
|
||||
logger.Debugw("error writing message", "error", err)
|
||||
return err
|
||||
}
|
||||
logger.Debugw("error writing message", "error", err, "retrying", true)
|
||||
retry = true
|
||||
continue
|
||||
}
|
||||
|
||||
var err error
|
||||
if ms.singleMes > streamReuseTries {
|
||||
err = ms.s.Close()
|
||||
ms.s = nil
|
||||
} else if retry {
|
||||
ms.singleMes++
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
func (ms *peerMessageSender) SendRequest(ctx context.Context, pmes *pb.Message) (*pb.Message, error) {
|
||||
if err := ms.lk.Lock(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer ms.lk.Unlock()
|
||||
|
||||
retry := false
|
||||
for {
|
||||
if err := ms.prep(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := ms.writeMsg(pmes); err != nil {
|
||||
_ = ms.s.Reset()
|
||||
ms.s = nil
|
||||
|
||||
if retry {
|
||||
logger.Debugw("error writing message", "error", err)
|
||||
return nil, err
|
||||
}
|
||||
logger.Debugw("error writing message", "error", err, "retrying", true)
|
||||
retry = true
|
||||
continue
|
||||
}
|
||||
|
||||
mes := new(pb.Message)
|
||||
if err := ms.ctxReadMsg(ctx, mes); err != nil {
|
||||
_ = ms.s.Reset()
|
||||
ms.s = nil
|
||||
if err == context.Canceled {
|
||||
// retry would be same error
|
||||
return nil, err
|
||||
}
|
||||
if retry {
|
||||
logger.Debugw("error reading message", "error", err)
|
||||
return nil, err
|
||||
}
|
||||
logger.Debugw("error reading message", "error", err, "retrying", true)
|
||||
retry = true
|
||||
continue
|
||||
}
|
||||
|
||||
var err error
|
||||
if ms.singleMes > streamReuseTries {
|
||||
err = ms.s.Close()
|
||||
ms.s = nil
|
||||
} else if retry {
|
||||
ms.singleMes++
|
||||
}
|
||||
|
||||
return mes, err
|
||||
}
|
||||
}
|
||||
|
||||
func (ms *peerMessageSender) writeMsg(pmes *pb.Message) error {
|
||||
return WriteMsg(ms.s, pmes)
|
||||
}
|
||||
|
||||
func (ms *peerMessageSender) ctxReadMsg(ctx context.Context, mes *pb.Message) error {
|
||||
errc := make(chan error, 1)
|
||||
go func(r msgio.ReadCloser) {
|
||||
defer close(errc)
|
||||
bytes, err := r.ReadMsg()
|
||||
defer r.ReleaseMsg(bytes)
|
||||
if err != nil {
|
||||
errc <- err
|
||||
return
|
||||
}
|
||||
errc <- mes.Unmarshal(bytes)
|
||||
}(ms.r)
|
||||
|
||||
t := time.NewTimer(dhtReadMessageTimeout)
|
||||
defer t.Stop()
|
||||
|
||||
select {
|
||||
case err := <-errc:
|
||||
return err
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
case <-t.C:
|
||||
return ErrReadTimeout
|
||||
}
|
||||
}
|
||||
|
||||
// The Protobuf writer performs multiple small writes when writing a message.
|
||||
// We need to buffer those writes, to make sure that we're not sending a new
|
||||
// packet for every single write.
|
||||
type bufferedDelimitedWriter struct {
|
||||
*bufio.Writer
|
||||
protoio.WriteCloser
|
||||
}
|
||||
|
||||
var writerPool = sync.Pool{
|
||||
New: func() interface{} {
|
||||
w := bufio.NewWriter(nil)
|
||||
return &bufferedDelimitedWriter{
|
||||
Writer: w,
|
||||
WriteCloser: protoio.NewDelimitedWriter(w),
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
func WriteMsg(w io.Writer, mes *pb.Message) error {
|
||||
bw := writerPool.Get().(*bufferedDelimitedWriter)
|
||||
bw.Reset(w)
|
||||
err := bw.WriteMsg(mes)
|
||||
if err == nil {
|
||||
err = bw.Flush()
|
||||
}
|
||||
bw.Reset(nil)
|
||||
writerPool.Put(bw)
|
||||
return err
|
||||
}
|
||||
|
||||
func (w *bufferedDelimitedWriter) Flush() error {
|
||||
return w.Writer.Flush()
|
||||
}
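Not part of the change: a sketch of how a caller inside this package might wire up the sender defined above and issue a single FIND_NODE request. The protocol ID, helper name, and target key are invented.

func exampleSendFindNode(ctx context.Context, h host.Host, p peer.ID) (*pb.Message, error) {
	sender := NewMessageSenderImpl(h, []protocol.ID{"/ipfs/kad/1.0.0"})
	req := pb.NewMessage(pb.Message_FIND_NODE, []byte("target-key"), 0)
	// reuses a stream to p across calls; see peerMessageSender above
	return sender.SendRequest(ctx, p, req)
}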
|
39
go-libp2p-kad-dht/internal/net/message_manager_test.go
Normal file
@ -0,0 +1,39 @@
|
||||
package net
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/libp2p/go-libp2p/core/protocol"
|
||||
|
||||
bhost "github.com/libp2p/go-libp2p/p2p/host/basic"
|
||||
swarmt "github.com/libp2p/go-libp2p/p2p/net/swarm/testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestInvalidMessageSenderTracking(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
foo := peer.ID("asdasd")
|
||||
|
||||
h, err := bhost.NewHost(swarmt.GenSwarm(t, swarmt.OptDisableReuseport), new(bhost.HostOpts))
|
||||
require.NoError(t, err)
|
||||
h.Start()
|
||||
defer h.Close()
|
||||
|
||||
msgSender := NewMessageSenderImpl(h, []protocol.ID{"/test/kad/1.0.0"}).(*messageSenderImpl)
|
||||
|
||||
_, err = msgSender.messageSenderForPeer(ctx, foo)
|
||||
require.Error(t, err, "should have failed to find message sender")
|
||||
|
||||
msgSender.smlk.Lock()
|
||||
mscnt := len(msgSender.strmap)
|
||||
msgSender.smlk.Unlock()
|
||||
|
||||
if mscnt > 0 {
|
||||
t.Fatal("should have no message senders in map")
|
||||
}
|
||||
}
|
31
go-libp2p-kad-dht/internal/testing/helper.go
Normal file
@ -0,0 +1,31 @@
|
||||
package testing
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
)
|
||||
|
||||
type TestValidator struct{}
|
||||
|
||||
func (TestValidator) Select(_ string, bs [][]byte) (int, error) {
|
||||
index := -1
|
||||
for i, b := range bs {
|
||||
if bytes.Equal(b, []byte("newer")) {
|
||||
index = i
|
||||
} else if bytes.Equal(b, []byte("valid")) {
|
||||
if index == -1 {
|
||||
index = i
|
||||
}
|
||||
}
|
||||
}
|
||||
if index == -1 {
|
||||
return -1, errors.New("no rec found")
|
||||
}
|
||||
return index, nil
|
||||
}
|
||||
func (TestValidator) Validate(_ string, b []byte) error {
|
||||
if bytes.Equal(b, []byte("expired")) {
|
||||
return errors.New("expired")
|
||||
}
|
||||
return nil
|
||||
}
|
32
go-libp2p-kad-dht/internal/tracing.go
Normal file
@ -0,0 +1,32 @@
|
||||
package internal
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"unicode/utf8"
|
||||
|
||||
"github.com/multiformats/go-multibase"
|
||||
"go.opentelemetry.io/otel"
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/trace"
|
||||
)
|
||||
|
||||
func StartSpan(ctx context.Context, name string, opts ...trace.SpanStartOption) (context.Context, trace.Span) {
|
||||
return otel.Tracer("go-libp2p-kad-dht").Start(ctx, fmt.Sprintf("KademliaDHT.%s", name), opts...)
|
||||
}
|
||||
|
||||
// KeyAsAttribute formats a DHT key into a suitable tracing attribute.
|
||||
// DHT keys can be either valid utf-8 or binary, when they are derived from, for example, a multihash.
|
||||
// Tracing (and notably OpenTelemetry+grpc exporter) requires valid utf-8 for string attributes.
|
||||
func KeyAsAttribute(name string, key string) attribute.KeyValue {
|
||||
b := []byte(key)
|
||||
if utf8.Valid(b) {
|
||||
return attribute.String(name, key)
|
||||
}
|
||||
encoded, err := multibase.Encode(multibase.Base58BTC, b)
|
||||
if err != nil {
|
||||
// should be unreachable
|
||||
panic(err)
|
||||
}
|
||||
return attribute.String(name, encoded)
|
||||
}
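Not part of the change: a usage sketch for the helpers above, assuming it sits in this package; the operation and helper names are invented.

func tracedOperation(ctx context.Context, key string) {
	ctx, span := StartSpan(ctx, "ExampleOperation", trace.WithAttributes(KeyAsAttribute("Key", key)))
	defer span.End()
	_ = ctx // ... perform the operation using ctx ...
}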
|
7
go-libp2p-kad-dht/log_test.go
Normal file
@ -0,0 +1,7 @@
|
||||
package dht
|
||||
|
||||
import "log"
|
||||
|
||||
func init() {
|
||||
log.SetFlags(log.Flags() | log.Llongfile)
|
||||
}
|
85
go-libp2p-kad-dht/lookup.go
Normal file
@ -0,0 +1,85 @@
|
||||
package dht
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/libp2p/go-libp2p-kad-dht/internal"
|
||||
"github.com/libp2p/go-libp2p-kad-dht/metrics"
|
||||
"github.com/libp2p/go-libp2p-kad-dht/qpeerset"
|
||||
kb "github.com/libp2p/go-libp2p-kbucket"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/libp2p/go-libp2p/core/routing"
|
||||
"go.opentelemetry.io/otel/trace"
|
||||
)
|
||||
|
||||
// GetClosestPeers is a Kademlia 'node lookup' operation. Returns a channel of
|
||||
// the K closest peers to the given key.
|
||||
//
|
||||
// If the context is canceled, this function will return the context error along
|
||||
// with the closest K peers it has found so far.
|
||||
func (dht *IpfsDHT) GetClosestPeers(ctx context.Context, key string) ([]peer.ID, error) {
|
||||
ctx, span := internal.StartSpan(ctx, "IpfsDHT.GetClosestPeers", trace.WithAttributes(internal.KeyAsAttribute("Key", key)))
|
||||
defer span.End()
|
||||
|
||||
if key == "" {
|
||||
return nil, fmt.Errorf("can't lookup empty key")
|
||||
}
|
||||
|
||||
//TODO: I can break the interface! return []peer.ID
|
||||
lookupRes, err := dht.runLookupWithFollowup(ctx, key, dht.pmGetClosestPeers(key), func(*qpeerset.QueryPeerset) bool { return false })
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := ctx.Err(); err != nil || !lookupRes.completed {
|
||||
return lookupRes.peers, err
|
||||
}
|
||||
|
||||
// tracking lookup results for network size estimator
|
||||
if err = dht.nsEstimator.Track(key, lookupRes.closest); err != nil {
|
||||
logger.Warnf("network size estimator track peers: %s", err)
|
||||
}
|
||||
|
||||
if ns, err := dht.nsEstimator.NetworkSize(); err == nil {
|
||||
metrics.NetworkSize.M(int64(ns))
|
||||
}
|
||||
|
||||
// refresh the cpl for this key as the query was successful
|
||||
dht.routingTable.ResetCplRefreshedAtForID(kb.ConvertKey(key), time.Now())
|
||||
|
||||
return lookupRes.peers, nil
|
||||
}
|
||||
|
||||
// pmGetClosestPeers is the protocol messenger version of the GetClosestPeers queryFn.
|
||||
func (dht *IpfsDHT) pmGetClosestPeers(key string) queryFn {
|
||||
return func(ctx context.Context, p peer.ID) ([]*peer.AddrInfo, error) {
|
||||
// For DHT query command
|
||||
routing.PublishQueryEvent(ctx, &routing.QueryEvent{
|
||||
Type: routing.SendingQuery,
|
||||
ID: p,
|
||||
})
|
||||
|
||||
peers, err := dht.protoMessenger.GetClosestPeers(ctx, p, peer.ID(key))
|
||||
if err != nil {
|
||||
logger.Debugf("error getting closer peers: %s", err)
|
||||
routing.PublishQueryEvent(ctx, &routing.QueryEvent{
|
||||
Type: routing.QueryError,
|
||||
ID: p,
|
||||
Extra: err.Error(),
|
||||
})
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// For DHT query command
|
||||
routing.PublishQueryEvent(ctx, &routing.QueryEvent{
|
||||
Type: routing.PeerResponse,
|
||||
ID: p,
|
||||
Responses: peers,
|
||||
})
|
||||
|
||||
return peers, err
|
||||
}
|
||||
}
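Not part of the change: a usage sketch (the helper name is invented) showing that GetClosestPeers can return both a partial peer list and a context error, so callers should inspect the peers even when err is non-nil.

func exampleClosestPeers(ctx context.Context, dht *IpfsDHT, key string) {
	peers, err := dht.GetClosestPeers(ctx, key)
	for _, p := range peers {
		logger.Debugf("close peer: %s", p)
	}
	if err != nil {
		logger.Debugf("lookup ended early: %s", err)
	}
}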
|
313
go-libp2p-kad-dht/lookup_optim.go
Normal file
@ -0,0 +1,313 @@
|
||||
package dht
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/libp2p/go-libp2p-kad-dht/metrics"
|
||||
"github.com/libp2p/go-libp2p-kad-dht/netsize"
|
||||
"github.com/libp2p/go-libp2p-kad-dht/qpeerset"
|
||||
kb "github.com/libp2p/go-libp2p-kbucket"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/multiformats/go-multihash"
|
||||
ks "github.com/whyrusleeping/go-keyspace"
|
||||
"gonum.org/v1/gonum/mathext"
|
||||
)
|
||||
|
||||
const (
|
||||
// optProvIndividualThresholdCertainty describes how sure we want to be that an individual peer that
|
||||
// we find during walking the DHT actually belongs to the k-closest peers based on the current network size
|
||||
// estimation.
|
||||
optProvIndividualThresholdCertainty = 0.9
|
||||
|
||||
// optProvSetThresholdStrictness describes the probability that the set of closest peers is actually further
|
||||
// away than the calculated set threshold. Put differently, what is the probability that we are too strict and
|
||||
// don't terminate the process early because we can't find any closer peers.
|
||||
optProvSetThresholdStrictness = 0.1
|
||||
|
||||
// optProvReturnRatio corresponds to how many ADD_PROVIDER RPCs must have completed (regardless of success)
|
||||
// before we return to the user. The ratio of 0.75 equals 15 RPCs as it is based on the Kademlia bucket size.
|
||||
optProvReturnRatio = 0.75
|
||||
)
|
||||
|
||||
type addProviderRPCState int
|
||||
|
||||
const (
|
||||
scheduled addProviderRPCState = iota + 1
|
||||
success
|
||||
failure
|
||||
)
|
||||
|
||||
type optimisticState struct {
|
||||
// context for all ADD_PROVIDER RPCs
|
||||
putCtx context.Context
|
||||
|
||||
// reference to the DHT
|
||||
dht *IpfsDHT
|
||||
|
||||
// the most recent network size estimation
|
||||
networkSize int32
|
||||
|
||||
// a channel indicating when an ADD_PROVIDER RPC completed (successful or not)
|
||||
doneChan chan struct{}
|
||||
|
||||
// tracks which peers we have stored the provider records with
|
||||
peerStatesLk sync.RWMutex
|
||||
peerStates map[peer.ID]addProviderRPCState
|
||||
|
||||
// the key to provide
|
||||
key string
|
||||
|
||||
// the key to provide transformed into the Kademlia key space
|
||||
ksKey ks.Key
|
||||
|
||||
// distance threshold for individual peers. If peers are closer than this number we store
|
||||
// the provider records right away.
|
||||
individualThreshold float64
|
||||
|
||||
// distance threshold for the set of bucketSize closest peers. If the average distance of the bucketSize
|
||||
// closest peers is below this number we stop the DHT walk and store the remaining provider records.
|
||||
// "remaining" because we have likely already stored some on peers that were below the individualThreshold.
|
||||
setThreshold float64
|
||||
|
||||
// number of completed (regardless of success) ADD_PROVIDER RPCs before we return control back to the user.
|
||||
returnThreshold int
|
||||
|
||||
// putProvDone counts the ADD_PROVIDER RPCs that have completed (successful and unsuccessful)
|
||||
putProvDone atomic.Int32
|
||||
}
|
||||
|
||||
func (dht *IpfsDHT) newOptimisticState(ctx context.Context, key string) (*optimisticState, error) {
|
||||
// get network size and err out if there is no reasonable estimate
|
||||
networkSize, err := dht.nsEstimator.NetworkSize()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
individualThreshold := mathext.GammaIncRegInv(float64(dht.bucketSize), 1-optProvIndividualThresholdCertainty) / float64(networkSize)
|
||||
setThreshold := mathext.GammaIncRegInv(float64(dht.bucketSize)/2.0+1, 1-optProvSetThresholdStrictness) / float64(networkSize)
|
||||
returnThreshold := int(math.Ceil(float64(dht.bucketSize) * optProvReturnRatio))
|
||||
|
||||
return &optimisticState{
|
||||
putCtx: ctx,
|
||||
dht: dht,
|
||||
key: key,
|
||||
doneChan: make(chan struct{}, returnThreshold), // buffered channel to not miss events
|
||||
ksKey: ks.XORKeySpace.Key([]byte(key)),
|
||||
networkSize: networkSize,
|
||||
peerStates: map[peer.ID]addProviderRPCState{},
|
||||
individualThreshold: individualThreshold,
|
||||
setThreshold: setThreshold,
|
||||
returnThreshold: returnThreshold,
|
||||
putProvDone: atomic.Int32{},
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (dht *IpfsDHT) optimisticProvide(outerCtx context.Context, keyMH multihash.Multihash) error {
|
||||
key := string(keyMH)
|
||||
|
||||
if key == "" {
|
||||
return fmt.Errorf("can't lookup empty key")
|
||||
}
|
||||
|
||||
// initialize new context for all putProvider operations.
|
||||
// We don't want to give the outer context to the put operations as we return early before all
|
||||
// put operations have finished to avoid the long tail of the latency distribution. If we
|
||||
// provided the outer context the put operations may be cancelled depending on what happens
|
||||
// with the context on the user side.
|
||||
putCtx, putCtxCancel := context.WithTimeout(context.Background(), time.Minute)
|
||||
|
||||
es, err := dht.newOptimisticState(putCtx, key)
|
||||
if err != nil {
|
||||
putCtxCancel()
|
||||
return err
|
||||
}
|
||||
|
||||
// initialize context that finishes when this function returns
|
||||
innerCtx, innerCtxCancel := context.WithCancel(outerCtx)
|
||||
defer innerCtxCancel()
|
||||
|
||||
go func() {
|
||||
select {
|
||||
case <-outerCtx.Done():
|
||||
// If the outer context gets cancelled while we're still in this function, we stop all
|
||||
// pending put operations.
|
||||
putCtxCancel()
|
||||
case <-innerCtx.Done():
|
||||
// We have returned from this function. Ignore cancellations of the outer context and continue
|
||||
// with the remaining put operations.
|
||||
}
|
||||
}()
|
||||
|
||||
lookupRes, err := dht.runLookupWithFollowup(outerCtx, key, dht.pmGetClosestPeers(key), es.stopFn)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Store the provider records with all the closest peers we haven't already contacted/scheduled interaction with.
|
||||
es.peerStatesLk.Lock()
|
||||
for _, p := range lookupRes.peers {
|
||||
if _, found := es.peerStates[p]; found {
|
||||
continue
|
||||
}
|
||||
|
||||
go es.putProviderRecord(p)
|
||||
es.peerStates[p] = scheduled
|
||||
}
|
||||
es.peerStatesLk.Unlock()
|
||||
|
||||
// wait until a threshold number of RPCs have completed
|
||||
es.waitForRPCs()
|
||||
|
||||
if err := outerCtx.Err(); err != nil || !lookupRes.completed { // likely the "completed" field is false but that's not a given
|
||||
return err
|
||||
}
|
||||
|
||||
// tracking lookup results for network size estimator as "completed" is true
|
||||
if err = dht.nsEstimator.Track(key, lookupRes.closest); err != nil {
|
||||
logger.Warnf("network size estimator track peers: %s", err)
|
||||
}
|
||||
|
||||
if ns, err := dht.nsEstimator.NetworkSize(); err == nil {
|
||||
metrics.NetworkSize.M(int64(ns))
|
||||
}
|
||||
|
||||
// refresh the cpl for this key as the query was successful
|
||||
dht.routingTable.ResetCplRefreshedAtForID(kb.ConvertKey(key), time.Now())
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (os *optimisticState) stopFn(qps *qpeerset.QueryPeerset) bool {
|
||||
os.peerStatesLk.Lock()
|
||||
defer os.peerStatesLk.Unlock()
|
||||
|
||||
// get currently known closest peers and check if any of them is already very close.
|
||||
// If so -> store provider records straight away.
|
||||
closest := qps.GetClosestNInStates(os.dht.bucketSize, qpeerset.PeerHeard, qpeerset.PeerWaiting, qpeerset.PeerQueried)
|
||||
distances := make([]float64, os.dht.bucketSize)
|
||||
for i, p := range closest {
|
||||
// calculate distance of peer p to the target key
|
||||
distances[i] = netsize.NormedDistance(p, os.ksKey)
|
||||
|
||||
// Check if we have already scheduled interaction or have actually interacted with that peer
|
||||
if _, found := os.peerStates[p]; found {
|
||||
continue
|
||||
}
|
||||
|
||||
// Check if peer is close enough to store the provider record with
|
||||
if distances[i] > os.individualThreshold {
|
||||
continue
|
||||
}
|
||||
|
||||
// peer is indeed very close already -> store the provider record directly with it!
|
||||
go os.putProviderRecord(p)
|
||||
|
||||
// keep track that we've scheduled storing a provider record with that peer
|
||||
os.peerStates[p] = scheduled
|
||||
}
|
||||
|
||||
// count number of peers we have scheduled to contact or have already successfully contacted via the above method
|
||||
scheduledAndSuccessCount := 0
|
||||
for _, s := range os.peerStates {
|
||||
if s == scheduled || s == success {
|
||||
scheduledAndSuccessCount += 1
|
||||
}
|
||||
}
|
||||
|
||||
// if we have already contacted/scheduled the RPC for more than bucketSize peers stop the procedure
|
||||
if scheduledAndSuccessCount >= os.dht.bucketSize {
|
||||
return true
|
||||
}
|
||||
|
||||
// calculate average distance of the set of closest peers
|
||||
sum := 0.0
|
||||
for _, d := range distances {
|
||||
sum += d
|
||||
}
|
||||
avg := sum / float64(len(distances))
|
||||
|
||||
// if the average is below the set threshold stop the procedure
|
||||
return avg < os.setThreshold
|
||||
}
|
||||
|
||||
func (os *optimisticState) putProviderRecord(pid peer.ID) {
|
||||
err := os.dht.protoMessenger.PutProviderAddrs(os.putCtx, pid, []byte(os.key), peer.AddrInfo{
|
||||
ID: os.dht.self,
|
||||
Addrs: os.dht.filterAddrs(os.dht.host.Addrs()),
|
||||
})
|
||||
os.peerStatesLk.Lock()
|
||||
if err != nil {
|
||||
os.peerStates[pid] = failure
|
||||
} else {
|
||||
os.peerStates[pid] = success
|
||||
}
|
||||
os.peerStatesLk.Unlock()
|
||||
|
||||
// indicate that this ADD_PROVIDER RPC has completed
|
||||
os.doneChan <- struct{}{}
|
||||
}
|
||||
|
||||
// waitForRPCs waits for a subset of ADD_PROVIDER RPCs to complete and then acquires a lease on
|
||||
// a bounded channel to return early to the user and prevent unbounded asynchronicity. If
|
||||
// there are already too many requests in-flight we are just waiting for our current set to
|
||||
// finish.
|
||||
func (os *optimisticState) waitForRPCs() {
|
||||
os.peerStatesLk.RLock()
|
||||
rpcCount := len(os.peerStates)
|
||||
os.peerStatesLk.RUnlock()
|
||||
|
||||
// returnThreshold can't be larger than the total number of issued RPCs
|
||||
if os.returnThreshold > rpcCount {
|
||||
os.returnThreshold = rpcCount
|
||||
}
|
||||
|
||||
// Wait until returnThreshold ADD_PROVIDER RPCs have returned
|
||||
for range os.doneChan {
|
||||
if int(os.putProvDone.Add(1)) == os.returnThreshold {
|
||||
break
|
||||
}
|
||||
}
|
||||
// At this point only a subset of all ADD_PROVIDER RPCs have completed.
|
||||
// We want to give control back to the user as soon as possible because
|
||||
// it is highly likely that at least one of the remaining RPCs will time
|
||||
// out and thus slow down the whole process. The provider records will
|
||||
// already be available with less than the total number of RPCs having
|
||||
// finished. This has been investigated here:
|
||||
// https://github.com/protocol/network-measurements/blob/master/results/rfm17-provider-record-liveness.md
|
||||
|
||||
// For the remaining ADD_PROVIDER RPCs try to acquire a lease on the optProvJobsPool channel.
|
||||
// If that worked we need to consume the doneChan and release the acquired lease on the
|
||||
// optProvJobsPool channel.
|
||||
remaining := rpcCount - int(os.putProvDone.Load())
|
||||
for i := 0; i < remaining; i++ {
|
||||
select {
|
||||
case os.dht.optProvJobsPool <- struct{}{}:
|
||||
// We were able to acquire a lease on the optProvJobsPool channel.
|
||||
// Consume doneChan to release the acquired lease again.
|
||||
go os.consumeDoneChan(rpcCount)
|
||||
case <-os.doneChan:
|
||||
// We were not able to acquire a lease but an ADD_PROVIDER RPC resolved.
|
||||
if int(os.putProvDone.Add(1)) == rpcCount {
|
||||
close(os.doneChan)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (os *optimisticState) consumeDoneChan(until int) {
|
||||
// Wait for an RPC to finish
|
||||
<-os.doneChan
|
||||
|
||||
// Release the acquired lease so others can get a spot
|
||||
<-os.dht.optProvJobsPool
|
||||
|
||||
// If all RPCs have finished, close the channel.
|
||||
if int(os.putProvDone.Add(1)) == until {
|
||||
close(os.doneChan)
|
||||
}
|
||||
}
|
106
go-libp2p-kad-dht/lookup_optim_test.go
Normal file
@ -0,0 +1,106 @@
|
||||
package dht
|
||||
|
||||
import (
|
||||
"context"
|
||||
"math/rand"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/libp2p/go-libp2p-kad-dht/netsize"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
)
|
||||
|
||||
func randInt(rng *rand.Rand, n, except int) int {
|
||||
for {
|
||||
r := rng.Intn(n)
|
||||
if r != except {
|
||||
return r
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestOptimisticProvide(t *testing.T) {
|
||||
rng := rand.New(rand.NewSource(time.Now().UnixNano()))
|
||||
|
||||
// Order of events:
|
||||
// 1. setup DHTs
|
||||
// 2. connect each DHT with three others (but not to itself)
|
||||
// 3. select random DHT to be the privileged one (performs the optimistic provide)
|
||||
// 4. initialize network size estimator of privileged DHT
|
||||
// 5. perform provides
|
||||
// 6. let all other DHTs perform the lookup for all provided CIDs
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
dhtCount := 21
|
||||
|
||||
dhts := setupDHTS(t, ctx, dhtCount, EnableOptimisticProvide())
|
||||
defer func() {
|
||||
for i := 0; i < dhtCount; i++ {
|
||||
dhts[i].Close()
|
||||
defer dhts[i].host.Close()
|
||||
}
|
||||
}()
|
||||
|
||||
// connect each DHT with three random others
|
||||
for i, dht := range dhts {
|
||||
for j := 0; j < 3; j++ {
|
||||
r := randInt(rng, dhtCount, i)
|
||||
connect(t, ctx, dhts[r], dht)
|
||||
}
|
||||
}
|
||||
|
||||
// select privileged DHT that will perform the provide operation
|
||||
privIdx := rng.Intn(dhtCount)
|
||||
privDHT := dhts[privIdx]
|
||||
|
||||
peerIDs := make([]peer.ID, 20)
|
||||
for i := 0; i < dhtCount; i++ {
|
||||
if i == privIdx {
|
||||
continue
|
||||
}
|
||||
|
||||
if i >= privIdx {
|
||||
peerIDs[i-1] = dhts[i-1].self
|
||||
} else {
|
||||
peerIDs[i] = dhts[i].self
|
||||
}
|
||||
}
|
||||
nse := netsize.NewEstimator(privDHT.self, privDHT.routingTable, privDHT.bucketSize)
|
||||
|
||||
for i := 0; i < 20; i++ {
|
||||
err := nse.Track(string(testCaseCids[i].Bytes()), peerIDs)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
privDHT.nsEstimator = nse
|
||||
|
||||
for _, k := range testCaseCids {
|
||||
logger.Debugf("announcing provider for %s", k)
|
||||
if err := privDHT.optimisticProvide(ctx, k.Hash()); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
for _, c := range testCaseCids {
|
||||
n := randInt(rng, dhtCount, privIdx)
|
||||
|
||||
ctxT, cancel := context.WithTimeout(ctx, time.Second)
|
||||
defer cancel()
|
||||
provchan := dhts[n].FindProvidersAsync(ctxT, c, 1)
|
||||
|
||||
select {
|
||||
case prov := <-provchan:
|
||||
if prov.ID == "" {
|
||||
t.Fatal("Got back nil provider")
|
||||
}
|
||||
if prov.ID != privDHT.self {
|
||||
t.Fatal("Got back wrong provider")
|
||||
}
|
||||
case <-ctxT.Done():
|
||||
t.Fatal("Did not get a provider back.")
|
||||
}
|
||||
}
|
||||
}
|
117
go-libp2p-kad-dht/metrics/metrics.go
Normal file
@ -0,0 +1,117 @@
|
||||
package metrics
|
||||
|
||||
import (
|
||||
pb "github.com/libp2p/go-libp2p-kad-dht/pb"
|
||||
"go.opencensus.io/stats"
|
||||
"go.opencensus.io/stats/view"
|
||||
"go.opencensus.io/tag"
|
||||
)
|
||||
|
||||
var (
|
||||
defaultBytesDistribution = view.Distribution(1024, 2048, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216, 67108864, 268435456, 1073741824, 4294967296)
|
||||
defaultMillisecondsDistribution = view.Distribution(0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000)
|
||||
)
|
||||
|
||||
// Keys
|
||||
var (
|
||||
KeyMessageType, _ = tag.NewKey("message_type")
|
||||
KeyPeerID, _ = tag.NewKey("peer_id")
|
||||
// KeyInstanceID identifies a dht instance by the pointer address.
|
||||
// Useful for differentiating between different dhts that have the same peer id.
|
||||
KeyInstanceID, _ = tag.NewKey("instance_id")
|
||||
)
|
||||
|
||||
// UpsertMessageType is a convenience function that upserts the message type
// of a pb.Message into the KeyMessageType tag.
|
||||
func UpsertMessageType(m *pb.Message) tag.Mutator {
|
||||
return tag.Upsert(KeyMessageType, m.Type.String())
|
||||
}
|
||||
|
||||
// Measures
|
||||
var (
|
||||
ReceivedMessages = stats.Int64("libp2p.io/dht/kad/received_messages", "Total number of messages received per RPC", stats.UnitDimensionless)
|
||||
ReceivedMessageErrors = stats.Int64("libp2p.io/dht/kad/received_message_errors", "Total number of errors for messages received per RPC", stats.UnitDimensionless)
|
||||
ReceivedBytes = stats.Int64("libp2p.io/dht/kad/received_bytes", "Total received bytes per RPC", stats.UnitBytes)
|
||||
InboundRequestLatency = stats.Float64("libp2p.io/dht/kad/inbound_request_latency", "Latency per RPC", stats.UnitMilliseconds)
|
||||
OutboundRequestLatency = stats.Float64("libp2p.io/dht/kad/outbound_request_latency", "Latency per RPC", stats.UnitMilliseconds)
|
||||
SentMessages = stats.Int64("libp2p.io/dht/kad/sent_messages", "Total number of messages sent per RPC", stats.UnitDimensionless)
|
||||
SentMessageErrors = stats.Int64("libp2p.io/dht/kad/sent_message_errors", "Total number of errors for messages sent per RPC", stats.UnitDimensionless)
|
||||
SentRequests = stats.Int64("libp2p.io/dht/kad/sent_requests", "Total number of requests sent per RPC", stats.UnitDimensionless)
|
||||
SentRequestErrors = stats.Int64("libp2p.io/dht/kad/sent_request_errors", "Total number of errors for requests sent per RPC", stats.UnitDimensionless)
|
||||
SentBytes = stats.Int64("libp2p.io/dht/kad/sent_bytes", "Total sent bytes per RPC", stats.UnitBytes)
|
||||
NetworkSize = stats.Int64("libp2p.io/dht/kad/network_size", "Network size estimation", stats.UnitDimensionless)
|
||||
)
|
||||
|
||||
// Views
|
||||
var (
|
||||
ReceivedMessagesView = &view.View{
|
||||
Measure: ReceivedMessages,
|
||||
TagKeys: []tag.Key{KeyMessageType, KeyPeerID, KeyInstanceID},
|
||||
Aggregation: view.Count(),
|
||||
}
|
||||
ReceivedMessageErrorsView = &view.View{
|
||||
Measure: ReceivedMessageErrors,
|
||||
TagKeys: []tag.Key{KeyMessageType, KeyPeerID, KeyInstanceID},
|
||||
Aggregation: view.Count(),
|
||||
}
|
||||
ReceivedBytesView = &view.View{
|
||||
Measure: ReceivedBytes,
|
||||
TagKeys: []tag.Key{KeyMessageType, KeyPeerID, KeyInstanceID},
|
||||
Aggregation: defaultBytesDistribution,
|
||||
}
|
||||
InboundRequestLatencyView = &view.View{
|
||||
Measure: InboundRequestLatency,
|
||||
TagKeys: []tag.Key{KeyMessageType, KeyPeerID, KeyInstanceID},
|
||||
Aggregation: defaultMillisecondsDistribution,
|
||||
}
|
||||
OutboundRequestLatencyView = &view.View{
|
||||
Measure: OutboundRequestLatency,
|
||||
TagKeys: []tag.Key{KeyMessageType, KeyPeerID, KeyInstanceID},
|
||||
Aggregation: defaultMillisecondsDistribution,
|
||||
}
|
||||
SentMessagesView = &view.View{
|
||||
Measure: SentMessages,
|
||||
TagKeys: []tag.Key{KeyMessageType, KeyPeerID, KeyInstanceID},
|
||||
Aggregation: view.Count(),
|
||||
}
|
||||
SentMessageErrorsView = &view.View{
|
||||
Measure: SentMessageErrors,
|
||||
TagKeys: []tag.Key{KeyMessageType, KeyPeerID, KeyInstanceID},
|
||||
Aggregation: view.Count(),
|
||||
}
|
||||
SentRequestsView = &view.View{
|
||||
Measure: SentRequests,
|
||||
TagKeys: []tag.Key{KeyMessageType, KeyPeerID, KeyInstanceID},
|
||||
Aggregation: view.Count(),
|
||||
}
|
||||
SentRequestErrorsView = &view.View{
|
||||
Measure: SentRequestErrors,
|
||||
TagKeys: []tag.Key{KeyMessageType, KeyPeerID, KeyInstanceID},
|
||||
Aggregation: view.Count(),
|
||||
}
|
||||
SentBytesView = &view.View{
|
||||
Measure: SentBytes,
|
||||
TagKeys: []tag.Key{KeyMessageType, KeyPeerID, KeyInstanceID},
|
||||
Aggregation: defaultBytesDistribution,
|
||||
}
|
||||
NetworkSizeView = &view.View{
|
||||
Measure: NetworkSize,
|
||||
TagKeys: []tag.Key{KeyPeerID, KeyInstanceID},
|
||||
Aggregation: view.Count(),
|
||||
}
|
||||
)
|
||||
|
||||
// DefaultViews is the list of all views defined in this package.
|
||||
var DefaultViews = []*view.View{
|
||||
ReceivedMessagesView,
|
||||
ReceivedMessageErrorsView,
|
||||
ReceivedBytesView,
|
||||
InboundRequestLatencyView,
|
||||
OutboundRequestLatencyView,
|
||||
SentMessagesView,
|
||||
SentMessageErrorsView,
|
||||
SentRequestsView,
|
||||
SentRequestErrorsView,
|
||||
SentBytesView,
|
||||
NetworkSizeView,
|
||||
}
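
A minimal registration sketch (not part of this diff; it relies only on the standard OpenCensus view.Register API, and the dhtmetrics import alias is illustrative): a consumer registers DefaultViews once at startup so the measures above are actually aggregated and exported.

// import (
//     dhtmetrics "github.com/libp2p/go-libp2p-kad-dht/metrics"
//     "go.opencensus.io/stats/view"
// )
func registerDHTViews() error {
	return view.Register(dhtmetrics.DefaultViews...)
}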
|
284
go-libp2p-kad-dht/netsize/netsize.go
Normal file
@ -0,0 +1,284 @@
|
||||
package netsize
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"math/big"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
logging "github.com/ipfs/go-log/v2"
|
||||
kbucket "github.com/libp2p/go-libp2p-kbucket"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
ks "github.com/whyrusleeping/go-keyspace"
|
||||
)
|
||||
|
||||
// invalidEstimate indicates that we currently have no valid estimate cached.
|
||||
const invalidEstimate int32 = -1
|
||||
|
||||
var (
|
||||
ErrNotEnoughData = fmt.Errorf("not enough data")
|
||||
ErrWrongNumOfPeers = fmt.Errorf("expected bucket size number of peers")
|
||||
)
|
||||
|
||||
var (
|
||||
logger = logging.Logger("dht/netsize")
|
||||
MaxMeasurementAge = 2 * time.Hour
|
||||
MinMeasurementsThreshold = 5
|
||||
MaxMeasurementsThreshold = 150
|
||||
keyspaceMaxInt, _ = new(big.Int).SetString(strings.Repeat("1", 256), 2)
|
||||
keyspaceMaxFloat = new(big.Float).SetInt(keyspaceMaxInt)
|
||||
)
|
||||
|
||||
type Estimator struct {
|
||||
localID kbucket.ID
|
||||
rt *kbucket.RoutingTable
|
||||
bucketSize int
|
||||
|
||||
measurementsLk sync.RWMutex
|
||||
measurements map[int][]measurement
|
||||
|
||||
netSizeCache int32
|
||||
}
|
||||
|
||||
func NewEstimator(localID peer.ID, rt *kbucket.RoutingTable, bucketSize int) *Estimator {
|
||||
// initialize map to hold measurement observations
|
||||
measurements := map[int][]measurement{}
|
||||
for i := 0; i < bucketSize; i++ {
|
||||
measurements[i] = []measurement{}
|
||||
}
|
||||
|
||||
return &Estimator{
|
||||
localID: kbucket.ConvertPeerID(localID),
|
||||
rt: rt,
|
||||
bucketSize: bucketSize,
|
||||
measurements: measurements,
|
||||
netSizeCache: invalidEstimate,
|
||||
}
|
||||
}
|
||||
|
||||
// NormedDistance calculates the normed XOR distance of the given keys (from 0 to 1).
|
||||
func NormedDistance(p peer.ID, k ks.Key) float64 {
|
||||
pKey := ks.XORKeySpace.Key([]byte(p))
|
||||
ksDistance := new(big.Float).SetInt(pKey.Distance(k))
|
||||
normedDist, _ := new(big.Float).Quo(ksDistance, keyspaceMaxFloat).Float64()
|
||||
return normedDist
|
||||
}
|
||||
|
||||
type measurement struct {
|
||||
distance float64
|
||||
weight float64
|
||||
timestamp time.Time
|
||||
}
|
||||
|
||||
// Track tracks the list of peers for the given key to incorporate in the next network size estimate.
|
||||
// key is expected **NOT** to be in the kademlia keyspace and peers is expected to be a sorted list of
|
||||
// the closest peers to the given key (the closest first).
|
||||
// This function expects peers to have the same length as the routing table bucket size. It also
// strips old data points and limits their number (favouring newer ones).
|
||||
func (e *Estimator) Track(key string, peers []peer.ID) error {
|
||||
e.measurementsLk.Lock()
|
||||
defer e.measurementsLk.Unlock()
|
||||
|
||||
// sanity check
|
||||
if len(peers) != e.bucketSize {
|
||||
return ErrWrongNumOfPeers
|
||||
}
|
||||
|
||||
logger.Debugw("Tracking peers for key", "key", key)
|
||||
|
||||
now := time.Now()
|
||||
|
||||
// invalidate cache
|
||||
atomic.StoreInt32(&e.netSizeCache, invalidEstimate)
|
||||
|
||||
// Calculate weight for the peer distances.
|
||||
weight := e.calcWeight(key, peers)
|
||||
|
||||
// Map given key to the Kademlia key space (hash it)
|
||||
ksKey := ks.XORKeySpace.Key([]byte(key))
|
||||
|
||||
// the maximum age timestamp of the measurement data points
|
||||
maxAgeTs := now.Add(-MaxMeasurementAge)
|
||||
|
||||
for i, p := range peers {
|
||||
// Construct measurement struct
|
||||
m := measurement{
|
||||
distance: NormedDistance(p, ksKey),
|
||||
weight: weight,
|
||||
timestamp: now,
|
||||
}
|
||||
|
||||
measurements := append(e.measurements[i], m)
|
||||
|
||||
// find the smallest index of a measurement that is still in the allowed time window
|
||||
// all measurements with a lower index should be discarded as they are too old
|
||||
n := len(measurements)
|
||||
idx := sort.Search(n, func(j int) bool {
|
||||
return measurements[j].timestamp.After(maxAgeTs)
|
||||
})
|
||||
|
||||
// if measurements are outside the allowed time window remove them.
|
||||
// idx == n - there is no measurement in the allowed time window -> reset slice
|
||||
// idx == 0 - the normal case where we only have valid entries
|
||||
// idx != 0 - there is a mix of valid and obsolete entries
|
||||
if idx != 0 {
|
||||
x := make([]measurement, n-idx)
|
||||
copy(x, measurements[idx:])
|
||||
measurements = x
|
||||
}
|
||||
|
||||
// if the number of data points exceed the max threshold, strip oldest measurement data points.
|
||||
if len(measurements) > MaxMeasurementsThreshold {
|
||||
measurements = measurements[len(measurements)-MaxMeasurementsThreshold:]
|
||||
}
|
||||
|
||||
e.measurements[i] = measurements
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// NetworkSize instructs the Estimator to calculate the current network size estimate.
|
||||
func (e *Estimator) NetworkSize() (int32, error) {
|
||||
|
||||
// return cached calculation lock-free (fast path)
|
||||
if estimate := atomic.LoadInt32(&e.netSizeCache); estimate != invalidEstimate {
|
||||
logger.Debugw("Cached network size estimation", "estimate", estimate)
|
||||
return estimate, nil
|
||||
}
|
||||
|
||||
e.measurementsLk.Lock()
|
||||
defer e.measurementsLk.Unlock()
|
||||
|
||||
// Check a second time. This is needed because we may have had to wait for another goroutine doing the computation.
// In that case the computation was just finished by the other goroutine, and we don't need to redo it.
|
||||
if estimate := e.netSizeCache; estimate != invalidEstimate {
|
||||
logger.Debugw("Cached network size estimation", "estimate", estimate)
|
||||
return estimate, nil
|
||||
}
|
||||
|
||||
// remove obsolete data points
|
||||
e.garbageCollect()
|
||||
|
||||
// initialize slices for linear fit
|
||||
xs := make([]float64, e.bucketSize)
|
||||
ys := make([]float64, e.bucketSize)
|
||||
yerrs := make([]float64, e.bucketSize)
|
||||
|
||||
for i := 0; i < e.bucketSize; i++ {
|
||||
observationCount := len(e.measurements[i])
|
||||
|
||||
// If we don't have enough data to reasonably calculate the network size, return early
|
||||
if observationCount < MinMeasurementsThreshold {
|
||||
return 0, ErrNotEnoughData
|
||||
}
|
||||
|
||||
// Calculate Average Distance
|
||||
sumDistances := 0.0
|
||||
sumWeights := 0.0
|
||||
for _, m := range e.measurements[i] {
|
||||
sumDistances += m.weight * m.distance
|
||||
sumWeights += m.weight
|
||||
}
|
||||
distanceAvg := sumDistances / sumWeights
|
||||
|
||||
// Calculate standard deviation
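// (Descriptive note, derived from the code below: this is the weighted sample
// variance, var = sum(w*(d-avg)^2) / ((n-1)/n * sum(w)), i.e. a Bessel-style
// correction applied to the weighted sum of squared deviations.)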
|
||||
sumWeightedDiffs := 0.0
|
||||
for _, m := range e.measurements[i] {
|
||||
diff := m.distance - distanceAvg
|
||||
sumWeightedDiffs += m.weight * diff * diff
|
||||
}
|
||||
variance := sumWeightedDiffs / (float64(observationCount-1) / float64(observationCount) * sumWeights)
|
||||
distanceStd := math.Sqrt(variance)
|
||||
|
||||
// Track calculations
|
||||
xs[i] = float64(i + 1)
|
||||
ys[i] = distanceAvg
|
||||
yerrs[i] = distanceStd
|
||||
}
|
||||
|
||||
// Calculate linear regression (assumes the line goes through the origin)
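// (Descriptive note, derived from the code below rather than upstream docs:
// slope = sum(w*x*y) / sum(w*x^2), with the per-bucket distance deviation
// yerrs[i] used as the weight w. Since the i-th closest peer is expected at a
// normed distance of roughly i/(N+1), the estimate below is N ≈ 1/slope - 1.)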
|
||||
var x2Sum, xySum float64
|
||||
for i, xi := range xs {
|
||||
yi := ys[i]
|
||||
xySum += yerrs[i] * xi * yi
|
||||
x2Sum += yerrs[i] * xi * xi
|
||||
}
|
||||
slope := xySum / x2Sum
|
||||
|
||||
// calculate final network size
|
||||
netSize := int32(1/slope - 1)
|
||||
|
||||
// cache network size estimation
|
||||
atomic.StoreInt32(&e.netSizeCache, netSize)
|
||||
|
||||
logger.Debugw("New network size estimation", "estimate", netSize)
|
||||
return netSize, nil
|
||||
}
|
||||
|
||||
// calcWeight weighs data points exponentially less if they fall into a non-full bucket.
|
||||
// It weighs distance estimates based on their CPLs and bucket levels.
|
||||
// Bucket Level: 20 -> 1/2^0 -> weight: 1
|
||||
// Bucket Level: 17 -> 1/2^3 -> weight: 1/8
|
||||
// Bucket Level: 10 -> 1/2^10 -> weight: 1/1024
|
||||
//
|
||||
// It can happen that the routing table doesn't have a full bucket, but we are tracking here
|
||||
// a list of peers that would theoretically have been suitable for that bucket. Let's imagine
|
||||
// there are only 13 peers in bucket 3 although there is space for 20. Now, the Track function
|
||||
// gets a peers list (len 20) where all peers fall into bucket 3. The weight of this set of peers
|
||||
// should be 1 instead of 1/2^7.
|
||||
// I actually thought this cannot happen as peers would have been added to the routing table before
|
||||
// the Track function gets called. But they seem sometimes not to be added.
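// Worked example (illustrative numbers, following the table above): with
// bucketSize = 20 and a bucket that currently holds bucketLevel = 17 peers,
// the weight is 2^(17-20) = 1/8. If all 20 tracked peers share the CPL
// (peerLevel = 20 > bucketLevel), the weight becomes 2^(20-20) = 1 instead.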
|
||||
func (e *Estimator) calcWeight(key string, peers []peer.ID) float64 {
|
||||
|
||||
cpl := kbucket.CommonPrefixLen(kbucket.ConvertKey(key), e.localID)
|
||||
bucketLevel := e.rt.NPeersForCpl(uint(cpl))
|
||||
|
||||
if bucketLevel < e.bucketSize {
|
||||
// routing table doesn't have a full bucket. Check how many peers would fit into that bucket
|
||||
peerLevel := 0
|
||||
for _, p := range peers {
|
||||
if cpl == kbucket.CommonPrefixLen(kbucket.ConvertPeerID(p), e.localID) {
|
||||
peerLevel += 1
|
||||
}
|
||||
}
|
||||
|
||||
if peerLevel > bucketLevel {
|
||||
return math.Pow(2, float64(peerLevel-e.bucketSize))
|
||||
}
|
||||
}
|
||||
|
||||
return math.Pow(2, float64(bucketLevel-e.bucketSize))
|
||||
}
|
||||
|
||||
// garbageCollect removes all measurements from the list that fell out of the measurement time window.
|
||||
func (e *Estimator) garbageCollect() {
|
||||
logger.Debug("Running garbage collection")
|
||||
|
||||
// the maximum age timestamp of the measurement data points
|
||||
maxAgeTs := time.Now().Add(-MaxMeasurementAge)
|
||||
|
||||
for i := 0; i < e.bucketSize; i++ {
|
||||
|
||||
// find the smallest index of a measurement that is still in the allowed time window
|
||||
// all measurements with a lower index should be discarded as they are too old
|
||||
n := len(e.measurements[i])
|
||||
idx := sort.Search(n, func(j int) bool {
|
||||
return e.measurements[i][j].timestamp.After(maxAgeTs)
|
||||
})
|
||||
|
||||
// if measurements are outside the allowed time window remove them.
|
||||
// idx == n - there is no measurement in the allowed time window -> reset slice
|
||||
// idx == 0 - the normal case where we only have valid entries
|
||||
// idx != 0 - there is a mix of valid and obsolete entries
|
||||
if idx == n {
|
||||
e.measurements[i] = []measurement{}
|
||||
} else if idx != 0 {
|
||||
e.measurements[i] = e.measurements[i][idx:]
|
||||
}
|
||||
}
|
||||
}
|
44
go-libp2p-kad-dht/netsize/netsize_test.go
Normal file
@ -0,0 +1,44 @@
|
||||
package netsize
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
kbucket "github.com/libp2p/go-libp2p-kbucket"
|
||||
pt "github.com/libp2p/go-libp2p/core/test"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
ks "github.com/whyrusleeping/go-keyspace"
|
||||
)
|
||||
|
||||
func TestNewEstimator(t *testing.T) {
|
||||
bucketSize := 20
|
||||
|
||||
pid, err := pt.RandPeerID()
|
||||
require.NoError(t, err)
|
||||
|
||||
rt, err := kbucket.NewRoutingTable(bucketSize, kbucket.ConvertPeerID(pid), time.Second, nil, time.Second, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
e := NewEstimator(pid, rt, bucketSize)
|
||||
|
||||
assert.Equal(t, rt, e.rt)
|
||||
assert.Equal(t, kbucket.ConvertPeerID(pid), e.localID)
|
||||
assert.Len(t, e.measurements, bucketSize)
|
||||
assert.Equal(t, invalidEstimate, e.netSizeCache)
|
||||
}
|
||||
|
||||
func TestNormedDistance(t *testing.T) {
|
||||
pid, err := pt.RandPeerID()
|
||||
require.NoError(t, err)
|
||||
|
||||
dist := NormedDistance(pid, ks.XORKeySpace.Key([]byte(pid)))
|
||||
assert.Zero(t, dist)
|
||||
|
||||
pid2, err := pt.RandPeerID()
|
||||
require.NoError(t, err)
|
||||
|
||||
dist = NormedDistance(pid, ks.XORKeySpace.Key([]byte(pid2)))
|
||||
assert.Greater(t, dist, 0.0)
assert.Less(t, dist, 1.0)
|
||||
}
|
23
go-libp2p-kad-dht/nofile_test.go
Normal file
@ -0,0 +1,23 @@
|
||||
//go:build !windows && !wasm
|
||||
|
||||
package dht
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"syscall"
|
||||
)
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
err := syscall.Setrlimit(syscall.RLIMIT_NOFILE, &syscall.Rlimit{
|
||||
Cur: 4096,
|
||||
Max: 4096,
|
||||
})
|
||||
if err != nil {
|
||||
fmt.Println("failed to increase open file descriptor limit, can't run tests")
|
||||
os.Exit(1)
|
||||
}
|
||||
os.Exit(m.Run())
|
||||
}
|
7
go-libp2p-kad-dht/optimizations.md
Normal file
@ -0,0 +1,7 @@
|
||||
# Client-side optimizations
|
||||
|
||||
This document reflects client-side optimizations that are implemented in this repository. Client-side optimizations are not part of the [Kademlia spec](https://github.com/libp2p/specs/tree/master/kad-dht), and are not required to be implemented on all clients.
|
||||
|
||||
## Checking before Adding
|
||||
|
||||
A Kademlia server should try to add remote peers querying it to its routing table. However, the Kademlia server has no guarantee that remote peers issuing requests are able to answer Kademlia requests correctly, even though they advertise speaking the Kademlia server protocol. It is important that only server nodes able to answer Kademlia requests end up in other peers' routing tables. Hence, before adding a remote peer to the Kademlia server's routing table, the Kademlia server will send a trivial `FIND_NODE` request to the remote peer, and add it to its routing table only if it is able to provide a valid response.
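
A minimal sketch of this flow (illustrative only, not this repository's actual implementation; `checkBeforeAdding` and `findNodeFn` are hypothetical names, and it assumes go-libp2p-kbucket's `TryAddPeer` with the FIND_NODE call supplied by the caller):

```go
package example

import (
	"context"
	"fmt"
	"time"

	kbucket "github.com/libp2p/go-libp2p-kbucket"
	"github.com/libp2p/go-libp2p/core/peer"
)

type findNodeFn func(ctx context.Context, p, target peer.ID) ([]*peer.AddrInfo, error)

// checkBeforeAdding sends a trivial FIND_NODE request (asking the candidate for
// the peers closest to its own ID) and adds the candidate to the routing table
// only if it produced a valid response.
func checkBeforeAdding(ctx context.Context, rt *kbucket.RoutingTable, findNode findNodeFn, p peer.ID) error {
	ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
	defer cancel()

	if _, err := findNode(ctx, p, p); err != nil {
		return fmt.Errorf("peer %s failed the FIND_NODE check: %w", p, err)
	}

	// Flags follow go-libp2p-kbucket's TryAddPeer(peer, queryPeer, isReplaceable).
	_, err := rt.TryAddPeer(p, true, false)
	return err
}
```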
|
68
go-libp2p-kad-dht/opts/options.go
Normal file
@ -0,0 +1,68 @@
|
||||
// Deprecated: Options are now defined in the root package.
|
||||
|
||||
package dhtopts
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
ds "github.com/ipfs/go-datastore"
|
||||
dht "github.com/libp2p/go-libp2p-kad-dht"
|
||||
record "github.com/libp2p/go-libp2p-record"
|
||||
)
|
||||
|
||||
type Option = dht.Option
|
||||
|
||||
// Deprecated: use dht.RoutingTableLatencyTolerance
|
||||
func RoutingTableLatencyTolerance(latency time.Duration) dht.Option {
|
||||
return dht.RoutingTableLatencyTolerance(latency)
|
||||
}
|
||||
|
||||
// Deprecated: use dht.RoutingTableRefreshQueryTimeout
|
||||
func RoutingTableRefreshQueryTimeout(timeout time.Duration) dht.Option {
|
||||
return dht.RoutingTableRefreshQueryTimeout(timeout)
|
||||
}
|
||||
|
||||
// Deprecated: use dht.RoutingTableRefreshPeriod
|
||||
func RoutingTableRefreshPeriod(period time.Duration) dht.Option {
|
||||
return dht.RoutingTableRefreshPeriod(period)
|
||||
}
|
||||
|
||||
// Deprecated: use dht.Datastore
|
||||
func Datastore(ds ds.Batching) dht.Option { return dht.Datastore(ds) }
|
||||
|
||||
// Client configures whether or not the DHT operates in client-only mode.
|
||||
//
|
||||
// Defaults to false (which is ModeAuto).
|
||||
// Deprecated: use dht.Mode(ModeClient)
|
||||
func Client(only bool) dht.Option {
|
||||
if only {
|
||||
return dht.Mode(dht.ModeClient)
|
||||
}
|
||||
return dht.Mode(dht.ModeAuto)
|
||||
}
|
||||
|
||||
// Deprecated: use dht.Mode
|
||||
func Mode(m dht.ModeOpt) dht.Option { return dht.Mode(m) }
|
||||
|
||||
// Deprecated: use dht.Validator
|
||||
func Validator(v record.Validator) dht.Option { return dht.Validator(v) }
|
||||
|
||||
// Deprecated: use dht.NamespacedValidator
|
||||
func NamespacedValidator(ns string, v record.Validator) dht.Option {
|
||||
return dht.NamespacedValidator(ns, v)
|
||||
}
|
||||
|
||||
// Deprecated: use dht.BucketSize
|
||||
func BucketSize(bucketSize int) dht.Option { return dht.BucketSize(bucketSize) }
|
||||
|
||||
// Deprecated: use dht.MaxRecordAge
|
||||
func MaxRecordAge(maxAge time.Duration) dht.Option { return dht.MaxRecordAge(maxAge) }
|
||||
|
||||
// Deprecated: use dht.DisableAutoRefresh
|
||||
func DisableAutoRefresh() dht.Option { return dht.DisableAutoRefresh() }
|
||||
|
||||
// Deprecated: use dht.DisableProviders
|
||||
func DisableProviders() dht.Option { return dht.DisableProviders() }
|
||||
|
||||
// Deprecated: use dht.DisableValues
|
||||
func DisableValues() dht.Option { return dht.DisableValues() }
|
11
go-libp2p-kad-dht/pb/Makefile
Normal file
@ -0,0 +1,11 @@
|
||||
PB = $(wildcard *.proto)
|
||||
GO = $(PB:.proto=.pb.go)
|
||||
|
||||
all: $(GO)
|
||||
|
||||
%.pb.go: %.proto
|
||||
protoc --proto_path=$(GOPATH)/src:. --gogofast_out=. $<
|
||||
|
||||
clean:
|
||||
rm -f *.pb.go
|
||||
rm -f *.go
|
42
go-libp2p-kad-dht/pb/bytestring.go
Normal file
@ -0,0 +1,42 @@
|
||||
package dht_pb
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
)
|
||||
|
||||
type byteString string
|
||||
|
||||
func (b byteString) Marshal() ([]byte, error) {
|
||||
return []byte(b), nil
|
||||
}
|
||||
|
||||
func (b *byteString) MarshalTo(data []byte) (int, error) {
|
||||
return copy(data, *b), nil
|
||||
}
|
||||
|
||||
func (b *byteString) Unmarshal(data []byte) error {
|
||||
*b = byteString(data)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *byteString) Size() int {
|
||||
return len(*b)
|
||||
}
|
||||
|
||||
func (b byteString) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal([]byte(b))
|
||||
}
|
||||
|
||||
func (b *byteString) UnmarshalJSON(data []byte) error {
|
||||
var buf []byte
|
||||
err := json.Unmarshal(data, &buf)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*b = byteString(buf)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b byteString) Equal(other byteString) bool {
|
||||
return b == other
|
||||
}
|
959
go-libp2p-kad-dht/pb/dht.pb.go
Normal file
@ -0,0 +1,959 @@
|
||||
// Code generated by protoc-gen-gogo. DO NOT EDIT.
|
||||
// source: dht.proto
|
||||
|
||||
package dht_pb
|
||||
|
||||
import (
|
||||
fmt "fmt"
|
||||
io "io"
|
||||
math "math"
|
||||
math_bits "math/bits"
|
||||
|
||||
_ "github.com/gogo/protobuf/gogoproto"
|
||||
proto "github.com/gogo/protobuf/proto"
|
||||
pb "github.com/libp2p/go-libp2p-record/pb"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
|
||||
|
||||
type Message_MessageType int32
|
||||
|
||||
const (
|
||||
Message_PUT_VALUE Message_MessageType = 0
|
||||
Message_GET_VALUE Message_MessageType = 1
|
||||
Message_ADD_PROVIDER Message_MessageType = 2
|
||||
Message_GET_PROVIDERS Message_MessageType = 3
|
||||
Message_FIND_NODE Message_MessageType = 4
|
||||
Message_PING Message_MessageType = 5
|
||||
)
|
||||
|
||||
var Message_MessageType_name = map[int32]string{
|
||||
0: "PUT_VALUE",
|
||||
1: "GET_VALUE",
|
||||
2: "ADD_PROVIDER",
|
||||
3: "GET_PROVIDERS",
|
||||
4: "FIND_NODE",
|
||||
5: "PING",
|
||||
}
|
||||
|
||||
var Message_MessageType_value = map[string]int32{
|
||||
"PUT_VALUE": 0,
|
||||
"GET_VALUE": 1,
|
||||
"ADD_PROVIDER": 2,
|
||||
"GET_PROVIDERS": 3,
|
||||
"FIND_NODE": 4,
|
||||
"PING": 5,
|
||||
}
|
||||
|
||||
func (x Message_MessageType) String() string {
|
||||
return proto.EnumName(Message_MessageType_name, int32(x))
|
||||
}
|
||||
|
||||
func (Message_MessageType) EnumDescriptor() ([]byte, []int) {
|
||||
return fileDescriptor_616a434b24c97ff4, []int{0, 0}
|
||||
}
|
||||
|
||||
type Message_ConnectionType int32
|
||||
|
||||
const (
|
||||
// sender does not have a connection to peer, and no extra information (default)
|
||||
Message_NOT_CONNECTED Message_ConnectionType = 0
|
||||
// sender has a live connection to peer
|
||||
Message_CONNECTED Message_ConnectionType = 1
|
||||
// sender recently connected to peer
|
||||
Message_CAN_CONNECT Message_ConnectionType = 2
|
||||
// sender recently tried to connect to peer repeatedly but failed to connect
|
||||
// ("try" here is loose, but this should signal "made strong effort, failed")
|
||||
Message_CANNOT_CONNECT Message_ConnectionType = 3
|
||||
)
|
||||
|
||||
var Message_ConnectionType_name = map[int32]string{
|
||||
0: "NOT_CONNECTED",
|
||||
1: "CONNECTED",
|
||||
2: "CAN_CONNECT",
|
||||
3: "CANNOT_CONNECT",
|
||||
}
|
||||
|
||||
var Message_ConnectionType_value = map[string]int32{
|
||||
"NOT_CONNECTED": 0,
|
||||
"CONNECTED": 1,
|
||||
"CAN_CONNECT": 2,
|
||||
"CANNOT_CONNECT": 3,
|
||||
}
|
||||
|
||||
func (x Message_ConnectionType) String() string {
|
||||
return proto.EnumName(Message_ConnectionType_name, int32(x))
|
||||
}
|
||||
|
||||
func (Message_ConnectionType) EnumDescriptor() ([]byte, []int) {
|
||||
return fileDescriptor_616a434b24c97ff4, []int{0, 1}
|
||||
}
|
||||
|
||||
type Message struct {
|
||||
// defines what type of message it is.
|
||||
Type Message_MessageType `protobuf:"varint,1,opt,name=type,proto3,enum=dht.pb.Message_MessageType" json:"type,omitempty"`
|
||||
// defines what coral cluster level this query/response belongs to.
|
||||
// in case we want to implement coral's cluster rings in the future.
|
||||
ClusterLevelRaw int32 `protobuf:"varint,10,opt,name=clusterLevelRaw,proto3" json:"clusterLevelRaw,omitempty"`
|
||||
// Used to specify the key associated with this message.
|
||||
// PUT_VALUE, GET_VALUE, ADD_PROVIDER, GET_PROVIDERS
|
||||
Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
|
||||
// Used to return a value
|
||||
// PUT_VALUE, GET_VALUE
|
||||
Record *pb.Record `protobuf:"bytes,3,opt,name=record,proto3" json:"record,omitempty"`
|
||||
// Used to return peers closer to a key in a query
|
||||
// GET_VALUE, GET_PROVIDERS, FIND_NODE
|
||||
CloserPeers []Message_Peer `protobuf:"bytes,8,rep,name=closerPeers,proto3" json:"closerPeers"`
|
||||
// Used to return Providers
|
||||
// GET_VALUE, ADD_PROVIDER, GET_PROVIDERS
|
||||
ProviderPeers []Message_Peer `protobuf:"bytes,9,rep,name=providerPeers,proto3" json:"providerPeers"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *Message) Reset() { *m = Message{} }
|
||||
func (m *Message) String() string { return proto.CompactTextString(m) }
|
||||
func (*Message) ProtoMessage() {}
|
||||
func (*Message) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_616a434b24c97ff4, []int{0}
|
||||
}
|
||||
func (m *Message) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
}
|
||||
func (m *Message) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
if deterministic {
|
||||
return xxx_messageInfo_Message.Marshal(b, m, deterministic)
|
||||
} else {
|
||||
b = b[:cap(b)]
|
||||
n, err := m.MarshalToSizedBuffer(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b[:n], nil
|
||||
}
|
||||
}
|
||||
func (m *Message) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Message.Merge(m, src)
|
||||
}
|
||||
func (m *Message) XXX_Size() int {
|
||||
return m.Size()
|
||||
}
|
||||
func (m *Message) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_Message.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_Message proto.InternalMessageInfo
|
||||
|
||||
func (m *Message) GetType() Message_MessageType {
|
||||
if m != nil {
|
||||
return m.Type
|
||||
}
|
||||
return Message_PUT_VALUE
|
||||
}
|
||||
|
||||
func (m *Message) GetClusterLevelRaw() int32 {
|
||||
if m != nil {
|
||||
return m.ClusterLevelRaw
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *Message) GetKey() []byte {
|
||||
if m != nil {
|
||||
return m.Key
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Message) GetRecord() *pb.Record {
|
||||
if m != nil {
|
||||
return m.Record
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Message) GetCloserPeers() []Message_Peer {
|
||||
if m != nil {
|
||||
return m.CloserPeers
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Message) GetProviderPeers() []Message_Peer {
|
||||
if m != nil {
|
||||
return m.ProviderPeers
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type Message_Peer struct {
|
||||
// ID of a given peer.
|
||||
Id byteString `protobuf:"bytes,1,opt,name=id,proto3,customtype=byteString" json:"id"`
|
||||
// multiaddrs for a given peer
|
||||
Addrs [][]byte `protobuf:"bytes,2,rep,name=addrs,proto3" json:"addrs,omitempty"`
|
||||
// used to signal the sender's connection capabilities to the peer
|
||||
Connection Message_ConnectionType `protobuf:"varint,3,opt,name=connection,proto3,enum=dht.pb.Message_ConnectionType" json:"connection,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *Message_Peer) Reset() { *m = Message_Peer{} }
|
||||
func (m *Message_Peer) String() string { return proto.CompactTextString(m) }
|
||||
func (*Message_Peer) ProtoMessage() {}
|
||||
func (*Message_Peer) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_616a434b24c97ff4, []int{0, 0}
|
||||
}
|
||||
func (m *Message_Peer) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
}
|
||||
func (m *Message_Peer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
if deterministic {
|
||||
return xxx_messageInfo_Message_Peer.Marshal(b, m, deterministic)
|
||||
} else {
|
||||
b = b[:cap(b)]
|
||||
n, err := m.MarshalToSizedBuffer(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b[:n], nil
|
||||
}
|
||||
}
|
||||
func (m *Message_Peer) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Message_Peer.Merge(m, src)
|
||||
}
|
||||
func (m *Message_Peer) XXX_Size() int {
|
||||
return m.Size()
|
||||
}
|
||||
func (m *Message_Peer) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_Message_Peer.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_Message_Peer proto.InternalMessageInfo
|
||||
|
||||
func (m *Message_Peer) GetAddrs() [][]byte {
|
||||
if m != nil {
|
||||
return m.Addrs
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Message_Peer) GetConnection() Message_ConnectionType {
|
||||
if m != nil {
|
||||
return m.Connection
|
||||
}
|
||||
return Message_NOT_CONNECTED
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterEnum("dht.pb.Message_MessageType", Message_MessageType_name, Message_MessageType_value)
|
||||
proto.RegisterEnum("dht.pb.Message_ConnectionType", Message_ConnectionType_name, Message_ConnectionType_value)
|
||||
proto.RegisterType((*Message)(nil), "dht.pb.Message")
|
||||
proto.RegisterType((*Message_Peer)(nil), "dht.pb.Message.Peer")
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("dht.proto", fileDescriptor_616a434b24c97ff4) }
|
||||
|
||||
var fileDescriptor_616a434b24c97ff4 = []byte{
|
||||
// 469 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x92, 0xb1, 0x6f, 0x9b, 0x40,
|
||||
0x18, 0xc5, 0x73, 0x80, 0xdd, 0xf8, 0x03, 0x3b, 0xe4, 0x94, 0x01, 0xb9, 0x92, 0x83, 0x3c, 0xd1,
|
||||
0xc1, 0x20, 0xd1, 0xb5, 0xaa, 0x6a, 0x03, 0x8d, 0x2c, 0xa5, 0xd8, 0xba, 0x38, 0xe9, 0x68, 0x19,
|
||||
0xb8, 0x12, 0x54, 0xd7, 0x87, 0x00, 0xa7, 0xf2, 0xd6, 0x3f, 0x2f, 0x63, 0xe7, 0x0e, 0x51, 0xe5,
|
||||
0xa9, 0x7f, 0x46, 0xc5, 0x11, 0x5a, 0xec, 0x25, 0x13, 0xef, 0x7d, 0xf7, 0x7e, 0xe2, 0xdd, 0xa7,
|
||||
0x83, 0x4e, 0x74, 0x5f, 0x98, 0x69, 0xc6, 0x0a, 0x86, 0xdb, 0x5c, 0x06, 0x7d, 0x3b, 0x4e, 0x8a,
|
||||
0xfb, 0x6d, 0x60, 0x86, 0xec, 0x9b, 0xb5, 0x4e, 0x82, 0xd4, 0x4e, 0xad, 0x98, 0x8d, 0x2a, 0x35,
|
||||
0xca, 0x68, 0xc8, 0xb2, 0xc8, 0x4a, 0x03, 0xab, 0x52, 0x15, 0xdb, 0x1f, 0x35, 0x98, 0x98, 0xc5,
|
||||
0xcc, 0xe2, 0xe3, 0x60, 0xfb, 0x85, 0x3b, 0x6e, 0xb8, 0xaa, 0xe2, 0xc3, 0x3f, 0x12, 0xbc, 0xfa,
|
||||
0x44, 0xf3, 0x7c, 0x15, 0x53, 0x6c, 0x81, 0x54, 0xec, 0x52, 0xaa, 0x21, 0x1d, 0x19, 0x3d, 0xfb,
|
||||
0xb5, 0x59, 0xb5, 0x30, 0x9f, 0x8f, 0xeb, 0xef, 0x62, 0x97, 0x52, 0xc2, 0x83, 0xd8, 0x80, 0xb3,
|
||||
0x70, 0xbd, 0xcd, 0x0b, 0x9a, 0x5d, 0xd3, 0x07, 0xba, 0x26, 0xab, 0xef, 0x1a, 0xe8, 0xc8, 0x68,
|
||||
0x91, 0xe3, 0x31, 0x56, 0x41, 0xfc, 0x4a, 0x77, 0x9a, 0xa0, 0x23, 0x43, 0x21, 0xa5, 0xc4, 0x6f,
|
||||
0xa0, 0x5d, 0xf5, 0xd6, 0x44, 0x1d, 0x19, 0xb2, 0x7d, 0x6e, 0xd6, 0xd7, 0x08, 0x4c, 0xc2, 0x15,
|
||||
0x79, 0x0e, 0xe0, 0x77, 0x20, 0x87, 0x6b, 0x96, 0xd3, 0x6c, 0x4e, 0x69, 0x96, 0x6b, 0xa7, 0xba,
|
||||
0x68, 0xc8, 0xf6, 0xc5, 0x71, 0xbd, 0xf2, 0x70, 0x22, 0x3d, 0x3e, 0x5d, 0x9e, 0x90, 0x66, 0x1c,
|
||||
0x7f, 0x80, 0x6e, 0x9a, 0xb1, 0x87, 0x24, 0xaa, 0xf9, 0xce, 0x8b, 0xfc, 0x21, 0xd0, 0xff, 0x81,
|
||||
0x40, 0x2a, 0x15, 0x1e, 0x82, 0x90, 0x44, 0x7c, 0x3d, 0xca, 0x04, 0x97, 0xc9, 0x5f, 0x4f, 0x97,
|
||||
0x10, 0xec, 0x0a, 0x7a, 0x53, 0x64, 0xc9, 0x26, 0x26, 0x42, 0x12, 0xe1, 0x0b, 0x68, 0xad, 0xa2,
|
||||
0x28, 0xcb, 0x35, 0x41, 0x17, 0x0d, 0x85, 0x54, 0x06, 0xbf, 0x07, 0x08, 0xd9, 0x66, 0x43, 0xc3,
|
||||
0x22, 0x61, 0x1b, 0x7e, 0xe3, 0x9e, 0x3d, 0x38, 0x6e, 0xe0, 0xfc, 0x4b, 0xf0, 0x1d, 0x37, 0x88,
|
||||
0x61, 0x02, 0x72, 0x63, 0xfd, 0xb8, 0x0b, 0x9d, 0xf9, 0xed, 0x62, 0x79, 0x37, 0xbe, 0xbe, 0xf5,
|
||||
0xd4, 0x93, 0xd2, 0x5e, 0x79, 0xb5, 0x45, 0x58, 0x05, 0x65, 0xec, 0xba, 0xcb, 0x39, 0x99, 0xdd,
|
||||
0x4d, 0x5d, 0x8f, 0xa8, 0x02, 0x3e, 0x87, 0x6e, 0x19, 0xa8, 0x27, 0x37, 0xaa, 0x58, 0x32, 0x1f,
|
||||
0xa7, 0xbe, 0xbb, 0xf4, 0x67, 0xae, 0xa7, 0x4a, 0xf8, 0x14, 0xa4, 0xf9, 0xd4, 0xbf, 0x52, 0x5b,
|
||||
0xc3, 0xcf, 0xd0, 0x3b, 0x2c, 0x52, 0xd2, 0xfe, 0x6c, 0xb1, 0x74, 0x66, 0xbe, 0xef, 0x39, 0x0b,
|
||||
0xcf, 0xad, 0xfe, 0xf8, 0xdf, 0x22, 0x7c, 0x06, 0xb2, 0x33, 0xf6, 0xeb, 0x84, 0x2a, 0x60, 0x0c,
|
||||
0x3d, 0x67, 0xec, 0x37, 0x28, 0x55, 0x9c, 0x28, 0x8f, 0xfb, 0x01, 0xfa, 0xb9, 0x1f, 0xa0, 0xdf,
|
||||
0xfb, 0x01, 0x0a, 0xda, 0xfc, 0xfd, 0xbd, 0xfd, 0x1b, 0x00, 0x00, 0xff, 0xff, 0x8d, 0x1a, 0xa1,
|
||||
0xbe, 0xf7, 0x02, 0x00, 0x00,
|
||||
}
|
||||
|
||||
func (m *Message) Marshal() (dAtA []byte, err error) {
|
||||
size := m.Size()
|
||||
dAtA = make([]byte, size)
|
||||
n, err := m.MarshalToSizedBuffer(dAtA[:size])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return dAtA[:n], nil
|
||||
}
|
||||
|
||||
func (m *Message) MarshalTo(dAtA []byte) (int, error) {
|
||||
size := m.Size()
|
||||
return m.MarshalToSizedBuffer(dAtA[:size])
|
||||
}
|
||||
|
||||
func (m *Message) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
||||
i := len(dAtA)
|
||||
_ = i
|
||||
var l int
|
||||
_ = l
|
||||
if m.XXX_unrecognized != nil {
|
||||
i -= len(m.XXX_unrecognized)
|
||||
copy(dAtA[i:], m.XXX_unrecognized)
|
||||
}
|
||||
if m.ClusterLevelRaw != 0 {
|
||||
i = encodeVarintDht(dAtA, i, uint64(m.ClusterLevelRaw))
|
||||
i--
|
||||
dAtA[i] = 0x50
|
||||
}
|
||||
if len(m.ProviderPeers) > 0 {
|
||||
for iNdEx := len(m.ProviderPeers) - 1; iNdEx >= 0; iNdEx-- {
|
||||
{
|
||||
size, err := m.ProviderPeers[iNdEx].MarshalToSizedBuffer(dAtA[:i])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i -= size
|
||||
i = encodeVarintDht(dAtA, i, uint64(size))
|
||||
}
|
||||
i--
|
||||
dAtA[i] = 0x4a
|
||||
}
|
||||
}
|
||||
if len(m.CloserPeers) > 0 {
|
||||
for iNdEx := len(m.CloserPeers) - 1; iNdEx >= 0; iNdEx-- {
|
||||
{
|
||||
size, err := m.CloserPeers[iNdEx].MarshalToSizedBuffer(dAtA[:i])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i -= size
|
||||
i = encodeVarintDht(dAtA, i, uint64(size))
|
||||
}
|
||||
i--
|
||||
dAtA[i] = 0x42
|
||||
}
|
||||
}
|
||||
if m.Record != nil {
|
||||
{
|
||||
size, err := m.Record.MarshalToSizedBuffer(dAtA[:i])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i -= size
|
||||
i = encodeVarintDht(dAtA, i, uint64(size))
|
||||
}
|
||||
i--
|
||||
dAtA[i] = 0x1a
|
||||
}
|
||||
if len(m.Key) > 0 {
|
||||
i -= len(m.Key)
|
||||
copy(dAtA[i:], m.Key)
|
||||
i = encodeVarintDht(dAtA, i, uint64(len(m.Key)))
|
||||
i--
|
||||
dAtA[i] = 0x12
|
||||
}
|
||||
if m.Type != 0 {
|
||||
i = encodeVarintDht(dAtA, i, uint64(m.Type))
|
||||
i--
|
||||
dAtA[i] = 0x8
|
||||
}
|
||||
return len(dAtA) - i, nil
|
||||
}
|
||||
|
||||
func (m *Message_Peer) Marshal() (dAtA []byte, err error) {
|
||||
size := m.Size()
|
||||
dAtA = make([]byte, size)
|
||||
n, err := m.MarshalToSizedBuffer(dAtA[:size])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return dAtA[:n], nil
|
||||
}
|
||||
|
||||
func (m *Message_Peer) MarshalTo(dAtA []byte) (int, error) {
|
||||
size := m.Size()
|
||||
return m.MarshalToSizedBuffer(dAtA[:size])
|
||||
}
|
||||
|
||||
func (m *Message_Peer) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
||||
i := len(dAtA)
|
||||
_ = i
|
||||
var l int
|
||||
_ = l
|
||||
if m.XXX_unrecognized != nil {
|
||||
i -= len(m.XXX_unrecognized)
|
||||
copy(dAtA[i:], m.XXX_unrecognized)
|
||||
}
|
||||
if m.Connection != 0 {
|
||||
i = encodeVarintDht(dAtA, i, uint64(m.Connection))
|
||||
i--
|
||||
dAtA[i] = 0x18
|
||||
}
|
||||
if len(m.Addrs) > 0 {
|
||||
for iNdEx := len(m.Addrs) - 1; iNdEx >= 0; iNdEx-- {
|
||||
i -= len(m.Addrs[iNdEx])
|
||||
copy(dAtA[i:], m.Addrs[iNdEx])
|
||||
i = encodeVarintDht(dAtA, i, uint64(len(m.Addrs[iNdEx])))
|
||||
i--
|
||||
dAtA[i] = 0x12
|
||||
}
|
||||
}
|
||||
{
|
||||
size := m.Id.Size()
|
||||
i -= size
|
||||
if _, err := m.Id.MarshalTo(dAtA[i:]); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i = encodeVarintDht(dAtA, i, uint64(size))
|
||||
}
|
||||
i--
|
||||
dAtA[i] = 0xa
|
||||
return len(dAtA) - i, nil
|
||||
}
|
||||
|
||||
func encodeVarintDht(dAtA []byte, offset int, v uint64) int {
|
||||
offset -= sovDht(v)
|
||||
base := offset
|
||||
for v >= 1<<7 {
|
||||
dAtA[offset] = uint8(v&0x7f | 0x80)
|
||||
v >>= 7
|
||||
offset++
|
||||
}
|
||||
dAtA[offset] = uint8(v)
|
||||
return base
|
||||
}
|
||||
func (m *Message) Size() (n int) {
|
||||
if m == nil {
|
||||
return 0
|
||||
}
|
||||
var l int
|
||||
_ = l
|
||||
if m.Type != 0 {
|
||||
n += 1 + sovDht(uint64(m.Type))
|
||||
}
|
||||
l = len(m.Key)
|
||||
if l > 0 {
|
||||
n += 1 + l + sovDht(uint64(l))
|
||||
}
|
||||
if m.Record != nil {
|
||||
l = m.Record.Size()
|
||||
n += 1 + l + sovDht(uint64(l))
|
||||
}
|
||||
if len(m.CloserPeers) > 0 {
|
||||
for _, e := range m.CloserPeers {
|
||||
l = e.Size()
|
||||
n += 1 + l + sovDht(uint64(l))
|
||||
}
|
||||
}
|
||||
if len(m.ProviderPeers) > 0 {
|
||||
for _, e := range m.ProviderPeers {
|
||||
l = e.Size()
|
||||
n += 1 + l + sovDht(uint64(l))
|
||||
}
|
||||
}
|
||||
if m.ClusterLevelRaw != 0 {
|
||||
n += 1 + sovDht(uint64(m.ClusterLevelRaw))
|
||||
}
|
||||
if m.XXX_unrecognized != nil {
|
||||
n += len(m.XXX_unrecognized)
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (m *Message_Peer) Size() (n int) {
|
||||
if m == nil {
|
||||
return 0
|
||||
}
|
||||
var l int
|
||||
_ = l
|
||||
l = m.Id.Size()
|
||||
n += 1 + l + sovDht(uint64(l))
|
||||
if len(m.Addrs) > 0 {
|
||||
for _, b := range m.Addrs {
|
||||
l = len(b)
|
||||
n += 1 + l + sovDht(uint64(l))
|
||||
}
|
||||
}
|
||||
if m.Connection != 0 {
|
||||
n += 1 + sovDht(uint64(m.Connection))
|
||||
}
|
||||
if m.XXX_unrecognized != nil {
|
||||
n += len(m.XXX_unrecognized)
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func sovDht(x uint64) (n int) {
|
||||
return (math_bits.Len64(x|1) + 6) / 7
|
||||
}
|
||||
func sozDht(x uint64) (n int) {
|
||||
return sovDht(uint64((x << 1) ^ uint64((int64(x) >> 63))))
|
||||
}
|
||||
func (m *Message) Unmarshal(dAtA []byte) error {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
for iNdEx < l {
|
||||
preIndex := iNdEx
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowDht
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
fieldNum := int32(wire >> 3)
|
||||
wireType := int(wire & 0x7)
|
||||
if wireType == 4 {
|
||||
return fmt.Errorf("proto: Message: wiretype end group for non-group")
|
||||
}
|
||||
if fieldNum <= 0 {
|
||||
return fmt.Errorf("proto: Message: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||
}
|
||||
switch fieldNum {
|
||||
case 1:
|
||||
if wireType != 0 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
|
||||
}
|
||||
m.Type = 0
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowDht
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
m.Type |= Message_MessageType(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
case 2:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
|
||||
}
|
||||
var byteLen int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowDht
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
byteLen |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if byteLen < 0 {
|
||||
return ErrInvalidLengthDht
|
||||
}
|
||||
postIndex := iNdEx + byteLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthDht
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
|
||||
if m.Key == nil {
|
||||
m.Key = []byte{}
|
||||
}
|
||||
iNdEx = postIndex
|
||||
case 3:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Record", wireType)
|
||||
}
|
||||
var msglen int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowDht
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
msglen |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if msglen < 0 {
|
||||
return ErrInvalidLengthDht
|
||||
}
|
||||
postIndex := iNdEx + msglen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthDht
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
if m.Record == nil {
|
||||
m.Record = &pb.Record{}
|
||||
}
|
||||
if err := m.Record.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
iNdEx = postIndex
|
||||
case 8:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field CloserPeers", wireType)
|
||||
}
|
||||
var msglen int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowDht
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
msglen |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if msglen < 0 {
|
||||
return ErrInvalidLengthDht
|
||||
}
|
||||
postIndex := iNdEx + msglen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthDht
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.CloserPeers = append(m.CloserPeers, Message_Peer{})
|
||||
if err := m.CloserPeers[len(m.CloserPeers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
iNdEx = postIndex
|
||||
case 9:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field ProviderPeers", wireType)
|
||||
}
|
||||
var msglen int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowDht
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
msglen |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if msglen < 0 {
|
||||
return ErrInvalidLengthDht
|
||||
}
|
||||
postIndex := iNdEx + msglen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthDht
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.ProviderPeers = append(m.ProviderPeers, Message_Peer{})
|
||||
if err := m.ProviderPeers[len(m.ProviderPeers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
iNdEx = postIndex
|
||||
case 10:
|
||||
if wireType != 0 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field ClusterLevelRaw", wireType)
|
||||
}
|
||||
m.ClusterLevelRaw = 0
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowDht
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
m.ClusterLevelRaw |= int32(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipDht(dAtA[iNdEx:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if (skippy < 0) || (iNdEx+skippy) < 0 {
|
||||
return ErrInvalidLengthDht
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
|
||||
if iNdEx > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func (m *Message_Peer) Unmarshal(dAtA []byte) error {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
for iNdEx < l {
|
||||
preIndex := iNdEx
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowDht
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
fieldNum := int32(wire >> 3)
|
||||
wireType := int(wire & 0x7)
|
||||
if wireType == 4 {
|
||||
return fmt.Errorf("proto: Peer: wiretype end group for non-group")
|
||||
}
|
||||
if fieldNum <= 0 {
|
||||
return fmt.Errorf("proto: Peer: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||
}
|
||||
switch fieldNum {
|
||||
case 1:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType)
|
||||
}
|
||||
var byteLen int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowDht
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
byteLen |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if byteLen < 0 {
|
||||
return ErrInvalidLengthDht
|
||||
}
|
||||
postIndex := iNdEx + byteLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthDht
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
if err := m.Id.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
iNdEx = postIndex
|
||||
case 2:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Addrs", wireType)
|
||||
}
|
||||
var byteLen int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowDht
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
byteLen |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if byteLen < 0 {
|
||||
return ErrInvalidLengthDht
|
||||
}
|
||||
postIndex := iNdEx + byteLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthDht
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.Addrs = append(m.Addrs, make([]byte, postIndex-iNdEx))
|
||||
copy(m.Addrs[len(m.Addrs)-1], dAtA[iNdEx:postIndex])
|
||||
iNdEx = postIndex
|
||||
case 3:
|
||||
if wireType != 0 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Connection", wireType)
|
||||
}
|
||||
m.Connection = 0
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowDht
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
m.Connection |= Message_ConnectionType(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipDht(dAtA[iNdEx:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if (skippy < 0) || (iNdEx+skippy) < 0 {
|
||||
return ErrInvalidLengthDht
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
|
||||
if iNdEx > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func skipDht(dAtA []byte) (n int, err error) {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
depth := 0
|
||||
for iNdEx < l {
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowDht
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
wireType := int(wire & 0x7)
|
||||
switch wireType {
|
||||
case 0:
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowDht
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx++
|
||||
if dAtA[iNdEx-1] < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
case 1:
|
||||
iNdEx += 8
|
||||
case 2:
|
||||
var length int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowDht
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
length |= (int(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if length < 0 {
|
||||
return 0, ErrInvalidLengthDht
|
||||
}
|
||||
iNdEx += length
|
||||
case 3:
|
||||
depth++
|
||||
case 4:
|
||||
if depth == 0 {
|
||||
return 0, ErrUnexpectedEndOfGroupDht
|
||||
}
|
||||
depth--
|
||||
case 5:
|
||||
iNdEx += 4
|
||||
default:
|
||||
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
|
||||
}
|
||||
if iNdEx < 0 {
|
||||
return 0, ErrInvalidLengthDht
|
||||
}
|
||||
if depth == 0 {
|
||||
return iNdEx, nil
|
||||
}
|
||||
}
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
|
||||
var (
|
||||
ErrInvalidLengthDht = fmt.Errorf("proto: negative length found during unmarshaling")
|
||||
ErrIntOverflowDht = fmt.Errorf("proto: integer overflow")
|
||||
ErrUnexpectedEndOfGroupDht = fmt.Errorf("proto: unexpected end of group")
|
||||
)
|
72
go-libp2p-kad-dht/pb/dht.proto
Normal file
@ -0,0 +1,72 @@
|
||||
// In order to re-generate the golang packages for `Message` you will need...
|
||||
// 1. Protobuf binary (tested with protoc 3.0.0). - https://github.com/gogo/protobuf/releases
|
||||
// 2. Gogo Protobuf (tested with gogo 0.3). - https://github.com/gogo/protobuf
|
||||
// 3. To have cloned `libp2p/go-libp2p-{record,kad-dht}` under the same directory.
|
||||
// Now from `libp2p/go-libp2p-kad-dht/pb` you can run...
|
||||
// `protoc --gogo_out=. --proto_path=../../go-libp2p-record/pb/ --proto_path=./ dht.proto`
|
||||
|
||||
syntax = "proto3";
|
||||
package dht.pb;
|
||||
|
||||
import "github.com/libp2p/go-libp2p-record/pb/record.proto";
|
||||
import "github.com/gogo/protobuf/gogoproto/gogo.proto";
|
||||
|
||||
message Message {
|
||||
enum MessageType {
|
||||
PUT_VALUE = 0;
|
||||
GET_VALUE = 1;
|
||||
ADD_PROVIDER = 2;
|
||||
GET_PROVIDERS = 3;
|
||||
FIND_NODE = 4;
|
||||
PING = 5;
|
||||
}
|
||||
|
||||
enum ConnectionType {
|
||||
// sender does not have a connection to peer, and no extra information (default)
|
||||
NOT_CONNECTED = 0;
|
||||
|
||||
// sender has a live connection to peer
|
||||
CONNECTED = 1;
|
||||
|
||||
// sender recently connected to peer
|
||||
CAN_CONNECT = 2;
|
||||
|
||||
// sender recently tried to connect to peer repeatedly but failed to connect
|
||||
// ("try" here is loose, but this should signal "made strong effort, failed")
|
||||
CANNOT_CONNECT = 3;
|
||||
}
|
||||
|
||||
message Peer {
|
||||
// ID of a given peer.
|
||||
bytes id = 1 [(gogoproto.customtype) = "byteString", (gogoproto.nullable) = false];
|
||||
|
||||
// multiaddrs for a given peer
|
||||
repeated bytes addrs = 2;
|
||||
|
||||
// used to signal the sender's connection capabilities to the peer
|
||||
ConnectionType connection = 3;
|
||||
}
|
||||
|
||||
// defines what type of message it is.
|
||||
MessageType type = 1;
|
||||
|
||||
// defines what coral cluster level this query/response belongs to.
|
||||
// in case we want to implement coral's cluster rings in the future.
|
||||
int32 clusterLevelRaw = 10;
|
||||
|
||||
// Used to specify the key associated with this message.
|
||||
// PUT_VALUE, GET_VALUE, ADD_PROVIDER, GET_PROVIDERS
|
||||
bytes key = 2;
|
||||
|
||||
// Used to return a value
|
||||
// PUT_VALUE, GET_VALUE
|
||||
record.pb.Record record = 3;
|
||||
|
||||
// Used to return peers closer to a key in a query
|
||||
// GET_VALUE, GET_PROVIDERS, FIND_NODE
|
||||
repeated Peer closerPeers = 8 [(gogoproto.nullable) = false];
|
||||
|
||||
// Used to return Providers
|
||||
// GET_VALUE, ADD_PROVIDER, GET_PROVIDERS
|
||||
repeated Peer providerPeers = 9 [(gogoproto.nullable) = false];
|
||||
}
|
171
go-libp2p-kad-dht/pb/message.go
Normal file
@ -0,0 +1,171 @@
|
||||
package dht_pb
|
||||
|
||||
import (
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
|
||||
logging "github.com/ipfs/go-log/v2"
|
||||
ma "github.com/multiformats/go-multiaddr"
|
||||
)
|
||||
|
||||
var log = logging.Logger("dht.pb")
|
||||
|
||||
type PeerRoutingInfo struct {
|
||||
peer.AddrInfo
|
||||
network.Connectedness
|
||||
}
|
||||
|
||||
// NewMessage constructs a new dht message with given type, key, and level
|
||||
func NewMessage(typ Message_MessageType, key []byte, level int) *Message {
|
||||
m := &Message{
|
||||
Type: typ,
|
||||
Key: key,
|
||||
}
|
||||
m.SetClusterLevel(level)
|
||||
return m
|
||||
}
|
||||
|
||||
func peerRoutingInfoToPBPeer(p PeerRoutingInfo) Message_Peer {
|
||||
var pbp Message_Peer
|
||||
|
||||
pbp.Addrs = make([][]byte, len(p.Addrs))
|
||||
for i, maddr := range p.Addrs {
|
||||
pbp.Addrs[i] = maddr.Bytes() // Bytes, not String. Compressed.
|
||||
}
|
||||
pbp.Id = byteString(p.ID)
|
||||
pbp.Connection = ConnectionType(p.Connectedness)
|
||||
return pbp
|
||||
}
|
||||
|
||||
func peerInfoToPBPeer(p peer.AddrInfo) Message_Peer {
|
||||
var pbp Message_Peer
|
||||
|
||||
pbp.Addrs = make([][]byte, len(p.Addrs))
|
||||
for i, maddr := range p.Addrs {
|
||||
pbp.Addrs[i] = maddr.Bytes() // Bytes, not String. Compressed.
|
||||
}
|
||||
pbp.Id = byteString(p.ID)
|
||||
return pbp
|
||||
}
|
||||
|
||||
// PBPeerToPeerInfo turns a Message_Peer into its peer.AddrInfo counterpart
|
||||
func PBPeerToPeerInfo(pbp Message_Peer) peer.AddrInfo {
|
||||
return peer.AddrInfo{
|
||||
ID: peer.ID(pbp.Id),
|
||||
Addrs: pbp.Addresses(),
|
||||
}
|
||||
}
|
||||
|
||||
// RawPeerInfosToPBPeers converts a slice of AddrInfos into a slice of Message_Peers,
// ready to go out on the wire.
|
||||
func RawPeerInfosToPBPeers(peers []peer.AddrInfo) []Message_Peer {
|
||||
pbpeers := make([]Message_Peer, len(peers))
|
||||
for i, p := range peers {
|
||||
pbpeers[i] = peerInfoToPBPeer(p)
|
||||
}
|
||||
return pbpeers
|
||||
}
|
||||
|
||||
// PeerInfosToPBPeers converts given []peer.AddrInfo into a set of []Message_Peer,
// which can be written to a message and sent out. The key thing this function
// does (in addition to RawPeerInfosToPBPeers) is set the ConnectionType with
// information from the given network.Network.
|
||||
func PeerInfosToPBPeers(n network.Network, peers []peer.AddrInfo) []Message_Peer {
|
||||
pbps := RawPeerInfosToPBPeers(peers)
|
||||
for i, pbp := range pbps {
|
||||
c := ConnectionType(n.Connectedness(peers[i].ID))
|
||||
pbp.Connection = c
|
||||
}
|
||||
return pbps
|
||||
}
|
||||
|
||||
func PeerRoutingInfosToPBPeers(peers []PeerRoutingInfo) []Message_Peer {
|
||||
pbpeers := make([]Message_Peer, len(peers))
|
||||
for i, p := range peers {
|
||||
pbpeers[i] = peerRoutingInfoToPBPeer(p)
|
||||
}
|
||||
return pbpeers
|
||||
}
|
||||
|
||||
// PBPeersToPeerInfos converts given []*Message_Peer into []peer.AddrInfo
|
||||
// Invalid addresses will be silently omitted.
|
||||
func PBPeersToPeerInfos(pbps []Message_Peer) []*peer.AddrInfo {
|
||||
peers := make([]*peer.AddrInfo, 0, len(pbps))
|
||||
for _, pbp := range pbps {
|
||||
ai := PBPeerToPeerInfo(pbp)
|
||||
peers = append(peers, &ai)
|
||||
}
|
||||
return peers
|
||||
}
|
||||
|
||||
// Addresses returns the multiaddrs associated with the Message_Peer entry
|
||||
func (m *Message_Peer) Addresses() []ma.Multiaddr {
|
||||
if m == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
maddrs := make([]ma.Multiaddr, 0, len(m.Addrs))
|
||||
for _, addr := range m.Addrs {
|
||||
maddr, err := ma.NewMultiaddrBytes(addr)
|
||||
if err != nil {
|
||||
log.Debugw("error decoding multiaddr for peer", "peer", peer.ID(m.Id), "error", err)
|
||||
continue
|
||||
}
|
||||
|
||||
maddrs = append(maddrs, maddr)
|
||||
}
|
||||
return maddrs
|
||||
}
|
||||
|
||||
// GetClusterLevel gets and adjusts the cluster level on the message.
|
||||
// a +/- 1 adjustment is needed to distinguish a valid first level (1) and
|
||||
// default "no value" protobuf behavior (0)
|
||||
func (m *Message) GetClusterLevel() int {
|
||||
level := m.GetClusterLevelRaw() - 1
|
||||
if level < 0 {
|
||||
return 0
|
||||
}
|
||||
return int(level)
|
||||
}
|
||||
|
||||
// SetClusterLevel adjusts and sets the cluster level on the message.
|
||||
// a +/- 1 adjustment is needed to distinguish a valid first level (1) and
|
||||
// default "no value" protobuf behavior (0)
|
||||
func (m *Message) SetClusterLevel(level int) {
|
||||
lvl := int32(level)
|
||||
m.ClusterLevelRaw = lvl + 1
|
||||
}
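// Illustrative sketch, not part of the original diff: the +/- 1 shift above keeps a real
// level 0 distinguishable from the protobuf zero value of ClusterLevelRaw.
//
//	m := NewMessage(Message_GET_VALUE, []byte("key"), 0) // SetClusterLevel(0) stores 1
//	_ = m.GetClusterLevel()                              // reads back 0
//	empty := &Message{}                                  // ClusterLevelRaw is 0 ("no value")
//	_ = empty.GetClusterLevel()                          // clamped to 0 rather than -1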
|
||||
|
||||
// ConnectionType returns a Message_ConnectionType associated with the
|
||||
// network.Connectedness.
|
||||
func ConnectionType(c network.Connectedness) Message_ConnectionType {
|
||||
switch c {
|
||||
default:
|
||||
return Message_NOT_CONNECTED
|
||||
case network.NotConnected:
|
||||
return Message_NOT_CONNECTED
|
||||
case network.Connected:
|
||||
return Message_CONNECTED
|
||||
case network.CanConnect:
|
||||
return Message_CAN_CONNECT
|
||||
case network.CannotConnect:
|
||||
return Message_CANNOT_CONNECT
|
||||
}
|
||||
}
|
||||
|
||||
// Connectedness returns a network.Connectedness associated with the
|
||||
// Message_ConnectionType.
|
||||
func Connectedness(c Message_ConnectionType) network.Connectedness {
|
||||
switch c {
|
||||
default:
|
||||
return network.NotConnected
|
||||
case Message_NOT_CONNECTED:
|
||||
return network.NotConnected
|
||||
case Message_CONNECTED:
|
||||
return network.Connected
|
||||
case Message_CAN_CONNECT:
|
||||
return network.CanConnect
|
||||
case Message_CANNOT_CONNECT:
|
||||
return network.CannotConnect
|
||||
}
|
||||
}
|
15	go-libp2p-kad-dht/pb/message_test.go	Normal file
@ -0,0 +1,15 @@
|
||||
package dht_pb

import (
	"testing"
)

func TestBadAddrsDontReturnNil(t *testing.T) {
	mp := new(Message_Peer)
	mp.Addrs = [][]byte{[]byte("NOT A VALID MULTIADDR")}

	addrs := mp.Addresses()
	if len(addrs) > 0 {
		t.Fatal("shouldn't have any multiaddrs")
	}
}
|
261	go-libp2p-kad-dht/pb/protocol_messenger.go	Normal file
@ -0,0 +1,261 @@
|
||||
package dht_pb
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
logging "github.com/ipfs/go-log/v2"
|
||||
recpb "github.com/libp2p/go-libp2p-record/pb"
|
||||
"github.com/libp2p/go-libp2p/core/host"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/multiformats/go-multihash"
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/codes"
|
||||
|
||||
"github.com/libp2p/go-libp2p-kad-dht/internal"
|
||||
)
|
||||
|
||||
var logger = logging.Logger("dht")
|
||||
|
||||
// ProtocolMessenger can be used for sending DHT messages to peers and processing their responses.
|
||||
// This decouples the wire protocol format from both the DHT protocol implementation and from the implementation of the
|
||||
// routing.Routing interface.
|
||||
//
|
||||
// Note: the ProtocolMessenger's MessageSender still needs to deal with some wire protocol details such as using
|
||||
// varint-delineated protobufs
|
||||
type ProtocolMessenger struct {
|
||||
m MessageSender
|
||||
}
|
||||
|
||||
type ProtocolMessengerOption func(*ProtocolMessenger) error
|
||||
|
||||
// NewProtocolMessenger creates a new ProtocolMessenger that is used for sending DHT messages to peers and processing
|
||||
// their responses.
|
||||
func NewProtocolMessenger(msgSender MessageSender, opts ...ProtocolMessengerOption) (*ProtocolMessenger, error) {
|
||||
pm := &ProtocolMessenger{
|
||||
m: msgSender,
|
||||
}
|
||||
|
||||
for _, o := range opts {
|
||||
if err := o(pm); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return pm, nil
|
||||
}
|
||||
|
||||
type MessageSenderWithDisconnect interface {
|
||||
MessageSender
|
||||
|
||||
OnDisconnect(context.Context, peer.ID)
|
||||
}
|
||||
|
||||
// MessageSender handles sending wire protocol messages to a given peer
|
||||
type MessageSender interface {
|
||||
// SendRequest sends a peer a message and waits for its response
|
||||
SendRequest(ctx context.Context, p peer.ID, pmes *Message) (*Message, error)
|
||||
// SendMessage sends a peer a message without waiting on a response
|
||||
SendMessage(ctx context.Context, p peer.ID, pmes *Message) error
|
||||
}
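// Illustrative sketch, not part of the original diff: a minimal in-process MessageSender for
// tests. The loopbackSender name and its handler field are hypothetical; a real implementation
// would write varint-delimited protobufs over a libp2p stream instead.
//
//	type loopbackSender struct {
//		handler func(context.Context, peer.ID, *Message) (*Message, error)
//	}
//
//	func (l *loopbackSender) SendRequest(ctx context.Context, p peer.ID, pmes *Message) (*Message, error) {
//		return l.handler(ctx, p, pmes)
//	}
//
//	func (l *loopbackSender) SendMessage(ctx context.Context, p peer.ID, pmes *Message) error {
//		_, err := l.handler(ctx, p, pmes)
//		return err
//	}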
|
||||
|
||||
// PutValue asks a peer to store the given key/value pair.
|
||||
func (pm *ProtocolMessenger) PutValue(ctx context.Context, p peer.ID, rec *recpb.Record) (err error) {
|
||||
ctx, span := internal.StartSpan(ctx, "ProtocolMessenger.PutValue")
|
||||
defer span.End()
|
||||
if span.IsRecording() {
|
||||
span.SetAttributes(attribute.Stringer("to", p), attribute.Stringer("record", rec))
|
||||
defer func() {
|
||||
if err != nil {
|
||||
span.SetStatus(codes.Error, err.Error())
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
pmes := NewMessage(Message_PUT_VALUE, rec.Key, 0)
|
||||
pmes.Record = rec
|
||||
rpmes, err := pm.m.SendRequest(ctx, p, pmes)
|
||||
if err != nil {
|
||||
logger.Debugw("failed to put value to peer", "to", p, "key", internal.LoggableRecordKeyBytes(rec.Key), "error", err)
|
||||
return err
|
||||
}
|
||||
|
||||
if !bytes.Equal(rpmes.GetRecord().Value, pmes.GetRecord().Value) {
|
||||
const errStr = "value not put correctly"
|
||||
logger.Infow(errStr, "put-message", pmes, "get-message", rpmes)
|
||||
return errors.New(errStr)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetValue asks a peer for the value corresponding to the given key. Also returns the K closest peers to the key
|
||||
// as described in GetClosestPeers.
|
||||
func (pm *ProtocolMessenger) GetValue(ctx context.Context, p peer.ID, key string) (record *recpb.Record, closerPeers []*peer.AddrInfo, err error) {
|
||||
ctx, span := internal.StartSpan(ctx, "ProtocolMessenger.GetValue")
|
||||
defer span.End()
|
||||
if span.IsRecording() {
|
||||
span.SetAttributes(attribute.Stringer("to", p), internal.KeyAsAttribute("key", key))
|
||||
defer func() {
|
||||
if err != nil {
|
||||
span.SetStatus(codes.Error, err.Error())
|
||||
} else {
|
||||
peers := make([]string, len(closerPeers))
|
||||
for i, v := range closerPeers {
|
||||
peers[i] = v.String()
|
||||
}
|
||||
span.SetAttributes(
|
||||
attribute.Stringer("record", record),
|
||||
attribute.StringSlice("closestPeers", peers),
|
||||
)
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
pmes := NewMessage(Message_GET_VALUE, []byte(key), 0)
|
||||
respMsg, err := pm.m.SendRequest(ctx, p, pmes)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
// Perhaps we were given closer peers
|
||||
peers := PBPeersToPeerInfos(respMsg.GetCloserPeers())
|
||||
|
||||
if rec := respMsg.GetRecord(); rec != nil {
|
||||
// Success! We were given the value
|
||||
logger.Debug("got value")
|
||||
|
||||
// Check that record matches the one we are looking for (validation of the record does not happen here)
|
||||
if !bytes.Equal([]byte(key), rec.GetKey()) {
|
||||
logger.Debug("received incorrect record")
|
||||
return nil, nil, internal.ErrIncorrectRecord
|
||||
}
|
||||
|
||||
return rec, peers, err
|
||||
}
|
||||
|
||||
return nil, peers, nil
|
||||
}
|
||||
|
||||
// GetClosestPeers asks a peer to return the K (a DHT-wide parameter) DHT server peers closest in XOR space to the id
|
||||
// Note: If the peer happens to know another peer whose peerID exactly matches the given id it will return that peer
|
||||
// even if that peer is not a DHT server node.
|
||||
func (pm *ProtocolMessenger) GetClosestPeers(ctx context.Context, p peer.ID, id peer.ID) (closerPeers []*peer.AddrInfo, err error) {
|
||||
ctx, span := internal.StartSpan(ctx, "ProtocolMessenger.GetClosestPeers")
|
||||
defer span.End()
|
||||
if span.IsRecording() {
|
||||
span.SetAttributes(attribute.Stringer("to", p), attribute.Stringer("key", id))
|
||||
defer func() {
|
||||
if err != nil {
|
||||
span.SetStatus(codes.Error, err.Error())
|
||||
} else {
|
||||
peers := make([]string, len(closerPeers))
|
||||
for i, v := range closerPeers {
|
||||
peers[i] = v.String()
|
||||
}
|
||||
span.SetAttributes(attribute.StringSlice("peers", peers))
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
pmes := NewMessage(Message_FIND_NODE, []byte(id), 0)
|
||||
respMsg, err := pm.m.SendRequest(ctx, p, pmes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
peers := PBPeersToPeerInfos(respMsg.GetCloserPeers())
|
||||
return peers, nil
|
||||
}
|
||||
|
||||
// PutProvider is deprecated, please use [ProtocolMessenger.PutProviderAddrs].
|
||||
func (pm *ProtocolMessenger) PutProvider(ctx context.Context, p peer.ID, key multihash.Multihash, h host.Host) error {
|
||||
return pm.PutProviderAddrs(ctx, p, key, peer.AddrInfo{
|
||||
ID: h.ID(),
|
||||
Addrs: h.Addrs(),
|
||||
})
|
||||
}
|
||||
|
||||
// PutProviderAddrs asks a peer to store that we are a provider for the given key.
|
||||
func (pm *ProtocolMessenger) PutProviderAddrs(ctx context.Context, p peer.ID, key multihash.Multihash, self peer.AddrInfo) (err error) {
|
||||
ctx, span := internal.StartSpan(ctx, "ProtocolMessenger.PutProvider")
|
||||
defer span.End()
|
||||
if span.IsRecording() {
|
||||
span.SetAttributes(attribute.Stringer("to", p), attribute.Stringer("key", key))
|
||||
defer func() {
|
||||
if err != nil {
|
||||
span.SetStatus(codes.Error, err.Error())
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// TODO: We may want to limit the type of addresses in our provider records
|
||||
// For example, in a WAN-only DHT prohibit sharing non-WAN addresses (e.g. 192.168.0.100)
|
||||
if len(self.Addrs) < 1 {
|
||||
return fmt.Errorf("no known addresses for self, cannot put provider")
|
||||
}
|
||||
|
||||
pmes := NewMessage(Message_ADD_PROVIDER, key, 0)
|
||||
pmes.ProviderPeers = RawPeerInfosToPBPeers([]peer.AddrInfo{self})
|
||||
|
||||
return pm.m.SendMessage(ctx, p, pmes)
|
||||
}
|
||||
|
||||
// GetProviders asks a peer for the providers it knows of for a given key. Also returns the K closest peers to the key
|
||||
// as described in GetClosestPeers.
|
||||
func (pm *ProtocolMessenger) GetProviders(ctx context.Context, p peer.ID, key multihash.Multihash) (provs []*peer.AddrInfo, closerPeers []*peer.AddrInfo, err error) {
|
||||
ctx, span := internal.StartSpan(ctx, "ProtocolMessenger.GetProviders")
|
||||
defer span.End()
|
||||
if span.IsRecording() {
|
||||
span.SetAttributes(attribute.Stringer("to", p), attribute.Stringer("key", key))
|
||||
defer func() {
|
||||
if err != nil {
|
||||
span.SetStatus(codes.Error, err.Error())
|
||||
} else {
|
||||
provsStr := make([]string, len(provs))
|
||||
for i, v := range provs {
|
||||
provsStr[i] = v.String()
|
||||
}
|
||||
closerPeersStr := make([]string, len(closerPeers))
for i, v := range closerPeers {
closerPeersStr[i] = v.String()
|
||||
}
|
||||
span.SetAttributes(attribute.StringSlice("provs", provsStr), attribute.StringSlice("closestPeers", closerPeersStr))
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
pmes := NewMessage(Message_GET_PROVIDERS, key, 0)
|
||||
respMsg, err := pm.m.SendRequest(ctx, p, pmes)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
provs = PBPeersToPeerInfos(respMsg.GetProviderPeers())
|
||||
closerPeers = PBPeersToPeerInfos(respMsg.GetCloserPeers())
|
||||
return provs, closerPeers, nil
|
||||
}
|
||||
|
||||
// Ping sends a ping message to the passed peer and waits for a response.
|
||||
func (pm *ProtocolMessenger) Ping(ctx context.Context, p peer.ID) (err error) {
|
||||
ctx, span := internal.StartSpan(ctx, "ProtocolMessenger.Ping")
|
||||
defer span.End()
|
||||
if span.IsRecording() {
|
||||
span.SetAttributes(attribute.Stringer("to", p))
|
||||
defer func() {
|
||||
if err != nil {
|
||||
span.SetStatus(codes.Error, err.Error())
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
req := NewMessage(Message_PING, nil, 0)
|
||||
resp, err := pm.m.SendRequest(ctx, p, req)
|
||||
if err != nil {
|
||||
return fmt.Errorf("sending request: %w", err)
|
||||
}
|
||||
if resp.Type != Message_PING {
|
||||
return fmt.Errorf("got unexpected response type: %v", resp.Type)
|
||||
}
|
||||
return nil
|
||||
}
|
12	go-libp2p-kad-dht/protocol.go	Normal file
@ -0,0 +1,12 @@
|
||||
package dht

import (
	"github.com/libp2p/go-libp2p/core/protocol"
)

var (
	// ProtocolDHT is the default DHT protocol.
	ProtocolDHT protocol.ID = "/ipfs/kad/1.0.0"
	// DefaultProtocols spoken by the DHT.
	DefaultProtocols = []protocol.ID{ProtocolDHT}
)
|
34	go-libp2p-kad-dht/providers/provider_set.go	Normal file
@ -0,0 +1,34 @@
|
||||
package providers

import (
	"time"

	"github.com/libp2p/go-libp2p/core/peer"
)

// A providerSet has the list of providers and the time that they were added.
// It is used as an intermediary data struct between what is stored in the datastore
// and the list of providers that get passed to the consumer of a .GetProviders call.
type providerSet struct {
	providers []peer.ID
	set       map[peer.ID]time.Time
}

func newProviderSet() *providerSet {
	return &providerSet{
		set: make(map[peer.ID]time.Time),
	}
}

func (ps *providerSet) Add(p peer.ID) {
	ps.setVal(p, time.Now())
}

func (ps *providerSet) setVal(p peer.ID, t time.Time) {
	_, found := ps.set[p]
	if !found {
		ps.providers = append(ps.providers, p)
	}

	ps.set[p] = t
}
|
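// Illustrative sketch, not part of the original diff: providers keeps insertion order while
// set deduplicates and tracks the last-seen time per peer.
//
//	ps := newProviderSet()
//	ps.Add(peer.ID("a"))
//	ps.Add(peer.ID("b"))
//	ps.Add(peer.ID("a")) // refreshes the timestamp, adds no duplicate entry
//	_ = len(ps.providers) // 2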
412	go-libp2p-kad-dht/providers/providers_manager.go	Normal file
@ -0,0 +1,412 @@
|
||||
package providers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
lru "github.com/hashicorp/golang-lru/simplelru"
|
||||
ds "github.com/ipfs/go-datastore"
|
||||
"github.com/ipfs/go-datastore/autobatch"
|
||||
dsq "github.com/ipfs/go-datastore/query"
|
||||
logging "github.com/ipfs/go-log/v2"
|
||||
"github.com/libp2p/go-libp2p-kad-dht/internal"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/libp2p/go-libp2p/core/peerstore"
|
||||
peerstoreImpl "github.com/libp2p/go-libp2p/p2p/host/peerstore"
|
||||
"github.com/multiformats/go-base32"
|
||||
)
|
||||
|
||||
const (
|
||||
// ProvidersKeyPrefix is the prefix/namespace for ALL provider record
|
||||
// keys stored in the data store.
|
||||
ProvidersKeyPrefix = "/providers/"
|
||||
|
||||
// ProviderAddrTTL is the TTL to keep the multi addresses of provider
|
||||
// peers around. Those addresses are returned alongside provider records. After
|
||||
// it expires, the returned records will require an extra lookup, to
|
||||
// find the multiaddress associated with the returned peer id.
|
||||
ProviderAddrTTL = 24 * time.Hour
|
||||
)
|
||||
|
||||
// ProvideValidity is the default time that a Provider Record should last on DHT
|
||||
// This value is also known as Provider Record Expiration Interval.
|
||||
var ProvideValidity = time.Hour * 48
|
||||
var defaultCleanupInterval = time.Hour
|
||||
var lruCacheSize = 256
|
||||
var batchBufferSize = 256
|
||||
var log = logging.Logger("providers")
|
||||
|
||||
// ProviderStore represents a store that associates peers and their addresses to keys.
|
||||
type ProviderStore interface {
|
||||
AddProvider(ctx context.Context, key []byte, prov peer.AddrInfo) error
|
||||
GetProviders(ctx context.Context, key []byte) ([]peer.AddrInfo, error)
|
||||
io.Closer
|
||||
}
|
||||
|
||||
// ProviderManager adds and pulls providers out of the datastore,
|
||||
// caching them in between
|
||||
type ProviderManager struct {
|
||||
self peer.ID
|
||||
// all non-channel fields are meant to be accessed only within
|
||||
// the run method
|
||||
cache lru.LRUCache
|
||||
pstore peerstore.Peerstore
|
||||
dstore *autobatch.Datastore
|
||||
|
||||
newprovs chan *addProv
|
||||
getprovs chan *getProv
|
||||
|
||||
cleanupInterval time.Duration
|
||||
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
wg sync.WaitGroup
|
||||
}
|
||||
|
||||
var _ ProviderStore = (*ProviderManager)(nil)
|
||||
|
||||
// Option is a function that sets a provider manager option.
|
||||
type Option func(*ProviderManager) error
|
||||
|
||||
func (pm *ProviderManager) applyOptions(opts ...Option) error {
|
||||
for i, opt := range opts {
|
||||
if err := opt(pm); err != nil {
|
||||
return fmt.Errorf("provider manager option %d failed: %s", i, err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// CleanupInterval sets the time between GC runs.
|
||||
// Defaults to 1h.
|
||||
func CleanupInterval(d time.Duration) Option {
|
||||
return func(pm *ProviderManager) error {
|
||||
pm.cleanupInterval = d
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// Cache sets the LRU cache implementation.
|
||||
// Defaults to a simple LRU cache.
|
||||
func Cache(c lru.LRUCache) Option {
|
||||
return func(pm *ProviderManager) error {
|
||||
pm.cache = c
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
type addProv struct {
|
||||
ctx context.Context
|
||||
key []byte
|
||||
val peer.ID
|
||||
}
|
||||
|
||||
type getProv struct {
|
||||
ctx context.Context
|
||||
key []byte
|
||||
resp chan []peer.ID
|
||||
}
|
||||
|
||||
// NewProviderManager constructor
|
||||
func NewProviderManager(local peer.ID, ps peerstore.Peerstore, dstore ds.Batching, opts ...Option) (*ProviderManager, error) {
|
||||
pm := new(ProviderManager)
|
||||
pm.self = local
|
||||
pm.getprovs = make(chan *getProv)
|
||||
pm.newprovs = make(chan *addProv)
|
||||
pm.pstore = ps
|
||||
pm.dstore = autobatch.NewAutoBatching(dstore, batchBufferSize)
|
||||
cache, err := lru.NewLRU(lruCacheSize, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
pm.cache = cache
|
||||
pm.cleanupInterval = defaultCleanupInterval
|
||||
if err := pm.applyOptions(opts...); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
pm.ctx, pm.cancel = context.WithCancel(context.Background())
|
||||
pm.run()
|
||||
return pm, nil
|
||||
}
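// Illustrative sketch, not part of the original diff: wiring a ProviderManager with a custom
// GC interval, using the same in-memory peerstore and datastore as the package tests.
//
//	ps, _ := pstoremem.NewPeerstore()
//	dstore := dssync.MutexWrap(ds.NewMapDatastore())
//	pm, err := NewProviderManager(peer.ID("self"), ps, dstore, CleanupInterval(30*time.Minute))
//	if err != nil {
//		// handle the error
//	}
//	defer pm.Close()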
|
||||
|
||||
func (pm *ProviderManager) run() {
|
||||
pm.wg.Add(1)
|
||||
go func() {
|
||||
defer pm.wg.Done()
|
||||
|
||||
var gcQuery dsq.Results
|
||||
gcTimer := time.NewTimer(pm.cleanupInterval)
|
||||
|
||||
defer func() {
|
||||
gcTimer.Stop()
|
||||
if gcQuery != nil {
|
||||
// don't really care if this fails.
|
||||
_ = gcQuery.Close()
|
||||
}
|
||||
if err := pm.dstore.Flush(context.Background()); err != nil {
|
||||
log.Error("failed to flush datastore: ", err)
|
||||
}
|
||||
}()
|
||||
|
||||
var gcQueryRes <-chan dsq.Result
|
||||
var gcSkip map[string]struct{}
|
||||
var gcTime time.Time
|
||||
for {
|
||||
select {
|
||||
case np := <-pm.newprovs:
|
||||
err := pm.addProv(np.ctx, np.key, np.val)
|
||||
if err != nil {
|
||||
log.Error("error adding new providers: ", err)
|
||||
continue
|
||||
}
|
||||
if gcSkip != nil {
|
||||
// we have a gc running, tell it to skip this provider
|
||||
// as we've updated it since the GC started.
|
||||
gcSkip[mkProvKeyFor(np.key, np.val)] = struct{}{}
|
||||
}
|
||||
case gp := <-pm.getprovs:
|
||||
provs, err := pm.getProvidersForKey(gp.ctx, gp.key)
|
||||
if err != nil && err != ds.ErrNotFound {
|
||||
log.Error("error reading providers: ", err)
|
||||
}
|
||||
|
||||
// set the cap so the user can't append to this.
|
||||
gp.resp <- provs[0:len(provs):len(provs)]
|
||||
case res, ok := <-gcQueryRes:
|
||||
if !ok {
|
||||
if err := gcQuery.Close(); err != nil {
|
||||
log.Error("failed to close provider GC query: ", err)
|
||||
}
|
||||
gcTimer.Reset(pm.cleanupInterval)
|
||||
|
||||
// cleanup GC round
|
||||
gcQueryRes = nil
|
||||
gcSkip = nil
|
||||
gcQuery = nil
|
||||
continue
|
||||
}
|
||||
if res.Error != nil {
|
||||
log.Error("got error from GC query: ", res.Error)
|
||||
continue
|
||||
}
|
||||
if _, ok := gcSkip[res.Key]; ok {
|
||||
// We've updated this record since starting the
|
||||
// GC round, skip it.
|
||||
continue
|
||||
}
|
||||
|
||||
// check expiration time
|
||||
t, err := readTimeValue(res.Value)
|
||||
switch {
|
||||
case err != nil:
|
||||
// couldn't parse the time
|
||||
log.Error("parsing providers record from disk: ", err)
|
||||
fallthrough
|
||||
case gcTime.Sub(t) > ProvideValidity:
|
||||
// or expired
|
||||
err = pm.dstore.Delete(pm.ctx, ds.RawKey(res.Key))
|
||||
if err != nil && err != ds.ErrNotFound {
|
||||
log.Error("failed to remove provider record from disk: ", err)
|
||||
}
|
||||
}
|
||||
|
||||
case gcTime = <-gcTimer.C:
|
||||
// You know the wonderful thing about caches? You can
|
||||
// drop them.
|
||||
//
|
||||
// Much faster than GCing.
|
||||
pm.cache.Purge()
|
||||
|
||||
// Now, kick off a GC of the datastore.
|
||||
q, err := pm.dstore.Query(pm.ctx, dsq.Query{
|
||||
Prefix: ProvidersKeyPrefix,
|
||||
})
|
||||
if err != nil {
|
||||
log.Error("provider record GC query failed: ", err)
|
||||
continue
|
||||
}
|
||||
gcQuery = q
|
||||
gcQueryRes = q.Next()
|
||||
gcSkip = make(map[string]struct{})
|
||||
case <-pm.ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
func (pm *ProviderManager) Close() error {
|
||||
pm.cancel()
|
||||
pm.wg.Wait()
|
||||
return nil
|
||||
}
|
||||
|
||||
// AddProvider adds a provider
|
||||
func (pm *ProviderManager) AddProvider(ctx context.Context, k []byte, provInfo peer.AddrInfo) error {
|
||||
ctx, span := internal.StartSpan(ctx, "ProviderManager.AddProvider")
|
||||
defer span.End()
|
||||
|
||||
if provInfo.ID != pm.self { // don't add own addrs.
|
||||
pm.pstore.AddAddrs(provInfo.ID, provInfo.Addrs, ProviderAddrTTL)
|
||||
}
|
||||
prov := &addProv{
|
||||
ctx: ctx,
|
||||
key: k,
|
||||
val: provInfo.ID,
|
||||
}
|
||||
select {
|
||||
case pm.newprovs <- prov:
|
||||
return nil
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
}
|
||||
}
|
||||
|
||||
// addProv updates the cache if needed
|
||||
func (pm *ProviderManager) addProv(ctx context.Context, k []byte, p peer.ID) error {
|
||||
now := time.Now()
|
||||
if provs, ok := pm.cache.Get(string(k)); ok {
|
||||
provs.(*providerSet).setVal(p, now)
|
||||
} // else not cached, just write through
|
||||
|
||||
return writeProviderEntry(ctx, pm.dstore, k, p, now)
|
||||
}
|
||||
|
||||
// writeProviderEntry writes the provider into the datastore
|
||||
func writeProviderEntry(ctx context.Context, dstore ds.Datastore, k []byte, p peer.ID, t time.Time) error {
|
||||
dsk := mkProvKeyFor(k, p)
|
||||
|
||||
buf := make([]byte, 16)
|
||||
n := binary.PutVarint(buf, t.UnixNano())
|
||||
|
||||
return dstore.Put(ctx, ds.NewKey(dsk), buf[:n])
|
||||
}
|
||||
|
||||
func mkProvKeyFor(k []byte, p peer.ID) string {
|
||||
return mkProvKey(k) + "/" + base32.RawStdEncoding.EncodeToString([]byte(p))
|
||||
}
|
||||
|
||||
func mkProvKey(k []byte) string {
|
||||
return ProvidersKeyPrefix + base32.RawStdEncoding.EncodeToString(k)
|
||||
}
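// Illustrative sketch, not part of the original diff: a provider record lives under
// "/providers/<base32(key)>/<base32(peerID)>" and its value is only a varint-encoded
// UnixNano timestamp, which readTimeValue below decodes again. dstore and ctx stand in
// for any ds.Datastore and context.Context.
//
//	dsk := mkProvKeyFor([]byte("some-key"), peer.ID("some-peer"))
//	buf := make([]byte, 16)
//	n := binary.PutVarint(buf, time.Now().UnixNano())
//	_ = dstore.Put(ctx, ds.NewKey(dsk), buf[:n]) // readTimeValue(buf[:n]) recovers the time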
|
||||
|
||||
// GetProviders returns the set of providers for the given key.
|
||||
// This method _does not_ copy the set. Do not modify it.
|
||||
func (pm *ProviderManager) GetProviders(ctx context.Context, k []byte) ([]peer.AddrInfo, error) {
|
||||
ctx, span := internal.StartSpan(ctx, "ProviderManager.GetProviders")
|
||||
defer span.End()
|
||||
|
||||
gp := &getProv{
|
||||
ctx: ctx,
|
||||
key: k,
|
||||
resp: make(chan []peer.ID, 1), // buffered to prevent sender from blocking
|
||||
}
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return nil, ctx.Err()
|
||||
case pm.getprovs <- gp:
|
||||
}
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return nil, ctx.Err()
|
||||
case peers := <-gp.resp:
|
||||
return peerstoreImpl.PeerInfos(pm.pstore, peers), nil
|
||||
}
|
||||
}
|
||||
|
||||
func (pm *ProviderManager) getProvidersForKey(ctx context.Context, k []byte) ([]peer.ID, error) {
|
||||
pset, err := pm.getProviderSetForKey(ctx, k)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return pset.providers, nil
|
||||
}
|
||||
|
||||
// returns the ProviderSet if it already exists in the cache, otherwise loads it from the datastore
|
||||
func (pm *ProviderManager) getProviderSetForKey(ctx context.Context, k []byte) (*providerSet, error) {
|
||||
cached, ok := pm.cache.Get(string(k))
|
||||
if ok {
|
||||
return cached.(*providerSet), nil
|
||||
}
|
||||
|
||||
pset, err := loadProviderSet(ctx, pm.dstore, k)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(pset.providers) > 0 {
|
||||
pm.cache.Add(string(k), pset)
|
||||
}
|
||||
|
||||
return pset, nil
|
||||
}
|
||||
|
||||
// loads the ProviderSet out of the datastore
|
||||
func loadProviderSet(ctx context.Context, dstore ds.Datastore, k []byte) (*providerSet, error) {
|
||||
res, err := dstore.Query(ctx, dsq.Query{Prefix: mkProvKey(k)})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer res.Close()
|
||||
|
||||
now := time.Now()
|
||||
out := newProviderSet()
|
||||
for {
|
||||
e, ok := res.NextSync()
|
||||
if !ok {
|
||||
break
|
||||
}
|
||||
if e.Error != nil {
|
||||
log.Error("got an error: ", e.Error)
|
||||
continue
|
||||
}
|
||||
|
||||
// check expiration time
|
||||
t, err := readTimeValue(e.Value)
|
||||
switch {
|
||||
case err != nil:
|
||||
// couldn't parse the time
|
||||
log.Error("parsing providers record from disk: ", err)
|
||||
fallthrough
|
||||
case now.Sub(t) > ProvideValidity:
|
||||
// or just expired
|
||||
err = dstore.Delete(ctx, ds.RawKey(e.Key))
|
||||
if err != nil && err != ds.ErrNotFound {
|
||||
log.Error("failed to remove provider record from disk: ", err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
lix := strings.LastIndex(e.Key, "/")
|
||||
|
||||
decstr, err := base32.RawStdEncoding.DecodeString(e.Key[lix+1:])
|
||||
if err != nil {
|
||||
log.Error("base32 decoding error: ", err)
|
||||
err = dstore.Delete(ctx, ds.RawKey(e.Key))
|
||||
if err != nil && err != ds.ErrNotFound {
|
||||
log.Error("failed to remove provider record from disk: ", err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
pid := peer.ID(decstr)
|
||||
|
||||
out.setVal(pid, t)
|
||||
}
|
||||
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func readTimeValue(data []byte) (time.Time, error) {
|
||||
nsec, n := binary.Varint(data)
|
||||
if n <= 0 {
|
||||
return time.Time{}, fmt.Errorf("failed to parse time")
|
||||
}
|
||||
|
||||
return time.Unix(0, nsec), nil
|
||||
}
|
366	go-libp2p-kad-dht/providers/providers_manager_test.go	Normal file
@ -0,0 +1,366 @@
|
||||
package providers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/libp2p/go-libp2p/p2p/host/peerstore/pstoremem"
|
||||
|
||||
mh "github.com/multiformats/go-multihash"
|
||||
|
||||
u "github.com/ipfs/boxo/util"
|
||||
ds "github.com/ipfs/go-datastore"
|
||||
dsq "github.com/ipfs/go-datastore/query"
|
||||
dssync "github.com/ipfs/go-datastore/sync"
|
||||
//
|
||||
// used by TestLargeProvidersSet: do not remove
|
||||
// lds "github.com/ipfs/go-ds-leveldb"
|
||||
)
|
||||
|
||||
func TestProviderManager(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
mid := peer.ID("testing")
|
||||
ps, err := pstoremem.NewPeerstore()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
p, err := NewProviderManager(mid, ps, dssync.MutexWrap(ds.NewMapDatastore()))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
a := u.Hash([]byte("test"))
|
||||
p.AddProvider(ctx, a, peer.AddrInfo{ID: peer.ID("testingprovider")})
|
||||
|
||||
// Not cached
|
||||
// TODO verify that cache is empty
|
||||
resp, _ := p.GetProviders(ctx, a)
|
||||
if len(resp) != 1 {
|
||||
t.Fatal("Could not retrieve provider.")
|
||||
}
|
||||
|
||||
// Cached
|
||||
// TODO verify that cache is populated
|
||||
resp, _ = p.GetProviders(ctx, a)
|
||||
if len(resp) != 1 {
|
||||
t.Fatal("Could not retrieve provider.")
|
||||
}
|
||||
|
||||
p.AddProvider(ctx, a, peer.AddrInfo{ID: peer.ID("testingprovider2")})
|
||||
p.AddProvider(ctx, a, peer.AddrInfo{ID: peer.ID("testingprovider3")})
|
||||
// TODO verify that cache is already up to date
|
||||
resp, _ = p.GetProviders(ctx, a)
|
||||
if len(resp) != 3 {
|
||||
t.Fatalf("Should have got 3 providers, got %d", len(resp))
|
||||
}
|
||||
|
||||
p.Close()
|
||||
}
|
||||
|
||||
func TestProvidersDatastore(t *testing.T) {
|
||||
old := lruCacheSize
|
||||
lruCacheSize = 10
|
||||
defer func() { lruCacheSize = old }()
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
mid := peer.ID("testing")
|
||||
ps, err := pstoremem.NewPeerstore()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
p, err := NewProviderManager(mid, ps, dssync.MutexWrap(ds.NewMapDatastore()))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer p.Close()
|
||||
|
||||
friend := peer.ID("friend")
|
||||
var mhs []mh.Multihash
|
||||
for i := 0; i < 100; i++ {
|
||||
h := u.Hash([]byte(fmt.Sprint(i)))
|
||||
mhs = append(mhs, h)
|
||||
p.AddProvider(ctx, h, peer.AddrInfo{ID: friend})
|
||||
}
|
||||
|
||||
for _, c := range mhs {
|
||||
resp, _ := p.GetProviders(ctx, c)
|
||||
if len(resp) != 1 {
|
||||
t.Fatal("Could not retrieve provider.")
|
||||
}
|
||||
if resp[0].ID != friend {
|
||||
t.Fatal("expected provider to be 'friend'")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestProvidersSerialization(t *testing.T) {
|
||||
dstore := dssync.MutexWrap(ds.NewMapDatastore())
|
||||
|
||||
k := u.Hash(([]byte("my key!")))
|
||||
p1 := peer.ID("peer one")
|
||||
p2 := peer.ID("peer two")
|
||||
pt1 := time.Now()
|
||||
pt2 := pt1.Add(time.Hour)
|
||||
|
||||
err := writeProviderEntry(context.Background(), dstore, k, p1, pt1)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
err = writeProviderEntry(context.Background(), dstore, k, p2, pt2)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
pset, err := loadProviderSet(context.Background(), dstore, k)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
lt1, ok := pset.set[p1]
|
||||
if !ok {
|
||||
t.Fatal("failed to load set correctly")
|
||||
}
|
||||
|
||||
if !pt1.Equal(lt1) {
|
||||
t.Fatalf("time wasnt serialized correctly, %v != %v", pt1, lt1)
|
||||
}
|
||||
|
||||
lt2, ok := pset.set[p2]
|
||||
if !ok {
|
||||
t.Fatal("failed to load set correctly")
|
||||
}
|
||||
|
||||
if !pt2.Equal(lt2) {
|
||||
t.Fatalf("time wasnt serialized correctly, %v != %v", pt1, lt1)
|
||||
}
|
||||
}
|
||||
|
||||
func TestProvidesExpire(t *testing.T) {
|
||||
t.Skip("This test is flaky, see https://github.com/libp2p/go-libp2p-kad-dht/issues/725.")
|
||||
|
||||
pval := ProvideValidity
|
||||
cleanup := defaultCleanupInterval
|
||||
ProvideValidity = time.Second / 2
|
||||
defaultCleanupInterval = time.Second / 2
|
||||
defer func() {
|
||||
ProvideValidity = pval
|
||||
defaultCleanupInterval = cleanup
|
||||
}()
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
ds := dssync.MutexWrap(ds.NewMapDatastore())
|
||||
mid := peer.ID("testing")
|
||||
ps, err := pstoremem.NewPeerstore()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
p, err := NewProviderManager(mid, ps, ds)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
peers := []peer.ID{"a", "b"}
|
||||
var mhs []mh.Multihash
|
||||
for i := 0; i < 10; i++ {
|
||||
h := u.Hash([]byte(fmt.Sprint(i)))
|
||||
mhs = append(mhs, h)
|
||||
}
|
||||
|
||||
for _, h := range mhs[:5] {
|
||||
p.AddProvider(ctx, h, peer.AddrInfo{ID: peers[0]})
|
||||
p.AddProvider(ctx, h, peer.AddrInfo{ID: peers[1]})
|
||||
}
|
||||
|
||||
time.Sleep(time.Second / 4)
|
||||
|
||||
for _, h := range mhs[5:] {
|
||||
p.AddProvider(ctx, h, peer.AddrInfo{ID: peers[0]})
|
||||
p.AddProvider(ctx, h, peer.AddrInfo{ID: peers[1]})
|
||||
}
|
||||
|
||||
for _, h := range mhs {
|
||||
out, _ := p.GetProviders(ctx, h)
|
||||
if len(out) != 2 {
|
||||
t.Fatal("expected providers to still be there")
|
||||
}
|
||||
}
|
||||
|
||||
time.Sleep(3 * time.Second / 8)
|
||||
|
||||
for _, h := range mhs[:5] {
|
||||
out, _ := p.GetProviders(ctx, h)
|
||||
if len(out) > 0 {
|
||||
t.Fatal("expected providers to be cleaned up, got: ", out)
|
||||
}
|
||||
}
|
||||
|
||||
for _, h := range mhs[5:] {
|
||||
out, _ := p.GetProviders(ctx, h)
|
||||
if len(out) != 2 {
|
||||
t.Fatal("expected providers to still be there")
|
||||
}
|
||||
}
|
||||
|
||||
time.Sleep(time.Second / 2)
|
||||
|
||||
// Stop to prevent data races
|
||||
p.Close()
|
||||
|
||||
if p.cache.Len() != 0 {
|
||||
t.Fatal("providers map not cleaned up")
|
||||
}
|
||||
|
||||
res, err := ds.Query(context.Background(), dsq.Query{Prefix: ProvidersKeyPrefix})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
rest, err := res.Rest()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(rest) > 0 {
|
||||
t.Fatal("expected everything to be cleaned out of the datastore")
|
||||
}
|
||||
}
|
||||
|
||||
var _ = io.NopCloser
|
||||
var _ = os.DevNull
|
||||
|
||||
// TestLargeProvidersSet can be used for profiling.
|
||||
// The datastore can be switched to levelDB by uncommenting the section below and the import above
|
||||
func TestLargeProvidersSet(t *testing.T) {
|
||||
t.Skip("This can be used for profiling. Skipping it for now to avoid incurring extra CI time")
|
||||
old := lruCacheSize
|
||||
lruCacheSize = 10
|
||||
defer func() { lruCacheSize = old }()
|
||||
|
||||
dstore := ds.NewMapDatastore()
|
||||
|
||||
// dirn, err := os.MkdirTemp("", "provtest")
// if err != nil {
// 	t.Fatal(err)
// }
|
||||
//
|
||||
// opts := &lds.Options{
|
||||
// NoSync: true,
|
||||
// Compression: 1,
|
||||
// }
|
||||
// lds, err := lds.NewDatastore(dirn, opts)
|
||||
// if err != nil {
|
||||
// t.Fatal(err)
|
||||
// }
|
||||
// dstore = lds
|
||||
//
|
||||
// defer func() {
|
||||
// os.RemoveAll(dirn)
|
||||
// }()
|
||||
|
||||
ctx := context.Background()
|
||||
var peers []peer.ID
|
||||
for i := 0; i < 3000; i++ {
|
||||
peers = append(peers, peer.ID(fmt.Sprint(i)))
|
||||
}
|
||||
|
||||
mid := peer.ID("myself")
|
||||
ps, err := pstoremem.NewPeerstore()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
p, err := NewProviderManager(mid, ps, dstore)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer p.Close()
|
||||
|
||||
var mhs []mh.Multihash
|
||||
for i := 0; i < 1000; i++ {
|
||||
h := u.Hash([]byte(fmt.Sprint(i)))
|
||||
mhs = append(mhs, h)
|
||||
for _, pid := range peers {
|
||||
p.AddProvider(ctx, h, peer.AddrInfo{ID: pid})
|
||||
}
|
||||
}
|
||||
|
||||
for i := 0; i < 5; i++ {
|
||||
start := time.Now()
|
||||
for _, h := range mhs {
|
||||
_, _ = p.GetProviders(ctx, h)
|
||||
}
|
||||
elapsed := time.Since(start)
|
||||
fmt.Printf("query %f ms\n", elapsed.Seconds()*1000)
|
||||
}
|
||||
}
|
||||
|
||||
func TestUponCacheMissProvidersAreReadFromDatastore(t *testing.T) {
|
||||
old := lruCacheSize
|
||||
lruCacheSize = 1
|
||||
defer func() { lruCacheSize = old }()
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
p1, p2 := peer.ID("a"), peer.ID("b")
|
||||
h1 := u.Hash([]byte("1"))
|
||||
h2 := u.Hash([]byte("2"))
|
||||
ps, err := pstoremem.NewPeerstore()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
pm, err := NewProviderManager(p1, ps, dssync.MutexWrap(ds.NewMapDatastore()))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// add provider
|
||||
pm.AddProvider(ctx, h1, peer.AddrInfo{ID: p1})
|
||||
// make the cached provider for h1 go to datastore
|
||||
pm.AddProvider(ctx, h2, peer.AddrInfo{ID: p1})
|
||||
// now the just-offloaded record should be brought back and joined with p2
|
||||
pm.AddProvider(ctx, h1, peer.AddrInfo{ID: p2})
|
||||
|
||||
h1Provs, _ := pm.GetProviders(ctx, h1)
|
||||
if len(h1Provs) != 2 {
|
||||
t.Fatalf("expected h1 to be provided by 2 peers, is by %d", len(h1Provs))
|
||||
}
|
||||
}
|
||||
|
||||
func TestWriteUpdatesCache(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
p1, p2 := peer.ID("a"), peer.ID("b")
|
||||
h1 := u.Hash([]byte("1"))
|
||||
ps, err := pstoremem.NewPeerstore()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
pm, err := NewProviderManager(p1, ps, dssync.MutexWrap(ds.NewMapDatastore()))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// add provider
|
||||
pm.AddProvider(ctx, h1, peer.AddrInfo{ID: p1})
|
||||
// force into the cache
|
||||
pm.GetProviders(ctx, h1)
|
||||
// add a second provider
|
||||
pm.AddProvider(ctx, h1, peer.AddrInfo{ID: p2})
|
||||
|
||||
c1Provs, _ := pm.GetProviders(ctx, h1)
|
||||
if len(c1Provs) != 2 {
|
||||
t.Fatalf("expected h1 to be provided by 2 peers, is by %d", len(c1Provs))
|
||||
}
|
||||
}
|
159	go-libp2p-kad-dht/qpeerset/qpeerset.go	Normal file
@ -0,0 +1,159 @@
|
||||
package qpeerset
|
||||
|
||||
import (
|
||||
"math/big"
|
||||
"sort"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
ks "github.com/whyrusleeping/go-keyspace"
|
||||
)
|
||||
|
||||
// PeerState describes the state of a peer ID during the lifecycle of an individual lookup.
|
||||
type PeerState int
|
||||
|
||||
const (
|
||||
// PeerHeard is applied to peers which have not been queried yet.
|
||||
PeerHeard PeerState = iota
|
||||
// PeerWaiting is applied to peers that are currently being queried.
|
||||
PeerWaiting
|
||||
// PeerQueried is applied to peers who have been queried and a response was retrieved successfully.
|
||||
PeerQueried
|
||||
// PeerUnreachable is applied to peers who have been queried and a response was not retrieved successfully.
|
||||
PeerUnreachable
|
||||
)
|
||||
|
||||
// QueryPeerset maintains the state of a Kademlia asynchronous lookup.
|
||||
// The lookup state is a set of peers, each labeled with a peer state.
|
||||
type QueryPeerset struct {
|
||||
// the key being searched for
|
||||
key ks.Key
|
||||
|
||||
// all known peers
|
||||
all []queryPeerState
|
||||
|
||||
// sorted is true if all is currently in sorted order
|
||||
sorted bool
|
||||
}
|
||||
|
||||
type queryPeerState struct {
|
||||
id peer.ID
|
||||
distance *big.Int
|
||||
state PeerState
|
||||
referredBy peer.ID
|
||||
}
|
||||
|
||||
type sortedQueryPeerset QueryPeerset
|
||||
|
||||
func (sqp *sortedQueryPeerset) Len() int {
|
||||
return len(sqp.all)
|
||||
}
|
||||
|
||||
func (sqp *sortedQueryPeerset) Swap(i, j int) {
|
||||
sqp.all[i], sqp.all[j] = sqp.all[j], sqp.all[i]
|
||||
}
|
||||
|
||||
func (sqp *sortedQueryPeerset) Less(i, j int) bool {
|
||||
di, dj := sqp.all[i].distance, sqp.all[j].distance
|
||||
return di.Cmp(dj) == -1
|
||||
}
|
||||
|
||||
// NewQueryPeerset creates a new empty set of peers.
|
||||
// key is the target key of the lookup that this peer set is for.
|
||||
func NewQueryPeerset(key string) *QueryPeerset {
|
||||
return &QueryPeerset{
|
||||
key: ks.XORKeySpace.Key([]byte(key)),
|
||||
all: []queryPeerState{},
|
||||
sorted: false,
|
||||
}
|
||||
}
|
||||
|
||||
func (qp *QueryPeerset) find(p peer.ID) int {
|
||||
for i := range qp.all {
|
||||
if qp.all[i].id == p {
|
||||
return i
|
||||
}
|
||||
}
|
||||
return -1
|
||||
}
|
||||
|
||||
func (qp *QueryPeerset) distanceToKey(p peer.ID) *big.Int {
|
||||
return ks.XORKeySpace.Key([]byte(p)).Distance(qp.key)
|
||||
}
|
||||
|
||||
// TryAdd adds the peer p to the peer set.
|
||||
// If the peer is already present, no action is taken.
|
||||
// Otherwise, the peer is added with state set to PeerHeard.
|
||||
// TryAdd returns true iff the peer was not already present.
|
||||
func (qp *QueryPeerset) TryAdd(p, referredBy peer.ID) bool {
|
||||
if qp.find(p) >= 0 {
|
||||
return false
|
||||
} else {
|
||||
qp.all = append(qp.all,
|
||||
queryPeerState{id: p, distance: qp.distanceToKey(p), state: PeerHeard, referredBy: referredBy})
|
||||
qp.sorted = false
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
func (qp *QueryPeerset) sort() {
|
||||
if qp.sorted {
|
||||
return
|
||||
}
|
||||
sort.Sort((*sortedQueryPeerset)(qp))
|
||||
qp.sorted = true
|
||||
}
|
||||
|
||||
// SetState sets the state of peer p to s.
|
||||
// If p is not in the peerset, SetState panics.
|
||||
func (qp *QueryPeerset) SetState(p peer.ID, s PeerState) {
|
||||
qp.all[qp.find(p)].state = s
|
||||
}
|
||||
|
||||
// GetState returns the state of peer p.
|
||||
// If p is not in the peerset, GetState panics.
|
||||
func (qp *QueryPeerset) GetState(p peer.ID) PeerState {
|
||||
return qp.all[qp.find(p)].state
|
||||
}
|
||||
|
||||
// GetReferrer returns the peer that referred us to the peer p.
|
||||
// If p is not in the peerset, GetReferrer panics.
|
||||
func (qp *QueryPeerset) GetReferrer(p peer.ID) peer.ID {
|
||||
return qp.all[qp.find(p)].referredBy
|
||||
}
|
||||
|
||||
// GetClosestNInStates returns the peers closest to the key that are in one of the given states.
// It returns at most n peers, or fewer if not enough peers meet the condition.
|
||||
// The returned peers are sorted in ascending order by their distance to the key.
|
||||
func (qp *QueryPeerset) GetClosestNInStates(n int, states ...PeerState) (result []peer.ID) {
|
||||
qp.sort()
|
||||
m := make(map[PeerState]struct{}, len(states))
|
||||
for i := range states {
|
||||
m[states[i]] = struct{}{}
|
||||
}
|
||||
|
||||
for _, p := range qp.all {
|
||||
if _, ok := m[p.state]; ok {
|
||||
result = append(result, p.id)
|
||||
}
|
||||
}
|
||||
if len(result) >= n {
|
||||
return result[:n]
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// GetClosestInStates returns the peers that are in one of the given states.
|
||||
// The returned peers are sorted in ascending order by their distance to the key.
|
||||
func (qp *QueryPeerset) GetClosestInStates(states ...PeerState) (result []peer.ID) {
|
||||
return qp.GetClosestNInStates(len(qp.all), states...)
|
||||
}
|
||||
|
||||
// NumHeard returns the number of peers in state PeerHeard.
|
||||
func (qp *QueryPeerset) NumHeard() int {
|
||||
return len(qp.GetClosestInStates(PeerHeard))
|
||||
}
|
||||
|
||||
// NumWaiting returns the number of peers in state PeerWaiting.
|
||||
func (qp *QueryPeerset) NumWaiting() int {
|
||||
return len(qp.GetClosestInStates(PeerWaiting))
|
||||
}
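// Illustrative sketch, not part of the original diff: a typical QueryPeerset lifecycle during
// a lookup. The peer IDs and the "self" referrer are placeholders.
//
//	qp := NewQueryPeerset("target-key")
//	qp.TryAdd(peer.ID("a"), peer.ID("self")) // starts in PeerHeard
//	qp.TryAdd(peer.ID("b"), peer.ID("self"))
//	qp.SetState(peer.ID("a"), PeerWaiting)   // query in flight
//	qp.SetState(peer.ID("a"), PeerQueried)   // response received
//	next := qp.GetClosestNInStates(1, PeerHeard) // candidates still to query
//	_ = next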
|
86	go-libp2p-kad-dht/qpeerset/qpeerset_test.go	Normal file
@ -0,0 +1,86 @@
|
||||
package qpeerset
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/libp2p/go-libp2p/core/test"
|
||||
|
||||
kb "github.com/libp2p/go-libp2p-kbucket"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestQPeerSet(t *testing.T) {
|
||||
key := "test"
|
||||
qp := NewQueryPeerset(key)
|
||||
|
||||
// -----------------Ordering between peers for the Test -----
|
||||
// KEY < peer3 < peer1 < peer4 < peer2
|
||||
// ----------------------------------------------------------
|
||||
peer2 := test.RandPeerIDFatal(t)
|
||||
var peer4 peer.ID
|
||||
for {
|
||||
peer4 = test.RandPeerIDFatal(t)
|
||||
if kb.Closer(peer4, peer2, key) {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
var peer1 peer.ID
|
||||
for {
|
||||
peer1 = test.RandPeerIDFatal(t)
|
||||
if kb.Closer(peer1, peer4, key) {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
var peer3 peer.ID
|
||||
for {
|
||||
peer3 = test.RandPeerIDFatal(t)
|
||||
if kb.Closer(peer3, peer1, key) {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
oracle := test.RandPeerIDFatal(t)
|
||||
|
||||
// find fails
|
||||
require.Equal(t, -1, qp.find(peer2))
|
||||
|
||||
// add peer2,assert state & then another add fails
|
||||
require.True(t, qp.TryAdd(peer2, oracle))
|
||||
require.Equal(t, PeerHeard, qp.GetState(peer2))
|
||||
require.False(t, qp.TryAdd(peer2, oracle))
|
||||
require.Equal(t, 0, qp.NumWaiting())
|
||||
|
||||
// add peer4
|
||||
require.True(t, qp.TryAdd(peer4, oracle))
|
||||
cl := qp.GetClosestNInStates(2, PeerHeard, PeerWaiting, PeerQueried)
|
||||
require.Equal(t, []peer.ID{peer4, peer2}, cl)
|
||||
cl = qp.GetClosestNInStates(3, PeerHeard, PeerWaiting, PeerQueried)
|
||||
require.Equal(t, []peer.ID{peer4, peer2}, cl)
|
||||
cl = qp.GetClosestNInStates(1, PeerHeard, PeerWaiting, PeerQueried)
|
||||
require.Equal(t, []peer.ID{peer4}, cl)
|
||||
|
||||
// mark as unreachable & try to get it
|
||||
qp.SetState(peer4, PeerUnreachable)
|
||||
cl = qp.GetClosestNInStates(1, PeerHeard, PeerWaiting, PeerQueried)
|
||||
require.Equal(t, []peer.ID{peer2}, cl)
|
||||
|
||||
// add peer1
|
||||
require.True(t, qp.TryAdd(peer1, oracle))
|
||||
cl = qp.GetClosestNInStates(1, PeerHeard, PeerWaiting, PeerQueried)
|
||||
require.Equal(t, []peer.ID{peer1}, cl)
|
||||
cl = qp.GetClosestNInStates(2, PeerHeard, PeerWaiting, PeerQueried)
|
||||
require.Equal(t, []peer.ID{peer1, peer2}, cl)
|
||||
|
||||
// mark as waiting and assert
|
||||
qp.SetState(peer2, PeerWaiting)
|
||||
require.Equal(t, []peer.ID{peer2}, qp.GetClosestInStates(PeerWaiting))
|
||||
|
||||
require.Equal(t, []peer.ID{peer1}, qp.GetClosestInStates(PeerHeard))
|
||||
require.True(t, qp.TryAdd(peer3, oracle))
|
||||
require.Equal(t, []peer.ID{peer3, peer1}, qp.GetClosestInStates(PeerHeard))
|
||||
require.Equal(t, 2, qp.NumHeard())
|
||||
}
|
556	go-libp2p-kad-dht/query.go	Normal file
@ -0,0 +1,556 @@
|
||||
package dht
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
pstore "github.com/libp2p/go-libp2p/core/peerstore"
|
||||
"github.com/libp2p/go-libp2p/core/routing"
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/trace"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/libp2p/go-libp2p-kad-dht/internal"
|
||||
"github.com/libp2p/go-libp2p-kad-dht/qpeerset"
|
||||
kb "github.com/libp2p/go-libp2p-kbucket"
|
||||
)
|
||||
|
||||
// ErrNoPeersQueried is returned when we failed to connect to any peers.
|
||||
var ErrNoPeersQueried = errors.New("failed to query any peers")
|
||||
|
||||
type queryFn func(context.Context, peer.ID) ([]*peer.AddrInfo, error)
|
||||
type stopFn func(*qpeerset.QueryPeerset) bool
|
||||
|
||||
// query represents a single DHT query.
|
||||
type query struct {
|
||||
// unique identifier for the lookup instance
|
||||
id uuid.UUID
|
||||
|
||||
// target key for the lookup
|
||||
key string
|
||||
|
||||
// the query context.
|
||||
ctx context.Context
|
||||
|
||||
dht *IpfsDHT
|
||||
|
||||
// seedPeers is the set of peers that seed the query
|
||||
seedPeers []peer.ID
|
||||
|
||||
// peerTimes contains the duration of each successful query to a peer
|
||||
peerTimes map[peer.ID]time.Duration
|
||||
|
||||
// queryPeers is the set of peers known by this query and their respective states.
|
||||
queryPeers *qpeerset.QueryPeerset
|
||||
|
||||
// terminated is set when the first worker thread encounters the termination condition.
|
||||
// Its role is to make sure that once termination is determined, it is sticky.
|
||||
terminated bool
|
||||
|
||||
// waitGroup ensures lookup does not end until all query goroutines complete.
|
||||
waitGroup sync.WaitGroup
|
||||
|
||||
// the function that will be used to query a single peer.
|
||||
queryFn queryFn
|
||||
|
||||
// stopFn is used to determine if we should stop the WHOLE disjoint query.
|
||||
stopFn stopFn
|
||||
}
|
||||
|
||||
type lookupWithFollowupResult struct {
|
||||
peers []peer.ID // the top K not unreachable peers at the end of the query
|
||||
state []qpeerset.PeerState // the peer states at the end of the query of the peers slice (not closest)
|
||||
closest []peer.ID // the top K peers at the end of the query
|
||||
|
||||
// indicates that neither the lookup nor the followup has been prematurely terminated by an external condition such
|
||||
// as context cancellation or the stop function being called.
|
||||
completed bool
|
||||
}
|
||||
|
||||
// runLookupWithFollowup executes the lookup on the target using the given query function and stopping when either the
|
||||
// context is cancelled or the stop function returns true. Note: if the stop function is not sticky, i.e. it does not
|
||||
// return true every time after the first time it returns true, it is not guaranteed to cause a stop to occur just
|
||||
// because it momentarily returns true.
|
||||
//
|
||||
// After the lookup is complete the query function is run (unless stopped) against all of the top K peers from the
|
||||
// lookup that have not already been successfully queried.
|
||||
func (dht *IpfsDHT) runLookupWithFollowup(ctx context.Context, target string, queryFn queryFn, stopFn stopFn) (*lookupWithFollowupResult, error) {
|
||||
ctx, span := internal.StartSpan(ctx, "IpfsDHT.RunLookupWithFollowup", trace.WithAttributes(internal.KeyAsAttribute("Target", target)))
|
||||
defer span.End()
|
||||
|
||||
// run the query
|
||||
lookupRes, qps, err := dht.runQuery(ctx, target, queryFn, stopFn)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// query all of the top K peers we've either Heard about or have outstanding queries we're Waiting on.
|
||||
// This ensures that all of the top K results have been queried which adds to resiliency against churn for query
|
||||
// functions that carry state (e.g. FindProviders and GetValue) as well as establish connections that are needed
|
||||
// by stateless query functions (e.g. GetClosestPeers and therefore Provide and PutValue)
|
||||
queryPeers := make([]peer.ID, 0, len(lookupRes.peers))
|
||||
for i, p := range lookupRes.peers {
|
||||
if state := lookupRes.state[i]; state == qpeerset.PeerHeard || state == qpeerset.PeerWaiting {
|
||||
queryPeers = append(queryPeers, p)
|
||||
}
|
||||
}
|
||||
|
||||
if len(queryPeers) == 0 {
|
||||
return lookupRes, nil
|
||||
}
|
||||
|
||||
// return if the lookup has been externally stopped
|
||||
if ctx.Err() != nil || stopFn(qps) {
|
||||
lookupRes.completed = false
|
||||
return lookupRes, nil
|
||||
}
|
||||
|
||||
doneCh := make(chan struct{}, len(queryPeers))
|
||||
followUpCtx, cancelFollowUp := context.WithCancel(ctx)
|
||||
defer cancelFollowUp()
|
||||
for _, p := range queryPeers {
|
||||
qp := p
|
||||
go func() {
|
||||
_, _ = queryFn(followUpCtx, qp)
|
||||
doneCh <- struct{}{}
|
||||
}()
|
||||
}
|
||||
|
||||
// wait for all queries to complete before returning, aborting ongoing queries if we've been externally stopped
|
||||
followupsCompleted := 0
|
||||
processFollowUp:
|
||||
for i := 0; i < len(queryPeers); i++ {
|
||||
select {
|
||||
case <-doneCh:
|
||||
followupsCompleted++
|
||||
if stopFn(qps) {
|
||||
cancelFollowUp()
|
||||
if i < len(queryPeers)-1 {
|
||||
lookupRes.completed = false
|
||||
}
|
||||
break processFollowUp
|
||||
}
|
||||
case <-ctx.Done():
|
||||
lookupRes.completed = false
|
||||
cancelFollowUp()
|
||||
break processFollowUp
|
||||
}
|
||||
}
|
||||
|
||||
if !lookupRes.completed {
|
||||
for i := followupsCompleted; i < len(queryPeers); i++ {
|
||||
<-doneCh
|
||||
}
|
||||
}
|
||||
|
||||
return lookupRes, nil
|
||||
}
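// Illustrative sketch, not part of the original diff: the comment on runLookupWithFollowup
// warns that a non-sticky stop function may fail to stop the lookup. A caller can latch the
// first true result to make any stopFn sticky:
//
//	func sticky(fn stopFn) stopFn {
//		stopped := false
//		return func(qps *qpeerset.QueryPeerset) bool {
//			if stopped {
//				return true
//			}
//			stopped = fn(qps)
//			return stopped
//		}
//	}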
|
||||
|
||||
func (dht *IpfsDHT) runQuery(ctx context.Context, target string, queryFn queryFn, stopFn stopFn) (*lookupWithFollowupResult, *qpeerset.QueryPeerset, error) {
|
||||
ctx, span := internal.StartSpan(ctx, "IpfsDHT.RunQuery")
|
||||
defer span.End()
|
||||
|
||||
// pick the K closest peers to the key in our Routing table.
|
||||
targetKadID := kb.ConvertKey(target)
|
||||
seedPeers := dht.routingTable.NearestPeers(targetKadID, dht.bucketSize)
|
||||
if len(seedPeers) == 0 {
|
||||
routing.PublishQueryEvent(ctx, &routing.QueryEvent{
|
||||
Type: routing.QueryError,
|
||||
Extra: kb.ErrLookupFailure.Error(),
|
||||
})
|
||||
return nil, nil, kb.ErrLookupFailure
|
||||
}
|
||||
|
||||
q := &query{
|
||||
id: uuid.New(),
|
||||
key: target,
|
||||
ctx: ctx,
|
||||
dht: dht,
|
||||
queryPeers: qpeerset.NewQueryPeerset(target),
|
||||
seedPeers: seedPeers,
|
||||
peerTimes: make(map[peer.ID]time.Duration),
|
||||
terminated: false,
|
||||
queryFn: queryFn,
|
||||
stopFn: stopFn,
|
||||
}
|
||||
|
||||
// run the query
|
||||
q.run()
|
||||
|
||||
if ctx.Err() == nil {
|
||||
q.recordValuablePeers()
|
||||
}
|
||||
|
||||
res := q.constructLookupResult(targetKadID)
|
||||
return res, q.queryPeers, nil
|
||||
}
|
||||
|
||||
func (q *query) recordPeerIsValuable(p peer.ID) {
|
||||
if !q.dht.routingTable.UpdateLastUsefulAt(p, time.Now()) {
|
||||
// not in routing table
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func (q *query) recordValuablePeers() {
|
||||
// Valuable peers algorithm:
|
||||
// Label the seed peer that responded to a query in the shortest amount of time as the "most valuable peer" (MVP)
|
||||
// Each seed peer that responded to a query within some range (i.e. 2x) of the MVP's time is a valuable peer
|
||||
// Mark the MVP and all the other valuable peers as valuable
|
||||
mvpDuration := time.Duration(math.MaxInt64)
|
||||
for _, p := range q.seedPeers {
|
||||
if queryTime, ok := q.peerTimes[p]; ok && queryTime < mvpDuration {
|
||||
mvpDuration = queryTime
|
||||
}
|
||||
}
|
||||
|
||||
for _, p := range q.seedPeers {
|
||||
if queryTime, ok := q.peerTimes[p]; ok && queryTime < mvpDuration*2 {
|
||||
q.recordPeerIsValuable(p)
|
||||
}
|
||||
}
|
||||
}
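// Worked example, not part of the original diff: with seed response times of 80ms, 120ms and
// 300ms, mvpDuration settles at 80ms, so the 80ms and 120ms seeds (both under 2*80ms = 160ms)
// are recorded as valuable while the 300ms seed is not.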
|
||||
|
||||
// constructLookupResult takes the query information and uses it to construct the lookup result
|
||||
func (q *query) constructLookupResult(target kb.ID) *lookupWithFollowupResult {
|
||||
// determine if the query terminated early
|
||||
completed := true
|
||||
|
||||
// Lookup and starvation are both valid ways for a lookup to complete. (Starvation does not imply failure.)
|
||||
// Lookup termination (as defined in isLookupTermination) is not possible in small networks.
|
||||
// Starvation is a successful query termination in small networks.
|
||||
if !(q.isLookupTermination() || q.isStarvationTermination()) {
|
||||
completed = false
|
||||
}
|
||||
|
||||
// extract the top K not unreachable peers
|
||||
var peers []peer.ID
|
||||
peerState := make(map[peer.ID]qpeerset.PeerState)
|
||||
qp := q.queryPeers.GetClosestNInStates(q.dht.bucketSize, qpeerset.PeerHeard, qpeerset.PeerWaiting, qpeerset.PeerQueried)
|
||||
for _, p := range qp {
|
||||
state := q.queryPeers.GetState(p)
|
||||
peerState[p] = state
|
||||
peers = append(peers, p)
|
||||
}
|
||||
|
||||
// get the top K overall peers
|
||||
sortedPeers := kb.SortClosestPeers(peers, target)
|
||||
if len(sortedPeers) > q.dht.bucketSize {
|
||||
sortedPeers = sortedPeers[:q.dht.bucketSize]
|
||||
}
|
||||
|
||||
closest := q.queryPeers.GetClosestNInStates(q.dht.bucketSize, qpeerset.PeerHeard, qpeerset.PeerWaiting, qpeerset.PeerQueried, qpeerset.PeerUnreachable)
|
||||
|
||||
// return the top K not unreachable peers as well as their states at the end of the query
|
||||
res := &lookupWithFollowupResult{
|
||||
peers: sortedPeers,
|
||||
state: make([]qpeerset.PeerState, len(sortedPeers)),
|
||||
completed: completed,
|
||||
closest: closest,
|
||||
}
|
||||
|
||||
for i, p := range sortedPeers {
|
||||
res.state[i] = peerState[p]
|
||||
}
|
||||
|
||||
return res
|
||||
}
|
||||
|
||||
type queryUpdate struct {
|
||||
cause peer.ID
|
||||
queried []peer.ID
|
||||
heard []peer.ID
|
||||
unreachable []peer.ID
|
||||
|
||||
queryDuration time.Duration
|
||||
}
|
||||
|
||||
func (q *query) run() {
|
||||
ctx, span := internal.StartSpan(q.ctx, "IpfsDHT.Query.Run")
|
||||
defer span.End()
|
||||
|
||||
pathCtx, cancelPath := context.WithCancel(ctx)
|
||||
defer cancelPath()
|
||||
|
||||
alpha := q.dht.alpha
|
||||
|
||||
ch := make(chan *queryUpdate, alpha)
|
||||
ch <- &queryUpdate{cause: q.dht.self, heard: q.seedPeers}
|
||||
|
||||
// return only once all outstanding queries have completed.
|
||||
defer q.waitGroup.Wait()
|
||||
for {
|
||||
var cause peer.ID
|
||||
select {
|
||||
case update := <-ch:
|
||||
q.updateState(pathCtx, update)
|
||||
cause = update.cause
|
||||
case <-pathCtx.Done():
|
||||
q.terminate(pathCtx, cancelPath, LookupCancelled)
|
||||
}
|
||||
|
||||
// calculate the maximum number of queries we could be spawning.
|
||||
// Note: NumWaiting will be updated in spawnQuery
|
||||
maxNumQueriesToSpawn := alpha - q.queryPeers.NumWaiting()
|
||||
|
||||
// termination is triggered on end-of-lookup conditions or starvation of unused peers
|
||||
// it also returns the peers we should query next for a maximum of `maxNumQueriesToSpawn` peers.
|
||||
ready, reason, qPeers := q.isReadyToTerminate(pathCtx, maxNumQueriesToSpawn)
|
||||
if ready {
|
||||
q.terminate(pathCtx, cancelPath, reason)
|
||||
}
|
||||
|
||||
if q.terminated {
|
||||
return
|
||||
}
|
||||
|
||||
// try spawning the queries, if there are no available peers to query then we won't spawn them
|
||||
for _, p := range qPeers {
|
||||
q.spawnQuery(pathCtx, cause, p, ch)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// spawnQuery starts one query, if an available heard peer is found
|
||||
func (q *query) spawnQuery(ctx context.Context, cause peer.ID, queryPeer peer.ID, ch chan<- *queryUpdate) {
|
||||
ctx, span := internal.StartSpan(ctx, "IpfsDHT.SpawnQuery", trace.WithAttributes(
|
||||
attribute.String("Cause", cause.String()),
|
||||
attribute.String("QueryPeer", queryPeer.String()),
|
||||
))
|
||||
defer span.End()
|
||||
|
||||
PublishLookupEvent(ctx,
|
||||
NewLookupEvent(
|
||||
q.dht.self,
|
||||
q.id,
|
||||
q.key,
|
||||
NewLookupUpdateEvent(
|
||||
cause,
|
||||
q.queryPeers.GetReferrer(queryPeer),
|
||||
nil, // heard
|
||||
[]peer.ID{queryPeer}, // waiting
|
||||
nil, // queried
|
||||
nil, // unreachable
|
||||
),
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
)
|
||||
q.queryPeers.SetState(queryPeer, qpeerset.PeerWaiting)
|
||||
q.waitGroup.Add(1)
|
||||
go q.queryPeer(ctx, ch, queryPeer)
|
||||
}
|
||||
|
||||
func (q *query) isReadyToTerminate(ctx context.Context, nPeersToQuery int) (bool, LookupTerminationReason, []peer.ID) {
|
||||
// give the application logic a chance to terminate
|
||||
if q.stopFn(q.queryPeers) {
|
||||
return true, LookupStopped, nil
|
||||
}
|
||||
if q.isStarvationTermination() {
|
||||
return true, LookupStarvation, nil
|
||||
}
|
||||
if q.isLookupTermination() {
|
||||
return true, LookupCompleted, nil
|
||||
}
|
||||
|
||||
// The peers we query next should be ones that we have only Heard about.
|
||||
var peersToQuery []peer.ID
|
||||
peers := q.queryPeers.GetClosestInStates(qpeerset.PeerHeard)
|
||||
count := 0
|
||||
for _, p := range peers {
|
||||
peersToQuery = append(peersToQuery, p)
|
||||
count++
|
||||
if count == nPeersToQuery {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return false, -1, peersToQuery
|
||||
}
|
||||
|
||||
// From the set of all nodes that are not unreachable,
|
||||
// if the closest beta nodes are all queried, the lookup can terminate.
|
||||
func (q *query) isLookupTermination() bool {
|
||||
peers := q.queryPeers.GetClosestNInStates(q.dht.beta, qpeerset.PeerHeard, qpeerset.PeerWaiting, qpeerset.PeerQueried)
|
||||
for _, p := range peers {
|
||||
if q.queryPeers.GetState(p) != qpeerset.PeerQueried {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (q *query) isStarvationTermination() bool {
|
||||
return q.queryPeers.NumHeard() == 0 && q.queryPeers.NumWaiting() == 0
|
||||
}
|
||||
|
||||
func (q *query) terminate(ctx context.Context, cancel context.CancelFunc, reason LookupTerminationReason) {
|
||||
ctx, span := internal.StartSpan(ctx, "IpfsDHT.Query.Terminate", trace.WithAttributes(attribute.Stringer("Reason", reason)))
|
||||
defer span.End()
|
||||
|
||||
if q.terminated {
|
||||
return
|
||||
}
|
||||
|
||||
PublishLookupEvent(ctx,
|
||||
NewLookupEvent(
|
||||
q.dht.self,
|
||||
q.id,
|
||||
q.key,
|
||||
nil,
|
||||
nil,
|
||||
NewLookupTerminateEvent(reason),
|
||||
),
|
||||
)
|
||||
cancel() // abort outstanding queries
|
||||
q.terminated = true
|
||||
}
|
||||
|
||||
// queryPeer queries a single peer and reports its findings on the channel.
|
||||
// queryPeer does not access the query state in queryPeers!
|
||||
func (q *query) queryPeer(ctx context.Context, ch chan<- *queryUpdate, p peer.ID) {
|
||||
defer q.waitGroup.Done()
|
||||
|
||||
ctx, span := internal.StartSpan(ctx, "IpfsDHT.QueryPeer")
|
||||
defer span.End()
|
||||
|
||||
dialCtx, queryCtx := ctx, ctx
|
||||
|
||||
// dial the peer
|
||||
if err := q.dht.dialPeer(dialCtx, p); err != nil {
|
||||
// remove the peer if there was a dial failure, but not because of a context cancellation
|
||||
if dialCtx.Err() == nil {
|
||||
q.dht.peerStoppedDHT(p)
|
||||
}
|
||||
ch <- &queryUpdate{cause: p, unreachable: []peer.ID{p}}
|
||||
return
|
||||
}
|
||||
|
||||
startQuery := time.Now()
|
||||
// send query RPC to the remote peer
|
||||
newPeers, err := q.queryFn(queryCtx, p)
|
||||
if err != nil {
|
||||
if queryCtx.Err() == nil {
|
||||
q.dht.peerStoppedDHT(p)
|
||||
}
|
||||
ch <- &queryUpdate{cause: p, unreachable: []peer.ID{p}}
|
||||
return
|
||||
}
|
||||
|
||||
queryDuration := time.Since(startQuery)
|
||||
|
||||
// query successful, try to add to RT
|
||||
q.dht.validPeerFound(p)
|
||||
|
||||
// process new peers
|
||||
saw := []peer.ID{}
|
||||
for _, next := range newPeers {
|
||||
if next.ID == q.dht.self { // don't add self.
|
||||
logger.Debugf("PEERS CLOSER -- worker for: %v found self", p)
|
||||
continue
|
||||
}
|
||||
|
||||
// add any other known addresses for the candidate peer.
|
||||
curInfo := q.dht.peerstore.PeerInfo(next.ID)
|
||||
next.Addrs = append(next.Addrs, curInfo.Addrs...)
|
||||
|
||||
// add their addresses to the dialer's peerstore
|
||||
//
|
||||
// add the next peer to the query if it matches the query target, even if it would otherwise fail the query filter
|
||||
// TODO: this behavior is really specific to how FindPeer works and not GetClosestPeers or any other function
|
||||
isTarget := string(next.ID) == q.key
|
||||
if isTarget || q.dht.queryPeerFilter(q.dht, *next) {
|
||||
q.dht.maybeAddAddrs(next.ID, next.Addrs, pstore.TempAddrTTL)
|
||||
saw = append(saw, next.ID)
|
||||
}
|
||||
}
|
||||
|
||||
ch <- &queryUpdate{cause: p, heard: saw, queried: []peer.ID{p}, queryDuration: queryDuration}
|
||||
}
|
||||
|
||||
func (q *query) updateState(ctx context.Context, up *queryUpdate) {
|
||||
if q.terminated {
|
||||
panic("update should not be invoked after the logical lookup termination")
|
||||
}
|
||||
PublishLookupEvent(ctx,
|
||||
NewLookupEvent(
|
||||
q.dht.self,
|
||||
q.id,
|
||||
q.key,
|
||||
nil,
|
||||
NewLookupUpdateEvent(
|
||||
up.cause,
|
||||
up.cause,
|
||||
up.heard, // heard
|
||||
nil, // waiting
|
||||
up.queried, // queried
|
||||
up.unreachable, // unreachable
|
||||
),
|
||||
nil,
|
||||
),
|
||||
)
|
||||
for _, p := range up.heard {
|
||||
if p == q.dht.self { // don't add self.
|
||||
continue
|
||||
}
|
||||
q.queryPeers.TryAdd(p, up.cause)
|
||||
}
|
||||
for _, p := range up.queried {
|
||||
if p == q.dht.self { // don't add self.
|
||||
continue
|
||||
}
|
||||
if st := q.queryPeers.GetState(p); st == qpeerset.PeerWaiting {
|
||||
q.queryPeers.SetState(p, qpeerset.PeerQueried)
|
||||
q.peerTimes[p] = up.queryDuration
|
||||
} else {
|
||||
panic(fmt.Errorf("kademlia protocol error: tried to transition to the queried state from state %v", st))
|
||||
}
|
||||
}
|
||||
for _, p := range up.unreachable {
|
||||
if p == q.dht.self { // don't add self.
|
||||
continue
|
||||
}
|
||||
|
||||
if st := q.queryPeers.GetState(p); st == qpeerset.PeerWaiting {
|
||||
q.queryPeers.SetState(p, qpeerset.PeerUnreachable)
|
||||
} else {
|
||||
panic(fmt.Errorf("kademlia protocol error: tried to transition to the unreachable state from state %v", st))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (dht *IpfsDHT) dialPeer(ctx context.Context, p peer.ID) error {
|
||||
ctx, span := internal.StartSpan(ctx, "IpfsDHT.DialPeer", trace.WithAttributes(attribute.String("PeerID", p.String())))
|
||||
defer span.End()
|
||||
|
||||
// short-circuit if we're already connected.
|
||||
if dht.host.Network().Connectedness(p) == network.Connected {
|
||||
return nil
|
||||
}
|
||||
|
||||
logger.Debug("not connected. dialing.")
|
||||
routing.PublishQueryEvent(ctx, &routing.QueryEvent{
|
||||
Type: routing.DialingPeer,
|
||||
ID: p,
|
||||
})
|
||||
|
||||
pi := peer.AddrInfo{ID: p}
|
||||
if err := dht.host.Connect(ctx, pi); err != nil {
|
||||
logger.Debugf("error connecting: %s", err)
|
||||
routing.PublishQueryEvent(ctx, &routing.QueryEvent{
|
||||
Type: routing.QueryError,
|
||||
Extra: err.Error(),
|
||||
ID: p,
|
||||
})
|
||||
|
||||
return err
|
||||
}
|
||||
logger.Debugf("connected. dial success.")
|
||||
return nil
|
||||
}
|
go-libp2p-kad-dht/query_test.go (new file)
@ -0,0 +1,118 @@
|
||||
package dht
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
tu "github.com/libp2p/go-libp2p-testing/etc"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// TODO Debug test failures due to timing issue on windows
|
||||
// Tests are timing dependent as can be seen in the 2 seconds timed context that we use in "tu.WaitFor".
|
||||
// While the tests work fine on OSX and complete in under a second,
|
||||
// they repeatedly fail to complete in the stipulated time on Windows.
|
||||
// However, increasing the timeout makes them pass on Windows.
|
||||
|
||||
func TestRTEvictionOnFailedQuery(t *testing.T) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
|
||||
defer cancel()
|
||||
|
||||
d1 := setupDHT(ctx, t, false)
|
||||
d2 := setupDHT(ctx, t, false)
|
||||
|
||||
for i := 0; i < 10; i++ {
|
||||
connect(t, ctx, d1, d2)
|
||||
for _, conn := range d1.host.Network().ConnsToPeer(d2.self) {
|
||||
conn.Close()
|
||||
}
|
||||
}
|
||||
|
||||
// peers should be in the RT because of fixLowPeers
|
||||
require.NoError(t, tu.WaitFor(ctx, func() error {
|
||||
if !checkRoutingTable(d1, d2) {
|
||||
return fmt.Errorf("should have routes")
|
||||
}
|
||||
return nil
|
||||
}))
|
||||
|
||||
// close both hosts so query fails
|
||||
require.NoError(t, d1.host.Close())
|
||||
require.NoError(t, d2.host.Close())
|
||||
// peers will still be in the RT because we have decoupled membership from connectivity
|
||||
require.NoError(t, tu.WaitFor(ctx, func() error {
|
||||
if !checkRoutingTable(d1, d2) {
|
||||
return fmt.Errorf("should have routes")
|
||||
}
|
||||
return nil
|
||||
}))
|
||||
|
||||
// failed queries should remove the peers from the RT
|
||||
_, err := d1.GetClosestPeers(ctx, "test")
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = d2.GetClosestPeers(ctx, "test")
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NoError(t, tu.WaitFor(ctx, func() error {
|
||||
if checkRoutingTable(d1, d2) {
|
||||
return fmt.Errorf("should not have routes")
|
||||
}
|
||||
return nil
|
||||
}))
|
||||
}
|
||||
|
||||
func TestRTAdditionOnSuccessfulQuery(t *testing.T) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
|
||||
defer cancel()
|
||||
|
||||
d1 := setupDHT(ctx, t, false)
|
||||
d2 := setupDHT(ctx, t, false)
|
||||
d3 := setupDHT(ctx, t, false)
|
||||
|
||||
connect(t, ctx, d1, d2)
|
||||
connect(t, ctx, d2, d3)
|
||||
// validate RT states
|
||||
|
||||
// d1 has d2
|
||||
require.NoError(t, tu.WaitFor(ctx, func() error {
|
||||
if !checkRoutingTable(d1, d2) {
|
||||
return fmt.Errorf("should have routes")
|
||||
}
|
||||
return nil
|
||||
}))
|
||||
// d2 has d3
|
||||
require.NoError(t, tu.WaitFor(ctx, func() error {
|
||||
if !checkRoutingTable(d2, d3) {
|
||||
return fmt.Errorf("should have routes")
|
||||
}
|
||||
return nil
|
||||
}))
|
||||
|
||||
// however, d1 does not know about d3
|
||||
require.NoError(t, tu.WaitFor(ctx, func() error {
|
||||
if checkRoutingTable(d1, d3) {
|
||||
return fmt.Errorf("should not have routes")
|
||||
}
|
||||
return nil
|
||||
}))
|
||||
|
||||
// but when d3 queries d2, d1 and d3 discover each other
|
||||
_, err := d3.GetClosestPeers(ctx, "something")
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, tu.WaitFor(ctx, func() error {
|
||||
if !checkRoutingTable(d1, d3) {
|
||||
return fmt.Errorf("should have routes")
|
||||
}
|
||||
return nil
|
||||
}))
|
||||
}
|
||||
|
||||
func checkRoutingTable(a, b *IpfsDHT) bool {
|
||||
// loop until connection notification has been received.
|
||||
// under high load, this may not happen as immediately as we would like.
|
||||
return a.routingTable.Find(b.self) != "" && b.routingTable.Find(a.self) != ""
|
||||
}
|
go-libp2p-kad-dht/records.go (new file)
@ -0,0 +1,138 @@
|
||||
package dht
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/libp2p/go-libp2p-kad-dht/internal"
|
||||
ci "github.com/libp2p/go-libp2p/core/crypto"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/libp2p/go-libp2p/core/routing"
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/trace"
|
||||
)
|
||||
|
||||
type pubkrs struct {
|
||||
pubk ci.PubKey
|
||||
err error
|
||||
}
|
||||
|
||||
// GetPublicKey gets the public key for a given peer ID. It extracts the key
// from the peer ID itself if it is inlined; otherwise it asks both the node
// the ID belongs to and the DHT.
|
||||
func (dht *IpfsDHT) GetPublicKey(ctx context.Context, p peer.ID) (ci.PubKey, error) {
|
||||
ctx, span := internal.StartSpan(ctx, "IpfsDHT.GetPublicKey", trace.WithAttributes(attribute.Stringer("PeerID", p)))
|
||||
defer span.End()
|
||||
|
||||
if !dht.enableValues {
|
||||
return nil, routing.ErrNotSupported
|
||||
}
|
||||
|
||||
logger.Debugf("getPublicKey for: %s", p)
|
||||
|
||||
// Check locally. Will also try to extract the public key from the peer
|
||||
// ID itself if possible (if inlined).
|
||||
pk := dht.peerstore.PubKey(p)
|
||||
if pk != nil {
|
||||
return pk, nil
|
||||
}
|
||||
|
||||
// Try getting the public key both directly from the node it identifies
|
||||
// and from the DHT, in parallel
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
defer cancel()
|
||||
resp := make(chan pubkrs, 2)
|
||||
go func() {
|
||||
pubk, err := dht.getPublicKeyFromNode(ctx, p)
|
||||
resp <- pubkrs{pubk, err}
|
||||
}()
|
||||
|
||||
// Note that the number of open connections is capped by the dial
|
||||
// limiter, so there is a chance that getPublicKeyFromDHT(), which
|
||||
// potentially opens a lot of connections, will block
|
||||
// getPublicKeyFromNode() from getting a connection.
|
||||
// Currently this doesn't seem to cause an issue so leaving as is
|
||||
// for now.
|
||||
go func() {
|
||||
pubk, err := dht.getPublicKeyFromDHT(ctx, p)
|
||||
resp <- pubkrs{pubk, err}
|
||||
}()
|
||||
|
||||
// Wait for one of the two go routines to return
|
||||
// a public key (or for both to error out)
|
||||
var err error
|
||||
for i := 0; i < 2; i++ {
|
||||
r := <-resp
|
||||
if r.err == nil {
|
||||
// Found the public key
|
||||
err := dht.peerstore.AddPubKey(p, r.pubk)
|
||||
if err != nil {
|
||||
logger.Errorw("failed to add public key to peerstore", "peer", p)
|
||||
}
|
||||
return r.pubk, nil
|
||||
}
|
||||
err = r.err
|
||||
}
|
||||
|
||||
// Both go routines failed to find a public key
|
||||
return nil, err
|
||||
}
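
GetPublicKey races the direct request against the DHT lookup and takes whichever succeeds first, cancelling the other via the shared context. A standalone sketch of that pattern under illustrative names (firstSuccess and the string results are not part of this package):

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

type result struct {
	val string
	err error
}

// firstSuccess runs all fetchers concurrently and returns the first
// non-error result, or the last error if every fetcher fails.
func firstSuccess(ctx context.Context, fetchers ...func(context.Context) (string, error)) (string, error) {
	ctx, cancel := context.WithCancel(ctx)
	defer cancel() // stop the losers once a winner is found
	resp := make(chan result, len(fetchers))
	for _, f := range fetchers {
		f := f
		go func() {
			v, err := f(ctx)
			resp <- result{v, err}
		}()
	}
	var lastErr error
	for range fetchers {
		r := <-resp
		if r.err == nil {
			return r.val, nil
		}
		lastErr = r.err
	}
	return "", lastErr
}

func main() {
	slow := func(ctx context.Context) (string, error) {
		time.Sleep(50 * time.Millisecond)
		return "from-dht", nil
	}
	failing := func(ctx context.Context) (string, error) {
		return "", errors.New("node unreachable")
	}
	v, err := firstSuccess(context.Background(), failing, slow)
	fmt.Println(v, err)
}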
|
||||
|
||||
func (dht *IpfsDHT) getPublicKeyFromDHT(ctx context.Context, p peer.ID) (ci.PubKey, error) {
|
||||
// Only retrieve one value, because the public key is immutable
|
||||
// so there's no need to retrieve multiple versions
|
||||
pkkey := routing.KeyForPublicKey(p)
|
||||
val, err := dht.GetValue(ctx, pkkey, Quorum(1))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
pubk, err := ci.UnmarshalPublicKey(val)
|
||||
if err != nil {
|
||||
logger.Errorf("Could not unmarshal public key retrieved from DHT for %v", p)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Note: No need to check that public key hash matches peer ID
|
||||
// because this is done by GetValues()
|
||||
logger.Debugf("Got public key for %s from DHT", p)
|
||||
return pubk, nil
|
||||
}
|
||||
|
||||
func (dht *IpfsDHT) getPublicKeyFromNode(ctx context.Context, p peer.ID) (ci.PubKey, error) {
|
||||
// check locally, just in case...
|
||||
pk := dht.peerstore.PubKey(p)
|
||||
if pk != nil {
|
||||
return pk, nil
|
||||
}
|
||||
|
||||
// Get the key from the node itself
|
||||
pkkey := routing.KeyForPublicKey(p)
|
||||
record, _, err := dht.protoMessenger.GetValue(ctx, p, pkkey)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// node doesn't have key :(
|
||||
if record == nil {
|
||||
return nil, fmt.Errorf("node %v not responding with its public key", p)
|
||||
}
|
||||
|
||||
pubk, err := ci.UnmarshalPublicKey(record.GetValue())
|
||||
if err != nil {
|
||||
logger.Errorf("Could not unmarshal public key for %v", p)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Make sure the public key matches the peer ID
|
||||
id, err := peer.IDFromPublicKey(pubk)
|
||||
if err != nil {
|
||||
logger.Errorf("Could not extract peer id from public key for %v", p)
|
||||
return nil, err
|
||||
}
|
||||
if id != p {
|
||||
return nil, fmt.Errorf("public key %v does not match peer %v", id, p)
|
||||
}
|
||||
|
||||
logger.Debugf("Got public key from node %v itself", p)
|
||||
return pubk, nil
|
||||
}
|
go-libp2p-kad-dht/records_test.go (new file)
@ -0,0 +1,384 @@
|
||||
package dht
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/test"
|
||||
|
||||
u "github.com/ipfs/boxo/util"
|
||||
record "github.com/libp2p/go-libp2p-record"
|
||||
tnet "github.com/libp2p/go-libp2p-testing/net"
|
||||
ci "github.com/libp2p/go-libp2p/core/crypto"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/libp2p/go-libp2p/core/routing"
|
||||
)
|
||||
|
||||
// Check that GetPublicKey() correctly extracts a public key
|
||||
func TestPubkeyExtract(t *testing.T) {
|
||||
t.Skip("public key extraction for ed25519 keys has been disabled. See https://github.com/libp2p/specs/issues/111")
|
||||
ctx := context.Background()
|
||||
dht := setupDHT(ctx, t, false)
|
||||
defer dht.Close()
|
||||
|
||||
_, pk, err := ci.GenerateEd25519Key(rand.Reader)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
pid, err := peer.IDFromPublicKey(pk)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
pkOut, err := dht.GetPublicKey(context.Background(), pid)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if !pkOut.Equals(pk) {
|
||||
t.Fatal("got incorrect public key out")
|
||||
}
|
||||
}
|
||||
|
||||
// Check that GetPublicKey() correctly retrieves a public key from the peerstore
|
||||
func TestPubkeyPeerstore(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
dht := setupDHT(ctx, t, false)
|
||||
|
||||
identity := tnet.RandIdentityOrFatal(t)
|
||||
err := dht.peerstore.AddPubKey(identity.ID(), identity.PublicKey())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
rpubk, err := dht.GetPublicKey(context.Background(), identity.ID())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if !identity.PublicKey().Equals(rpubk) {
|
||||
t.Fatal("got incorrect public key")
|
||||
}
|
||||
}
|
||||
|
||||
// Check that GetPublicKey() correctly retrieves a public key directly
|
||||
// from the node it identifies
|
||||
func TestPubkeyDirectFromNode(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
dhtA := setupDHT(ctx, t, false)
|
||||
dhtB := setupDHT(ctx, t, false)
|
||||
|
||||
defer dhtA.Close()
|
||||
defer dhtB.Close()
|
||||
defer dhtA.host.Close()
|
||||
defer dhtB.host.Close()
|
||||
|
||||
connect(t, ctx, dhtA, dhtB)
|
||||
|
||||
pubk, err := dhtA.GetPublicKey(context.Background(), dhtB.self)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
id, err := peer.IDFromPublicKey(pubk)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if id != dhtB.self {
|
||||
t.Fatal("got incorrect public key")
|
||||
}
|
||||
}
|
||||
|
||||
// Check that GetPublicKey() correctly retrieves a public key
|
||||
// from the DHT
|
||||
func TestPubkeyFromDHT(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
dhtA := setupDHT(ctx, t, false)
|
||||
dhtB := setupDHT(ctx, t, false)
|
||||
|
||||
defer dhtA.Close()
|
||||
defer dhtB.Close()
|
||||
defer dhtA.host.Close()
|
||||
defer dhtB.host.Close()
|
||||
|
||||
connect(t, ctx, dhtA, dhtB)
|
||||
|
||||
identity := tnet.RandIdentityOrFatal(t)
|
||||
pubk := identity.PublicKey()
|
||||
id := identity.ID()
|
||||
pkkey := routing.KeyForPublicKey(id)
|
||||
pkbytes, err := ci.MarshalPublicKey(pubk)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Store public key on node B
|
||||
err = dhtB.PutValue(ctx, pkkey, pkbytes)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Retrieve public key on node A
|
||||
rpubk, err := dhtA.GetPublicKey(ctx, id)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if !pubk.Equals(rpubk) {
|
||||
t.Fatal("got incorrect public key")
|
||||
}
|
||||
}
|
||||
|
||||
// Check that GetPublicKey() correctly returns an error when the
|
||||
// public key is not available directly from the node or on the DHT
|
||||
func TestPubkeyNotFound(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
dhtA := setupDHT(ctx, t, false)
|
||||
dhtB := setupDHT(ctx, t, false)
|
||||
|
||||
defer dhtA.Close()
|
||||
defer dhtB.Close()
|
||||
defer dhtA.host.Close()
|
||||
defer dhtB.host.Close()
|
||||
|
||||
connect(t, ctx, dhtA, dhtB)
|
||||
|
||||
r := u.NewSeededRand(15) // generate deterministic keypair
|
||||
_, pubk, err := ci.GenerateKeyPairWithReader(ci.RSA, 2048, r)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
id, err := peer.IDFromPublicKey(pubk)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Attempt to retrieve public key on node A (should be not found)
|
||||
_, err = dhtA.GetPublicKey(ctx, id)
|
||||
if err == nil {
|
||||
t.Fatal("Expected not found error")
|
||||
}
|
||||
}
|
||||
|
||||
// Check that GetPublicKey() returns an error when
|
||||
// the DHT returns the wrong key
|
||||
func TestPubkeyBadKeyFromDHT(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
dhtA := setupDHT(ctx, t, false)
|
||||
dhtB := setupDHT(ctx, t, false)
|
||||
|
||||
defer dhtA.Close()
|
||||
defer dhtB.Close()
|
||||
defer dhtA.host.Close()
|
||||
defer dhtB.host.Close()
|
||||
|
||||
connect(t, ctx, dhtA, dhtB)
|
||||
|
||||
_, pk, err := test.RandTestKeyPair(ci.RSA, 2048)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
id, err := peer.IDFromPublicKey(pk)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
pkkey := routing.KeyForPublicKey(id)
|
||||
|
||||
peer2 := tnet.RandIdentityOrFatal(t)
|
||||
if pk == peer2.PublicKey() {
|
||||
t.Fatal("Public keys shouldn't match here")
|
||||
}
|
||||
wrongbytes, err := ci.MarshalPublicKey(peer2.PublicKey())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Store incorrect public key on node B
|
||||
rec := record.MakePutRecord(pkkey, wrongbytes)
|
||||
rec.TimeReceived = u.FormatRFC3339(time.Now())
|
||||
err = dhtB.putLocal(ctx, pkkey, rec)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Retrieve public key from node A
|
||||
_, err = dhtA.GetPublicKey(ctx, id)
|
||||
if err == nil {
|
||||
t.Fatal("Expected error because public key is incorrect")
|
||||
}
|
||||
}
|
||||
|
||||
// Check that GetPublicKey() returns the correct value
|
||||
// when the DHT returns the wrong key but the direct
|
||||
// connection returns the correct key
|
||||
func TestPubkeyBadKeyFromDHTGoodKeyDirect(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
dhtA := setupDHT(ctx, t, false)
|
||||
dhtB := setupDHT(ctx, t, false)
|
||||
|
||||
defer dhtA.Close()
|
||||
defer dhtB.Close()
|
||||
defer dhtA.host.Close()
|
||||
defer dhtB.host.Close()
|
||||
|
||||
connect(t, ctx, dhtA, dhtB)
|
||||
|
||||
wrong := tnet.RandIdentityOrFatal(t)
|
||||
pkkey := routing.KeyForPublicKey(dhtB.self)
|
||||
|
||||
wrongbytes, err := ci.MarshalPublicKey(wrong.PublicKey())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Store incorrect public key on node B
|
||||
rec := record.MakePutRecord(pkkey, wrongbytes)
|
||||
rec.TimeReceived = u.FormatRFC3339(time.Now())
|
||||
err = dhtB.putLocal(ctx, pkkey, rec)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Retrieve public key from node A
|
||||
pubk, err := dhtA.GetPublicKey(ctx, dhtB.self)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
id, err := peer.IDFromPublicKey(pubk)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// The incorrect public key retrieved from the DHT
|
||||
// should be ignored in favour of the correct public
|
||||
// key retrieved from the node directly
|
||||
if id != dhtB.self {
|
||||
t.Fatal("got incorrect public key")
|
||||
}
|
||||
}
|
||||
|
||||
// Check that GetPublicKey() returns the correct value
|
||||
// when both the DHT returns the correct key and the direct
|
||||
// connection returns the correct key
|
||||
func TestPubkeyGoodKeyFromDHTGoodKeyDirect(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
dhtA := setupDHT(ctx, t, false)
|
||||
dhtB := setupDHT(ctx, t, false)
|
||||
|
||||
defer dhtA.Close()
|
||||
defer dhtB.Close()
|
||||
defer dhtA.host.Close()
|
||||
defer dhtB.host.Close()
|
||||
|
||||
connect(t, ctx, dhtA, dhtB)
|
||||
|
||||
pubk := dhtB.peerstore.PubKey(dhtB.self)
|
||||
pkbytes, err := ci.MarshalPublicKey(pubk)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Store public key on node B
|
||||
pkkey := routing.KeyForPublicKey(dhtB.self)
|
||||
err = dhtB.PutValue(ctx, pkkey, pkbytes)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Retrieve public key on node A
|
||||
rpubk, err := dhtA.GetPublicKey(ctx, dhtB.self)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if !pubk.Equals(rpubk) {
|
||||
t.Fatal("got incorrect public key")
|
||||
}
|
||||
}
|
||||
|
||||
func TestValuesDisabled(t *testing.T) {
|
||||
for i := 0; i < 3; i++ {
|
||||
enabledA := (i & 0x1) > 0
|
||||
enabledB := (i & 0x2) > 0
|
||||
t.Run(fmt.Sprintf("a=%v/b=%v", enabledA, enabledB), func(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
var (
|
||||
optsA, optsB []Option
|
||||
)
|
||||
optsA = append(optsA, ProtocolPrefix("/valuesMaybeDisabled"))
|
||||
optsB = append(optsB, ProtocolPrefix("/valuesMaybeDisabled"))
|
||||
|
||||
if !enabledA {
|
||||
optsA = append(optsA, DisableValues())
|
||||
}
|
||||
if !enabledB {
|
||||
optsB = append(optsB, DisableValues())
|
||||
}
|
||||
|
||||
dhtA := setupDHT(ctx, t, false, optsA...)
|
||||
dhtB := setupDHT(ctx, t, false, optsB...)
|
||||
|
||||
defer dhtA.Close()
|
||||
defer dhtB.Close()
|
||||
defer dhtA.host.Close()
|
||||
defer dhtB.host.Close()
|
||||
|
||||
connect(t, ctx, dhtA, dhtB)
|
||||
|
||||
pubk := dhtB.peerstore.PubKey(dhtB.self)
|
||||
pkbytes, err := ci.MarshalPublicKey(pubk)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
pkkey := routing.KeyForPublicKey(dhtB.self)
|
||||
err = dhtB.PutValue(ctx, pkkey, pkbytes)
|
||||
if enabledB {
|
||||
if err != nil {
|
||||
t.Fatal("put should have succeeded on node B", err)
|
||||
}
|
||||
} else {
|
||||
if err != routing.ErrNotSupported {
|
||||
t.Fatal("should not have put the value to node B", err)
|
||||
}
|
||||
_, err = dhtB.GetValue(ctx, pkkey)
|
||||
if err != routing.ErrNotSupported {
|
||||
t.Fatal("get should have failed on node B")
|
||||
}
|
||||
rec, _ := dhtB.getLocal(ctx, pkkey)
|
||||
if rec != nil {
|
||||
t.Fatal("node B should not have found the value locally")
|
||||
}
|
||||
}
|
||||
|
||||
_, err = dhtA.GetValue(ctx, pkkey)
|
||||
if enabledA {
|
||||
if err != routing.ErrNotFound {
|
||||
t.Fatal("node A should not have found the value")
|
||||
}
|
||||
} else {
|
||||
if err != routing.ErrNotSupported {
|
||||
t.Fatal("node A should not have found the value")
|
||||
}
|
||||
}
|
||||
rec, _ := dhtA.getLocal(ctx, pkkey)
|
||||
if rec != nil {
|
||||
t.Fatal("node A should not have found the value locally")
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
go-libp2p-kad-dht/routing.go (new file)
@ -0,0 +1,694 @@
|
||||
package dht
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/libp2p/go-libp2p/core/peerstore"
|
||||
"github.com/libp2p/go-libp2p/core/routing"
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/trace"
|
||||
|
||||
u "github.com/ipfs/boxo/util"
|
||||
"github.com/ipfs/go-cid"
|
||||
"github.com/libp2p/go-libp2p-kad-dht/internal"
|
||||
internalConfig "github.com/libp2p/go-libp2p-kad-dht/internal/config"
|
||||
"github.com/libp2p/go-libp2p-kad-dht/netsize"
|
||||
"github.com/libp2p/go-libp2p-kad-dht/qpeerset"
|
||||
kb "github.com/libp2p/go-libp2p-kbucket"
|
||||
record "github.com/libp2p/go-libp2p-record"
|
||||
"github.com/multiformats/go-multihash"
|
||||
)
|
||||
|
||||
// This file implements the Routing interface for the IpfsDHT struct.
|
||||
|
||||
// Basic Put/Get
|
||||
|
||||
// PutValue adds value corresponding to given Key.
|
||||
// This is the top level "Store" operation of the DHT
|
||||
func (dht *IpfsDHT) PutValue(ctx context.Context, key string, value []byte, opts ...routing.Option) (err error) {
|
||||
ctx, end := tracer.PutValue(dhtName, ctx, key, value, opts...)
|
||||
defer func() { end(err) }()
|
||||
|
||||
if !dht.enableValues {
|
||||
return routing.ErrNotSupported
|
||||
}
|
||||
|
||||
logger.Debugw("putting value", "key", internal.LoggableRecordKeyString(key))
|
||||
|
||||
// don't even allow local users to put bad values.
|
||||
if err := dht.Validator.Validate(key, value); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
old, err := dht.getLocal(ctx, key)
|
||||
if err != nil {
|
||||
// Means something is wrong with the datastore.
|
||||
return err
|
||||
}
|
||||
|
||||
// Check if we have an old value that's not the same as the new one.
|
||||
if old != nil && !bytes.Equal(old.GetValue(), value) {
|
||||
// Check to see if the new one is better.
|
||||
i, err := dht.Validator.Select(key, [][]byte{value, old.GetValue()})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if i != 0 {
|
||||
return fmt.Errorf("can't replace a newer value with an older value")
|
||||
}
|
||||
}
|
||||
|
||||
rec := record.MakePutRecord(key, value)
|
||||
rec.TimeReceived = u.FormatRFC3339(time.Now())
|
||||
err = dht.putLocal(ctx, key, rec)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
peers, err := dht.GetClosestPeers(ctx, key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
wg := sync.WaitGroup{}
|
||||
for _, p := range peers {
|
||||
wg.Add(1)
|
||||
go func(p peer.ID) {
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
defer cancel()
|
||||
defer wg.Done()
|
||||
routing.PublishQueryEvent(ctx, &routing.QueryEvent{
|
||||
Type: routing.Value,
|
||||
ID: p,
|
||||
})
|
||||
|
||||
err := dht.protoMessenger.PutValue(ctx, p, rec)
|
||||
if err != nil {
|
||||
logger.Debugf("failed putting value to peer: %s", err)
|
||||
}
|
||||
}(p)
|
||||
}
|
||||
wg.Wait()
|
||||
|
||||
return nil
|
||||
}
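
A short usage sketch of the store/retrieve pair, written as if inside the dht package; it assumes an already-bootstrapped *IpfsDHT whose Validator accepts the "/v/" namespace (as the package tests configure) and an fmt import:

// Usage sketch only: dht is assumed to be an initialized, bootstrapped
// *IpfsDHT whose Validator accepts the "/v/" namespace.
func putAndGet(ctx context.Context, dht *IpfsDHT) error {
	if err := dht.PutValue(ctx, "/v/hello", []byte("world")); err != nil {
		return err
	}
	val, err := dht.GetValue(ctx, "/v/hello", Quorum(1))
	if err != nil {
		return err
	}
	fmt.Printf("got %q\n", val)
	return nil
}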
|
||||
|
||||
// recvdVal stores a value and the peer from which we got the value.
|
||||
type recvdVal struct {
|
||||
Val []byte
|
||||
From peer.ID
|
||||
}
|
||||
|
||||
// GetValue searches for the value corresponding to given Key.
|
||||
func (dht *IpfsDHT) GetValue(ctx context.Context, key string, opts ...routing.Option) (result []byte, err error) {
|
||||
ctx, end := tracer.GetValue(dhtName, ctx, key, opts...)
|
||||
defer func() { end(result, err) }()
|
||||
|
||||
if !dht.enableValues {
|
||||
return nil, routing.ErrNotSupported
|
||||
}
|
||||
|
||||
// apply defaultQuorum if relevant
|
||||
var cfg routing.Options
|
||||
if err := cfg.Apply(opts...); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
opts = append(opts, Quorum(internalConfig.GetQuorum(&cfg)))
|
||||
|
||||
responses, err := dht.SearchValue(ctx, key, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var best []byte
|
||||
|
||||
for r := range responses {
|
||||
best = r
|
||||
}
|
||||
|
||||
if ctx.Err() != nil {
|
||||
return best, ctx.Err()
|
||||
}
|
||||
|
||||
if best == nil {
|
||||
return nil, routing.ErrNotFound
|
||||
}
|
||||
logger.Debugf("GetValue %v %x", internal.LoggableRecordKeyString(key), best)
|
||||
return best, nil
|
||||
}
|
||||
|
||||
// SearchValue searches for the value corresponding to given Key and streams the results.
|
||||
func (dht *IpfsDHT) SearchValue(ctx context.Context, key string, opts ...routing.Option) (ch <-chan []byte, err error) {
|
||||
ctx, end := tracer.SearchValue(dhtName, ctx, key, opts...)
|
||||
defer func() { ch, err = end(ch, err) }()
|
||||
|
||||
if !dht.enableValues {
|
||||
return nil, routing.ErrNotSupported
|
||||
}
|
||||
|
||||
var cfg routing.Options
|
||||
if err := cfg.Apply(opts...); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
responsesNeeded := 0
|
||||
if !cfg.Offline {
|
||||
responsesNeeded = internalConfig.GetQuorum(&cfg)
|
||||
}
|
||||
|
||||
stopCh := make(chan struct{})
|
||||
valCh, lookupRes := dht.getValues(ctx, key, stopCh)
|
||||
|
||||
out := make(chan []byte)
|
||||
go func() {
|
||||
defer close(out)
|
||||
best, peersWithBest, aborted := dht.searchValueQuorum(ctx, key, valCh, stopCh, out, responsesNeeded)
|
||||
if best == nil || aborted {
|
||||
return
|
||||
}
|
||||
|
||||
updatePeers := make([]peer.ID, 0, dht.bucketSize)
|
||||
select {
|
||||
case l := <-lookupRes:
|
||||
if l == nil {
|
||||
return
|
||||
}
|
||||
|
||||
for _, p := range l.peers {
|
||||
if _, ok := peersWithBest[p]; !ok {
|
||||
updatePeers = append(updatePeers, p)
|
||||
}
|
||||
}
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
|
||||
dht.updatePeerValues(dht.Context(), key, best, updatePeers)
|
||||
}()
|
||||
|
||||
return out, nil
|
||||
}
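
Because SearchValue streams progressively better values, callers typically range over the channel and keep the last value seen. A hedged consumption sketch, written as if inside the dht package with an already-bootstrapped *IpfsDHT:

// Usage sketch only: dht is assumed to be an initialized *IpfsDHT and key a
// record key its Validator understands.
func getBest(ctx context.Context, dht *IpfsDHT, key string) ([]byte, error) {
	ch, err := dht.SearchValue(ctx, key, Quorum(4))
	if err != nil {
		return nil, err
	}
	var best []byte
	for val := range ch {
		best = val // later values are never worse than earlier ones
	}
	if best == nil {
		return nil, routing.ErrNotFound
	}
	return best, nil
}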
|
||||
|
||||
func (dht *IpfsDHT) searchValueQuorum(ctx context.Context, key string, valCh <-chan recvdVal, stopCh chan struct{},
|
||||
out chan<- []byte, nvals int) ([]byte, map[peer.ID]struct{}, bool) {
|
||||
numResponses := 0
|
||||
return dht.processValues(ctx, key, valCh,
|
||||
func(ctx context.Context, v recvdVal, better bool) bool {
|
||||
numResponses++
|
||||
if better {
|
||||
select {
|
||||
case out <- v.Val:
|
||||
case <-ctx.Done():
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
if nvals > 0 && numResponses > nvals {
|
||||
close(stopCh)
|
||||
return true
|
||||
}
|
||||
return false
|
||||
})
|
||||
}
|
||||
|
||||
func (dht *IpfsDHT) processValues(ctx context.Context, key string, vals <-chan recvdVal,
|
||||
newVal func(ctx context.Context, v recvdVal, better bool) bool) (best []byte, peersWithBest map[peer.ID]struct{}, aborted bool) {
|
||||
loop:
|
||||
for {
|
||||
if aborted {
|
||||
return
|
||||
}
|
||||
|
||||
select {
|
||||
case v, ok := <-vals:
|
||||
if !ok {
|
||||
break loop
|
||||
}
|
||||
|
||||
// Select best value
|
||||
if best != nil {
|
||||
if bytes.Equal(best, v.Val) {
|
||||
peersWithBest[v.From] = struct{}{}
|
||||
aborted = newVal(ctx, v, false)
|
||||
continue
|
||||
}
|
||||
sel, err := dht.Validator.Select(key, [][]byte{best, v.Val})
|
||||
if err != nil {
|
||||
logger.Warnw("failed to select best value", "key", internal.LoggableRecordKeyString(key), "error", err)
|
||||
continue
|
||||
}
|
||||
if sel != 1 {
|
||||
aborted = newVal(ctx, v, false)
|
||||
continue
|
||||
}
|
||||
}
|
||||
peersWithBest = make(map[peer.ID]struct{})
|
||||
peersWithBest[v.From] = struct{}{}
|
||||
best = v.Val
|
||||
aborted = newVal(ctx, v, true)
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (dht *IpfsDHT) updatePeerValues(ctx context.Context, key string, val []byte, peers []peer.ID) {
|
||||
fixupRec := record.MakePutRecord(key, val)
|
||||
for _, p := range peers {
|
||||
go func(p peer.ID) {
|
||||
// TODO: Is this possible?
|
||||
if p == dht.self {
|
||||
err := dht.putLocal(ctx, key, fixupRec)
|
||||
if err != nil {
|
||||
logger.Error("Error correcting local dht entry:", err)
|
||||
}
|
||||
return
|
||||
}
|
||||
ctx, cancel := context.WithTimeout(ctx, time.Second*30)
|
||||
defer cancel()
|
||||
err := dht.protoMessenger.PutValue(ctx, p, fixupRec)
|
||||
if err != nil {
|
||||
logger.Debug("Error correcting DHT entry: ", err)
|
||||
}
|
||||
}(p)
|
||||
}
|
||||
}
|
||||
|
||||
func (dht *IpfsDHT) getValues(ctx context.Context, key string, stopQuery chan struct{}) (<-chan recvdVal, <-chan *lookupWithFollowupResult) {
|
||||
valCh := make(chan recvdVal, 1)
|
||||
lookupResCh := make(chan *lookupWithFollowupResult, 1)
|
||||
|
||||
logger.Debugw("finding value", "key", internal.LoggableRecordKeyString(key))
|
||||
|
||||
if rec, err := dht.getLocal(ctx, key); rec != nil && err == nil {
|
||||
select {
|
||||
case valCh <- recvdVal{
|
||||
Val: rec.GetValue(),
|
||||
From: dht.self,
|
||||
}:
|
||||
case <-ctx.Done():
|
||||
}
|
||||
}
|
||||
|
||||
go func() {
|
||||
defer close(valCh)
|
||||
defer close(lookupResCh)
|
||||
lookupRes, err := dht.runLookupWithFollowup(ctx, key,
|
||||
func(ctx context.Context, p peer.ID) ([]*peer.AddrInfo, error) {
|
||||
// For DHT query command
|
||||
routing.PublishQueryEvent(ctx, &routing.QueryEvent{
|
||||
Type: routing.SendingQuery,
|
||||
ID: p,
|
||||
})
|
||||
|
||||
rec, peers, err := dht.protoMessenger.GetValue(ctx, p, key)
|
||||
if err != nil {
|
||||
logger.Debugf("error getting closer peers: %s", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// For DHT query command
|
||||
routing.PublishQueryEvent(ctx, &routing.QueryEvent{
|
||||
Type: routing.PeerResponse,
|
||||
ID: p,
|
||||
Responses: peers,
|
||||
})
|
||||
|
||||
if rec == nil {
|
||||
return peers, nil
|
||||
}
|
||||
|
||||
val := rec.GetValue()
|
||||
if val == nil {
|
||||
logger.Debug("received a nil record value")
|
||||
return peers, nil
|
||||
}
|
||||
if err := dht.Validator.Validate(key, val); err != nil {
|
||||
// make sure record is valid
|
||||
logger.Debugw("received invalid record (discarded)", "error", err)
|
||||
return peers, nil
|
||||
}
|
||||
|
||||
// the record is present and valid, send it out for processing
|
||||
select {
|
||||
case valCh <- recvdVal{
|
||||
Val: val,
|
||||
From: p,
|
||||
}:
|
||||
case <-ctx.Done():
|
||||
return nil, ctx.Err()
|
||||
}
|
||||
|
||||
return peers, nil
|
||||
},
|
||||
func(*qpeerset.QueryPeerset) bool {
|
||||
select {
|
||||
case <-stopQuery:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
},
|
||||
)
|
||||
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
lookupResCh <- lookupRes
|
||||
|
||||
if ctx.Err() == nil {
|
||||
dht.refreshRTIfNoShortcut(kb.ConvertKey(key), lookupRes)
|
||||
}
|
||||
}()
|
||||
|
||||
return valCh, lookupResCh
|
||||
}
|
||||
|
||||
func (dht *IpfsDHT) refreshRTIfNoShortcut(key kb.ID, lookupRes *lookupWithFollowupResult) {
|
||||
if lookupRes.completed {
|
||||
// refresh the cpl for this key as the query was successful
|
||||
dht.routingTable.ResetCplRefreshedAtForID(key, time.Now())
|
||||
}
|
||||
}
|
||||
|
||||
// Provider abstraction for indirect stores.
|
||||
// Some DHTs store values directly, while an indirect store stores pointers to
|
||||
// locations of the value, similarly to Coral and Mainline DHT.
|
||||
|
||||
// Provide makes this node announce that it can provide a value for the given key
|
||||
func (dht *IpfsDHT) Provide(ctx context.Context, key cid.Cid, brdcst bool) (err error) {
|
||||
ctx, end := tracer.Provide(dhtName, ctx, key, brdcst)
|
||||
defer func() { end(err) }()
|
||||
|
||||
if !dht.enableProviders {
|
||||
return routing.ErrNotSupported
|
||||
} else if !key.Defined() {
|
||||
return fmt.Errorf("invalid cid: undefined")
|
||||
}
|
||||
keyMH := key.Hash()
|
||||
logger.Debugw("providing", "cid", key, "mh", internal.LoggableProviderRecordBytes(keyMH))
|
||||
|
||||
// add self locally
|
||||
dht.providerStore.AddProvider(ctx, keyMH, peer.AddrInfo{ID: dht.self})
|
||||
if !brdcst {
|
||||
return nil
|
||||
}
|
||||
|
||||
if dht.enableOptProv {
|
||||
err := dht.optimisticProvide(ctx, keyMH)
|
||||
if errors.Is(err, netsize.ErrNotEnoughData) {
|
||||
logger.Debugln("not enough data for optimistic provide taking classic approach")
|
||||
return dht.classicProvide(ctx, keyMH)
|
||||
}
|
||||
return err
|
||||
}
|
||||
return dht.classicProvide(ctx, keyMH)
|
||||
}
|
||||
|
||||
func (dht *IpfsDHT) classicProvide(ctx context.Context, keyMH multihash.Multihash) error {
|
||||
closerCtx := ctx
|
||||
if deadline, ok := ctx.Deadline(); ok {
|
||||
now := time.Now()
|
||||
timeout := deadline.Sub(now)
|
||||
|
||||
if timeout < 0 {
|
||||
// timed out
|
||||
return context.DeadlineExceeded
|
||||
} else if timeout < 10*time.Second {
|
||||
// Reserve 10% for the final put.
|
||||
deadline = deadline.Add(-timeout / 10)
|
||||
} else {
|
||||
// Otherwise, reserve a second (we'll already be
|
||||
// connected so this should be fast).
|
||||
deadline = deadline.Add(-time.Second)
|
||||
}
|
||||
var cancel context.CancelFunc
|
||||
closerCtx, cancel = context.WithDeadline(ctx, deadline)
|
||||
defer cancel()
|
||||
}
|
||||
|
||||
var exceededDeadline bool
|
||||
peers, err := dht.GetClosestPeers(closerCtx, string(keyMH))
|
||||
switch err {
|
||||
case context.DeadlineExceeded:
|
||||
// If the _inner_ deadline has been exceeded but the _outer_
|
||||
// context is still fine, provide the value to the closest peers
|
||||
// we managed to find, even if they're not the _actual_ closest peers.
|
||||
if ctx.Err() != nil {
|
||||
return ctx.Err()
|
||||
}
|
||||
exceededDeadline = true
|
||||
case nil:
|
||||
default:
|
||||
return err
|
||||
}
|
||||
|
||||
wg := sync.WaitGroup{}
|
||||
for _, p := range peers {
|
||||
wg.Add(1)
|
||||
go func(p peer.ID) {
|
||||
defer wg.Done()
|
||||
logger.Debugf("putProvider(%s, %s)", internal.LoggableProviderRecordBytes(keyMH), p)
|
||||
err := dht.protoMessenger.PutProviderAddrs(ctx, p, keyMH, peer.AddrInfo{
|
||||
ID: dht.self,
|
||||
Addrs: dht.filterAddrs(dht.host.Addrs()),
|
||||
})
|
||||
if err != nil {
|
||||
logger.Debug(err)
|
||||
}
|
||||
}(p)
|
||||
}
|
||||
wg.Wait()
|
||||
if exceededDeadline {
|
||||
return context.DeadlineExceeded
|
||||
}
|
||||
return ctx.Err()
|
||||
}
|
||||
|
||||
// FindProviders searches until the context expires.
|
||||
func (dht *IpfsDHT) FindProviders(ctx context.Context, c cid.Cid) ([]peer.AddrInfo, error) {
|
||||
if !dht.enableProviders {
|
||||
return nil, routing.ErrNotSupported
|
||||
} else if !c.Defined() {
|
||||
return nil, fmt.Errorf("invalid cid: undefined")
|
||||
}
|
||||
|
||||
var providers []peer.AddrInfo
|
||||
for p := range dht.FindProvidersAsync(ctx, c, dht.bucketSize) {
|
||||
providers = append(providers, p)
|
||||
}
|
||||
return providers, nil
|
||||
}
|
||||
|
||||
// FindProvidersAsync is the same thing as FindProviders, but returns a channel.
|
||||
// Peers will be returned on the channel as soon as they are found, even before
|
||||
// the search query completes. If count is zero then the query will run until it
|
||||
// completes. Note: not reading from the returned channel may block the query
|
||||
// from progressing.
|
||||
func (dht *IpfsDHT) FindProvidersAsync(ctx context.Context, key cid.Cid, count int) (ch <-chan peer.AddrInfo) {
|
||||
ctx, end := tracer.FindProvidersAsync(dhtName, ctx, key, count)
|
||||
defer func() { ch = end(ch, nil) }()
|
||||
|
||||
if !dht.enableProviders || !key.Defined() {
|
||||
peerOut := make(chan peer.AddrInfo)
|
||||
close(peerOut)
|
||||
return peerOut
|
||||
}
|
||||
|
||||
peerOut := make(chan peer.AddrInfo)
|
||||
|
||||
keyMH := key.Hash()
|
||||
|
||||
logger.Debugw("finding providers", "cid", key, "mh", internal.LoggableProviderRecordBytes(keyMH))
|
||||
go dht.findProvidersAsyncRoutine(ctx, keyMH, count, peerOut)
|
||||
return peerOut
|
||||
}
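
A hedged consumption sketch for the asynchronous provider search, written as if inside the dht package; it assumes an already-bootstrapped *IpfsDHT, a defined cid.Cid, and fmt/time imports:

// Usage sketch only: dht is assumed to be an initialized *IpfsDHT and c a
// defined cid.Cid. Stop after five providers or when the timeout expires.
func listProviders(ctx context.Context, dht *IpfsDHT, c cid.Cid) {
	ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
	defer cancel()
	for info := range dht.FindProvidersAsync(ctx, c, 5) {
		fmt.Println("provider:", info.ID)
	}
}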
|
||||
|
||||
func (dht *IpfsDHT) findProvidersAsyncRoutine(ctx context.Context, key multihash.Multihash, count int, peerOut chan peer.AddrInfo) {
|
||||
// use a span here because, unlike tracer.FindProvidersAsync, we know who told us about it and that is interesting to log.
|
||||
ctx, span := internal.StartSpan(ctx, "IpfsDHT.FindProvidersAsyncRoutine")
|
||||
defer span.End()
|
||||
|
||||
defer close(peerOut)
|
||||
|
||||
findAll := count == 0
|
||||
|
||||
ps := make(map[peer.ID]peer.AddrInfo)
|
||||
psLock := &sync.Mutex{}
|
||||
psTryAdd := func(p peer.AddrInfo) bool {
|
||||
psLock.Lock()
|
||||
defer psLock.Unlock()
|
||||
pi, ok := ps[p.ID]
|
||||
if (!ok || ((len(pi.Addrs) == 0) && len(p.Addrs) > 0)) && (len(ps) < count || findAll) {
|
||||
ps[p.ID] = p
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
psSize := func() int {
|
||||
psLock.Lock()
|
||||
defer psLock.Unlock()
|
||||
return len(ps)
|
||||
}
|
||||
|
||||
provs, err := dht.providerStore.GetProviders(ctx, key)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
for _, p := range provs {
|
||||
// NOTE: Assuming that this list of peers is unique
|
||||
if psTryAdd(p) {
|
||||
select {
|
||||
case peerOut <- p:
|
||||
span.AddEvent("found provider", trace.WithAttributes(
|
||||
attribute.Stringer("peer", p.ID),
|
||||
attribute.Stringer("from", dht.self),
|
||||
))
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// If we have enough peers locally, don't bother with remote RPC
|
||||
// TODO: is this a DOS vector?
|
||||
if !findAll && len(ps) >= count {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
lookupRes, err := dht.runLookupWithFollowup(ctx, string(key),
|
||||
func(ctx context.Context, p peer.ID) ([]*peer.AddrInfo, error) {
|
||||
|
||||
// For DHT query command
|
||||
routing.PublishQueryEvent(ctx, &routing.QueryEvent{
|
||||
Type: routing.SendingQuery,
|
||||
ID: p,
|
||||
})
|
||||
|
||||
provs, closest, err := dht.protoMessenger.GetProviders(ctx, p, key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
logger.Debugf("%d provider entries", len(provs))
|
||||
|
||||
// Add unique providers from request, up to 'count'
|
||||
for _, prov := range provs {
|
||||
dht.maybeAddAddrs(prov.ID, prov.Addrs, peerstore.TempAddrTTL)
|
||||
logger.Debugf("got provider: %s", prov)
|
||||
if psTryAdd(*prov) {
|
||||
logger.Debugf("using provider: %s", prov)
|
||||
select {
|
||||
case peerOut <- *prov:
|
||||
span.AddEvent("found provider", trace.WithAttributes(
|
||||
attribute.Stringer("peer", prov.ID),
|
||||
attribute.Stringer("from", p),
|
||||
))
|
||||
case <-ctx.Done():
|
||||
logger.Debug("context timed out sending more providers")
|
||||
return nil, ctx.Err()
|
||||
}
|
||||
}
|
||||
if !findAll && psSize() >= count {
|
||||
logger.Debugf("got enough providers (%d/%d)", psSize(), count)
|
||||
return nil, nil
|
||||
}
|
||||
}
|
||||
|
||||
// Give closer peers back to the query to be queried
|
||||
logger.Debugf("got closer peers: %d %s", len(closest), closest)
|
||||
|
||||
routing.PublishQueryEvent(ctx, &routing.QueryEvent{
|
||||
Type: routing.PeerResponse,
|
||||
ID: p,
|
||||
Responses: closest,
|
||||
})
|
||||
|
||||
return closest, nil
|
||||
},
|
||||
func(*qpeerset.QueryPeerset) bool {
|
||||
return !findAll && psSize() >= count
|
||||
},
|
||||
)
|
||||
|
||||
if err == nil && ctx.Err() == nil {
|
||||
dht.refreshRTIfNoShortcut(kb.ConvertKey(string(key)), lookupRes)
|
||||
}
|
||||
}
|
||||
|
||||
// FindPeer searches for a peer with given ID.
|
||||
func (dht *IpfsDHT) FindPeer(ctx context.Context, id peer.ID) (pi peer.AddrInfo, err error) {
|
||||
ctx, end := tracer.FindPeer(dhtName, ctx, id)
|
||||
defer func() { end(pi, err) }()
|
||||
|
||||
if err := id.Validate(); err != nil {
|
||||
return peer.AddrInfo{}, err
|
||||
}
|
||||
|
||||
logger.Debugw("finding peer", "peer", id)
|
||||
|
||||
// Check if we're already connected to them
|
||||
if pi := dht.FindLocal(ctx, id); pi.ID != "" {
|
||||
return pi, nil
|
||||
}
|
||||
|
||||
lookupRes, err := dht.runLookupWithFollowup(ctx, string(id),
|
||||
func(ctx context.Context, p peer.ID) ([]*peer.AddrInfo, error) {
|
||||
// For DHT query command
|
||||
routing.PublishQueryEvent(ctx, &routing.QueryEvent{
|
||||
Type: routing.SendingQuery,
|
||||
ID: p,
|
||||
})
|
||||
|
||||
peers, err := dht.protoMessenger.GetClosestPeers(ctx, p, id)
|
||||
if err != nil {
|
||||
logger.Debugf("error getting closer peers: %s", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// For DHT query command
|
||||
routing.PublishQueryEvent(ctx, &routing.QueryEvent{
|
||||
Type: routing.PeerResponse,
|
||||
ID: p,
|
||||
Responses: peers,
|
||||
})
|
||||
|
||||
return peers, err
|
||||
},
|
||||
func(*qpeerset.QueryPeerset) bool {
|
||||
return dht.host.Network().Connectedness(id) == network.Connected
|
||||
},
|
||||
)
|
||||
|
||||
if err != nil {
|
||||
return peer.AddrInfo{}, err
|
||||
}
|
||||
|
||||
dialedPeerDuringQuery := false
|
||||
for i, p := range lookupRes.peers {
|
||||
if p == id {
|
||||
// Note: we consider PeerUnreachable to be a valid state because the peer may not support the DHT protocol
|
||||
// and therefore the peer would fail the query. The fact that a peer that is returned can be a non-DHT
|
||||
// server peer and is not identified as such is a bug.
|
||||
dialedPeerDuringQuery = (lookupRes.state[i] == qpeerset.PeerQueried || lookupRes.state[i] == qpeerset.PeerUnreachable || lookupRes.state[i] == qpeerset.PeerWaiting)
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// Return peer information if we tried to dial the peer during the query or we are (or recently were) connected
|
||||
// to the peer.
|
||||
connectedness := dht.host.Network().Connectedness(id)
|
||||
if dialedPeerDuringQuery || connectedness == network.Connected || connectedness == network.CanConnect {
|
||||
return dht.peerstore.PeerInfo(id), nil
|
||||
}
|
||||
|
||||
return peer.AddrInfo{}, routing.ErrNotFound
|
||||
}
|
go-libp2p-kad-dht/routing_options.go (new file)
@ -0,0 +1,21 @@
package dht

import (
	internalConfig "github.com/libp2p/go-libp2p-kad-dht/internal/config"
	"github.com/libp2p/go-libp2p/core/routing"
)

// Quorum is a DHT option that tells the DHT how many peers it needs to get
// values from before returning the best one. Zero means the DHT query
// should complete instead of returning early.
//
// Default: 0
func Quorum(n int) routing.Option {
	return func(opts *routing.Options) error {
		if opts.Other == nil {
			opts.Other = make(map[interface{}]interface{}, 1)
		}
		opts.Other[internalConfig.QuorumOptionKey{}] = n
		return nil
	}
}
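
A brief usage sketch for the option, written as if inside the dht package with an already-bootstrapped *IpfsDHT; the key is illustrative:

// Usage sketch only: ask GetValue to gather responses from at least three
// peers before settling on the best record.
func getWithQuorum(ctx context.Context, dht *IpfsDHT, key string) ([]byte, error) {
	return dht.GetValue(ctx, key, Quorum(3))
}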
|
go-libp2p-kad-dht/rt_diversity_filter.go (new file)
@ -0,0 +1,103 @@
|
||||
package dht
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/host"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
|
||||
"github.com/libp2p/go-libp2p-kbucket/peerdiversity"
|
||||
|
||||
ma "github.com/multiformats/go-multiaddr"
|
||||
)
|
||||
|
||||
var _ peerdiversity.PeerIPGroupFilter = (*rtPeerIPGroupFilter)(nil)
|
||||
|
||||
type rtPeerIPGroupFilter struct {
|
||||
mu sync.RWMutex
|
||||
h host.Host
|
||||
|
||||
maxPerCpl int
|
||||
maxForTable int
|
||||
|
||||
cplIpGroupCount map[int]map[peerdiversity.PeerIPGroupKey]int
|
||||
tableIpGroupCount map[peerdiversity.PeerIPGroupKey]int
|
||||
}
|
||||
|
||||
// NewRTPeerDiversityFilter constructs the `PeerIPGroupFilter` that will be used to configure
|
||||
// the diversity filter for the Routing Table.
|
||||
// Please see the docs for `peerdiversity.PeerIPGroupFilter` AND `peerdiversity.Filter` for more details.
|
||||
func NewRTPeerDiversityFilter(h host.Host, maxPerCpl, maxForTable int) *rtPeerIPGroupFilter {
|
||||
return &rtPeerIPGroupFilter{
|
||||
h: h,
|
||||
|
||||
maxPerCpl: maxPerCpl,
|
||||
maxForTable: maxForTable,
|
||||
|
||||
cplIpGroupCount: make(map[int]map[peerdiversity.PeerIPGroupKey]int),
|
||||
tableIpGroupCount: make(map[peerdiversity.PeerIPGroupKey]int),
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func (r *rtPeerIPGroupFilter) Allow(g peerdiversity.PeerGroupInfo) bool {
|
||||
r.mu.RLock()
|
||||
defer r.mu.RUnlock()
|
||||
|
||||
key := g.IPGroupKey
|
||||
cpl := g.Cpl
|
||||
|
||||
if r.tableIpGroupCount[key] >= r.maxForTable {
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
c, ok := r.cplIpGroupCount[cpl]
|
||||
allow := !ok || c[key] < r.maxPerCpl
|
||||
return allow
|
||||
}
|
||||
|
||||
func (r *rtPeerIPGroupFilter) Increment(g peerdiversity.PeerGroupInfo) {
|
||||
r.mu.Lock()
|
||||
defer r.mu.Unlock()
|
||||
|
||||
key := g.IPGroupKey
|
||||
cpl := g.Cpl
|
||||
|
||||
r.tableIpGroupCount[key] = r.tableIpGroupCount[key] + 1
|
||||
if _, ok := r.cplIpGroupCount[cpl]; !ok {
|
||||
r.cplIpGroupCount[cpl] = make(map[peerdiversity.PeerIPGroupKey]int)
|
||||
}
|
||||
|
||||
r.cplIpGroupCount[cpl][key] = r.cplIpGroupCount[cpl][key] + 1
|
||||
}
|
||||
|
||||
func (r *rtPeerIPGroupFilter) Decrement(g peerdiversity.PeerGroupInfo) {
|
||||
r.mu.Lock()
|
||||
defer r.mu.Unlock()
|
||||
|
||||
key := g.IPGroupKey
|
||||
cpl := g.Cpl
|
||||
|
||||
r.tableIpGroupCount[key] = r.tableIpGroupCount[key] - 1
|
||||
if r.tableIpGroupCount[key] == 0 {
|
||||
delete(r.tableIpGroupCount, key)
|
||||
}
|
||||
|
||||
r.cplIpGroupCount[cpl][key] = r.cplIpGroupCount[cpl][key] - 1
|
||||
if r.cplIpGroupCount[cpl][key] == 0 {
|
||||
delete(r.cplIpGroupCount[cpl], key)
|
||||
}
|
||||
if len(r.cplIpGroupCount[cpl]) == 0 {
|
||||
delete(r.cplIpGroupCount, cpl)
|
||||
}
|
||||
}
|
||||
|
||||
func (r *rtPeerIPGroupFilter) PeerAddresses(p peer.ID) []ma.Multiaddr {
|
||||
cs := r.h.Network().ConnsToPeer(p)
|
||||
addr := make([]ma.Multiaddr, 0, len(cs))
|
||||
for _, c := range cs {
|
||||
addr = append(addr, c.RemoteMultiaddr())
|
||||
}
|
||||
return addr
|
||||
}
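
A hedged sketch of wiring this filter into a DHT instance, mirroring the end-to-end tests that follow; h is an assumed, already-started libp2p host:

// Sketch only: h is an assumed, already-started host.Host.
func newDiverseDHT(ctx context.Context, h host.Host) (*IpfsDHT, error) {
	// Allow at most 2 peers per IP group within a CPL bucket and 3 per table.
	filter := NewRTPeerDiversityFilter(h, 2, 3)
	return New(
		ctx,
		h,
		Mode(ModeServer),
		RoutingTablePeerDiversityFilter(filter),
	)
}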
|
go-libp2p-kad-dht/rt_diversity_filter_test.go (new file)
@ -0,0 +1,155 @@
|
||||
package dht
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
kb "github.com/libp2p/go-libp2p-kbucket"
|
||||
"github.com/libp2p/go-libp2p-kbucket/peerdiversity"
|
||||
bhost "github.com/libp2p/go-libp2p/p2p/host/basic"
|
||||
swarmt "github.com/libp2p/go-libp2p/p2p/net/swarm/testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestRTPeerDiversityFilter(t *testing.T) {
|
||||
h, err := bhost.NewHost(swarmt.GenSwarm(t, swarmt.OptDisableReuseport), new(bhost.HostOpts))
|
||||
require.NoError(t, err)
|
||||
h.Start()
|
||||
defer h.Close()
|
||||
r := NewRTPeerDiversityFilter(h, 2, 3)
|
||||
|
||||
// table should only have 2 for each prefix per cpl
|
||||
key := "key"
|
||||
g := peerdiversity.PeerGroupInfo{Cpl: 1, IPGroupKey: peerdiversity.PeerIPGroupKey(key)}
|
||||
require.True(t, r.Allow(g))
|
||||
r.Increment(g)
|
||||
require.True(t, r.Allow(g))
|
||||
r.Increment(g)
|
||||
require.False(t, r.Allow(g))
|
||||
|
||||
// table should ONLY have 3 for a Prefix
|
||||
key = "random"
|
||||
g2 := peerdiversity.PeerGroupInfo{Cpl: 2, IPGroupKey: peerdiversity.PeerIPGroupKey(key)}
|
||||
require.True(t, r.Allow(g2))
|
||||
r.Increment(g2)
|
||||
|
||||
g2.Cpl = 3
|
||||
require.True(t, r.Allow(g2))
|
||||
r.Increment(g2)
|
||||
|
||||
g2.Cpl = 4
|
||||
require.True(t, r.Allow(g2))
|
||||
r.Increment(g2)
|
||||
|
||||
require.False(t, r.Allow(g2))
|
||||
|
||||
// remove a peer with a prefix and it works
|
||||
r.Decrement(g2)
|
||||
require.True(t, r.Allow(g2))
|
||||
r.Increment(g2)
|
||||
|
||||
// and then it doesn't work again
|
||||
require.False(t, r.Allow(g2))
|
||||
}
|
||||
|
||||
func TestRoutingTableEndToEndMaxPerCpl(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
h, err := bhost.NewHost(swarmt.GenSwarm(t, swarmt.OptDisableReuseport), new(bhost.HostOpts))
|
||||
require.NoError(t, err)
|
||||
h.Start()
|
||||
defer h.Close()
|
||||
r := NewRTPeerDiversityFilter(h, 1, 2)
|
||||
|
||||
d, err := New(
|
||||
ctx,
|
||||
h,
|
||||
testPrefix,
|
||||
NamespacedValidator("v", blankValidator{}),
|
||||
Mode(ModeServer),
|
||||
DisableAutoRefresh(),
|
||||
RoutingTablePeerDiversityFilter(r),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
defer d.Close()
|
||||
|
||||
var d2 *IpfsDHT
|
||||
var d3 *IpfsDHT
|
||||
|
||||
for {
|
||||
d2 = setupDHT(ctx, t, false)
|
||||
if kb.CommonPrefixLen(d.selfKey, kb.ConvertPeerID(d2.self)) == 1 {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
for {
|
||||
d3 = setupDHT(ctx, t, false)
|
||||
if kb.CommonPrefixLen(d.selfKey, kb.ConvertPeerID(d3.self)) == 1 {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// d2 will be allowed in the Routing table but
|
||||
// d3 will not be allowed.
|
||||
connectNoSync(t, ctx, d, d2)
|
||||
require.Eventually(t, func() bool {
|
||||
return d.routingTable.Find(d2.self) != ""
|
||||
}, 1*time.Second, 100*time.Millisecond)
|
||||
|
||||
connectNoSync(t, ctx, d, d3)
|
||||
time.Sleep(1 * time.Second)
|
||||
require.Len(t, d.routingTable.ListPeers(), 1)
|
||||
require.True(t, d.routingTable.Find(d3.self) == "")
|
||||
|
||||
// it works after removing d2
|
||||
d.routingTable.RemovePeer(d2.self)
|
||||
b, err := d.routingTable.TryAddPeer(d3.self, true, false)
|
||||
require.NoError(t, err)
|
||||
require.True(t, b)
|
||||
require.Len(t, d.routingTable.ListPeers(), 1)
|
||||
require.True(t, d.routingTable.Find(d3.self) != "")
|
||||
}
|
||||
|
||||
func TestRoutingTableEndToEndMaxPerTable(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
h, err := bhost.NewHost(swarmt.GenSwarm(t, swarmt.OptDisableReuseport), new(bhost.HostOpts))
|
||||
require.NoError(t, err)
|
||||
h.Start()
|
||||
defer h.Close()
|
||||
r := NewRTPeerDiversityFilter(h, 100, 3)
|
||||
|
||||
d, err := New(
|
||||
ctx,
|
||||
h,
|
||||
testPrefix,
|
||||
NamespacedValidator("v", blankValidator{}),
|
||||
Mode(ModeServer),
|
||||
DisableAutoRefresh(),
|
||||
RoutingTablePeerDiversityFilter(r),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
defer d.Close()
|
||||
|
||||
// only 3 peers per prefix for the table.
|
||||
d2 := setupDHT(ctx, t, false, DisableAutoRefresh())
|
||||
connect(t, ctx, d, d2)
|
||||
waitForWellFormedTables(t, []*IpfsDHT{d}, 1, 1, 1*time.Second)
|
||||
|
||||
d3 := setupDHT(ctx, t, false, DisableAutoRefresh())
|
||||
connect(t, ctx, d, d3)
|
||||
waitForWellFormedTables(t, []*IpfsDHT{d}, 2, 2, 1*time.Second)
|
||||
|
||||
d4 := setupDHT(ctx, t, false, DisableAutoRefresh())
|
||||
connect(t, ctx, d, d4)
|
||||
waitForWellFormedTables(t, []*IpfsDHT{d}, 3, 3, 1*time.Second)
|
||||
|
||||
d5 := setupDHT(ctx, t, false, DisableAutoRefresh())
|
||||
connectNoSync(t, ctx, d, d5)
|
||||
time.Sleep(1 * time.Second)
|
||||
require.Len(t, d.routingTable.ListPeers(), 3)
|
||||
require.True(t, d.routingTable.Find(d5.self) == "")
|
||||
}
|
372
go-libp2p-kad-dht/rtrefresh/rt_refresh_manager.go
Normal file
@ -0,0 +1,372 @@
|
||||
package rtrefresh
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/go-multierror"
|
||||
logging "github.com/ipfs/go-log/v2"
|
||||
"github.com/libp2p/go-libp2p-kad-dht/internal"
|
||||
kbucket "github.com/libp2p/go-libp2p-kbucket"
|
||||
"github.com/libp2p/go-libp2p/core/host"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/multiformats/go-base32"
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/codes"
|
||||
"go.opentelemetry.io/otel/trace"
|
||||
)
|
||||
|
||||
var logger = logging.Logger("dht/RtRefreshManager")
|
||||
|
||||
const (
|
||||
peerPingTimeout = 10 * time.Second
|
||||
)
|
||||
|
||||
type triggerRefreshReq struct {
|
||||
respCh chan error
|
||||
forceCplRefresh bool
|
||||
}
|
||||
|
||||
type RtRefreshManager struct {
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
refcount sync.WaitGroup
|
||||
|
||||
// peerId of this DHT peer i.e. self peerId.
|
||||
h host.Host
|
||||
dhtPeerId peer.ID
|
||||
rt *kbucket.RoutingTable
|
||||
|
||||
enableAutoRefresh bool // should we run periodic refreshes?
|
||||
refreshKeyGenFnc func(cpl uint) (string, error) // generate the key for the query to refresh this cpl
|
||||
refreshQueryFnc func(ctx context.Context, key string) error // query to run for a refresh.
|
||||
refreshPingFnc func(ctx context.Context, p peer.ID) error // request to check liveness of remote peer
|
||||
refreshQueryTimeout time.Duration // timeout for one refresh query
|
||||
|
||||
// interval between two periodic refreshes.
|
||||
// also, a cpl won't be refreshed if the time since it was last refreshed
|
||||
// is below the interval, unless a "forced" refresh is done.
|
||||
refreshInterval time.Duration
|
||||
successfulOutboundQueryGracePeriod time.Duration
|
||||
|
||||
triggerRefresh chan *triggerRefreshReq // channel to write refresh requests to.
|
||||
|
||||
refreshDoneCh chan struct{} // write to this channel after every refresh
|
||||
}
|
||||
|
||||
func NewRtRefreshManager(h host.Host, rt *kbucket.RoutingTable, autoRefresh bool,
|
||||
refreshKeyGenFnc func(cpl uint) (string, error),
|
||||
refreshQueryFnc func(ctx context.Context, key string) error,
|
||||
refreshPingFnc func(ctx context.Context, p peer.ID) error,
|
||||
refreshQueryTimeout time.Duration,
|
||||
refreshInterval time.Duration,
|
||||
successfulOutboundQueryGracePeriod time.Duration,
|
||||
refreshDoneCh chan struct{}) (*RtRefreshManager, error) {
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
return &RtRefreshManager{
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
h: h,
|
||||
dhtPeerId: h.ID(),
|
||||
rt: rt,
|
||||
|
||||
enableAutoRefresh: autoRefresh,
|
||||
refreshKeyGenFnc: refreshKeyGenFnc,
|
||||
refreshQueryFnc: refreshQueryFnc,
|
||||
refreshPingFnc: refreshPingFnc,
|
||||
|
||||
refreshQueryTimeout: refreshQueryTimeout,
|
||||
refreshInterval: refreshInterval,
|
||||
successfulOutboundQueryGracePeriod: successfulOutboundQueryGracePeriod,
|
||||
|
||||
triggerRefresh: make(chan *triggerRefreshReq),
|
||||
refreshDoneCh: refreshDoneCh,
|
||||
}, nil
|
||||
}
|
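A construction sketch for orientation, not part of the diff: it assumes the caller already has a libp2p host h, a kbucket routing table rt, and three callbacks (key generation, refresh query, liveness ping) wired to its own DHT logic; the durations are illustrative values, not defaults.

refreshDone := make(chan struct{}, 1)
mgr, err := NewRtRefreshManager(
	h, rt, true,     // autoRefresh enabled
	genRefreshKey,   // assumed func(cpl uint) (string, error)
	runRefreshQuery, // assumed func(ctx context.Context, key string) error
	pingPeer,        // assumed func(ctx context.Context, p peer.ID) error
	1*time.Minute,   // refreshQueryTimeout
	10*time.Minute,  // refreshInterval
	1*time.Minute,   // successfulOutboundQueryGracePeriod
	refreshDone,
)
if err != nil {
	// handle error
}
mgr.Start()
defer mgr.Close()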
||||
|
||||
func (r *RtRefreshManager) Start() {
|
||||
r.refcount.Add(1)
|
||||
go r.loop()
|
||||
}
|
||||
|
||||
func (r *RtRefreshManager) Close() error {
|
||||
r.cancel()
|
||||
r.refcount.Wait()
|
||||
return nil
|
||||
}
|
||||
|
||||
// Refresh requests the refresh manager to refresh the Routing Table.
|
||||
// If the force parameter is set to true, all buckets will be refreshed irrespective of when they were last refreshed.
|
||||
//
|
||||
// Reads from the returned channel block until the refresh finishes; the channel then yields the
|
||||
// refresh error (if any) and is closed. The channel is buffered, so the result is safe to ignore.
|
||||
func (r *RtRefreshManager) Refresh(force bool) <-chan error {
|
||||
resp := make(chan error, 1)
|
||||
r.refcount.Add(1)
|
||||
go func() {
|
||||
defer r.refcount.Done()
|
||||
select {
|
||||
case r.triggerRefresh <- &triggerRefreshReq{respCh: resp, forceCplRefresh: force}:
|
||||
case <-r.ctx.Done():
|
||||
resp <- r.ctx.Err()
|
||||
close(resp)
|
||||
}
|
||||
}()
|
||||
|
||||
return resp
|
||||
}
|
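A caller-side sketch of the two ways to consume this, assuming mgr is a started *RtRefreshManager: because the channel is buffered, the result can either be awaited or dropped without blocking the manager.

// Wait for a forced refresh and inspect the outcome.
if err := <-mgr.Refresh(true); err != nil {
	logger.Warnw("forced refresh failed", "error", err)
}

// Or fire and forget; the buffered channel means nothing blocks
// if the caller never reads the result.
_ = mgr.Refresh(false)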
||||
|
||||
// RefreshNoWait requests the refresh manager to refresh the Routing Table.
|
||||
// However, it moves on without blocking if its request can't get through.
|
||||
func (r *RtRefreshManager) RefreshNoWait() {
|
||||
select {
|
||||
case r.triggerRefresh <- &triggerRefreshReq{}:
|
||||
default:
|
||||
}
|
||||
}
|
||||
|
||||
// pingAndEvictPeers pings Routing Table peers that haven't been heard of/from
|
||||
// in the interval they should have been, and evicts them if they don't reply.
|
||||
func (r *RtRefreshManager) pingAndEvictPeers(ctx context.Context) {
|
||||
ctx, span := internal.StartSpan(ctx, "RefreshManager.PingAndEvictPeers")
|
||||
defer span.End()
|
||||
|
||||
var peersChecked int
|
||||
var alive int64
|
||||
var wg sync.WaitGroup
|
||||
peers := r.rt.GetPeerInfos()
|
||||
for _, ps := range peers {
|
||||
if time.Since(ps.LastSuccessfulOutboundQueryAt) <= r.successfulOutboundQueryGracePeriod {
|
||||
continue
|
||||
}
|
||||
|
||||
peersChecked++
|
||||
wg.Add(1)
|
||||
go func(ps kbucket.PeerInfo) {
|
||||
defer wg.Done()
|
||||
|
||||
livelinessCtx, cancel := context.WithTimeout(ctx, peerPingTimeout)
|
||||
defer cancel()
|
||||
peerIdStr := ps.Id.String()
|
||||
livelinessCtx, span := internal.StartSpan(livelinessCtx, "RefreshManager.PingAndEvictPeers.worker", trace.WithAttributes(attribute.String("peer", peerIdStr)))
|
||||
defer span.End()
|
||||
|
||||
if err := r.h.Connect(livelinessCtx, peer.AddrInfo{ID: ps.Id}); err != nil {
|
||||
logger.Debugw("evicting peer after failed connection", "peer", peerIdStr, "error", err)
|
||||
span.RecordError(err)
|
||||
r.rt.RemovePeer(ps.Id)
|
||||
return
|
||||
}
|
||||
|
||||
if err := r.refreshPingFnc(livelinessCtx, ps.Id); err != nil {
|
||||
logger.Debugw("evicting peer after failed ping", "peer", peerIdStr, "error", err)
|
||||
span.RecordError(err)
|
||||
r.rt.RemovePeer(ps.Id)
|
||||
return
|
||||
}
|
||||
|
||||
atomic.AddInt64(&alive, 1)
|
||||
}(ps)
|
||||
}
|
||||
wg.Wait()
|
||||
|
||||
span.SetAttributes(attribute.Int("NumPeersChecked", peersChecked), attribute.Int("NumPeersSkipped", len(peers)-peersChecked), attribute.Int64("NumPeersAlive", alive))
|
||||
}
|
||||
|
||||
func (r *RtRefreshManager) loop() {
|
||||
defer r.refcount.Done()
|
||||
|
||||
var refreshTickrCh <-chan time.Time
|
||||
if r.enableAutoRefresh {
|
||||
err := r.doRefresh(r.ctx, true)
|
||||
if err != nil {
|
||||
logger.Warn("failed when refreshing routing table", err)
|
||||
}
|
||||
t := time.NewTicker(r.refreshInterval)
|
||||
defer t.Stop()
|
||||
refreshTickrCh = t.C
|
||||
}
|
||||
|
||||
for {
|
||||
var waiting []chan<- error
|
||||
var forced bool
|
||||
select {
|
||||
case <-refreshTickrCh:
|
||||
case triggerRefreshReq := <-r.triggerRefresh:
|
||||
if triggerRefreshReq.respCh != nil {
|
||||
waiting = append(waiting, triggerRefreshReq.respCh)
|
||||
}
|
||||
forced = forced || triggerRefreshReq.forceCplRefresh
|
||||
case <-r.ctx.Done():
|
||||
return
|
||||
}
|
||||
|
||||
// Batch multiple refresh requests if they're all waiting at the same time.
|
||||
OuterLoop:
|
||||
for {
|
||||
select {
|
||||
case triggerRefreshReq := <-r.triggerRefresh:
|
||||
if triggerRefreshReq.respCh != nil {
|
||||
waiting = append(waiting, triggerRefreshReq.respCh)
|
||||
}
|
||||
forced = forced || triggerRefreshReq.forceCplRefresh
|
||||
default:
|
||||
break OuterLoop
|
||||
}
|
||||
}
|
||||
|
||||
ctx, span := internal.StartSpan(r.ctx, "RefreshManager.Refresh")
|
||||
|
||||
r.pingAndEvictPeers(ctx)
|
||||
|
||||
// Query for self and refresh the required buckets
|
||||
err := r.doRefresh(ctx, forced)
|
||||
for _, w := range waiting {
|
||||
w <- err
|
||||
close(w)
|
||||
}
|
||||
if err != nil {
|
||||
logger.Warnw("failed when refreshing routing table", "error", err)
|
||||
}
|
||||
|
||||
span.End()
|
||||
}
|
||||
}
|
||||
|
||||
func (r *RtRefreshManager) doRefresh(ctx context.Context, forceRefresh bool) error {
|
||||
ctx, span := internal.StartSpan(ctx, "RefreshManager.doRefresh")
|
||||
defer span.End()
|
||||
|
||||
var merr error
|
||||
|
||||
if err := r.queryForSelf(ctx); err != nil {
|
||||
merr = multierror.Append(merr, err)
|
||||
}
|
||||
|
||||
refreshCpls := r.rt.GetTrackedCplsForRefresh()
|
||||
|
||||
rfnc := func(cpl uint) (err error) {
|
||||
if forceRefresh {
|
||||
err = r.refreshCpl(ctx, cpl)
|
||||
} else {
|
||||
err = r.refreshCplIfEligible(ctx, cpl, refreshCpls[cpl])
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
for c := range refreshCpls {
|
||||
cpl := uint(c)
|
||||
if err := rfnc(cpl); err != nil {
|
||||
merr = multierror.Append(merr, err)
|
||||
} else {
|
||||
// If we see a gap at a Cpl in the Routing table, we ONLY refresh up until the maximum cpl we
|
||||
// have in the Routing Table OR 2 * (Cpl + 1) for the Cpl with the gap, whichever is smaller.
|
||||
// This is to prevent refreshes for Cpls that have no peers in the network but happen to be before a very high max Cpl
|
||||
// for which we do have peers in the network.
|
||||
// The number of 2 * (Cpl + 1) can be proved and a proof would have been written here if the programmer
|
||||
// had paid more attention in the Math classes at university.
|
||||
// So, please be patient and a doc explaining it will be published soon.
|
||||
if r.rt.NPeersForCpl(cpl) == 0 {
|
||||
lastCpl := min(2*(c+1), len(refreshCpls)-1)
|
||||
for i := c + 1; i < lastCpl+1; i++ {
|
||||
if err := rfnc(uint(i)); err != nil {
|
||||
merr = multierror.Append(merr, err)
|
||||
}
|
||||
}
|
||||
return merr
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
select {
|
||||
case r.refreshDoneCh <- struct{}{}:
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
}
|
||||
|
||||
return merr
|
||||
}
|
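To make the gap rule above concrete with the numbers used in the test file later in this diff: with tracked cpls 0..10 and an empty cpl 2, lastCpl = min(2*(2+1), 10) = 6, so cpls 3..6 are still refreshed before the early return; with the empty cpl at 6, lastCpl = min(2*(6+1), 10) = 10, so every remaining tracked cpl is refreshed.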
||||
|
||||
func min(a int, b int) int {
|
||||
if a <= b {
|
||||
return a
|
||||
}
|
||||
|
||||
return b
|
||||
}
|
||||
|
||||
func (r *RtRefreshManager) refreshCplIfEligible(ctx context.Context, cpl uint, lastRefreshedAt time.Time) error {
|
||||
if time.Since(lastRefreshedAt) <= r.refreshInterval {
|
||||
logger.Debugf("not running refresh for cpl %d as time since last refresh not above interval", cpl)
|
||||
return nil
|
||||
}
|
||||
|
||||
return r.refreshCpl(ctx, cpl)
|
||||
}
|
||||
|
||||
func (r *RtRefreshManager) refreshCpl(ctx context.Context, cpl uint) error {
|
||||
ctx, span := internal.StartSpan(ctx, "RefreshManager.refreshCpl", trace.WithAttributes(attribute.Int("cpl", int(cpl))))
|
||||
defer span.End()
|
||||
|
||||
// gen a key for the query to refresh the cpl
|
||||
key, err := r.refreshKeyGenFnc(cpl)
|
||||
if err != nil {
|
||||
span.SetStatus(codes.Error, err.Error())
|
||||
return fmt.Errorf("failed to generate query key for cpl=%d, err=%s", cpl, err)
|
||||
}
|
||||
|
||||
logger.Infof("starting refreshing cpl %d with key %s (routing table size was %d)",
|
||||
cpl, loggableRawKeyString(key), r.rt.Size())
|
||||
|
||||
if err := r.runRefreshDHTQuery(ctx, key); err != nil {
|
||||
span.SetStatus(codes.Error, err.Error())
|
||||
return fmt.Errorf("failed to refresh cpl=%d, err=%s", cpl, err)
|
||||
}
|
||||
|
||||
sz := r.rt.Size()
|
||||
logger.Infof("finished refreshing cpl %d, routing table size is now %d", cpl, sz)
|
||||
span.SetAttributes(attribute.Int("NewSize", sz))
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *RtRefreshManager) queryForSelf(ctx context.Context) error {
|
||||
ctx, span := internal.StartSpan(ctx, "RefreshManager.queryForSelf")
|
||||
defer span.End()
|
||||
|
||||
if err := r.runRefreshDHTQuery(ctx, string(r.dhtPeerId)); err != nil {
|
||||
span.SetStatus(codes.Error, err.Error())
|
||||
return fmt.Errorf("failed to query for self, err=%s", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *RtRefreshManager) runRefreshDHTQuery(ctx context.Context, key string) error {
|
||||
queryCtx, cancel := context.WithTimeout(ctx, r.refreshQueryTimeout)
|
||||
defer cancel()
|
||||
|
||||
err := r.refreshQueryFnc(queryCtx, key)
|
||||
|
||||
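// Treat a deadline-exceeded error as success when our own queryCtx has also
// hit its deadline: the refresh simply ran out of its per-query time budget.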
if err == nil || (err == context.DeadlineExceeded && queryCtx.Err() == context.DeadlineExceeded) {
|
||||
return nil
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
type loggableRawKeyString string
|
||||
|
||||
func (lk loggableRawKeyString) String() string {
|
||||
k := string(lk)
|
||||
|
||||
if len(k) == 0 {
|
||||
return k
|
||||
}
|
||||
|
||||
encStr := base32.RawStdEncoding.EncodeToString([]byte(k))
|
||||
|
||||
return encStr
|
||||
}
|
102
go-libp2p-kad-dht/rtrefresh/rt_refresh_manager_test.go
Normal file
@ -0,0 +1,102 @@
|
||||
package rtrefresh
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strconv"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/test"
|
||||
|
||||
kb "github.com/libp2p/go-libp2p-kbucket"
|
||||
pstore "github.com/libp2p/go-libp2p/p2p/host/peerstore"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestSkipRefreshOnGapCpls(t *testing.T) {
|
||||
t.Skip("This test is flaky, see https://github.com/libp2p/go-libp2p-kad-dht/issues/722.")
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
local := test.RandPeerIDFatal(t)
|
||||
|
||||
// adds a peer for a cpl.
|
||||
// The "ignoreCpl" is the cpl for which we assume we have no peers in the network.
|
||||
// So, if the query function gets a "key" which is basically the stringified version of the "ignoreCpl",
|
||||
// we return without adding any peers for it to the Routing Table.
|
||||
qFuncWithIgnore := func(rt *kb.RoutingTable, ignoreCpl uint) func(c context.Context, key string) error {
|
||||
return func(c context.Context, key string) error {
|
||||
if key == string(local) {
|
||||
return nil
|
||||
}
|
||||
|
||||
u, err := strconv.ParseInt(key, 10, 64)
|
||||
require.NoError(t, err)
|
||||
|
||||
if uint(u) == ignoreCpl {
|
||||
return nil
|
||||
}
|
||||
|
||||
p, err := rt.GenRandPeerID(uint(u))
|
||||
require.NoError(t, err)
|
||||
b, err := rt.TryAddPeer(p, true, false)
|
||||
require.NoError(t, err)
|
||||
require.True(t, b)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// We use the cpl as the key for the query. So, the cpl -> key transformation function
|
||||
// basically just converts the uint cpl to a string key using the strconv lib.
|
||||
kfnc := func(cpl uint) (string, error) {
|
||||
return strconv.FormatInt(int64(cpl), 10), nil
|
||||
}
|
||||
|
||||
// when 2 * (gapcpl + 1) < maxCpl
|
||||
// gap is 2 and max is 10
|
||||
rt, err := kb.NewRoutingTable(2, kb.ConvertPeerID(local), time.Hour, pstore.NewMetrics(), 100*time.Hour, nil)
|
||||
require.NoError(t, err)
|
||||
r := &RtRefreshManager{ctx: ctx, rt: rt, refreshKeyGenFnc: kfnc, dhtPeerId: local}
|
||||
icpl := uint(2)
|
||||
lastCpl := 2 * (icpl + 1)
|
||||
p, err := rt.GenRandPeerID(10)
|
||||
require.NoError(t, err)
|
||||
b, _ := rt.TryAddPeer(p, true, false)
|
||||
require.True(t, b)
|
||||
r.refreshQueryFnc = qFuncWithIgnore(rt, icpl)
|
||||
require.NoError(t, r.doRefresh(ctx, true))
|
||||
|
||||
for i := uint(0); i < lastCpl+1; i++ {
|
||||
if i == icpl {
|
||||
require.Equal(t, 0, rt.NPeersForCpl(i))
|
||||
continue
|
||||
}
|
||||
require.Equal(t, 1, rt.NPeersForCpl(uint(i)))
|
||||
}
|
||||
for i := lastCpl + 1; i < 10; i++ {
|
||||
require.Equal(t, 0, rt.NPeersForCpl(i))
|
||||
}
|
||||
|
||||
// when 2 * (gapcpl + 1) > maxCpl
|
||||
rt, err = kb.NewRoutingTable(2, kb.ConvertPeerID(local), time.Hour, pstore.NewMetrics(), 100*time.Hour, nil)
|
||||
require.NoError(t, err)
|
||||
r = &RtRefreshManager{ctx: ctx, rt: rt, refreshKeyGenFnc: kfnc, dhtPeerId: local}
|
||||
icpl = uint(6)
|
||||
p, err = rt.GenRandPeerID(10)
|
||||
require.NoError(t, err)
|
||||
b, _ = rt.TryAddPeer(p, true, false)
|
||||
require.True(t, b)
|
||||
r.refreshQueryFnc = qFuncWithIgnore(rt, icpl)
|
||||
require.NoError(t, r.doRefresh(ctx, true))
|
||||
|
||||
for i := uint(0); i < 10; i++ {
|
||||
if i == icpl {
|
||||
require.Equal(t, 0, rt.NPeersForCpl(i))
|
||||
continue
|
||||
}
|
||||
|
||||
require.Equal(t, 1, rt.NPeersForCpl(uint(i)))
|
||||
}
|
||||
require.Equal(t, 2, rt.NPeersForCpl(10))
|
||||
}
|
141
go-libp2p-kad-dht/subscriber_notifee.go
Normal file
@ -0,0 +1,141 @@
|
||||
package dht
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/event"
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/libp2p/go-libp2p/p2p/host/eventbus"
|
||||
)
|
||||
|
||||
func (dht *IpfsDHT) startNetworkSubscriber() error {
|
||||
bufSize := eventbus.BufSize(256)
|
||||
|
||||
evts := []interface{}{
|
||||
// register for event bus notifications of when peers successfully complete identification in order to update
|
||||
// the routing table
|
||||
new(event.EvtPeerIdentificationCompleted),
|
||||
|
||||
// register for event bus protocol ID changes in order to update the routing table
|
||||
new(event.EvtPeerProtocolsUpdated),
|
||||
|
||||
// register for event bus notifications for when our local address/addresses change so we can
|
||||
// advertise those to the network
|
||||
new(event.EvtLocalAddressesUpdated),
|
||||
|
||||
// we want to know when we are disconnecting from other peers.
|
||||
new(event.EvtPeerConnectednessChanged),
|
||||
}
|
||||
|
||||
// register for event bus local routability changes in order to trigger switching between client and server modes
|
||||
// only register for this event if the DHT is operating in ModeAuto or ModeAutoServer
|
||||
if dht.auto == ModeAuto || dht.auto == ModeAutoServer {
|
||||
evts = append(evts, new(event.EvtLocalReachabilityChanged))
|
||||
}
|
||||
|
||||
subs, err := dht.host.EventBus().Subscribe(evts, bufSize)
|
||||
if err != nil {
|
||||
return fmt.Errorf("dht could not subscribe to eventbus events: %w", err)
|
||||
}
|
||||
|
||||
dht.wg.Add(1)
|
||||
go func() {
|
||||
defer dht.wg.Done()
|
||||
defer subs.Close()
|
||||
|
||||
for {
|
||||
select {
|
||||
case e, more := <-subs.Out():
|
||||
if !more {
|
||||
return
|
||||
}
|
||||
|
||||
switch evt := e.(type) {
|
||||
case event.EvtLocalAddressesUpdated:
|
||||
// when our address changes, we should proactively tell our closest peers about it so
|
||||
// we become discoverable quickly. The Identify protocol will push a signed peer record
|
||||
// with our new address to all peers we are connected to. However, we might not necessarily be connected
|
||||
// to our closest peers & so in the true spirit of Zen, searching for ourselves in the network really is the best way
|
||||
// to forge connections with those that matter.
|
||||
if dht.autoRefresh || dht.testAddressUpdateProcessing {
|
||||
dht.rtRefreshManager.RefreshNoWait()
|
||||
}
|
||||
case event.EvtPeerProtocolsUpdated:
|
||||
handlePeerChangeEvent(dht, evt.Peer)
|
||||
case event.EvtPeerIdentificationCompleted:
|
||||
handlePeerChangeEvent(dht, evt.Peer)
|
||||
case event.EvtPeerConnectednessChanged:
|
||||
if evt.Connectedness != network.Connected {
|
||||
dht.msgSender.OnDisconnect(dht.ctx, evt.Peer)
|
||||
}
|
||||
case event.EvtLocalReachabilityChanged:
|
||||
if dht.auto == ModeAuto || dht.auto == ModeAutoServer {
|
||||
handleLocalReachabilityChangedEvent(dht, evt)
|
||||
} else {
|
||||
// something has gone really wrong if we get an event we did not subscribe to
|
||||
logger.Errorf("received LocalReachabilityChanged event that was not subscribed to")
|
||||
}
|
||||
default:
|
||||
// something has gone really wrong if we get an event for another type
|
||||
logger.Errorf("got wrong type from subscription: %T", e)
|
||||
}
|
||||
case <-dht.ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func handlePeerChangeEvent(dht *IpfsDHT, p peer.ID) {
|
||||
valid, err := dht.validRTPeer(p)
|
||||
if err != nil {
|
||||
logger.Errorf("could not check peerstore for protocol support: err: %s", err)
|
||||
return
|
||||
} else if valid {
|
||||
dht.peerFound(p)
|
||||
} else {
|
||||
dht.peerStoppedDHT(p)
|
||||
}
|
||||
}
|
||||
|
||||
func handleLocalReachabilityChangedEvent(dht *IpfsDHT, e event.EvtLocalReachabilityChanged) {
|
||||
var target mode
|
||||
|
||||
switch e.Reachability {
|
||||
case network.ReachabilityPrivate:
|
||||
target = modeClient
|
||||
case network.ReachabilityUnknown:
|
||||
if dht.auto == ModeAutoServer {
|
||||
target = modeServer
|
||||
} else {
|
||||
target = modeClient
|
||||
}
|
||||
case network.ReachabilityPublic:
|
||||
target = modeServer
|
||||
}
|
||||
|
||||
logger.Infof("processed event %T; performing dht mode switch", e)
|
||||
|
||||
err := dht.setMode(target)
|
||||
// NOTE: the mode will be printed out as a decimal.
|
||||
if err == nil {
|
||||
logger.Infow("switched DHT mode successfully", "mode", target)
|
||||
} else {
|
||||
logger.Errorw("switching DHT mode failed", "mode", target, "error", err)
|
||||
}
|
||||
}
|
||||
|
||||
// validRTPeer returns true if the peer supports the DHT protocol and false otherwise. Supporting the DHT protocol means
|
||||
// supporting the primary protocols; we do not want to add peers that speak obsolete secondary protocols to our
|
||||
// routing table.
|
||||
func (dht *IpfsDHT) validRTPeer(p peer.ID) (bool, error) {
|
||||
b, err := dht.peerstore.FirstSupportedProtocol(p, dht.protocols...)
|
||||
if len(b) == 0 || err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
return dht.routingTablePeerFilter == nil || dht.routingTablePeerFilter(dht, p), nil
|
||||
}
|
3
go-libp2p-kad-dht/version.json
Normal file
@ -0,0 +1,3 @@
|
||||
{
|
||||
"version": "v0.25.2"
|
||||
}
|
@ -17,10 +17,11 @@ func TestLogs(t *testing.T) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
LogMisbehavingPeer(test.RandPeerIDFatal(t), multiaddr.StringCast("/ip4/1.2.3.4"), "somecomponent", fmt.Errorf("something"), "hi")
|
||||
m, _ := multiaddr.StringCast("/ip4/1.2.3.4")
|
||||
LogMisbehavingPeer(test.RandPeerIDFatal(t), m, "somecomponent", fmt.Errorf("something"), "hi")
|
||||
|
||||
netAddr := &net.TCPAddr{IP: net.ParseIP("127.0.0.1"), Port: 80}
|
||||
LogMisbehavingPeerNetAddr(test.RandPeerIDFatal(t), netAddr, "somecomponent", fmt.Errorf("something"), "hello \"world\"")
|
||||
|
||||
LogPeerStatus(1, test.RandPeerIDFatal(t), multiaddr.StringCast("/ip4/1.2.3.4"), "extra", "info")
|
||||
LogPeerStatus(1, test.RandPeerIDFatal(t), m, "extra", "info")
|
||||
}
|
||||
|
@ -25,8 +25,8 @@ var ErrInvalidAddr = fmt.Errorf("invalid p2p multiaddr")
|
||||
func AddrInfosFromP2pAddrs(maddrs ...ma.Multiaddr) ([]AddrInfo, error) {
|
||||
m := make(map[ID][]ma.Multiaddr)
|
||||
for _, maddr := range maddrs {
|
||||
transport, id := SplitAddr(maddr)
|
||||
if id == "" {
|
||||
transport, id, err := SplitAddr(maddr)
|
||||
if id == "" || err != nil {
|
||||
return nil, ErrInvalidAddr
|
||||
}
|
||||
if transport == nil {
|
||||
@ -48,17 +48,21 @@ func AddrInfosFromP2pAddrs(maddrs ...ma.Multiaddr) ([]AddrInfo, error) {
|
||||
//
|
||||
// * Returns a nil transport if the address only contains a /p2p part.
|
||||
// * Returns an empty peer ID if the address doesn't contain a /p2p part.
|
||||
func SplitAddr(m ma.Multiaddr) (transport ma.Multiaddr, id ID) {
|
||||
func SplitAddr(m ma.Multiaddr) (transport ma.Multiaddr, id ID, err error) {
|
||||
if m == nil {
|
||||
return nil, ""
|
||||
return nil, "", nil
|
||||
}
|
||||
|
||||
transport, p2ppart, err := ma.SplitLast(m)
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
|
||||
transport, p2ppart := ma.SplitLast(m)
|
||||
if p2ppart == nil || p2ppart.Protocol().Code != ma.P_P2P {
|
||||
return m, ""
|
||||
return m, "", nil
|
||||
}
|
||||
id = ID(p2ppart.RawValue()) // already validated by the multiaddr library.
|
||||
return transport, id
|
||||
return transport, id, nil
|
||||
}
|
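For orientation, a small sketch of the new three-value form of SplitAddr, assuming addr is a full /ip4/.../tcp/.../p2p/<id> multiaddr obtained elsewhere:

transport, id, err := SplitAddr(addr)
if err != nil {
	// the multiaddr could not be split
} else if id == "" {
	// no /p2p component was present
} else {
	_ = transport // the address without its trailing /p2p part
}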
||||
|
||||
// AddrInfoFromString builds an AddrInfo from the string representation of a Multiaddr
|
||||
@ -73,8 +77,8 @@ func AddrInfoFromString(s string) (*AddrInfo, error) {
|
||||
|
||||
// AddrInfoFromP2pAddr converts a Multiaddr to an AddrInfo.
|
||||
func AddrInfoFromP2pAddr(m ma.Multiaddr) (*AddrInfo, error) {
|
||||
transport, id := SplitAddr(m)
|
||||
if id == "" {
|
||||
transport, id, err := SplitAddr(m)
|
||||
if id == "" || err != nil {
|
||||
return nil, ErrInvalidAddr
|
||||
}
|
||||
info := &AddrInfo{ID: id}
|
||||
|
@ -19,13 +19,13 @@ func init() {
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
maddrPeer = ma.StringCast("/p2p/" + testID.String())
|
||||
maddrTpt = ma.StringCast("/ip4/127.0.0.1/tcp/1234")
|
||||
maddrPeer, _ = ma.StringCast("/p2p/" + testID.String())
|
||||
maddrTpt, _ = ma.StringCast("/ip4/127.0.0.1/tcp/1234")
|
||||
maddrFull = maddrTpt.Encapsulate(maddrPeer)
|
||||
}
|
||||
|
||||
func TestSplitAddr(t *testing.T) {
|
||||
tpt, id := SplitAddr(maddrFull)
|
||||
tpt, id, _ := SplitAddr(maddrFull)
|
||||
if !tpt.Equal(maddrTpt) {
|
||||
t.Fatal("expected transport")
|
||||
}
|
||||
@ -33,7 +33,7 @@ func TestSplitAddr(t *testing.T) {
|
||||
t.Fatalf("%s != %s", id, testID)
|
||||
}
|
||||
|
||||
tpt, id = SplitAddr(maddrPeer)
|
||||
tpt, id, _ = SplitAddr(maddrPeer)
|
||||
if tpt != nil {
|
||||
t.Fatal("expected no transport")
|
||||
}
|
||||
@ -41,7 +41,7 @@ func TestSplitAddr(t *testing.T) {
|
||||
t.Fatalf("%s != %s", id, testID)
|
||||
}
|
||||
|
||||
tpt, id = SplitAddr(maddrTpt)
|
||||
tpt, id, _ = SplitAddr(maddrTpt)
|
||||
if !tpt.Equal(maddrTpt) {
|
||||
t.Fatal("expected a transport")
|
||||
}
|
||||
@ -91,22 +91,30 @@ func TestAddrInfosFromP2pAddrs(t *testing.T) {
|
||||
t.Fatal("expected nil multiaddr to fail")
|
||||
}
|
||||
|
||||
m1, _ := ma.StringCast("/ip4/128.199.219.111/tcp/4001/ipfs/QmSoLV4Bbm51jM9C4gDYZQ9Cy3U6aXMJDAbzgu2fzaDs64")
|
||||
m2, _ := ma.StringCast("/ip4/104.236.76.40/tcp/4001/ipfs/QmSoLV4Bbm51jM9C4gDYZQ9Cy3U6aXMJDAbzgu2fzaDs64")
|
||||
m3, _ := ma.StringCast("/ipfs/QmSoLer265NRgSp2LA3dPaeykiS1J6DifTC88f5uVQKNAd")
|
||||
m4, _ := ma.StringCast("/ip4/178.62.158.247/tcp/4001/ipfs/QmSoLer265NRgSp2LA3dPaeykiS1J6DifTC88f5uVQKNAd")
|
||||
m5, _ := ma.StringCast("/ipfs/QmSoLPppuBtQSGwKDZT2M73ULpjvfd3aZ6ha4oFGL1KrGM")
|
||||
addrs := []ma.Multiaddr{
|
||||
ma.StringCast("/ip4/128.199.219.111/tcp/4001/ipfs/QmSoLV4Bbm51jM9C4gDYZQ9Cy3U6aXMJDAbzgu2fzaDs64"),
|
||||
ma.StringCast("/ip4/104.236.76.40/tcp/4001/ipfs/QmSoLV4Bbm51jM9C4gDYZQ9Cy3U6aXMJDAbzgu2fzaDs64"),
|
||||
m1,
|
||||
m2,
|
||||
|
||||
ma.StringCast("/ipfs/QmSoLer265NRgSp2LA3dPaeykiS1J6DifTC88f5uVQKNAd"),
|
||||
ma.StringCast("/ip4/178.62.158.247/tcp/4001/ipfs/QmSoLer265NRgSp2LA3dPaeykiS1J6DifTC88f5uVQKNAd"),
|
||||
m3,
|
||||
m4,
|
||||
|
||||
ma.StringCast("/ipfs/QmSoLPppuBtQSGwKDZT2M73ULpjvfd3aZ6ha4oFGL1KrGM"),
|
||||
m5,
|
||||
}
|
||||
p1, _ := ma.StringCast("/ip4/128.199.219.111/tcp/4001")
|
||||
p2, _ := ma.StringCast("/ip4/104.236.76.40/tcp/4001")
|
||||
p3, _ := ma.StringCast("/ip4/178.62.158.247/tcp/4001")
|
||||
expected := map[string][]ma.Multiaddr{
|
||||
"QmSoLV4Bbm51jM9C4gDYZQ9Cy3U6aXMJDAbzgu2fzaDs64": {
|
||||
ma.StringCast("/ip4/128.199.219.111/tcp/4001"),
|
||||
ma.StringCast("/ip4/104.236.76.40/tcp/4001"),
|
||||
p1,
|
||||
p2,
|
||||
},
|
||||
"QmSoLer265NRgSp2LA3dPaeykiS1J6DifTC88f5uVQKNAd": {
|
||||
ma.StringCast("/ip4/178.62.158.247/tcp/4001"),
|
||||
p3,
|
||||
},
|
||||
"QmSoLPppuBtQSGwKDZT2M73ULpjvfd3aZ6ha4oFGL1KrGM": nil,
|
||||
}
|
||||
|
@ -137,13 +137,13 @@ var (
|
||||
func TimestampSeq() uint64 {
|
||||
now := uint64(time.Now().UnixNano())
|
||||
lastTimestampMu.Lock()
|
||||
defer lastTimestampMu.Unlock()
|
||||
// Not all clocks are strictly increasing, but we need these sequence numbers to be strictly
|
||||
// increasing.
|
||||
if now <= lastTimestamp {
|
||||
now = lastTimestamp + 1
|
||||
}
|
||||
lastTimestamp = now
|
||||
lastTimestampMu.Unlock()
|
||||
return now
|
||||
}
|
||||
|
||||
|
@ -4,8 +4,13 @@ go 1.21
|
||||
|
||||
retract v0.26.1 // Tag was applied incorrectly due to a bug in the release workflow.
|
||||
|
||||
replace github.com/multiformats/go-multiaddr => ../go-multiaddr
|
||||
|
||||
replace github.com/multiformats/go-multiaddr-dns => ../go-multiaddr-dns
|
||||
|
||||
require (
|
||||
github.com/benbjohnson/clock v1.3.5
|
||||
github.com/cloudflare/circl v1.3.9
|
||||
github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0
|
||||
github.com/flynn/noise v1.1.0
|
||||
|
@ -29,6 +29,8 @@ github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj
|
||||
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/cloudflare/circl v1.3.9 h1:QFrlgFYf2Qpi8bSpVPK1HBvWpx16v/1TZivyo7pGuBE=
|
||||
github.com/cloudflare/circl v1.3.9/go.mod h1:PDRU+oXvdD7KCtgKxW95M5Z8BpSCJXQORiZFnBQS5QU=
|
||||
github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE=
|
||||
github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM=
|
||||
github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw=
|
||||
@ -103,6 +105,7 @@ github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Z
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
|
||||
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
|
||||
@ -134,6 +137,7 @@ github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpO
|
||||
github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc=
|
||||
github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8=
|
||||
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
|
||||
github.com/ipfs/go-cid v0.0.7/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I=
|
||||
github.com/ipfs/go-cid v0.4.1 h1:A/T3qGvxi4kpKWWcPC/PgbvDA2bjVLO7n4UeVwnbs/s=
|
||||
github.com/ipfs/go-cid v0.4.1/go.mod h1:uQHwDeX4c6CtyrFwdqyhpNcxVewur1M7l7fNU7LKwZk=
|
||||
github.com/ipfs/go-datastore v0.5.0/go.mod h1:9zhEApYMTl17C8YDp7JmU7sQZi2/wqiYh73hakZ90Bk=
|
||||
@ -164,6 +168,10 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU=
|
||||
github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
|
||||
github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
|
||||
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
|
||||
github.com/klauspost/cpuid/v2 v2.2.3/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY=
|
||||
github.com/klauspost/cpuid/v2 v2.2.6/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
|
||||
github.com/klauspost/cpuid/v2 v2.2.7 h1:ZWSB3igEs+d0qvnxR/ZBzXVmxkgt8DdzP6m9pfuVLDM=
|
||||
github.com/klauspost/cpuid/v2 v2.2.7/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
|
||||
github.com/koron/go-ssdp v0.0.4 h1:1IDwrghSKYM7yLf7XCzbByg2sJ/JcNOZRXS2jczTwz0=
|
||||
@ -208,7 +216,6 @@ github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWE
|
||||
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4=
|
||||
github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI=
|
||||
github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4=
|
||||
github.com/miekg/dns v1.1.58 h1:ca2Hdkz+cDg/7eNF6V56jjzuZ4aCAE+DbVkILdQWG/4=
|
||||
github.com/miekg/dns v1.1.58/go.mod h1:Ypv+3b/KadlvW9vJfXOTf300O4UqaHFzFCuHz+rPkBY=
|
||||
@ -220,37 +227,37 @@ github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc h1:PTfri+PuQmWDqERdn
|
||||
github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc/go.mod h1:cGKTAVKx4SxOuR/czcZ/E2RSJ3sfHs8FpHhQ5CWMf9s=
|
||||
github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ=
|
||||
github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM=
|
||||
github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM=
|
||||
github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM=
|
||||
github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8=
|
||||
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
||||
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
|
||||
github.com/mr-tron/base58 v1.1.0/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8=
|
||||
github.com/mr-tron/base58 v1.1.3/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
|
||||
github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o=
|
||||
github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
|
||||
github.com/multiformats/go-base32 v0.0.3/go.mod h1:pLiuGC8y0QR3Ue4Zug5UzK9LjgbkL8NSQj0zQ5Nz/AA=
|
||||
github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE=
|
||||
github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI=
|
||||
github.com/multiformats/go-base36 v0.1.0/go.mod h1:kFGE83c6s80PklsHO9sRn2NCoffoRdUUOENyW/Vv6sM=
|
||||
github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0=
|
||||
github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4=
|
||||
github.com/multiformats/go-multiaddr v0.1.1/go.mod h1:aMKBKNEYmzmDmxfX88/vz+J5IU55txyt0p4aiWVohjo=
|
||||
github.com/multiformats/go-multiaddr v0.2.0/go.mod h1:0nO36NvPpyV4QzvTLi/lafl2y95ncPj0vFwVF6k6wJ4=
|
||||
github.com/multiformats/go-multiaddr v0.12.4 h1:rrKqpY9h+n80EwhhC/kkcunCZZ7URIF8yN1WEUt2Hvc=
|
||||
github.com/multiformats/go-multiaddr v0.12.4/go.mod h1:sBXrNzucqkFJhvKOiwwLyqamGa/P5EIXNPLovyhQCII=
|
||||
github.com/multiformats/go-multiaddr-dns v0.3.1 h1:QgQgR+LQVt3NPTjbrLLpsaT2ufAA2y0Mkk+QRVJbW3A=
|
||||
github.com/multiformats/go-multiaddr-dns v0.3.1/go.mod h1:G/245BRQ6FJGmryJCrOuTdB37AMA5AMOVuO6NY3JwTk=
|
||||
github.com/multiformats/go-multiaddr-fmt v0.1.0 h1:WLEFClPycPkp4fnIzoFoV9FVd49/eQsuaL3/CWe167E=
|
||||
github.com/multiformats/go-multiaddr-fmt v0.1.0/go.mod h1:hGtDIW4PU4BqJ50gW2quDuPVjyWNZxToGUh/HwTZYJo=
|
||||
github.com/multiformats/go-multibase v0.0.3/go.mod h1:5+1R4eQrT3PkYZ24C3W2Ue2tPwIdYQD509ZjSb5y9Oc=
|
||||
github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g=
|
||||
github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk=
|
||||
github.com/multiformats/go-multicodec v0.9.0 h1:pb/dlPnzee/Sxv/j4PmkDRxCOi3hXTz3IbPKOXWJkmg=
|
||||
github.com/multiformats/go-multicodec v0.9.0/go.mod h1:L3QTQvMIaVBkXOXXtVmYE+LI16i14xuaojr/H7Ai54k=
|
||||
github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew=
|
||||
github.com/multiformats/go-multihash v0.0.13/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc=
|
||||
github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U=
|
||||
github.com/multiformats/go-multihash v0.2.3/go.mod h1:dXgKXCXjBzdscBLk9JkjINiEsCKRVch90MdaGiKsvSM=
|
||||
github.com/multiformats/go-multistream v0.5.0 h1:5htLSLl7lvJk3xx3qT/8Zm9J4K8vEOf/QGkvOGQAyiE=
|
||||
github.com/multiformats/go-multistream v0.5.0/go.mod h1:n6tMZiwiP2wUsR8DgfDWw1dydlEqV3l6N3/GBsX6ILA=
|
||||
github.com/multiformats/go-varint v0.0.1/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE=
|
||||
github.com/multiformats/go-varint v0.0.5/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE=
|
||||
github.com/multiformats/go-varint v0.0.6/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE=
|
||||
github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8=
|
||||
github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU=
|
||||
github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo=
|
||||
@ -444,6 +451,8 @@ golang.org/x/crypto v0.0.0-20200602180216-279210d13fed/go.mod h1:LzIPMQfyMNhhGPh
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
|
||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
|
||||
golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE=
|
||||
golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio=
|
||||
golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw=
|
||||
@ -453,6 +462,7 @@ golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOM
|
||||
golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI=
|
||||
golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20230725012225-302865e7556b/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc=
|
||||
golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 h1:vr/HnozRka3pE4EsMEg1lgkXJkTFJCVUX+S/ZT6wYzM=
|
||||
golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842/go.mod h1:XtvwrStGgqGPLc4cjQfWqZHG1YFdYs6swckp8vpsjnc=
|
||||
golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
@ -466,7 +476,9 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
||||
golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI=
|
||||
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
golang.org/x/mod v0.11.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA=
|
||||
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
@ -486,7 +498,9 @@ golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v
|
||||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
|
||||
golang.org/x/net v0.0.0-20210423184538-5f58ad60dda6/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
|
||||
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
|
||||
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
|
||||
golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
|
||||
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
|
||||
@ -536,8 +550,10 @@ golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBc
|
||||
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
@ -552,6 +568,7 @@ golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y=
|
||||
golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
|
||||
golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY=
|
||||
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
|
||||
@ -565,6 +582,7 @@ golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
|
||||
golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
|
||||
@ -592,6 +610,7 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY
|
||||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
||||
golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA=
|
||||
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
|
||||
golang.org/x/tools v0.21.0 h1:qc0xYgIbsSDt9EyWz05J5wfa7LOVW0YTLOXrqdLAWIw=
|
||||
golang.org/x/tools v0.21.0/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
|
||||
@ -634,6 +653,7 @@ gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o=
|
||||
@ -641,6 +661,7 @@ honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWh
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
|
||||
lukechampine.com/blake3 v1.1.6/go.mod h1:tkKEOtDkNtklkXtLNEOGNq5tcV90tJiA1vAA12R78LA=
|
||||
lukechampine.com/blake3 v1.2.1 h1:YuqqRuaqsGV71BV/nm9xlI0MKUv4QC54jQnBChWbGnI=
|
||||
lukechampine.com/blake3 v1.2.1/go.mod h1:0OFRp7fBtAylGVCO40o87sbupkyIGgbpv1+M1k1LM6k=
|
||||
sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck=
|
||||
|
@ -163,6 +163,10 @@ func TestChainOptions(t *testing.T) {
|
||||
}
|
||||
}
|
||||
}
|
||||
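// tStringCast is a test-only convenience around ma.StringCast that discards
// the error, since the fixture addresses used in these tests are known to be valid.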
func tStringCast(s string) ma.Multiaddr {
|
||||
st, _ := ma.StringCast(s)
|
||||
return st
|
||||
}
|
||||
|
||||
func TestTransportConstructorTCP(t *testing.T) {
|
||||
h, err := New(
|
||||
@ -171,8 +175,8 @@ func TestTransportConstructorTCP(t *testing.T) {
|
||||
)
|
||||
require.NoError(t, err)
|
||||
defer h.Close()
|
||||
require.NoError(t, h.Network().Listen(ma.StringCast("/ip4/127.0.0.1/tcp/0")))
|
||||
err = h.Network().Listen(ma.StringCast("/ip4/127.0.0.1/udp/0/quic-v1"))
|
||||
require.NoError(t, h.Network().Listen(tStringCast("/ip4/127.0.0.1/tcp/0")))
|
||||
err = h.Network().Listen(tStringCast("/ip4/127.0.0.1/udp/0/quic-v1"))
|
||||
require.Error(t, err)
|
||||
require.Contains(t, err.Error(), swarm.ErrNoTransport.Error())
|
||||
}
|
||||
@ -184,8 +188,8 @@ func TestTransportConstructorQUIC(t *testing.T) {
|
||||
)
|
||||
require.NoError(t, err)
|
||||
defer h.Close()
|
||||
require.NoError(t, h.Network().Listen(ma.StringCast("/ip4/127.0.0.1/udp/0/quic-v1")))
|
||||
err = h.Network().Listen(ma.StringCast("/ip4/127.0.0.1/tcp/0"))
|
||||
require.NoError(t, h.Network().Listen(tStringCast("/ip4/127.0.0.1/udp/0/quic-v1")))
|
||||
err = h.Network().Listen(tStringCast("/ip4/127.0.0.1/tcp/0"))
|
||||
require.Error(t, err)
|
||||
require.Contains(t, err.Error(), swarm.ErrNoTransport.Error())
|
||||
}
|
||||
@ -290,8 +294,8 @@ func TestTransportConstructorWebTransport(t *testing.T) {
|
||||
)
|
||||
require.NoError(t, err)
|
||||
defer h.Close()
|
||||
require.NoError(t, h.Network().Listen(ma.StringCast("/ip4/127.0.0.1/udp/0/quic-v1/webtransport")))
|
||||
err = h.Network().Listen(ma.StringCast("/ip4/127.0.0.1/udp/0/quic-v1/"))
|
||||
require.NoError(t, h.Network().Listen(tStringCast("/ip4/127.0.0.1/udp/0/quic-v1/webtransport")))
|
||||
err = h.Network().Listen(tStringCast("/ip4/127.0.0.1/udp/0/quic-v1/"))
|
||||
require.Error(t, err)
|
||||
require.Contains(t, err.Error(), swarm.ErrNoTransport.Error())
|
||||
}
|
||||
@ -311,12 +315,12 @@ func TestTransportCustomAddressWebTransport(t *testing.T) {
|
||||
)
|
||||
require.NoError(t, err)
|
||||
defer h.Close()
|
||||
require.NoError(t, h.Network().Listen(ma.StringCast("/ip4/127.0.0.1/udp/0/quic-v1/webtransport")))
|
||||
require.NoError(t, h.Network().Listen(tStringCast("/ip4/127.0.0.1/udp/0/quic-v1/webtransport")))
|
||||
addrs := h.Addrs()
|
||||
require.Len(t, addrs, 1)
|
||||
require.NotEqual(t, addrs[0], customAddr)
|
||||
restOfAddr, lastComp := ma.SplitLast(addrs[0])
|
||||
restOfAddr, secondToLastComp := ma.SplitLast(restOfAddr)
|
||||
restOfAddr, lastComp, _ := ma.SplitLast(addrs[0])
|
||||
restOfAddr, secondToLastComp, _ := ma.SplitLast(restOfAddr)
|
||||
require.Equal(t, ma.P_CERTHASH, lastComp.Protocol().Code)
|
||||
require.Equal(t, ma.P_CERTHASH, secondToLastComp.Protocol().Code)
|
||||
require.True(t, restOfAddr.Equal(customAddr))
|
||||
@ -343,7 +347,7 @@ func TestTransportCustomAddressWebTransportDoesNotStall(t *testing.T) {
|
||||
defer h.Close()
|
||||
addrs := h.Addrs()
|
||||
require.Len(t, addrs, 1)
|
||||
_, lastComp := ma.SplitLast(addrs[0])
|
||||
_, lastComp, _ := ma.SplitLast(addrs[0])
|
||||
require.NotEqual(t, ma.P_CERTHASH, lastComp.Protocol().Code)
|
||||
// We did not add the certhash to the multiaddr
|
||||
require.Equal(t, addrs[0], customAddr)
|
||||
@ -447,7 +451,7 @@ func TestDialCircuitAddrWithWrappedResourceManager(t *testing.T) {
|
||||
|
||||
h.Peerstore().AddAddrs(relay.ID(), relay.Addrs(), 10*time.Minute)
|
||||
h.Peerstore().AddAddr(peerBehindRelay.ID(),
|
||||
ma.StringCast(
|
||||
tStringCast(
|
||||
fmt.Sprintf("/p2p/%s/p2p-circuit", relay.ID()),
|
||||
),
|
||||
peerstore.TempAddrTTL,
|
||||
|
@ -143,7 +143,6 @@ func (d *BackoffDiscovery) FindPeers(ctx context.Context, ns string, opts ...dis
}

c.mux.Lock()
defer c.mux.Unlock()

timeExpired := d.clock.Now().After(c.nextDiscover)

@ -165,6 +164,7 @@ func (d *BackoffDiscovery) FindPeers(ctx context.Context, ns string, opts ...dis
}
}
close(pch)
c.mux.Unlock()
return pch, nil
}

@ -172,6 +172,7 @@ func (d *BackoffDiscovery) FindPeers(ctx context.Context, ns string, opts ...dis
if !c.ongoing {
pch, err := d.disc.FindPeers(ctx, ns, opts...)
if err != nil {
c.mux.Unlock()
return nil, err
}

@ -189,12 +190,12 @@ func (d *BackoffDiscovery) FindPeers(ctx context.Context, ns string, opts ...dis
c.sendingChs[evtCh] = options.Limit

go findPeerReceiver(ctx, pch, evtCh, rcvPeers)

c.mux.Unlock()
return pch, nil
}

func findPeerDispatcher(ctx context.Context, c *backoffCache, pch <-chan peer.AddrInfo) {
defer func() {
cleanup := func() {
c.mux.Lock()

// If the peer addresses have changed reset the backoff
@ -212,12 +213,13 @@ func findPeerDispatcher(ctx context.Context, c *backoffCache, pch <-chan peer.Ad
}
c.sendingChs = make(map[chan peer.AddrInfo]int)
c.mux.Unlock()
}()
}

for {
select {
case ai, ok := <-pch:
if !ok {
cleanup()
return
}
c.mux.Lock()
@ -246,14 +248,13 @@ func findPeerDispatcher(ctx context.Context, c *backoffCache, pch <-chan peer.Ad

c.mux.Unlock()
case <-ctx.Done():
cleanup()
return
}
}
}

func findPeerReceiver(ctx context.Context, pch, evtCh chan peer.AddrInfo, rcvPeers []peer.AddrInfo) {
defer close(pch)

for {
select {
case ai, ok := <-evtCh:
@ -279,12 +280,15 @@ func findPeerReceiver(ctx context.Context, pch, evtCh chan peer.AddrInfo, rcvPee
select {
case pch <- p:
case <-ctx.Done():
close(pch)
return
}
}
close(pch)
return
}
case <-ctx.Done():
close(pch)
return
}
}
@ -77,13 +77,12 @@ func (c *BackoffConnector) Connect(ctx context.Context, peerCh <-chan peer.AddrI

go func(pi peer.AddrInfo) {
ctx, cancel := context.WithTimeout(ctx, c.connTryDur)
defer cancel()

err := c.host.Connect(ctx, pi)
if err != nil {
log.Debugf("Error connecting to pubsub peer %s: %s", pi.ID, err.Error())
return
}
cancel()
}(pi)

case <-ctx.Done():
@ -86,7 +86,10 @@ func (s *mdnsService) Close() error {
func (s *mdnsService) getIPs(addrs []ma.Multiaddr) ([]string, error) {
var ip4, ip6 string
for _, addr := range addrs {
first, _ := ma.SplitFirst(addr)
first, _, err := ma.SplitFirst(addr)
if err != nil {
return nil, err
}
if first == nil {
continue
}
@ -154,7 +157,6 @@ func (s *mdnsService) startResolver(ctx context.Context) {
s.resolverWG.Add(2)
entryChan := make(chan *zeroconf.ServiceEntry, 1000)
go func() {
defer s.resolverWG.Done()
for entry := range entryChan {
// We only care about the TXT records.
// Ignore A, AAAA and PTR.
@ -183,12 +185,13 @@ func (s *mdnsService) startResolver(ctx context.Context) {
go s.notifee.HandlePeerFound(info)
}
}
s.resolverWG.Done()
}()
go func() {
defer s.resolverWG.Done()
if err := zeroconf.Browse(ctx, s.serviceName, mdnsDomain, entryChan); err != nil {
log.Debugf("zeroconf browsing failed: %s", err)
}
s.resolverWG.Done()
}()
}
@ -45,13 +45,14 @@ func (d *RoutingDiscovery) Advertise(ctx context.Context, ns string, opts ...dis
// closest peers to the key/CID before it goes on to provide the record to them.
// Not setting a timeout here will make the DHT wander forever.
pctx, cancel := context.WithTimeout(ctx, 60*time.Second)
defer cancel()

err = d.Provide(pctx, cid, true)
if err != nil {
cancel()
return 0, err
}

cancel()
return ttl, nil
}
@ -156,17 +156,13 @@ func ipInList(candidate ma.Multiaddr, list []ma.Multiaddr) bool {
}

func (as *AmbientAutoNAT) background() {
defer close(as.backgroundRunning)
// wait a bit for the node to come online and establish some connections
// before starting autodetection
delay := as.config.bootDelay

subChan := as.subscriber.Out()
defer as.subscriber.Close()
defer as.emitReachabilityChanged.Close()

timer := time.NewTimer(delay)
defer timer.Stop()
timerRunning := true
retryProbe := false
for {
@ -174,7 +170,8 @@ func (as *AmbientAutoNAT) background() {
// new inbound connection.
case conn := <-as.inboundConn:
localAddrs := as.host.Addrs()
if manet.IsPublicAddr(conn.RemoteMultiaddr()) &&
is, err := manet.IsPublicAddr(conn.RemoteMultiaddr())
if is && err == nil &&
!ipInList(conn.RemoteMultiaddr(), localAddrs) {
as.lastInbound = time.Now()
}
@ -201,6 +198,10 @@ func (as *AmbientAutoNAT) background() {
// probe finished.
case err, ok := <-as.dialResponses:
if !ok {
close(as.backgroundRunning)
as.subscriber.Close()
as.emitReachabilityChanged.Close()
timer.Stop()
return
}
if IsDialRefused(err) {
@ -214,6 +215,10 @@ func (as *AmbientAutoNAT) background() {
timerRunning = false
retryProbe = false
case <-as.ctx.Done():
close(as.backgroundRunning)
as.subscriber.Close()
as.emitReachabilityChanged.Close()
timer.Stop()
return
}

@ -381,7 +386,6 @@ func (as *AmbientAutoNAT) tryProbe(p peer.ID) bool {
func (as *AmbientAutoNAT) probe(pi *peer.AddrInfo) {
cli := NewAutoNATClient(as.host, as.config.addressFunc, as.metricsTracer)
ctx, cancel := context.WithTimeout(as.ctx, as.config.requestTimeout)
defer cancel()

err := cli.DialBack(ctx, pi.ID)
log.Debugf("Dialback through peer %s completed: err: %s", pi.ID, err)
@ -389,6 +393,7 @@ func (as *AmbientAutoNAT) probe(pi *peer.AddrInfo) {
select {
case as.dialResponses <- err:
case <-as.ctx.Done():
cancel()
return
}
}
@ -51,12 +51,8 @@ func (c *client) DialBack(ctx context.Context, p peer.ID) error {
s.Reset()
return err
}
defer s.Scope().ReleaseMemory(maxMsgSize)

s.SetDeadline(time.Now().Add(streamTimeout))
// Might as well just reset the stream. Once we get to this point, we
// don't care about being nice.
defer s.Close()

r := pbio.NewDelimitedReader(s, maxMsgSize)
w := pbio.NewDelimitedWriter(s)
@ -64,16 +60,22 @@ func (c *client) DialBack(ctx context.Context, p peer.ID) error {
req := newDialMessage(peer.AddrInfo{ID: c.h.ID(), Addrs: c.addrFunc()})
if err := w.WriteMsg(req); err != nil {
s.Reset()
s.Scope().ReleaseMemory(maxMsgSize)
s.Close()
return err
}

var res pb.Message
if err := r.ReadMsg(&res); err != nil {
s.Reset()
s.Scope().ReleaseMemory(maxMsgSize)
s.Close()
return err
}
if res.GetType() != pb.Message_DIAL_RESPONSE {
s.Reset()
s.Scope().ReleaseMemory(maxMsgSize)
s.Close()
return fmt.Errorf("unexpected response: %s", res.GetType().String())
}

@ -81,6 +83,8 @@ func (c *client) DialBack(ctx context.Context, p peer.ID) error {
if c.mt != nil {
c.mt.ReceivedDialResponse(status)
}
s.Scope().ReleaseMemory(maxMsgSize)
s.Close()
switch status {
case pb.Message_OK:
return nil
@ -30,7 +30,7 @@ func (d *dialPolicy) skipDial(addr ma.Multiaddr) bool {
}

// skip private network (unroutable) addresses
if !manet.IsPublicAddr(addr) {
if is, err := manet.IsPublicAddr(addr); !is && err == nil {
return true
}
candidateIP, err := manet.ToIP(addr)
@ -60,7 +60,11 @@ func (d *dialPolicy) skipPeer(addrs []ma.Multiaddr) bool {
localAddrs := d.host.Addrs()
localHosts := make([]net.IP, 0)
for _, lAddr := range localAddrs {
if _, err := lAddr.ValueForProtocol(ma.P_CIRCUIT); err != nil && manet.IsPublicAddr(lAddr) {
is, err := manet.IsPublicAddr(lAddr)
if err != nil {
continue
}
if _, err := lAddr.ValueForProtocol(ma.P_CIRCUIT); err != nil && is {
lIP, err := manet.ToIP(lAddr)
if err != nil {
continue
@ -72,7 +76,11 @@ func (d *dialPolicy) skipPeer(addrs []ma.Multiaddr) bool {
// if a public IP of the peer is one of ours: skip the peer.
goodPublic := false
for _, addr := range addrs {
if _, err := addr.ValueForProtocol(ma.P_CIRCUIT); err != nil && manet.IsPublicAddr(addr) {
is, err := manet.IsPublicAddr(addr)
if err != nil {
continue
}
if _, err := addr.ValueForProtocol(ma.P_CIRCUIT); err != nil && is {
aIP, err := manet.ToIP(addr)
if err != nil {
continue
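Note: the dial-policy hunks above all apply one pattern: manet.IsPublicAddr in this fork appears to return (bool, error), and an error is treated as "not public" rather than propagated. A small illustrative sketch of that call-site pattern follows; the function name here is made up for the example.

package example // illustrative only

import (
	ma "github.com/multiformats/go-multiaddr"
	manet "github.com/multiformats/go-multiaddr/net"
)

// usablePublicAddr reports whether addr is public, treating any parse error
// as "not public", mirroring the call sites in the hunks above.
func usablePublicAddr(addr ma.Multiaddr) bool {
	is, err := manet.IsPublicAddr(addr) // assumed fork signature: (bool, error)
	return is && err == nil
}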
@ -138,23 +138,23 @@ func (mt *metricsTracer) ReachabilityStatusConfidence(confidence int) {

func (mt *metricsTracer) ReceivedDialResponse(status pb.Message_ResponseStatus) {
tags := metricshelper.GetStringSlice()
defer metricshelper.PutStringSlice(tags)
*tags = append(*tags, getResponseStatus(status))
receivedDialResponseTotal.WithLabelValues(*tags...).Inc()
metricshelper.PutStringSlice(tags)
}

func (mt *metricsTracer) OutgoingDialResponse(status pb.Message_ResponseStatus) {
tags := metricshelper.GetStringSlice()
defer metricshelper.PutStringSlice(tags)
*tags = append(*tags, getResponseStatus(status))
outgoingDialResponseTotal.WithLabelValues(*tags...).Inc()
metricshelper.PutStringSlice(tags)
}

func (mt *metricsTracer) OutgoingDialRefused(reason string) {
tags := metricshelper.GetStringSlice()
defer metricshelper.PutStringSlice(tags)
*tags = append(*tags, reason)
outgoingDialRefusedTotal.WithLabelValues(*tags...).Inc()
metricshelper.PutStringSlice(tags)
}

func (mt *metricsTracer) NextProbeTime(t time.Time) {
@ -17,8 +17,7 @@ func (as *AmbientAutoNAT) ListenClose(net network.Network, a ma.Multiaddr) {}

// Connected is part of the network.Notifiee interface
func (as *AmbientAutoNAT) Connected(net network.Network, c network.Conn) {
if c.Stat().Direction == network.DirInbound &&
manet.IsPublicAddr(c.RemoteMultiaddr()) {
if is, err := manet.IsPublicAddr(c.RemoteMultiaddr()); c.Stat().Direction == network.DirInbound && is && err == nil {
select {
case as.inboundConn <- c:
default:
@ -62,10 +62,8 @@ func (as *autoNATService) handleStream(s network.Stream) {
s.Reset()
return
}
defer s.Scope().ReleaseMemory(maxMsgSize)

s.SetDeadline(time.Now().Add(streamTimeout))
defer s.Close()

pid := s.Conn().RemotePeer()
log.Debugf("New stream from %s", pid)
@ -80,6 +78,8 @@ func (as *autoNATService) handleStream(s network.Stream) {
if err != nil {
log.Debugf("Error reading message from %s: %s", pid, err.Error())
s.Reset()
s.Scope().ReleaseMemory(maxMsgSize)
s.Close()
return
}

@ -87,6 +87,8 @@ func (as *autoNATService) handleStream(s network.Stream) {
if t != pb.Message_DIAL {
log.Debugf("Unexpected message from %s: %s (%d)", pid, t.String(), t)
s.Reset()
s.Scope().ReleaseMemory(maxMsgSize)
s.Close()
return
}

@ -98,11 +100,15 @@ func (as *autoNATService) handleStream(s network.Stream) {
if err != nil {
log.Debugf("Error writing response to %s: %s", pid, err.Error())
s.Reset()
s.Scope().ReleaseMemory(maxMsgSize)
s.Close()
return
}
if as.config.metricsTracer != nil {
as.config.metricsTracer.OutgoingDialResponse(res.GetDialResponse().GetStatus())
}
s.Scope().ReleaseMemory(maxMsgSize)
s.Close()
}

func (as *autoNATService) handleDial(p peer.ID, obsaddr ma.Multiaddr, mpi *pb.Message_PeerInfo) *pb.Message_DialResponse {
@ -137,7 +143,10 @@ func (as *autoNATService) handleDial(p peer.ID, obsaddr ma.Multiaddr, mpi *pb.Me
}

// Determine the peer's IP address.
hostIP, _ := ma.SplitFirst(obsaddr)
hostIP, _, err := ma.SplitFirst(obsaddr)
if err != nil {
return newDialResponseError(pb.Message_E_INTERNAL_ERROR, err.Error())
}
switch hostIP.Protocol().Code {
case ma.P_IP4, ma.P_IP6:
default:
@ -160,7 +169,7 @@ func (as *autoNATService) handleDial(p peer.ID, obsaddr ma.Multiaddr, mpi *pb.Me
// For security reasons, we _only_ dial the observed IP address.
// Replace other IP addresses with the observed one so we can still try the
// requested ports/transports.
if ip, rest := ma.SplitFirst(addr); !ip.Equal(hostIP) {
if ip, rest, _ := ma.SplitFirst(addr); !ip.Equal(hostIP) {
// Make sure it's an IP address
switch ip.Protocol().Code {
case ma.P_IP4, ma.P_IP6:
@ -221,36 +230,36 @@ func (as *autoNATService) doDial(pi peer.AddrInfo) *pb.Message_DialResponse {
as.mx.Unlock()

ctx, cancel := context.WithTimeout(context.Background(), as.config.dialTimeout)
defer cancel()

as.config.dialer.Peerstore().ClearAddrs(pi.ID)

as.config.dialer.Peerstore().AddAddrs(pi.ID, pi.Addrs, peerstore.TempAddrTTL)

defer func() {
as.config.dialer.Peerstore().ClearAddrs(pi.ID)
as.config.dialer.Peerstore().RemovePeer(pi.ID)
}()

conn, err := as.config.dialer.DialPeer(ctx, pi.ID)
if err != nil {
log.Debugf("error dialing %s: %s", pi.ID, err.Error())
// wait for the context to timeout to avoid leaking timing information
// this renders the service ineffective as a port scanner
<-ctx.Done()
cancel()
as.config.dialer.Peerstore().ClearAddrs(pi.ID)
as.config.dialer.Peerstore().RemovePeer(pi.ID)
return newDialResponseError(pb.Message_E_DIAL_ERROR, "dial failed")
}

ra := conn.RemoteMultiaddr()
as.config.dialer.ClosePeer(pi.ID)
cancel()
as.config.dialer.Peerstore().ClearAddrs(pi.ID)
as.config.dialer.Peerstore().RemovePeer(pi.ID)
return newDialResponseOK(ra)
}

// Enable the autoNAT service if it is not running.
func (as *autoNATService) Enable() {
as.instanceLock.Lock()
defer as.instanceLock.Unlock()
if as.instance != nil {
as.instanceLock.Unlock()
return
}
ctx, cancel := context.WithCancel(context.Background())
@ -259,18 +268,19 @@ func (as *autoNATService) Enable() {
as.config.host.SetStreamHandler(AutoNATProto, as.handleStream)

go as.background(ctx)
as.instanceLock.Unlock()
}

// Disable the autoNAT service if it is running.
func (as *autoNATService) Disable() {
as.instanceLock.Lock()
defer as.instanceLock.Unlock()
if as.instance != nil {
as.config.host.RemoveStreamHandler(AutoNATProto)
as.instance()
as.instance = nil
<-as.backgroundRunning
}
as.instanceLock.Unlock()
}

func (as *autoNATService) Close() error {
@ -279,10 +289,7 @@ func (as *autoNATService) Close() error {
}

func (as *autoNATService) background(ctx context.Context) {
defer close(as.backgroundRunning)

timer := time.NewTimer(as.config.throttleResetPeriod)
defer timer.Stop()

for {
select {
@ -294,6 +301,8 @@ func (as *autoNATService) background(ctx context.Context) {
jitter := rand.Float32() * float32(as.config.throttleResetJitter)
timer.Reset(as.config.throttleResetPeriod + time.Duration(int64(jitter)))
case <-ctx.Done():
close(as.backgroundRunning)
timer.Stop()
return
}
}
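Note: the autonat service hunks above repeat a single transformation: deferred cleanup (stream close, memory release, peerstore cleanup, lock release) is replaced by explicit calls on every return path. A minimal before/after sketch of that pattern, not the exact libp2p code:

package example // illustrative only

import "sync"

type counter struct {
	mu sync.Mutex
	n  int
}

// Before: release the lock via defer.
func (c *counter) getDeferred() int {
	c.mu.Lock()
	defer c.mu.Unlock()
	return c.n
}

// After: unlock explicitly on the exit path, the style used throughout this diff.
// The trade-off is that every early return must repeat the cleanup calls.
func (c *counter) getExplicit() int {
	c.mu.Lock()
	n := c.n
	c.mu.Unlock()
	return n
}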
@ -17,13 +17,13 @@ func cleanupAddressSet(addrs []ma.Multiaddr) []ma.Multiaddr {
continue
}

if manet.IsPublicAddr(a) || isDNSAddr(a) {
if is, err := manet.IsPublicAddr(a); (is && err == nil) || isDNSAddr(a) {
public = append(public, a)
continue
}

// discard unroutable addrs
if manet.IsPrivateAddr(a) {
if is, err := manet.IsPrivateAddr(a); is && err == nil {
private = append(private, a)
}
}
@ -38,7 +38,10 @@ func cleanupAddressSet(addrs []ma.Multiaddr) []ma.Multiaddr {
func isRelayAddr(a ma.Multiaddr) bool {
isRelay := false

ma.ForEach(a, func(c ma.Component) bool {
ma.ForEach(a, func(c ma.Component, e error) bool {
if e != nil {
return false
}
switch c.Protocol().Code {
case ma.P_CIRCUIT:
isRelay = true
@ -52,7 +55,7 @@ func isRelayAddr(a ma.Multiaddr) bool {
}

func isDNSAddr(a ma.Multiaddr) bool {
if first, _ := ma.SplitFirst(a); first != nil {
if first, _, err := ma.SplitFirst(a); err == nil && first != nil {
switch first.Protocol().Code {
case ma.P_DNS, ma.P_DNS4, ma.P_DNS6, ma.P_DNSADDR:
return true
@ -84,7 +87,10 @@ func addrKeyAndPort(a ma.Multiaddr) (string, int) {
port int
)

ma.ForEach(a, func(c ma.Component) bool {
ma.ForEach(a, func(c ma.Component, e error) bool {
if e != nil {
return false
}
switch c.Protocol().Code {
case ma.P_TCP, ma.P_UDP:
port = int(binary.BigEndian.Uint16(c.RawValue()))
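Note: isRelayAddr and addrKeyAndPort above adapt to a ma.ForEach callback that now also receives an error. A short sketch of that traversal pattern, assuming the callback signature func(ma.Component, error) bool that the hunks suggest for this fork:

package example // illustrative only

import ma "github.com/multiformats/go-multiaddr"

// hasCircuit reports whether the address contains a /p2p-circuit component,
// stopping early on a malformed component, as in the isRelayAddr hunk above.
func hasCircuit(a ma.Multiaddr) bool {
	found := false
	ma.ForEach(a, func(c ma.Component, err error) bool {
		if err != nil {
			return false // stop iterating on error
		}
		if c.Protocol().Code == ma.P_CIRCUIT {
			found = true
			return false
		}
		return true // keep iterating
	})
	return found
}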
@ -84,7 +84,8 @@ func TestCleanupAddrs(t *testing.T) {
func makeAddrList(strs ...string) []ma.Multiaddr {
result := make([]ma.Multiaddr, 0, len(strs))
for _, s := range strs {
result = append(result, ma.StringCast(s))
m, _ := ma.StringCast(s)
result = append(result, m)
}
return result
}
@ -59,8 +59,8 @@ func NewAutoRelay(bhost *basic.BasicHost, opts ...Option) (*AutoRelay, error) {
func (r *AutoRelay) Start() {
r.refCount.Add(1)
go func() {
defer r.refCount.Done()
r.background()
r.refCount.Done()
}()
}

@ -70,14 +70,15 @@ func (r *AutoRelay) background() {
log.Debug("failed to subscribe to the EvtLocalReachabilityChanged")
return
}
defer subReachability.Close()

for {
select {
case <-r.ctx.Done():
subReachability.Close()
return
case ev, ok := <-subReachability.Out():
if !ok {
subReachability.Close()
return
}
// TODO: push changed addresses
@ -109,12 +110,15 @@ func (r *AutoRelay) hostAddrs(addrs []ma.Multiaddr) []ma.Multiaddr {

func (r *AutoRelay) relayAddrs(addrs []ma.Multiaddr) []ma.Multiaddr {
r.mx.Lock()
defer r.mx.Unlock()

if r.status != network.ReachabilityPrivate {
r.mx.Unlock()
return addrs
}
return r.relayFinder.relayAddrs(addrs)

a := r.relayFinder.relayAddrs(addrs)
r.mx.Unlock()
return a
}

func (r *AutoRelay) Close() error {
@ -43,11 +43,11 @@ func numRelays(h host.Host) int {
func usedRelays(h host.Host) []peer.ID {
m := make(map[peer.ID]struct{})
for _, addr := range h.Addrs() {
addr, comp := ma.SplitLast(addr)
addr, comp, _ := ma.SplitLast(addr)
if comp.Protocol().Code != ma.P_CIRCUIT { // not a relay addr
continue
}
_, comp = ma.SplitLast(addr)
_, comp, _ = ma.SplitLast(addr)
if comp.Protocol().Code != ma.P_P2P {
panic("expected p2p component")
}
@ -96,7 +96,7 @@ func newRelay(t *testing.T) host.Host {
saddr := addr.String()
if strings.HasPrefix(saddr, "/ip4/127.0.0.1/") {
addrNoIP := strings.TrimPrefix(saddr, "/ip4/127.0.0.1")
addrs[i] = ma.StringCast("/dns4/localhost" + addrNoIP)
addrs[i], _ = ma.StringCast("/dns4/localhost" + addrNoIP)
}
}
return addrs
@ -206,7 +206,7 @@ func TestBackoff(t *testing.T) {
saddr := addr.String()
if strings.HasPrefix(saddr, "/ip4/127.0.0.1/") {
addrNoIP := strings.TrimPrefix(saddr, "/ip4/127.0.0.1")
addrs[i] = ma.StringCast("/dns4/localhost" + addrNoIP)
addrs[i], _ = ma.StringCast("/dns4/localhost" + addrNoIP)
}
}
return addrs
@ -193,7 +193,6 @@ func (mt *metricsTracer) ReservationOpened(cnt int) {

func (mt *metricsTracer) ReservationRequestFinished(isRefresh bool, err error) {
tags := metricshelper.GetStringSlice()
defer metricshelper.PutStringSlice(tags)

if isRefresh {
*tags = append(*tags, "refresh")
@ -206,6 +205,7 @@ func (mt *metricsTracer) ReservationRequestFinished(isRefresh bool, err error) {
if !isRefresh && err == nil {
reservationsOpenedTotal.Inc()
}
metricshelper.PutStringSlice(tags)
}

func (mt *metricsTracer) RelayAddressUpdated() {
@ -218,27 +218,30 @@ func (mt *metricsTracer) RelayAddressCount(cnt int) {

func (mt *metricsTracer) CandidateChecked(supportsCircuitV2 bool) {
tags := metricshelper.GetStringSlice()
defer metricshelper.PutStringSlice(tags)

if supportsCircuitV2 {
*tags = append(*tags, "yes")
} else {
*tags = append(*tags, "no")
}
candidatesCircuitV2SupportTotal.WithLabelValues(*tags...).Inc()
metricshelper.PutStringSlice(tags)
}

func (mt *metricsTracer) CandidateAdded(cnt int) {
tags := metricshelper.GetStringSlice()
defer metricshelper.PutStringSlice(tags)

*tags = append(*tags, "added")
candidatesTotal.WithLabelValues(*tags...).Add(float64(cnt))
metricshelper.PutStringSlice(tags)
}

func (mt *metricsTracer) CandidateRemoved(cnt int) {
tags := metricshelper.GetStringSlice()
defer metricshelper.PutStringSlice(tags)

*tags = append(*tags, "removed")
candidatesTotal.WithLabelValues(*tags...).Add(float64(cnt))
metricshelper.PutStringSlice(tags)
}

func (mt *metricsTracer) CandidateLoopState(state candidateLoopState) {
@ -247,7 +250,6 @@ func (mt *metricsTracer) CandidateLoopState(state candidateLoopState) {

func (mt *metricsTracer) ScheduledWorkUpdated(scheduledWork *scheduledWorkTimes) {
tags := metricshelper.GetStringSlice()
defer metricshelper.PutStringSlice(tags)

*tags = append(*tags, "allowed peer source call")
scheduledWorkTime.WithLabelValues(*tags...).Set(float64(scheduledWork.nextAllowedCallToPeerSource.Unix()))
@ -263,6 +265,8 @@ func (mt *metricsTracer) ScheduledWorkUpdated(scheduledWork *scheduledWorkTimes)

*tags = append(*tags, "old candidate check")
scheduledWorkTime.WithLabelValues(*tags...).Set(float64(scheduledWork.nextOldCandidateCheck.Unix()))

metricshelper.PutStringSlice(tags)
}

func (mt *metricsTracer) DesiredReservations(cnt int) {

@ -72,11 +72,11 @@ func WithStaticRelays(static []peer.AddrInfo) Option {
numPeers = len(static)
}
c := make(chan peer.AddrInfo, numPeers)
defer close(c)

for i := 0; i < numPeers; i++ {
c <- static[i]
}
close(c)
return c
})(c)
WithMinCandidates(len(static))(c)
@ -119,14 +119,14 @@ func (rf *relayFinder) background(ctx context.Context) {
peerSourceRateLimiter := make(chan struct{}, 1)
rf.refCount.Add(1)
go func() {
defer rf.refCount.Done()
rf.findNodes(ctx, peerSourceRateLimiter)
rf.refCount.Done()
}()

rf.refCount.Add(1)
go func() {
defer rf.refCount.Done()
rf.handleNewCandidates(ctx)
rf.refCount.Done()
}()

subConnectedness, err := rf.host.EventBus().Subscribe(new(event.EvtPeerConnectednessChanged), eventbus.Name("autorelay (relay finder)"))
@ -134,11 +134,9 @@ func (rf *relayFinder) background(ctx context.Context) {
log.Error("failed to subscribe to the EvtPeerConnectednessChanged")
return
}
defer subConnectedness.Close()

now := rf.conf.clock.Now()
bootDelayTimer := rf.conf.clock.InstantTimer(now.Add(rf.conf.bootDelay))
defer bootDelayTimer.Stop()

// This is the least frequent event. It's our fallback timer if we don't have any other work to do.
leastFrequentInterval := rf.conf.minInterval
@ -162,12 +160,14 @@ func (rf *relayFinder) background(ctx context.Context) {
}

workTimer := rf.conf.clock.InstantTimer(rf.runScheduledWork(ctx, now, scheduledWork, peerSourceRateLimiter))
defer workTimer.Stop()

for {
select {
case ev, ok := <-subConnectedness.Out():
if !ok {
subConnectedness.Close()
bootDelayTimer.Stop()
workTimer.Stop()
return
}
evt := ev.(event.EvtPeerConnectednessChanged)
@ -206,6 +206,9 @@ func (rf *relayFinder) background(ctx context.Context) {
// Ignore the next time because we aren't scheduling any future work here
_ = rf.runScheduledWork(ctx, rf.conf.clock.Now(), scheduledWork, peerSourceRateLimiter)
case <-ctx.Done():
subConnectedness.Close()
bootDelayTimer.Stop()
workTimer.Stop()
return
}
}
@ -282,7 +285,6 @@ func (rf *relayFinder) clearOldCandidates(now time.Time) time.Time {

var deleted bool
rf.candidateMx.Lock()
defer rf.candidateMx.Unlock()
for id, cand := range rf.candidates {
expiry := cand.added.Add(rf.conf.maxCandidateAge)
if expiry.After(now) {
@ -298,7 +300,7 @@ func (rf *relayFinder) clearOldCandidates(now time.Time) time.Time {
if deleted {
rf.notifyMaybeNeedNewCandidates()
}

rf.candidateMx.Unlock()
return nextTime
}

@ -308,7 +310,6 @@ func (rf *relayFinder) clearBackoff(now time.Time) time.Time {
nextTime := now.Add(rf.conf.backoff)

rf.candidateMx.Lock()
defer rf.candidateMx.Unlock()
for id, t := range rf.backoff {
expiry := t.Add(rf.conf.backoff)
if expiry.After(now) {
@ -320,6 +321,7 @@ func (rf *relayFinder) clearBackoff(now time.Time) time.Time {
delete(rf.backoff, id)
}
}
rf.candidateMx.Unlock()

return nextTime
}
@ -383,11 +385,11 @@ func (rf *relayFinder) findNodes(ctx context.Context, peerSourceRateLimiter <-ch
rf.refCount.Add(1)
wg.Add(1)
go func() {
defer rf.refCount.Done()
defer wg.Done()
if added := rf.handleNewNode(ctx, pi); added {
rf.notifyNewCandidate()
}
rf.refCount.Done()
wg.Done()
}()
case <-ctx.Done():
rf.metricsTracer.CandidateLoopState(stopped)
@ -430,13 +432,13 @@ func (rf *relayFinder) handleNewNode(ctx context.Context, pi peer.AddrInfo) (add
}

ctx, cancel := context.WithTimeout(ctx, 20*time.Second)
defer cancel()
supportsV2, err := rf.tryNode(ctx, pi)
if err != nil {
log.Debugf("node %s not accepted as a candidate: %s", pi.ID, err)
if err == errProtocolNotSupported {
rf.metricsTracer.CandidateChecked(false)
}
cancel()
return false
}
rf.metricsTracer.CandidateChecked(true)
@ -444,6 +446,7 @@ func (rf *relayFinder) handleNewNode(ctx context.Context, pi peer.AddrInfo) (add
rf.candidateMx.Lock()
if len(rf.candidates) > rf.conf.maxCandidates {
rf.candidateMx.Unlock()
cancel()
return false
}
log.Debugw("node supports relay protocol", "peer", pi.ID, "supports circuit v2", supportsV2)
@ -453,6 +456,7 @@ func (rf *relayFinder) handleNewNode(ctx context.Context, pi peer.AddrInfo) (add
supportsRelayV2: supportsV2,
})
rf.candidateMx.Unlock()
cancel()
return true
}

@ -588,7 +592,6 @@ func (rf *relayFinder) connectToRelay(ctx context.Context, cand *candidate) (*ci
id := cand.ai.ID

ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
defer cancel()

var rsvp *circuitv2.Reservation

@ -598,6 +601,7 @@ func (rf *relayFinder) connectToRelay(ctx context.Context, cand *candidate) (*ci
rf.candidateMx.Lock()
rf.removeCandidate(cand.ai.ID)
rf.candidateMx.Unlock()
cancel()
return nil, fmt.Errorf("failed to connect: %w", err)
}
}
@ -615,6 +619,7 @@ func (rf *relayFinder) connectToRelay(ctx context.Context, cand *candidate) (*ci
rf.candidateMx.Lock()
rf.removeCandidate(id)
rf.candidateMx.Unlock()
cancel()
return rsvp, err
}

@ -716,9 +721,9 @@ func (rf *relayFinder) selectCandidates() []*candidate {
// through which we can be dialed.
func (rf *relayFinder) relayAddrs(addrs []ma.Multiaddr) []ma.Multiaddr {
rf.relayMx.Lock()
defer rf.relayMx.Unlock()

if rf.cachedAddrs != nil && rf.conf.clock.Now().Before(rf.cachedAddrsExpiry) {
rf.relayMx.Unlock()
return rf.cachedAddrs
}

@ -726,7 +731,7 @@ func (rf *relayFinder) relayAddrs(addrs []ma.Multiaddr) []ma.Multiaddr {

// only keep private addrs from the original addr set
for _, addr := range addrs {
if manet.IsPrivateAddr(addr) {
if is, err := manet.IsPrivateAddr(addr); is && err == nil {
raddrs = append(raddrs, addr)
}
}
@ -735,8 +740,11 @@ func (rf *relayFinder) relayAddrs(addrs []ma.Multiaddr) []ma.Multiaddr {
relayAddrCnt := 0
for p := range rf.relays {
addrs := cleanupAddressSet(rf.host.Peerstore().Addrs(p))
circuit, err := ma.StringCast(fmt.Sprintf("/p2p/%s/p2p-circuit", p))
if err != nil {
continue
}
relayAddrCnt += len(addrs)
circuit := ma.StringCast(fmt.Sprintf("/p2p/%s/p2p-circuit", p))
for _, addr := range addrs {
pub := addr.Encapsulate(circuit)
raddrs = append(raddrs, pub)
@ -747,13 +755,14 @@ func (rf *relayFinder) relayAddrs(addrs []ma.Multiaddr) []ma.Multiaddr {
rf.cachedAddrsExpiry = rf.conf.clock.Now().Add(30 * time.Second)

rf.metricsTracer.RelayAddressCount(relayAddrCnt)
rf.relayMx.Unlock()
return raddrs
}

func (rf *relayFinder) Start() error {
rf.ctxCancelMx.Lock()
defer rf.ctxCancelMx.Unlock()
if rf.ctxCancel != nil {
rf.ctxCancelMx.Unlock()
return errAlreadyRunning
}
log.Debug("starting relay finder")
@ -764,15 +773,15 @@ func (rf *relayFinder) Start() error {
rf.ctxCancel = cancel
rf.refCount.Add(1)
go func() {
defer rf.refCount.Done()
rf.background(ctx)
rf.refCount.Done()
}()
rf.ctxCancelMx.Unlock()
return nil
}

func (rf *relayFinder) Stop() error {
rf.ctxCancelMx.Lock()
defer rf.ctxCancelMx.Unlock()
log.Debug("stopping relay finder")
if rf.ctxCancel != nil {
rf.ctxCancel()
@ -781,6 +790,7 @@ func (rf *relayFinder) Stop() error {
rf.ctxCancel = nil

rf.resetMetrics()
rf.ctxCancelMx.Unlock()
return nil
}
@ -327,7 +327,6 @@ func NewHost(n network.Network, opts *HostOpts) (*BasicHost, error) {

func (h *BasicHost) updateLocalIpAddr() {
h.addrMu.Lock()
defer h.addrMu.Unlock()

h.filteredInterfaceAddrs = nil
h.allInterfaceAddrs = nil
@ -367,6 +366,7 @@ func (h *BasicHost) updateLocalIpAddr() {
// Then bail. There's nothing else we can do here.
h.filteredInterfaceAddrs = append(h.filteredInterfaceAddrs, manet.IP4Loopback, manet.IP6Loopback)
h.allInterfaceAddrs = h.filteredInterfaceAddrs
h.addrMu.Unlock()
return
}

@ -391,6 +391,7 @@ func (h *BasicHost) updateLocalIpAddr() {
}
}
}
h.addrMu.Unlock()
}

// Start starts background tasks in the host
@ -508,7 +509,6 @@ func (h *BasicHost) makeSignedPeerRecord(addrs []ma.Multiaddr) (*record.Envelope
}

func (h *BasicHost) background() {
defer h.refCount.Done()
var lastAddrs []ma.Multiaddr

emitAddrChange := func(currentAddrs []ma.Multiaddr, lastAddrs []ma.Multiaddr) {
@ -548,7 +548,6 @@ func (h *BasicHost) background() {
// periodically schedules an IdentifyPush to update our peers for changes
// in our address set (if needed)
ticker := time.NewTicker(addrChangeTickrInterval)
defer ticker.Stop()

for {
if len(h.network.ListenAddresses()) > 0 {
@ -564,6 +563,8 @@ func (h *BasicHost) background() {
case <-ticker.C:
case <-h.addrChangeChan:
case <-h.ctx.Done():
h.refCount.Done()
ticker.Stop()
return
}
}
@ -831,7 +832,7 @@ func (h *BasicHost) NormalizeMultiaddr(addr ma.Multiaddr) ma.Multiaddr {
if ok && n > 0 {
out := addr
for i := 0; i < n; i++ {
out, _ = ma.SplitLast(out)
out, _, _ = ma.SplitLast(out)
}
return out
}
@ -885,7 +886,7 @@ func (h *BasicHost) AllAddrs() []ma.Multiaddr {
}

// Did the router give us a routable public addr?
if manet.IsPublicAddr(extMaddr) {
if is, err := manet.IsPublicAddr(extMaddr); is && err == nil {
// well done
continue
}
@ -911,12 +912,12 @@ func (h *BasicHost) AllAddrs() []ma.Multiaddr {
}

// Drop the IP from the external maddr
_, extMaddrNoIP := ma.SplitFirst(extMaddr)
_, extMaddrNoIP, _ := ma.SplitFirst(extMaddr)

for _, obsMaddr := range observed {
// Extract a public observed addr.
ip, _ := ma.SplitFirst(obsMaddr)
if ip == nil || !manet.IsPublicAddr(ip) {
ip, _, _ := ma.SplitFirst(obsMaddr)
if is, err := manet.IsPublicAddr(ip); err != nil || !is || ip == nil {
continue
}

@ -937,7 +938,7 @@ func (h *BasicHost) AllAddrs() []ma.Multiaddr {
return finalAddrs
}

var wtComponent = ma.StringCast("/webtransport")
var wtComponent, _ = ma.StringCast("/webtransport")

// inferWebtransportAddrsFromQuic infers more webtransport addresses from QUIC addresses.
// This is useful when we discover our public QUIC address, but haven't discovered our public WebTransport addrs.
@ -952,7 +953,7 @@ func inferWebtransportAddrsFromQuic(in []ma.Multiaddr) []ma.Multiaddr {
// Count the number of QUIC addrs, this will let us allocate just once at the beginning.
quicAddrCount := 0
for _, addr := range in {
if _, lastComponent := ma.SplitLast(addr); lastComponent.Protocol().Code == ma.P_QUIC_V1 {
if _, lastComponent, err := ma.SplitLast(addr); err == nil && lastComponent.Protocol().Code == ma.P_QUIC_V1 {
quicAddrCount++
}
}
@ -964,14 +965,14 @@ func inferWebtransportAddrsFromQuic(in []ma.Multiaddr) []ma.Multiaddr {
if isWebtransport {
for i := 0; i < numCertHashes; i++ {
// Remove certhashes
addr, _ = ma.SplitLast(addr)
addr, _, _ = ma.SplitLast(addr)
}
webtransportAddrs[string(addr.Bytes())] = struct{}{}
// Remove webtransport component, now it's a multiaddr that ends in /quic-v1
addr, _ = ma.SplitLast(addr)
addr, _, _ = ma.SplitLast(addr)
}

if _, lastComponent := ma.SplitLast(addr); lastComponent.Protocol().Code == ma.P_QUIC_V1 {
if _, lastComponent, err := ma.SplitLast(addr); err == nil && lastComponent.Protocol().Code == ma.P_QUIC_V1 {
bytes := addr.Bytes()
if _, ok := quicOrWebtransportAddrs[string(bytes)]; ok {
foundSameListeningAddr = true
@ -995,7 +996,7 @@ func inferWebtransportAddrsFromQuic(in []ma.Multiaddr) []ma.Multiaddr {
for _, addr := range in {
// Add all the original addresses
out = append(out, addr)
if _, lastComponent := ma.SplitLast(addr); lastComponent.Protocol().Code == ma.P_QUIC_V1 {
if _, lastComponent, err := ma.SplitLast(addr); err == nil && lastComponent.Protocol().Code == ma.P_QUIC_V1 {
// Convert quic to webtransport
addr = addr.Encapsulate(wtComponent)
if _, ok := webtransportAddrs[string(addr.Bytes())]; ok {
@ -1021,13 +1022,16 @@ func trimHostAddrList(addrs []ma.Multiaddr, maxSize int) []ma.Multiaddr {

score := func(addr ma.Multiaddr) int {
var res int
if manet.IsPublicAddr(addr) {
if is, err := manet.IsPublicAddr(addr); is && err == nil {
res |= 1 << 12
} else if !manet.IsIPLoopback(addr) {
res |= 1 << 11
}
var protocolWeight int
ma.ForEach(addr, func(c ma.Component) bool {
ma.ForEach(addr, func(c ma.Component, e error) bool {
if e != nil {
return false
}
switch c.Protocol().Code {
case ma.P_QUIC_V1:
protocolWeight = 5
@ -1065,17 +1069,19 @@ func trimHostAddrList(addrs []ma.Multiaddr, maxSize int) []ma.Multiaddr {
// SetAutoNat sets the autonat service for the host.
func (h *BasicHost) SetAutoNat(a autonat.AutoNAT) {
h.addrMu.Lock()
defer h.addrMu.Unlock()

if h.autoNat == nil {
h.autoNat = a
}
h.addrMu.Unlock()
}

// GetAutoNat returns the host's AutoNAT service, if AutoNAT is enabled.
func (h *BasicHost) GetAutoNat() autonat.AutoNAT {
h.addrMu.Lock()
defer h.addrMu.Unlock()
return h.autoNat
n := h.autoNat
h.addrMu.Unlock()
return n
}

// Close shuts down the Host's services (network, etc).
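Note: basic_host.go above switches to three-value ma.SplitFirst and ma.SplitLast. Assuming the signatures in this fork are SplitFirst(addr) (*Component, Multiaddr, error) and SplitLast(addr) (Multiaddr, *Component, error), as the call sites indicate, here is a small sketch of the certhash-stripping loop used by inferWebtransportAddrsFromQuic; it ignores split errors the same way the surrounding code does when the address is known to be well-formed.

package example // illustrative only

import ma "github.com/multiformats/go-multiaddr"

// stripCertHashes drops trailing /certhash components from a webtransport address.
func stripCertHashes(addr ma.Multiaddr) ma.Multiaddr {
	for {
		rest, last, err := ma.SplitLast(addr) // assumed fork signature
		if err != nil || last == nil || last.Protocol().Code != ma.P_CERTHASH {
			return addr
		}
		addr = rest
	}
}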
@ -95,7 +95,8 @@ func TestSignedPeerRecordWithNoListenAddrs(t *testing.T) {

require.Empty(t, h.Addrs(), "expected no listen addrs")
// now add a listen addr
require.NoError(t, h.Network().Listen(ma.StringCast("/ip4/0.0.0.0/tcp/0")))
m, _ := ma.StringCast("/ip4/0.0.0.0/tcp/0")
require.NoError(t, h.Network().Listen(m))
require.NotEmpty(t, h.Addrs(), "expected at least 1 listen addr")

cab, ok := peerstore.GetCertifiedAddrBook(h.Peerstore())
@ -168,7 +169,7 @@ func TestProtocolHandlerEvents(t *testing.T) {
}

func TestHostAddrsFactory(t *testing.T) {
maddr := ma.StringCast("/ip4/1.2.3.4/tcp/1234")
maddr, _ := ma.StringCast("/ip4/1.2.3.4/tcp/1234")
addrsFactory := func(addrs []ma.Multiaddr) []ma.Multiaddr {
return []ma.Multiaddr{maddr}
}
@ -209,7 +210,8 @@ func TestLocalIPChangesWhenListenAddrChanges(t *testing.T) {
h.addrMu.Unlock()

// change listen addrs and verify local IP addr is not nil again
require.NoError(t, h.Network().Listen(ma.StringCast("/ip4/0.0.0.0/tcp/0")))
m, _ := ma.StringCast("/ip4/0.0.0.0/tcp/0")
require.NoError(t, h.Network().Listen(m))
h.SignalAddressChange()
time.Sleep(1 * time.Second)

@ -227,14 +229,15 @@ func TestAllAddrs(t *testing.T) {
require.Nil(t, h.AllAddrs())

// listen on loopback
laddr := ma.StringCast("/ip4/127.0.0.1/tcp/0")
laddr, _ := ma.StringCast("/ip4/127.0.0.1/tcp/0")
require.NoError(t, h.Network().Listen(laddr))
require.Len(t, h.AllAddrs(), 1)
firstAddr := h.AllAddrs()[0]
require.Equal(t, "/ip4/127.0.0.1", ma.Split(firstAddr)[0].String())

// listen on IPv4 0.0.0.0
require.NoError(t, h.Network().Listen(ma.StringCast("/ip4/0.0.0.0/tcp/0")))
m, _ := ma.StringCast("/ip4/0.0.0.0/tcp/0")
require.NoError(t, h.Network().Listen(m))
// should contain localhost and private local addr along with previous listen address
require.Len(t, h.AllAddrs(), 3)
// Should still contain the original addr.
@ -551,7 +554,8 @@ func TestProtoDowngrade(t *testing.T) {

func TestAddrChangeImmediatelyIfAddressNonEmpty(t *testing.T) {
ctx := context.Background()
taddrs := []ma.Multiaddr{ma.StringCast("/ip4/1.2.3.4/tcp/1234")}
m, _ := ma.StringCast("/ip4/1.2.3.4/tcp/1234")
taddrs := []ma.Multiaddr{m}

starting := make(chan struct{})
h, err := NewHost(swarmt.GenSwarm(t), &HostOpts{AddrsFactory: func(addrs []ma.Multiaddr) []ma.Multiaddr {
@ -569,10 +573,11 @@ func TestAddrChangeImmediatelyIfAddressNonEmpty(t *testing.T) {
defer sub.Close()
h.Start()

n, _ := ma.StringCast("/ip4/1.2.3.4/tcp/1234")
expected := event.EvtLocalAddressesUpdated{
Diffs: true,
Current: []event.UpdatedAddress{
{Action: event.Added, Address: ma.StringCast("/ip4/1.2.3.4/tcp/1234")},
{Action: event.Added, Address: n},
},
Removed: []event.UpdatedAddress{}}

@ -617,11 +622,14 @@ func TestHostAddrChangeDetection(t *testing.T) {
// This test uses the address factory to provide several
// sets of listen addresses for the host. It advances through
// the sets by changing the currentAddrSet index var below.
m1, _ := ma.StringCast("/ip4/1.2.3.4/tcp/1234")
m2, _ := ma.StringCast("/ip4/2.3.4.5/tcp/1234")
m3, _ := ma.StringCast("/ip4/3.4.5.6/tcp/4321")
addrSets := [][]ma.Multiaddr{
{},
{ma.StringCast("/ip4/1.2.3.4/tcp/1234")},
{ma.StringCast("/ip4/1.2.3.4/tcp/1234"), ma.StringCast("/ip4/2.3.4.5/tcp/1234")},
{ma.StringCast("/ip4/2.3.4.5/tcp/1234"), ma.StringCast("/ip4/3.4.5.6/tcp/4321")},
{m1},
{m1, m2},
{m2, m3},
}

// The events we expect the host to emit when SignalAddressChange is called
@ -630,26 +638,26 @@ func TestHostAddrChangeDetection(t *testing.T) {
{
Diffs: true,
Current: []event.UpdatedAddress{
{Action: event.Added, Address: ma.StringCast("/ip4/1.2.3.4/tcp/1234")},
{Action: event.Added, Address: m1},
},
Removed: []event.UpdatedAddress{},
},
{
Diffs: true,
Current: []event.UpdatedAddress{
{Action: event.Maintained, Address: ma.StringCast("/ip4/1.2.3.4/tcp/1234")},
{Action: event.Added, Address: ma.StringCast("/ip4/2.3.4.5/tcp/1234")},
{Action: event.Maintained, Address: m1},
{Action: event.Added, Address: m2},
},
Removed: []event.UpdatedAddress{},
},
{
Diffs: true,
Current: []event.UpdatedAddress{
{Action: event.Added, Address: ma.StringCast("/ip4/3.4.5.6/tcp/4321")},
{Action: event.Maintained, Address: ma.StringCast("/ip4/2.3.4.5/tcp/1234")},
{Action: event.Added, Address: m3},
{Action: event.Maintained, Address: m2},
},
Removed: []event.UpdatedAddress{
{Action: event.Removed, Address: ma.StringCast("/ip4/1.2.3.4/tcp/1234")},
{Action: event.Removed, Address: m1},
},
},
}
@ -821,8 +829,8 @@ func TestNormalizeMultiaddr(t *testing.T) {
h1, err := NewHost(swarmt.GenSwarm(t), nil)
require.NoError(t, err)
defer h1.Close()

require.Equal(t, "/ip4/1.2.3.4/udp/9999/quic-v1/webtransport", h1.NormalizeMultiaddr(ma.StringCast("/ip4/1.2.3.4/udp/9999/quic-v1/webtransport/certhash/uEgNmb28")).String())
m, _ := ma.StringCast("/ip4/1.2.3.4/udp/9999/quic-v1/webtransport/certhash/uEgNmb28")
require.Equal(t, "/ip4/1.2.3.4/udp/9999/quic-v1/webtransport", h1.NormalizeMultiaddr(m).String())
}

func TestInferWebtransportAddrsFromQuic(t *testing.T) {
@ -883,7 +891,8 @@ func TestInferWebtransportAddrsFromQuic(t *testing.T) {
sort.StringSlice(tc.out).Sort()
min := make([]ma.Multiaddr, 0, len(tc.in))
for _, addr := range tc.in {
min = append(min, ma.StringCast(addr))
m1, _ := ma.StringCast(addr)
min = append(min, m1)
}
outMa := inferWebtransportAddrsFromQuic(min)
outStr := make([]string, 0, len(outMa))
@ -905,14 +914,14 @@ func TestTrimHostAddrList(t *testing.T) {
out []ma.Multiaddr
}

tcpPublic := ma.StringCast("/ip4/1.1.1.1/tcp/1")
quicPublic := ma.StringCast("/ip4/1.1.1.1/udp/1/quic-v1")
tcpPublic, _ := ma.StringCast("/ip4/1.1.1.1/tcp/1")
quicPublic, _ := ma.StringCast("/ip4/1.1.1.1/udp/1/quic-v1")

tcpPrivate := ma.StringCast("/ip4/192.168.1.1/tcp/1")
quicPrivate := ma.StringCast("/ip4/192.168.1.1/udp/1/quic-v1")
tcpPrivate, _ := ma.StringCast("/ip4/192.168.1.1/tcp/1")
quicPrivate, _ := ma.StringCast("/ip4/192.168.1.1/udp/1/quic-v1")

tcpLocal := ma.StringCast("/ip4/127.0.0.1/tcp/1")
quicLocal := ma.StringCast("/ip4/127.0.0.1/udp/1/quic-v1")
tcpLocal, _ := ma.StringCast("/ip4/127.0.0.1/tcp/1")
quicLocal, _ := ma.StringCast("/ip4/127.0.0.1/udp/1/quic-v1")

testCases := []testCase{
{
@ -89,27 +89,24 @@ func (nmgr *natManager) Close() error {

func (nmgr *natManager) HasDiscoveredNAT() bool {
nmgr.natMx.RLock()
defer nmgr.natMx.RUnlock()
return nmgr.nat != nil
h := nmgr.nat != nil
nmgr.natMx.RUnlock()
return h
}

func (nmgr *natManager) background(ctx context.Context) {
defer nmgr.refCount.Done()

defer func() {
nmgr.natMx.Lock()
defer nmgr.natMx.Unlock()

if nmgr.nat != nil {
nmgr.nat.Close()
}
}()

discoverCtx, cancel := context.WithTimeout(ctx, 10*time.Second)
defer cancel()

natInstance, err := discoverNAT(discoverCtx)
if err != nil {
log.Info("DiscoverNAT error:", err)
nmgr.refCount.Done()
nmgr.natMx.Lock()
if nmgr.nat != nil {
nmgr.nat.Close()
}
nmgr.natMx.Unlock()
cancel()
return
}

@ -121,7 +118,6 @@ func (nmgr *natManager) background(ctx context.Context) {
// we need to sign up here to avoid missing some notifs
// before the NAT has been found.
nmgr.net.Notify((*nmgrNetNotifiee)(nmgr))
defer nmgr.net.StopNotify((*nmgrNetNotifiee)(nmgr))

nmgr.doSync() // sync one first.
for {
@ -129,6 +125,14 @@ func (nmgr *natManager) background(ctx context.Context) {
case <-nmgr.syncFlag:
nmgr.doSync() // sync when our listen addresses change.
case <-ctx.Done():
nmgr.refCount.Done()
nmgr.natMx.Lock()
if nmgr.nat != nil {
nmgr.nat.Close()
}
nmgr.natMx.Unlock()
cancel()
nmgr.net.StopNotify((*nmgrNetNotifiee)(nmgr))
return
}
}
@ -150,8 +154,8 @@ func (nmgr *natManager) doSync() {
var newAddresses []entry
for _, maddr := range nmgr.net.ListenAddresses() {
// Strip the IP
maIP, rest := ma.SplitFirst(maddr)
if maIP == nil || rest == nil {
maIP, rest, err := ma.SplitFirst(maddr)
if maIP == nil || rest == nil || err != nil {
continue
}

@ -168,8 +172,8 @@ func (nmgr *natManager) doSync() {
}

// Extract the port/protocol
proto, _ := ma.SplitFirst(rest)
if proto == nil {
proto, _, err := ma.SplitFirst(rest)
if proto == nil || err != nil {
continue
}

@ -195,9 +199,6 @@ func (nmgr *natManager) doSync() {
}
}

var wg sync.WaitGroup
defer wg.Wait()

// Close old mappings
for e, v := range nmgr.tracked {
if !v {
@ -217,15 +218,15 @@ func (nmgr *natManager) doSync() {

func (nmgr *natManager) GetMapping(addr ma.Multiaddr) ma.Multiaddr {
nmgr.natMx.Lock()
defer nmgr.natMx.Unlock()

if nmgr.nat == nil { // NAT not yet initialized
nmgr.natMx.Unlock()
return nil
}

var found bool
var proto int // ma.P_TCP or ma.P_UDP
transport, rest := ma.SplitFunc(addr, func(c ma.Component) bool {
transport, rest, err := ma.SplitFunc(addr, func(c ma.Component) bool {
if found {
return true
}
@ -233,13 +234,20 @@ func (nmgr *natManager) GetMapping(addr ma.Multiaddr) ma.Multiaddr {
found = proto == ma.P_TCP || proto == ma.P_UDP
return false
})
if err != nil {
nmgr.natMx.Unlock()
return nil
}

if !manet.IsThinWaist(transport) {
nmgr.natMx.Unlock()
return nil
}

naddr, err := manet.ToNetAddr(transport)
if err != nil {
log.Error("error parsing net multiaddr %q: %s", transport, err)
nmgr.natMx.Unlock()
return nil
}

@ -258,16 +266,19 @@ func (nmgr *natManager) GetMapping(addr ma.Multiaddr) ma.Multiaddr {
port = naddr.Port
protocol = "udp"
default:
nmgr.natMx.Unlock()
return nil
}

if !ip.IsGlobalUnicast() && !ip.IsUnspecified() {
// We only map global unicast & unspecified addresses ports, not broadcast, multicast, etc.
nmgr.natMx.Unlock()
return nil
}

extAddr, ok := nmgr.nat.GetMapping(protocol, port)
if !ok {
nmgr.natMx.Unlock()
return nil
}

@ -281,12 +292,14 @@ func (nmgr *natManager) GetMapping(addr ma.Multiaddr) ma.Multiaddr {
mappedMaddr, err := manet.FromNetAddr(mappedAddr)
if err != nil {
log.Errorf("mapped addr can't be turned into a multiaddr %q: %s", mappedAddr, err)
nmgr.natMx.Unlock()
return nil
}
extMaddr := mappedMaddr
if rest != nil {
extMaddr = ma.Join(extMaddr, rest)
}
nmgr.natMx.Unlock()
return extMaddr
}
@ -42,23 +42,32 @@ func TestMapping(t *testing.T) {
externalAddr := netip.AddrPortFrom(netip.AddrFrom4([4]byte{1, 2, 3, 4}), 4321)
// pretend that we have a TCP mapping
mockNAT.EXPECT().GetMapping("tcp", 1234).Return(externalAddr, true)
require.Equal(t, ma.StringCast("/ip4/1.2.3.4/tcp/4321"), m.GetMapping(ma.StringCast("/ip4/0.0.0.0/tcp/1234")))
m1, _ := ma.StringCast("/ip4/1.2.3.4/tcp/4321")
m2, _ := ma.StringCast("/ip4/0.0.0.0/tcp/1234")
require.Equal(t, m1, m.GetMapping(m2))

// pretend that we have a QUIC mapping
mockNAT.EXPECT().GetMapping("udp", 1234).Return(externalAddr, true)
require.Equal(t, ma.StringCast("/ip4/1.2.3.4/udp/4321/quic-v1"), m.GetMapping(ma.StringCast("/ip4/0.0.0.0/udp/1234/quic-v1")))
m3, _ := ma.StringCast("/ip4/1.2.3.4/udp/4321/quic-v1")
m4, _ := ma.StringCast("/ip4/0.0.0.0/udp/1234/quic-v1")
require.Equal(t, m3, m.GetMapping(m4))

// pretend that there's no mapping
mockNAT.EXPECT().GetMapping("tcp", 1234).Return(netip.AddrPort{}, false)
require.Nil(t, m.GetMapping(ma.StringCast("/ip4/0.0.0.0/tcp/1234")))
m5, _ := ma.StringCast("/ip4/0.0.0.0/tcp/1234")
require.Nil(t, m.GetMapping(m5))

// make sure this works for WebSocket addresses as well
mockNAT.EXPECT().GetMapping("tcp", 1234).Return(externalAddr, true)
require.Equal(t, ma.StringCast("/ip4/1.2.3.4/tcp/4321/ws"), m.GetMapping(ma.StringCast("/ip4/0.0.0.0/tcp/1234/ws")))
m6, _ := ma.StringCast("/ip4/1.2.3.4/tcp/4321/ws")
m7, _ := ma.StringCast("/ip4/0.0.0.0/tcp/1234/ws")
require.Equal(t, m6, m.GetMapping(m7))

// make sure this works for WebTransport addresses as well
mockNAT.EXPECT().GetMapping("udp", 1234).Return(externalAddr, true)
require.Equal(t, ma.StringCast("/ip4/1.2.3.4/udp/4321/quic-v1/webtransport"), m.GetMapping(ma.StringCast("/ip4/0.0.0.0/udp/1234/quic-v1/webtransport")))
m8, _ := ma.StringCast("/ip4/1.2.3.4/udp/4321/quic-v1/webtransport")
m9, _ := ma.StringCast("/ip4/0.0.0.0/udp/1234/quic-v1/webtransport")
require.Equal(t, m8, m.GetMapping(m9))
}

func TestAddAndRemoveListeners(t *testing.T) {
@ -77,7 +86,8 @@ func TestAddAndRemoveListeners(t *testing.T) {
added := make(chan struct{}, 1)
// add a TCP listener
mockNAT.EXPECT().AddMapping(gomock.Any(), "tcp", 1234).Do(func(context.Context, string, int) { added <- struct{}{} })
require.NoError(t, sw.Listen(ma.StringCast("/ip4/0.0.0.0/tcp/1234")))
m1, _ := ma.StringCast("/ip4/0.0.0.0/tcp/1234")
require.NoError(t, sw.Listen(m1))
select {
case <-added:
case <-time.After(time.Second):
@ -86,7 +96,8 @@ func TestAddAndRemoveListeners(t *testing.T) {

// add a QUIC listener
mockNAT.EXPECT().AddMapping(gomock.Any(), "udp", 1234).Do(func(context.Context, string, int) { added <- struct{}{} })
require.NoError(t, sw.Listen(ma.StringCast("/ip4/0.0.0.0/udp/1234/quic-v1")))
m2, _ := ma.StringCast("/ip4/0.0.0.0/udp/1234/quic-v1")
require.NoError(t, sw.Listen(m2))
select {
case <-added:
case <-time.After(time.Second):
@ -95,7 +106,8 @@ func TestAddAndRemoveListeners(t *testing.T) {

// remove the QUIC listener
mockNAT.EXPECT().RemoveMapping(gomock.Any(), "udp", 1234).Do(func(context.Context, string, int) { added <- struct{}{} })
sw.ListenClose(ma.StringCast("/ip4/0.0.0.0/udp/1234/quic-v1"))
m3, _ := ma.StringCast("/ip4/0.0.0.0/udp/1234/quic-v1")
sw.ListenClose(m3)
select {
case <-added:
case <-time.After(time.Second):
Some files were not shown because too many files have changed in this diff.