Mirror of https://github.com/0glabs/0g-storage-node.git, synced 2025-04-04 15:35:18 +00:00.

Compare commits (63 commits)
Commit SHA1s in this comparison:

74074dfa2f, 12538e4b6c, 2fd8ffc2ea, 4cf45149cb, cfe4b45c41, d43a616b56, 898350e271,
6a26c336e7, a915766840, 3d9aa8c940, 538afb00e1, 7ad3f717b4, 26cc19b92d, a3335eed82,
2272b5dbfd, 760d4b4a53, 91680f2e33, c9bca86add, 93f587c407, 1f71aadeec, 656a092cf8,
8014f51b6d, b0a9a415f7, bc6bcf857c, d15ef5ba3d, 9ce215b919, 40d435597a, bb74143ddc,
52f700c86e, 4e5b14c0a3, 3024771fb1, a56876eb1a, 64120399f1, 8790fe1d66, 910b5af1c7,
349e13e7fc, affa14e8b7, 5b8b8971ca, d3a2118985, 6b2420cac4, afa471e9ae, c5ddcc1f17,
4bb2fac98f, 37a601cef1, d322c38c11, 66544fed0c, 27366a5331, 1f4d56b645, d0aad154da,
b9e6431a4d, bfe434972d, f21d691812, 8f4dfff2f6, 40104de891, 0da3c374db, 2a24bbde18,
cfe05b6f00, c4845f9103, ce2b97f3c1, 96f846073f, e912522386, 4566eadb3e, 4b48d25fb4
.github/actions/setup-rust/action.yml (vendored, 4 lines changed)

@@ -2,11 +2,11 @@ name: Setup Rust (cache & toolchain)
 runs:
   using: composite
   steps:
-    - name: Install toolchain 1.75.0
+    - name: Install toolchain 1.78.0
       uses: actions-rs/toolchain@v1
       with:
         profile: minimal
-        toolchain: 1.75.0
+        toolchain: 1.78.0
         components: rustfmt, clippy

     - uses: Swatinem/rust-cache@v2
.github/workflows/tests.yml (vendored, 5 lines changed)

@@ -45,6 +45,11 @@ jobs:
         python-version: '3.9'
         cache: 'pip'

+    - name: Set up Go
+      uses: actions/setup-go@v4
+      with:
+        go-version: '1.22'
+
     - name: Install dependencies
       run: |
         python -m pip install --upgrade pip
.gitignore (vendored, 1 line changed)

@@ -6,3 +6,4 @@ tests/**/__pycache__
 tests/tmp/**
 .vscode/*.json
 /0g-storage-contracts-dev
+/run/.env
Cargo.lock (generated, 410 lines changed)

@@ -453,6 +453,28 @@ dependencies = [
 "trust-dns-resolver",
 ]

+[[package]]
+name = "async-stream"
+version = "0.3.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0b5a71a6f37880a80d1d7f19efd781e4b5de42c88f0722cc13bcb6cc2cfe8476"
+dependencies = [
+ "async-stream-impl",
+ "futures-core",
+ "pin-project-lite 0.2.14",
+]
+
+[[package]]
+name = "async-stream-impl"
+version = "0.3.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 2.0.68",
+]
+
 [[package]]
 name = "async-task"
 version = "4.7.1"

@@ -506,7 +528,7 @@ version = "0.16.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "fdb8867f378f33f78a811a8eb9bf108ad99430d7aad43315dd9319c827ef6247"
 dependencies = [
- "http",
+ "http 0.2.12",
 "log",
 "url",
 "wildmatch",

@@ -529,6 +551,53 @@ version = "1.3.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0"

+[[package]]
+name = "axum"
+version = "0.7.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3a6c9af12842a67734c9a2e355436e5d03b22383ed60cf13cd0c18fbfe3dcbcf"
+dependencies = [
+ "async-trait",
+ "axum-core",
+ "bytes",
+ "futures-util",
+ "http 1.2.0",
+ "http-body 1.0.1",
+ "http-body-util",
+ "itoa",
+ "matchit",
+ "memchr",
+ "mime",
+ "percent-encoding",
+ "pin-project-lite 0.2.14",
+ "rustversion",
+ "serde",
+ "sync_wrapper 1.0.2",
+ "tower",
+ "tower-layer",
+ "tower-service",
+]
+
+[[package]]
+name = "axum-core"
+version = "0.4.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "09f2bd6146b97ae3359fa0cc6d6b376d9539582c7b4220f041a33ec24c226199"
+dependencies = [
+ "async-trait",
+ "bytes",
+ "futures-util",
+ "http 1.2.0",
+ "http-body 1.0.1",
+ "http-body-util",
+ "mime",
+ "pin-project-lite 0.2.14",
+ "rustversion",
+ "sync_wrapper 1.0.2",
+ "tower-layer",
+ "tower-service",
+]
+
 [[package]]
 name = "backtrace"
 version = "0.3.73"

@@ -568,6 +637,12 @@ version = "0.21.7"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567"

+[[package]]
+name = "base64"
+version = "0.22.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6"
+
 [[package]]
 name = "base64ct"
 version = "1.6.0"

@@ -1104,6 +1179,45 @@ dependencies = [
 "yaml-rust",
 ]

+[[package]]
+name = "console-api"
+version = "0.8.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8030735ecb0d128428b64cd379809817e620a40e5001c54465b99ec5feec2857"
+dependencies = [
+ "futures-core",
+ "prost 0.13.4",
+ "prost-types 0.13.4",
+ "tonic",
+ "tracing-core",
+]
+
+[[package]]
+name = "console-subscriber"
+version = "0.4.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6539aa9c6a4cd31f4b1c040f860a1eac9aa80e7df6b05d506a6e7179936d6a01"
+dependencies = [
+ "console-api",
+ "crossbeam-channel",
+ "crossbeam-utils",
+ "futures-task",
+ "hdrhistogram",
+ "humantime",
+ "hyper-util",
+ "prost 0.13.4",
+ "prost-types 0.13.4",
+ "serde",
+ "serde_json",
+ "thread_local",
+ "tokio",
+ "tokio-stream",
+ "tonic",
+ "tracing",
+ "tracing-core",
+ "tracing-subscriber",
+]
+
 [[package]]
 name = "const-hex"
 version = "1.12.0"

@@ -1157,6 +1271,17 @@ dependencies = [
 "serde_json",
 ]

+[[package]]
+name = "contract-wrapper"
+version = "0.1.0"
+dependencies = [
+ "ethers",
+ "serde",
+ "serde_json",
+ "tokio",
+ "tracing",
+]
+
 [[package]]
 name = "convert_case"
 version = "0.6.0"

@@ -2286,7 +2411,7 @@ dependencies = [
 "futures-timer",
 "futures-util",
 "hashers",
- "http",
+ "http 0.2.12",
 "instant",
 "jsonwebtoken",
 "once_cell",

@@ -2905,7 +3030,26 @@ dependencies = [
 "futures-core",
 "futures-sink",
 "futures-util",
- "http",
+ "http 0.2.12",
+ "indexmap 2.2.6",
+ "slab",
+ "tokio",
+ "tokio-util 0.7.11",
+ "tracing",
+]
+
+[[package]]
+name = "h2"
+version = "0.4.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ccae279728d634d083c00f6099cb58f01cc99c145b84b8be2f6c74618d79922e"
+dependencies = [
+ "atomic-waker",
+ "bytes",
+ "fnv",
+ "futures-core",
+ "futures-sink",
+ "http 1.2.0",
 "indexmap 2.2.6",
 "slab",
 "tokio",

@@ -3004,6 +3148,19 @@ dependencies = [
 "tokio-util 0.6.10",
 ]

+[[package]]
+name = "hdrhistogram"
+version = "7.5.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "765c9198f173dd59ce26ff9f95ef0aafd0a0fe01fb9d72841bc5066a4c06511d"
+dependencies = [
+ "base64 0.21.7",
+ "byteorder",
+ "flate2",
+ "nom",
+ "num-traits",
+]
+
 [[package]]
 name = "heck"
 version = "0.3.3"

@@ -3125,6 +3282,17 @@ dependencies = [
 "itoa",
 ]

+[[package]]
+name = "http"
+version = "1.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f16ca2af56261c99fba8bac40a10251ce8188205a4c448fbb745a2e4daa76fea"
+dependencies = [
+ "bytes",
+ "fnv",
+ "itoa",
+]
+
 [[package]]
 name = "http-body"
 version = "0.4.6"

@@ -3132,7 +3300,30 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2"
 dependencies = [
 "bytes",
- "http",
+ "http 0.2.12",
+ "pin-project-lite 0.2.14",
+]
+
+[[package]]
+name = "http-body"
+version = "1.0.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184"
+dependencies = [
+ "bytes",
+ "http 1.2.0",
+]
+
+[[package]]
+name = "http-body-util"
+version = "0.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "793429d76616a256bcb62c2a2ec2bed781c8307e797e2598c50010f2bee2544f"
+dependencies = [
+ "bytes",
+ "futures-util",
+ "http 1.2.0",
+ "http-body 1.0.1",
 "pin-project-lite 0.2.14",
 ]

@@ -3164,9 +3355,9 @@ dependencies = [
 "futures-channel",
 "futures-core",
 "futures-util",
- "h2",
+ "h2 0.3.26",
- "http",
+ "http 0.2.12",
- "http-body",
+ "http-body 0.4.6",
 "httparse",
 "httpdate",
 "itoa",

@@ -3178,14 +3369,35 @@ dependencies = [
 "want",
 ]

+[[package]]
+name = "hyper"
+version = "1.5.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "256fb8d4bd6413123cc9d91832d78325c48ff41677595be797d90f42969beae0"
+dependencies = [
+ "bytes",
+ "futures-channel",
+ "futures-util",
+ "h2 0.4.7",
+ "http 1.2.0",
+ "http-body 1.0.1",
+ "httparse",
+ "httpdate",
+ "itoa",
+ "pin-project-lite 0.2.14",
+ "smallvec",
+ "tokio",
+ "want",
+]
+
 [[package]]
 name = "hyper-rustls"
 version = "0.23.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "1788965e61b367cd03a62950836d5cd41560c3577d90e40e0819373194d1661c"
 dependencies = [
- "http",
+ "http 0.2.12",
- "hyper",
+ "hyper 0.14.29",
 "log",
 "rustls 0.20.9",
 "rustls-native-certs",

@@ -3201,8 +3413,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "ec3efd23720e2049821a693cbc7e65ea87c72f1c58ff2f9522ff332b1491e590"
 dependencies = [
 "futures-util",
- "http",
+ "http 0.2.12",
- "hyper",
+ "hyper 0.14.29",
 "rustls 0.21.12",
 "tokio",
 "tokio-rustls 0.24.1",

@@ -3216,12 +3428,25 @@ checksum = "6eea26c5d0b6ab9d72219f65000af310f042a740926f7b2fa3553e774036e2e7"
 dependencies = [
 "derive_builder",
 "dns-lookup",
- "hyper",
+ "hyper 0.14.29",
 "tokio",
 "tower-service",
 "tracing",
 ]

+[[package]]
+name = "hyper-timeout"
+version = "0.5.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2b90d566bffbce6a75bd8b09a05aa8c2cb1fabb6cb348f8840c9e4c90a0d83b0"
+dependencies = [
+ "hyper 1.5.2",
+ "hyper-util",
+ "pin-project-lite 0.2.14",
+ "tokio",
+ "tower-service",
+]
+
 [[package]]
 name = "hyper-tls"
 version = "0.5.0"

@@ -3229,12 +3454,31 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905"
 dependencies = [
 "bytes",
- "hyper",
+ "hyper 0.14.29",
 "native-tls",
 "tokio",
 "tokio-native-tls",
 ]

+[[package]]
+name = "hyper-util"
+version = "0.1.10"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "df2dcfbe0677734ab2f3ffa7fa7bfd4706bfdc1ef393f2ee30184aed67e631b4"
+dependencies = [
+ "bytes",
+ "futures-channel",
+ "futures-util",
+ "http 1.2.0",
+ "http-body 1.0.1",
+ "hyper 1.5.2",
+ "pin-project-lite 0.2.14",
+ "socket2 0.5.7",
+ "tokio",
+ "tower-service",
+ "tracing",
+]
+
 [[package]]
 name = "iana-time-zone"
 version = "0.1.60"

@@ -3586,7 +3830,7 @@ dependencies = [
 "futures-timer",
 "futures-util",
 "gloo-net",
- "http",
+ "http 0.2.12",
 "jsonrpsee-core",
 "jsonrpsee-types",
 "pin-project 1.1.5",

@@ -3615,7 +3859,7 @@ dependencies = [
 "futures-timer",
 "futures-util",
 "globset",
- "hyper",
+ "hyper 0.14.29",
 "jsonrpsee-types",
 "lazy_static",
 "parking_lot 0.12.3",

@@ -3638,7 +3882,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "5fc1d8c0e4f455c47df21f8a29f4bbbcb75eb71bfee919b92e92502b48358392"
 dependencies = [
 "async-trait",
- "hyper",
+ "hyper 0.14.29",
 "hyper-rustls 0.23.2",
 "jsonrpsee-core",
 "jsonrpsee-types",

@@ -3658,7 +3902,7 @@ checksum = "bdd69efeb3ce2cba767f126872f4eeb4624038a29098e75d77608b2b4345ad03"
 dependencies = [
 "futures-channel",
 "futures-util",
- "hyper",
+ "hyper 0.14.29",
 "jsonrpsee-core",
 "jsonrpsee-types",
 "serde",

@@ -4596,9 +4840,9 @@ dependencies = [

 [[package]]
 name = "log"
-version = "0.4.21"
+version = "0.4.22"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "90ed8c1e510134f979dbc4f070f87d4313098b704861a105fe34231c70a3901c"
+checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24"
 dependencies = [
 "serde",
 "value-bag",

@@ -4713,6 +4957,12 @@ version = "0.1.10"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "2532096657941c2fea9c289d370a250971c689d4f143798ff67113ec042024a5"

+[[package]]
+name = "matchit"
+version = "0.7.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94"
+
 [[package]]
 name = "md-5"
 version = "0.10.6"

@@ -4778,6 +5028,7 @@ dependencies = [
 "async-trait",
 "blake2",
 "contract-interface",
+ "contract-wrapper",
 "ethereum-types 0.14.1",
 "ethers",
 "hex",

@@ -6025,6 +6276,16 @@ dependencies = [
 "prost-derive 0.10.1",
 ]

+[[package]]
+name = "prost"
+version = "0.13.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2c0fef6c4230e4ccf618a35c59d7ede15dea37de8427500f50aff708806e42ec"
+dependencies = [
+ "bytes",
+ "prost-derive 0.13.4",
+]
+
 [[package]]
 name = "prost-build"
 version = "0.9.0"

@@ -6106,6 +6367,19 @@ dependencies = [
 "syn 1.0.109",
 ]

+[[package]]
+name = "prost-derive"
+version = "0.13.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "157c5a9d7ea5c2ed2d9fb8f495b64759f7816c7eaea54ba3978f0d63000162e3"
+dependencies = [
+ "anyhow",
+ "itertools 0.13.0",
+ "proc-macro2",
+ "quote",
+ "syn 2.0.68",
+]
+
 [[package]]
 name = "prost-types"
 version = "0.9.0"

@@ -6126,6 +6400,15 @@ dependencies = [
 "prost 0.10.4",
 ]

+[[package]]
+name = "prost-types"
+version = "0.13.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cc2f1e56baa61e93533aebc21af4d2134b70f66275e0fcdf3cbe43d77ff7e8fc"
+dependencies = [
+ "prost 0.13.4",
+]
+
 [[package]]
 name = "protobuf"
 version = "2.28.0"

@@ -6159,8 +6442,8 @@ dependencies = [
 "dns-lookup",
 "futures-core",
 "futures-util",
- "http",
+ "http 0.2.12",
- "hyper",
+ "hyper 0.14.29",
 "hyper-system-resolver",
 "pin-project-lite 0.2.14",
 "thiserror",

@@ -6403,10 +6686,10 @@ dependencies = [
 "encoding_rs",
 "futures-core",
 "futures-util",
- "h2",
+ "h2 0.3.26",
- "http",
+ "http 0.2.12",
- "http-body",
+ "http-body 0.4.6",
- "hyper",
+ "hyper 0.14.29",
 "hyper-rustls 0.24.2",
 "hyper-tls",
 "ipnet",

@@ -6422,7 +6705,7 @@ dependencies = [
 "serde",
 "serde_json",
 "serde_urlencoded",
- "sync_wrapper",
+ "sync_wrapper 0.1.2",
 "system-configuration",
 "tokio",
 "tokio-native-tls",

@@ -6594,6 +6877,7 @@ dependencies = [
 "metrics",
 "miner",
 "network",
+ "parking_lot 0.12.3",
 "serde",
 "serde_json",
 "shared_types",

@@ -7496,6 +7780,12 @@ version = "0.1.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160"

+[[package]]
+name = "sync_wrapper"
+version = "1.0.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0bf256ce5efdfa370213c1dabab5935a12e49f2c58d15e9eac2870d3b4f27263"
+
 [[package]]
 name = "synstructure"
 version = "0.12.6"

@@ -7728,6 +8018,7 @@ dependencies = [
 "signal-hook-registry",
 "socket2 0.5.7",
 "tokio-macros",
+ "tracing",
 "windows-sys 0.48.0",
 ]

@@ -7785,9 +8076,9 @@ dependencies = [

 [[package]]
 name = "tokio-stream"
-version = "0.1.15"
+version = "0.1.17"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "267ac89e0bec6e691e5813911606935d77c476ff49024f98abcea3e7b15e37af"
+checksum = "eca58d7bba4a75707817a2c44174253f9236b2d5fbd055602e9d5c07c139a047"
 dependencies = [
 "futures-core",
 "pin-project-lite 0.2.14",

@@ -7894,6 +8185,62 @@ dependencies = [
 "winnow 0.6.13",
 ]

+[[package]]
+name = "tonic"
+version = "0.12.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "877c5b330756d856ffcc4553ab34a5684481ade925ecc54bcd1bf02b1d0d4d52"
+dependencies = [
+ "async-stream",
+ "async-trait",
+ "axum",
+ "base64 0.22.1",
+ "bytes",
+ "h2 0.4.7",
+ "http 1.2.0",
+ "http-body 1.0.1",
+ "http-body-util",
+ "hyper 1.5.2",
+ "hyper-timeout",
+ "hyper-util",
+ "percent-encoding",
+ "pin-project 1.1.5",
+ "prost 0.13.4",
+ "socket2 0.5.7",
+ "tokio",
+ "tokio-stream",
+ "tower",
+ "tower-layer",
+ "tower-service",
+ "tracing",
+]
+
+[[package]]
+name = "tower"
+version = "0.4.13"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c"
+dependencies = [
+ "futures-core",
+ "futures-util",
+ "indexmap 1.9.3",
+ "pin-project 1.1.5",
+ "pin-project-lite 0.2.14",
+ "rand 0.8.5",
+ "slab",
+ "tokio",
+ "tokio-util 0.7.11",
+ "tower-layer",
+ "tower-service",
+ "tracing",
+]
+
+[[package]]
+name = "tower-layer"
+version = "0.3.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "121c2a6cda46980bb0fcd1647ffaf6cd3fc79a013de288782836f6df9c48780e"
+
 [[package]]
 name = "tower-service"
 version = "0.3.2"

@@ -8123,7 +8470,7 @@ dependencies = [
 "byteorder",
 "bytes",
 "data-encoding",
- "http",
+ "http 0.2.12",
 "httparse",
 "log",
 "rand 0.8.5",

@@ -8921,6 +9268,8 @@ dependencies = [
 "chunk_pool",
 "clap",
 "config",
+ "console-subscriber",
+ "contract-wrapper",
 "ctrlc",
 "duration-str",
 "error-chain",

@@ -8931,6 +9280,7 @@ dependencies = [
 "futures",
 "itertools 0.10.5",
 "libp2p",
+ "log",
 "log_entry_sync",
 "metrics",
 "miner",

@@ -8950,6 +9300,8 @@ dependencies = [
 "toml 0.5.11",
 "tracing",
 "tracing-appender",
+ "tracing-core",
+ "tracing-log",
 "tracing-subscriber",
 "zgs_spec",
 "zgs_version",
DockerfileStandard (new file, 6 lines):

FROM rust
VOLUME ["/data"]
COPY . .
RUN apt-get update && apt-get install -y clang cmake build-essential pkg-config libssl-dev
RUN cargo build --release
CMD ["./target/release/zgs_node", "--config", "run/config-testnet-standard.toml", "--log", "run/log_config"]
DockerfileTurbo (new file, 6 lines):

FROM rust
VOLUME ["/data"]
COPY . .
RUN apt-get update && apt-get install -y clang cmake build-essential pkg-config libssl-dev
RUN cargo build --release
CMD ["./target/release/zgs_node", "--config", "run/config-testnet-turbo.toml", "--log", "run/log_config"]
README.md (74 lines changed)

@@ -2,69 +2,31 @@

 ## Overview

-0G Storage is the storage layer for the ZeroGravity data availability (DA) system. The 0G Storage layer holds three important features:
+0G Storage is a decentralized data storage system designed to address the challenges of high-throughput and low-latency data storage and retrieval, particularly for areas such as AI.

-* Built-in - It is natively built into the ZeroGravity DA system for data storage and retrieval.
+## System Architecture
-* General purpose - It is designed to support atomic transactions, mutable kv stores as well as archive log systems to enable wide range of applications with various data types.
-* Incentive - Instead of being just a decentralized database, 0G Storage introduces PoRA mining algorithm to incentivize storage network participants.

-To dive deep into the technical details, continue reading [0G Storage Spec.](docs/)
+0G Storage consists of two main components:

-## Integration
+1. **Data Publishing Lane**: Ensures fast Merkle tree data root commitment and verification through 0G Chain.
+2. **Data Storage Lane**: Manages large data transfers and storage using an erasure-coding mechanism for redundancy and sharding for parallel processing.

-We provide a [SDK](https://github.com/0glabs/0g-js-storage-sdk) for users to easily integrate 0G Storage in their applications with the following features:
+Across the two lanes, 0G Storage supports the following features:

-* File Merkle Tree Class
+* **General Purpose Design**: Supports atomic transactions, mutable key-value stores, and archive log systems, enabling a wide range of applications with various data types.
-* Flow Contract Types
+* **Validated Incentivization**: Utilizes the PoRA (Proof of Random Access) mining algorithm to mitigate the data outsourcing issue and to ensure rewards are distributed to nodes who contribute to the storage network.
-* RPC methods support
-* File upload
-* Support browser environment
-* Tests for different environments (In Progress)
-* File download (In Progress)

-## Deployment
+For in-depth technical details about 0G Storage, please read our [Intro to 0G Storage](https://docs.0g.ai/0g-storage).

-Please refer to [Deployment](docs/run.md) page for detailed steps to compile and start a 0G Storage node.
+## Documentation

-## Test
+- If you want to run a node, please refer to the [Running a Node](https://docs.0g.ai/run-a-node/storage-node) guide.
+- If you want to conduct local testing, please refer to [Onebox Testing](https://github.com/0glabs/0g-storage-node/blob/main/docs/onebox-test.md) guide.
+- If you want to build a project using 0G storage, please refer to the [0G Storage SDK](https://docs.0g.ai/build-with-0g/storage-sdk) guide.

-### Prerequisites
+## Support and Additional Resources
+We want to do everything we can to help you be successful while working on your contribution and projects. Here you'll find various resources and communities that may help you complete a project or contribute to 0G.

-* Required python version: 3.8, 3.9, 3.10, higher version is not guaranteed (e.g. failed to install `pysha3`).
+### Communities
-* Install dependencies under root folder: `pip3 install -r requirements.txt`
+- [0G Telegram](https://t.me/web3_0glabs)
+- [0G Discord](https://discord.com/invite/0glabs)
-### Dependencies
-
-Python test framework will launch blockchain fullnodes at local for storage node to interact with. There are 2 kinds of fullnodes supported:
-
-* Conflux eSpace node (by default).
-* BSC node (geth).
-
-For Conflux eSpace node, the test framework will automatically compile the binary at runtime, and copy the binary to `tests/tmp` folder. For BSC node, the test framework will automatically download the latest version binary from [github](https://github.com/bnb-chain/bsc/releases) to `tests/tmp` folder.
-
-Alternatively, you could also manually copy specific version binaries (conflux or geth) to the `tests/tmp` folder. Note, do **NOT** copy released conflux binary on github, since block height of some CIPs are hardcoded.
-
-For testing, it's also dependent on the following repos:
-
-* [0G Storage Contract](https://github.com/0glabs/0g-storage-contracts): It essentially provides two abi interfaces for 0G Storage Node to interact with the on-chain contracts.
-* ZgsFlow: It contains apis to submit chunk data.
-* PoraMine: It contains apis to submit PoRA answers.
-* [0G Storage Client](https://github.com/0glabs/0g-storage-client): It is used to interact with certain 0G Storage Nodes to upload/download files.
-
-### Run Tests
-
-Go to the `tests` folder and run the following command to run all tests:
-
-```
-python test_all.py
-```
-
-or, run any single test, e.g.
-
-```
-python sync_test.py
-```
-
-## Contributing
-
-To make contributions to the project, please follow the guidelines [here](contributing.md).
common/contract-wrapper/Cargo.toml (new file, 17 lines):

[package]
name = "contract-wrapper"
version = "0.1.0"
edition = "2021"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
tokio = { version = "1.28", features = ["macros"] }
ethers = "2.0"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
tracing = "0.1.35"
# or `tracing` if you prefer

[features]
dev = []
common/contract-wrapper/src/lib.rs (new file, 204 lines):

use ethers::{
    abi::Detokenize,
    contract::ContractCall,
    providers::{Middleware, ProviderError},
    types::{TransactionReceipt, U256},
};
use serde::Deserialize;
use std::{sync::Arc, time::Duration};
use tokio::time::sleep;
use tracing::{debug, info};

/// The result of a single submission attempt.
#[derive(Debug)]
pub enum SubmissionAction {
    Success(TransactionReceipt),
    /// Generic "retry" signal, but we still need to know if it's "mempool/timeout" or something else.
    /// We'll parse the error string or have a separate reason in a real app.
    Retry(String),
    Error(String),
}

/// Configuration for submission retries, gas price, etc.
#[derive(Clone, Copy, Debug, Deserialize)]
pub struct SubmitConfig {
    /// If `Some`, use this gas price for the first attempt.
    /// If `None`, fetch the current network gas price.
    pub(crate) initial_gas_price: Option<U256>,
    /// If `Some`, clamp increased gas price to this limit.
    /// If `None`, do not bump gas for mempool/timeout errors.
    pub(crate) max_gas_price: Option<U256>,
    /// Gas limit of the transaction
    pub(crate) max_gas: Option<U256>,
    /// Factor by which to multiply the gas price on each mempool/timeout error.
    /// E.g. if factor=11 => a 10% bump => newGas = (gas * factor) / 10
    pub(crate) gas_increase_factor: Option<u64>,
    /// The maximum number of gas bumps (for mempool/timeout). If `max_gas_price` is set,
    /// we typically rely on clamping. But you can still cap the number of bumps if you want.
    pub(crate) max_retries: Option<usize>,
    /// Seconds to wait between attempts.
    pub(crate) interval_secs: Option<u64>,
}

const DEFAULT_INTERVAL_SECS: u64 = 2;
const DEFAULT_MAX_RETRIES: usize = 5;

impl Default for SubmitConfig {
    fn default() -> Self {
        Self {
            initial_gas_price: None,
            max_gas_price: None,
            max_gas: None,
            gas_increase_factor: Some(11), // implies 10% bump if we do (gas*11)/10
            max_retries: Some(DEFAULT_MAX_RETRIES),
            interval_secs: Some(DEFAULT_INTERVAL_SECS),
        }
    }
}

/// A simple function to detect if the retry is from a mempool or timeout error.
/// Right now, we rely on `submit_once` returning `SubmissionAction::Retry` for ANY error
/// that is "retryable," so we must parse the error string from `submit_once`, or
/// store that string. Another approach is to return an enum with a reason from `submit_once`.
fn is_mempool_or_timeout_error(error_str: String) -> bool {
    let lower = error_str.to_lowercase();
    lower.contains("mempool") || lower.contains("timeout")
}

/// A function that performs a single submission attempt:
/// - Sends the transaction
/// - Awaits the receipt with limited internal retries
/// - Returns a `SubmissionAction` indicating success, retry, or error.
pub async fn submit_once<M, T>(call: ContractCall<M, T>) -> SubmissionAction
where
    M: Middleware + 'static,
    T: Detokenize,
{
    let pending_tx = match call.send().await {
        Ok(tx) => tx,
        Err(e) => {
            let msg = e.to_string();
            if is_mempool_or_timeout_error(msg.clone()) {
                return SubmissionAction::Retry(format!("mempool/timeout: {:?}", e));
            }

            debug!("Error sending transaction: {:?}", msg);
            return SubmissionAction::Error(format!("Transaction failed: {}", msg));
        }
    };

    debug!("Signed tx hash: {:?}", pending_tx.tx_hash());

    let receipt_result = pending_tx.await;
    match receipt_result {
        Ok(Some(receipt)) => {
            info!("Transaction mined, receipt: {:?}", receipt);
            SubmissionAction::Success(receipt)
        }
        Ok(None) => {
            debug!("Transaction probably timed out; retrying");
            SubmissionAction::Retry("timeout, receipt is none".to_string())
        }
        Err(ProviderError::HTTPError(e)) => {
            debug!("HTTP error retrieving receipt: {:?}", e);
            SubmissionAction::Retry(format!("http error: {:?}", e))
        }
        Err(e) => SubmissionAction::Error(format!("Transaction unrecoverable: {:?}", e)),
    }
}

/// Increase gas price using integer arithmetic: (gp * factor_num) / factor_den
fn increase_gas_price_u256(gp: U256, factor_num: u64, factor_den: u64) -> U256 {
    let num = U256::from(factor_num);
    let den = U256::from(factor_den);
    gp.checked_mul(num).unwrap_or(U256::MAX) / den
}

/// A higher-level function that wraps `submit_once` in a gas-price–adjustment loop,
/// plus a global timeout, plus distinct behavior for mempool/timeout vs other errors.
pub async fn submit_with_retry<M, T>(
    mut call: ContractCall<M, T>,
    config: &SubmitConfig,
    middleware: Arc<M>,
) -> Result<TransactionReceipt, String>
where
    M: Middleware + 'static,
    T: Detokenize,
{
    if let Some(max_gas) = config.max_gas {
        call = call.gas(max_gas);
    }
    let mut gas_price = if let Some(gp) = config.initial_gas_price {
        gp
    } else {
        middleware
            .get_gas_price()
            .await
            .map_err(|e| format!("Failed to fetch gas price: {:?}", e))?
    };

    // If no factor is set, default to 11 => 10% bump
    let factor_num = config.gas_increase_factor.unwrap_or(11);
    let factor_den = 10u64;

    // Two counters: one for gas bumps, one for non-gas retries
    let mut non_gas_retries = 0;
    let max_retries = config.max_retries.unwrap_or(DEFAULT_MAX_RETRIES);

    loop {
        // Set gas price on the call
        call = call.gas_price(gas_price);

        match submit_once(call.clone()).await {
            SubmissionAction::Success(receipt) => {
                return Ok(receipt);
            }
            SubmissionAction::Retry(error_str) => {
                // We need to figure out if it's "mempool/timeout" or some other reason.
                // Right now, we don't have the error string from `submit_once` easily,
                // so let's assume we store it or we do a separate function that returns it.
                // For simplicity, let's do a hack: let's define a placeholder "error_str" and parse it.
                // In reality, you'd likely return `SubmissionAction::Retry(reason_str)` from `submit_once`.
                if is_mempool_or_timeout_error(error_str.clone()) {
                    // Mempool/timeout error
                    if let Some(max_gp) = config.max_gas_price {
                        if gas_price >= max_gp {
                            return Err(format!(
                                "Exceeded max gas price: {}, with error msg: {}",
                                max_gp, error_str
                            ));
                        }
                        // Bump the gas
                        let new_price = increase_gas_price_u256(gas_price, factor_num, factor_den);
                        gas_price = std::cmp::min(new_price, max_gp);
                        debug!("Bumping gas price to {}", gas_price);
                    } else {
                        // No maxGasPrice => we do NOT bump => fail
                        return Err(
                            "Mempool/timeout error, no maxGasPrice set => aborting".to_string()
                        );
                    }
                } else {
                    // Non-gas error => increment nonGasRetries
                    non_gas_retries += 1;
                    if non_gas_retries > max_retries {
                        return Err(format!("Exceeded non-gas retries: {}", max_retries));
                    }
                    debug!(
                        "Non-gas retry #{} (same gas price: {})",
                        non_gas_retries, gas_price
                    );
                }
            }
            SubmissionAction::Error(e) => {
                return Err(e);
            }
        }

        // Sleep between attempts
        sleep(Duration::from_secs(
            config.interval_secs.unwrap_or(DEFAULT_INTERVAL_SECS),
        ))
        .await;
    }
}
@@ -7,7 +7,7 @@
 //! block processing time).
 //! - `IncCounter`: used to represent an ideally ever-growing, never-shrinking integer (e.g.,
 //! number of block processing requests).
-//! - `IntGauge`: used to represent an varying integer (e.g., number of attestations per block).
+//! - `IntGauge`: used to represent a varying integer (e.g., number of attestations per block).
 //!
 //! ## Important
 //!
@@ -5,7 +5,7 @@ pub const TB: usize = 1024 * GB;

 pub const BYTES_PER_SECTOR: usize = 256;
 pub const BYTES_PER_SEAL: usize = 4 * KB;
-pub const BYTES_PER_SCRATCHPAD: usize = 64 * KB;
+pub const BYTES_PER_SCRATCHPAD: usize = 16 * KB;
 pub const BYTES_PER_LOAD: usize = 256 * KB;
 pub const BYTES_PER_PRICING: usize = 8 * GB;
 pub const BYTES_PER_MAX_MINING_RANGE: usize = 8 * TB;
@@ -11,7 +11,7 @@ pub fn unused_tcp_port() -> Result<u16, String> {
     unused_port(Transport::Tcp)
 }

-/// A convenience function for `unused_port(Transport::Tcp)`.
+/// A convenience function for `unused_port(Transport::Udp)`.
 pub fn unused_udp_port() -> Result<u16, String> {
     unused_port(Transport::Udp)
 }
@@ -4,7 +4,7 @@

 ### Checks

-* [ ] I've made sure the lint is passing in this PR.
+* [ ] I've made sure the linter is passing in this PR.
 * [ ] I've made sure the tests are passing. Note that there might be a few flaky tests, in that case, please comment that they are not relevant.
 * [ ] Testing Strategy
 * [ ] Unit tests
@@ -4,15 +4,15 @@

 ZeroGravity system consists of a data availability layer (0G DA) on top of a decentralized storage system (0G Storage). There is a separate consensus network that is part of both the 0G DA and the 0G Storage. For 0G Storage, the consensus is responsible for determining the ordering of the uploaded data blocks, realizing the storage mining verification and the corresponding incentive mechanism through smart contracts.

-Figure 1 illustrates the architecture of the 0G system. When a data block enters the 0G DA, it is first erasure coded and organized into multiple consecutive chunks through erasure coding. The merkle root as a commitment of the encoded data block is then submitted to the consensus layer to keep the order of the data entering the system. The chunks are then dispersed to different storage nodes in 0G Storage where the data may be further replicated to other nodes depending on the storage fee that the user pays. The storage nodes periodically participate the mining process by interacting with the consensus network to accrue rewards from the system.
+Figure 1 illustrates the architecture of the 0G system. When a data block enters the 0G DA, it is first erasure coded and organized into multiple consecutive chunks through erasure coding. The merkle root as a commitment of the encoded data block is then submitted to the consensus layer to keep the order of the data entering the system. The chunks are then dispersed to different storage nodes in 0G Storage where the data may be further replicated to other nodes depending on the storage fee that the user pays. The storage nodes periodically participate in the mining process by interacting with the consensus network to accrue rewards from the system.

-<figure><img src="../../.gitbook/assets/zg-storage-architecture.png" alt=""><figcaption><p>Figure 1. The Architecture of 0G System</p></figcaption></figure>
+<figure><img src="../.gitbook/assets/zg-storage-architecture.png" alt=""><figcaption><p>Figure 1. The Architecture of 0G System</p></figcaption></figure>

 ## 0G Storage

 0G Storage employs layered design targeting to support different types of decentralized applications. Figure 2 shows the overview of the full stack layers of 0G Storage.

-<figure><img src="../../.gitbook/assets/zg-storage-layer.png" alt=""><figcaption><p>Figure 2. Full Stack Solution of 0G Storage</p></figcaption></figure>
+<figure><img src="../.gitbook/assets/zg-storage-layer.png" alt=""><figcaption><p>Figure 2. Full Stack Solution of 0G Storage</p></figcaption></figure>

 The lowest is a log layer which is a decentralized system. It consists of multiple storage nodes to form a storage network. The network has built-in incentive mechanism to reward the data storage. The ordering of the uploaded data is guaranteed by a sequencing mechanism to provide a log-based semantics and abstraction. This layer is used to store unstructured raw data for permanent persistency.

@@ -1,8 +1,8 @@
 # Mining Reward

-0G Storage creates pricing segments every 8 GB of data chunks over the data flow. Each pricing segment is associated with an Endowment Pool and a Reward Pool. The Endowment Pool collects the storage endowments of all the data chunks belongs to this pricing segment and releases a fixed ratio of balance to the Reward Pool every second. The rate of reward release is set to 4% per year.
+0G Storage creates pricing segments every 8 GB of data chunks over the data flow. Each pricing segment is associated with an Endowment Pool and a Reward Pool. The Endowment Pool collects the storage endowments of all the data chunks belong to this pricing segment and releases a fixed ratio of balance to the Reward Pool every second. The rate of reward release is set to 4% per year.

-The mining reward is paid to miners for providing data service. Miners receive mining reward when submit the first legitimate PoRA for a mining epoch to 0G Storage contract. The mining reward consists of two parts:
+The mining reward is paid to miners for providing data service. Miners receive mining reward when submit the first legitimate PoRA for a mining epoch to 0G Storage contract.

 The mining reward consists of two parts:

@ -2,7 +2,7 @@
|
|||||||
|
|
||||||
The ZeroGravity network adopts a Proof of Random Access (PoRA) mechanism to incentivize miners to store data. By requiring miners to answer randomly produced queries to archived data chunks, the PoRA mechanism establishes the relation between mining proof generation power and data storage. Miners answer the queries repeatedly and computes an output digest for each loaded chunk until find a digest that satisfies the mining difficulty (i.e., has enough leading zeros). PoRA will stress the miners' disk I/O and reduce their capability to respond user queries. So 0G Storage adopts intermittent mining, in which a mining epoch starts with a block generation at a specific block height on the host chain and stops when a valid PoRA is submitted to the 0G Storage contract.
|
The ZeroGravity network adopts a Proof of Random Access (PoRA) mechanism to incentivize miners to store data. By requiring miners to answer randomly produced queries to archived data chunks, the PoRA mechanism establishes the relation between mining proof generation power and data storage. Miners answer the queries repeatedly and computes an output digest for each loaded chunk until find a digest that satisfies the mining difficulty (i.e., has enough leading zeros). PoRA will stress the miners' disk I/O and reduce their capability to respond user queries. So 0G Storage adopts intermittent mining, in which a mining epoch starts with a block generation at a specific block height on the host chain and stops when a valid PoRA is submitted to the 0G Storage contract.
|
||||||
|
|
||||||
In a strawman design, a PoRA iteration consists of a computing stage and a loading stage. In the computing stage, a miner computes a random recall position (the universal offset in the flow) based on an arbitrary picked random nonce and a mining status read from the host chain. In the loading stage, a miner loads the archived data chunks at the given recall position, and computes output digest by hashing the tuple of mining status and the data chunks. If the output digest satisfies the target difficulty, the miner can construct a legitimate PoRA consists of the chosen random nonce, the loaded data chunk and the proof for the correctness of data chunk to the mining contract.
|
In a strawman design, a PoRA iteration consists of a computing stage and a loading stage. In the computing stage, a miner computes a random recall position (the universal offset in the flow) based on an arbitrary picked random nonce and a mining status read from the host chain. In the loading stage, a miner loads the archived data chunks at the given recall position, and computes output digest by hashing the tuple of mining status and the data chunks. If the output digest satisfies the target difficulty, the miner can construct a legitimate PoRA, which consists of the chosen random nonce, the loaded data chunk and the proof for the correctness of data chunk to the mining contract.
|
||||||
|
|
||||||
## Fairness
|
## Fairness
|
||||||
|
|
||||||
@ -30,4 +30,4 @@ Precisely, the mining process has the following steps:

6. For each piece $$\overrightarrow{v}$$, compute the Blake2b hash of the tuple ($$\mathsf{miner\_id}$$, $$\mathsf{nonce}$$, $$\mathsf{context\_digest}$$, $$\mathsf{start\_position}$$, $$\mathsf{mine\_length}$$, $$\overrightarrow{v}$$).

7. If one of the Blake2b hash outputs is smaller than a target value, the miner finds a legitimate PoRA solution.

<figure><img src="../../.gitbook/assets/zg-storage-algorithm.png" alt=""><figcaption><p>Figure 1. Recall Position and Scratchpad Computation</p></figcaption></figure>
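A minimal sketch of steps 6 and 7, assuming 32-byte fields and a big-endian interpretation of the first half of the digest; the exact tuple encoding lives in the miner crate.

```rust
// Hash each recalled piece together with the mining fields (step 6) and accept the nonce
// if the digest is below the target (step 7). Field widths here are assumptions.
use blake2::{Blake2b512, Digest};
use ethereum_types::U256;

fn piece_quality(
    miner_id: &[u8; 32],
    nonce: &[u8; 32],
    context_digest: &[u8; 32],
    start_position: u64,
    mine_length: u64,
    piece: &[u8],
) -> U256 {
    let mut hasher = Blake2b512::new();
    hasher.update(miner_id);
    hasher.update(nonce);
    hasher.update(context_digest);
    hasher.update(start_position.to_be_bytes());
    hasher.update(mine_length.to_be_bytes());
    hasher.update(piece);
    // Interpret the first 32 bytes of the 64-byte digest as a big-endian integer.
    U256::from_big_endian(&hasher.finalize()[..32])
}

fn is_valid_solution(quality: U256, target: U256) -> bool {
    // Step 7: a digest smaller than the target value wins.
    quality <= target
}
```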
@ -5,12 +5,12 @@

0G Storage is the storage layer for the ZeroGravity data availability (DA) system. The 0G Storage layer holds three important features:

* Built-in - It is natively built into the ZeroGravity DA system for data storage and retrieval.
* General purpose - It is designed to support atomic transactions, mutable kv stores as well as archive log systems to enable a wide range of applications with various data types.
* Incentive - Instead of being just a decentralized database, 0G Storage introduces the PoRA mining algorithm to incentivize storage network participants.

## Integration

We provide an [SDK](https://github.com/0glabs/0g-ts-sdk) for users to easily integrate 0G Storage in their applications with the following features:

* File Merkle Tree Class
* Flow Contract Types

@ -22,7 +22,7 @@ We provide a [SDK](https://github.com/0glabs/0g-js-storage-sdk) for users to eas

## Deployment

Please refer to the [Deployment](run.md) page for detailed steps to compile and start a 0G Storage node.

## Test
@ -2,8 +2,8 @@

0G Storage provides a Key-Value runtime upon the log layer. Each key-value node can access the key-value store state through the runtime interface. The key-value runtime provides standard interfaces like `Put()` and `Get()`, and accepts serialized key-value pairs from any application-specific structure. During the normal execution of a key-value store node, it maintains the latest key-value state locally. It updates the value of a key through the `Put()` API, which composes a log entry containing the updated key-value pair and appends it to the log. The runtime constantly monitors the new log entries in the log, fetches them back to the key-value node, and updates the local key-value state according to the log entry contents. In this sense, multiple key-value store nodes essentially synchronize with each other through the shared decentralized log.

A user-defined function is used to deserialize the raw content in a log entry into the application-specific key-value structure. An application can use the `Get()` API to access the latest value of a given key. To improve the efficiency of updates for small key-value pairs, `Put()` allows batched updates with multiple key-value pairs at once. Figure 1 illustrates the architecture of the decentralized key-value store. To manage access control, the ownership information of each key can also be maintained in the log entries. All honest key-value nodes follow the same update rule for the keys based on the ownership to achieve state consistency.

When a new key-value node joins the network, it connects to the log layer and plays the log entries from head to tail to construct the latest state of the key-value store. During log entry playing, an application-specific key-value node can skip irrelevant log entries that do not contain the stream IDs it cares about.

<figure><img src="../.gitbook/assets/zg-storage-log.png" alt=""><figcaption><p>Figure 1. Decentralized K-V Store</p></figcaption></figure>
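The sketch below shows the idea behind `Put()`, `Get()` and log replay. The types and method names are hypothetical and only illustrate the data flow described above, not the runtime's actual API.

```rust
// Illustrative key-value runtime over an append-only log (hypothetical types).
use std::collections::HashMap;

struct LogEntry {
    stream_id: u64,
    pairs: Vec<(Vec<u8>, Vec<u8>)>, // serialized key-value pairs
}

struct KvRuntime {
    log: Vec<LogEntry>,               // stands in for the shared decentralized log
    state: HashMap<Vec<u8>, Vec<u8>>, // locally maintained latest state
    stream_id: u64,                   // the stream this node cares about
}

impl KvRuntime {
    // Put: compose a log entry with the (batched) updates and append it to the log.
    fn put(&mut self, pairs: Vec<(Vec<u8>, Vec<u8>)>) {
        self.log.push(LogEntry { stream_id: self.stream_id, pairs });
    }

    // Get: served from the local state that is kept up to date by replaying the log.
    fn get(&self, key: &[u8]) -> Option<&Vec<u8>> {
        self.state.get(key)
    }

    // Replay: play entries from head to tail, skipping streams this node does not care about.
    fn replay(&mut self) {
        for entry in &self.log {
            if entry.stream_id != self.stream_id {
                continue;
            }
            for (k, v) in &entry.pairs {
                self.state.insert(k.clone(), v.clone());
            }
        }
    }
}
```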
38
docs/onebox-test.md
Normal file

@ -0,0 +1,38 @@

# One Box Test

The 0G storage node provides a one-box test framework for developers to verify system functionalities via RPC.

## Prerequisites

- Requires Python version 3.8, 3.9 or 3.10; higher versions are not guaranteed (e.g. `pysha3` may fail to install).
- Install dependencies under the root folder: `pip3 install -r requirements.txt`

## Install Blockchain Nodes

The Python test framework will launch blockchain nodes on the local machine for storage nodes to interact with. There are 3 kinds of blockchains available:

- 0G blockchain (by default).
- Conflux eSpace (for chain reorg test purposes).
- BSC node (geth).

The blockchain node binaries will be compiled or downloaded from GitHub to the `tests/tmp` folder automatically. Alternatively, developers could also manually copy binaries of a specific version to the `tests/tmp` folder.

## Run Tests

Change to the `tests` folder and run the following command to run all tests:

```
python test_all.py
```

or run any single test, e.g.

```
python example_test.py
```

*Note: please ensure blockchain nodes are installed before running any single test, e.g. by running all tests first.*

## Add New Test

Please follow `example_test.py` to add a new `xxx_test.py` file under the `tests` folder.
@ -4,7 +4,7 @@

### Setup Environment

Install the dependencies: Node.js, yarn, hardhat.

- Linux
@ -6,10 +6,10 @@

When an application server linked with the 0G Storage key-value runtime starts a transaction using the `BeginTx()` interface, it notifies the runtime that the transaction will work on the current state snapshot constructed by playing the log to the current tail. The subsequent key-value operations before the invocation of `EndTx()` update the key-values locally in the server without exposing the updates to the log. When `EndTx()` is invoked, the runtime composes a commit record containing the log position the transaction starts from and the read-write set of the transaction. This commit record is then appended to the log.

When an application server with the key-value runtime encounters the commit record while playing the log, it identifies a conflict window consisting of all the log entries between the start log position of the transaction and the position of the commit record. The log entries in the conflict window therefore contain the key-value operations concurrent with the transaction that submitted the commit record. The runtime further detects whether these concurrent operations contain updates to keys belonging to the read set of the transaction. If yes, the transaction is aborted; otherwise, it is committed successfully.

<figure><img src="../.gitbook/assets/zg-storage-transaction.png" alt=""><figcaption><p>Figure 1. Transaction Processing on 0G K-V Store</p></figcaption></figure>
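A minimal sketch of the conflict-window check, assuming in-memory structures; the commit-record layout and helper names are illustrative only, not the runtime's actual representation.

```rust
// Optimistic conflict detection performed when a commit record is replayed (illustrative).
use std::collections::HashSet;

struct CommitRecord {
    start_position: usize,              // log position the transaction started from
    read_set: HashSet<Vec<u8>>,         // keys read by the transaction
    write_set: Vec<(Vec<u8>, Vec<u8>)>, // keys/values written; applied only on commit
}

// `updated_keys_at` exposes the keys updated by the log entry at a given position.
fn committed(
    record: &CommitRecord,
    commit_position: usize,
    updated_keys_at: impl Fn(usize) -> Vec<Vec<u8>>,
) -> bool {
    // The conflict window is every entry between the start position and the commit record.
    for pos in record.start_position..commit_position {
        for key in updated_keys_at(pos) {
            if record.read_set.contains(&key) {
                return false; // a concurrent write touched the read set: abort
            }
        }
    }
    true // no conflict: the runtime applies record.write_set to the local state
}
```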
## Concurrent Assumption

This transaction model assumes that the transaction participants are collaborative and will honestly compose the commit record with the correct content. Although this assumption is rather strong in a decentralized environment, it is still achievable for specific applications. For example, for an application like Google Docs, a user normally shares access with others who can be trusted. In case this assumption cannot hold, the code of the transaction can be stored in the ZeroGravity log, and some mechanism of verifiable computation, like zero-knowledge proofs or hardware with a trusted execution environment (TEE), can be employed by the transaction executors to verify the validity of the commit record.
@ -39,8 +39,16 @@ config = "0.14"

public-ip = "0.2"
ethers = "2.0.14"
metrics = { workspace = true }
rust-log = { package = "log", version = "0.4.22" }
tracing-core = "0.1.32"
tracing-log = "0.2.0"
console-subscriber = { version = "0.4.1", optional = true }
contract-wrapper = { path = "../common/contract-wrapper" }

[dependencies.libp2p]
version = "0.45.1"
default-features = true
features = ["websocket", "identify", "mplex", "yamux", "noise", "gossipsub", "dns-tokio", "tcp-tokio", "plaintext", "secp256k1"]

[features]
tokio-console = ["console-subscriber"]
@ -13,7 +13,7 @@ enum SlotStatus {

}

/// Sliding window is used to control the concurrent uploading process of a file.
/// Bounded window allows segments to be uploaded concurrently, while having a capacity
/// limit on writing threads per file. Meanwhile, the left_boundary field records
/// how many segments have been uploaded.
struct CtrlWindow {

@ -165,7 +165,7 @@ impl ChunkPoolWriteCtrl {

        if file_ctrl.total_segments != total_segments {
            bail!(
                "file size in segment doesn't match with file size declared in previous segment. Previous total segments:{}, current total segments:{}",
                file_ctrl.total_segments,
                total_segments
            );
@ -297,10 +297,9 @@ impl FileLocationCache {

        INSERT_BATCH.update(announcement.tx_ids.len() as u64);

        let peer_id = *announcement.peer_id;
        let shard_config = match ShardConfig::try_from(announcement.shard_config) {
            Ok(v) => v,
            Err(_) => return,
        };
        self.insert_peer_config(peer_id, shard_config);
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_annoucement_cache_peek_priority() {
|
fn test_announcement_cache_peek_priority() {
|
||||||
let mut cache = AnnouncementCache::new(100, 3600);
|
let mut cache = AnnouncementCache::new(100, 3600);
|
||||||
let now = timestamp_now();
|
let now = timestamp_now();
|
||||||
|
|
||||||
@ -383,7 +382,7 @@ mod tests {
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_annoucement_cache_pop_len() {
|
fn test_announcement_cache_pop_len() {
|
||||||
let mut cache = AnnouncementCache::new(100, 3600);
|
let mut cache = AnnouncementCache::new(100, 3600);
|
||||||
let now = timestamp_now();
|
let now = timestamp_now();
|
||||||
|
|
||||||
@ -405,7 +404,7 @@ mod tests {
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_annoucement_cache_garbage_collect() {
|
fn test_announcement_cache_garbage_collect() {
|
||||||
let mut cache = AnnouncementCache::new(100, 3600);
|
let mut cache = AnnouncementCache::new(100, 3600);
|
||||||
let now = timestamp_now();
|
let now = timestamp_now();
|
||||||
|
|
||||||
@ -423,7 +422,7 @@ mod tests {
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_annoucement_cache_insert_gc() {
|
fn test_announcement_cache_insert_gc() {
|
||||||
let mut cache = AnnouncementCache::new(100, 3600);
|
let mut cache = AnnouncementCache::new(100, 3600);
|
||||||
let now = timestamp_now();
|
let now = timestamp_now();
|
||||||
|
|
||||||
@ -439,7 +438,7 @@ mod tests {
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_annoucement_cache_insert_ignore_older() {
|
fn test_announcement_cache_insert_ignore_older() {
|
||||||
let mut cache = AnnouncementCache::new(100, 3600);
|
let mut cache = AnnouncementCache::new(100, 3600);
|
||||||
let now = timestamp_now();
|
let now = timestamp_now();
|
||||||
|
|
||||||
@ -462,7 +461,7 @@ mod tests {
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_annoucement_cache_insert_overwrite() {
|
fn test_announcement_cache_insert_overwrite() {
|
||||||
let mut cache = AnnouncementCache::new(100, 3600);
|
let mut cache = AnnouncementCache::new(100, 3600);
|
||||||
let now = timestamp_now();
|
let now = timestamp_now();
|
||||||
|
|
||||||
@ -480,7 +479,7 @@ mod tests {
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_annoucement_cache_insert_cap_exceeded() {
|
fn test_announcement_cache_insert_cap_exceeded() {
|
||||||
let mut cache = AnnouncementCache::new(3, 3600);
|
let mut cache = AnnouncementCache::new(3, 3600);
|
||||||
let now = timestamp_now();
|
let now = timestamp_now();
|
||||||
|
|
||||||
@ -500,7 +499,7 @@ mod tests {
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_annoucement_cache_random() {
|
fn test_announcement_cache_random() {
|
||||||
let mut cache = AnnouncementCache::new(100, 3600);
|
let mut cache = AnnouncementCache::new(100, 3600);
|
||||||
let now = timestamp_now();
|
let now = timestamp_now();
|
||||||
|
|
||||||
@ -516,7 +515,7 @@ mod tests {
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_annoucement_cache_all() {
|
fn test_announcement_cache_all() {
|
||||||
let mut cache = AnnouncementCache::new(100, 3600);
|
let mut cache = AnnouncementCache::new(100, 3600);
|
||||||
let now = timestamp_now();
|
let now = timestamp_now();
|
||||||
|
|
||||||
|
@ -1,6 +1,6 @@

use network::{
    libp2p::identity,
    types::{AnnounceFile, SignedAnnounceFile, SignedMessage, TimedMessage},
    Multiaddr, PeerId,
};
use shared_types::{timestamp_now, TxID};

@ -34,12 +34,13 @@ impl AnnounceFileBuilder {

        let at: Multiaddr = "/ip4/127.0.0.1/tcp/10000".parse().unwrap();
        let timestamp = self.timestamp.unwrap_or_else(timestamp_now);

        let msg = TimedMessage {
            inner: AnnounceFile {
                tx_ids: vec![tx_id],
                shard_config: Default::default(),
                peer_id: peer_id.into(),
                at: at.into(),
            },
            timestamp,
        };
@ -20,7 +20,7 @@ pub struct LogSyncConfig {

    // blockchain provider retry params
    // the number of retries for rate limited responses
    pub rate_limit_retries: u32,
    // the number of retries after a connection times out
    pub timeout_retries: u32,
    // the duration to wait before retry, in ms
    pub initial_backoff: u64,
@ -14,7 +14,7 @@ use thiserror::Error;

pub(crate) type PinBoxFut<'a, T> =
    Pin<Box<dyn Future<Output = Result<T, ProviderError>> + Send + 'a>>;

const TOO_MANY_LOGS_ERROR_MSG: [&str; 2] = ["exceeds the max limit of", "too large with more than"];

/// A log query provides streaming access to historical logs via a paginated
/// request. For streaming access to future logs, use [`Middleware::watch`] or
@ -10,6 +10,7 @@ zgs_spec = { path = "../../common/spec" }

zgs_seal = { path = "../../common/zgs_seal" }
task_executor = { path = "../../common/task_executor" }
contract-interface = { path = "../../common/contract-interface" }
contract-wrapper = { path = "../common/contract-wrapper" }
lighthouse_metrics = { path = "../../common/lighthouse_metrics" }
ethereum-types = "0.14"
tokio = { version = "1.19.2", features = ["full"] }
@ -2,7 +2,8 @@ use std::str::FromStr;

use std::sync::Arc;
use std::time::Duration;

use contract_wrapper::SubmitConfig;
use ethereum_types::{Address, H256};
use ethers::core::k256::SecretKey;
use ethers::middleware::SignerMiddleware;
use ethers::providers::Http;

@ -21,7 +22,6 @@ pub struct MinerConfig {

    pub(crate) rpc_endpoint_url: String,
    pub(crate) mine_address: Address,
    pub(crate) flow_address: Address,
    pub(crate) cpu_percentage: u64,
    pub(crate) iter_batch: usize,
    pub(crate) shard_config: ShardConfig,

@ -29,6 +29,7 @@ pub struct MinerConfig {

    pub(crate) rate_limit_retries: u32,
    pub(crate) timeout_retries: u32,
    pub(crate) initial_backoff: u64,
    pub(crate) submission_config: SubmitConfig,
}

pub type MineServiceMiddleware = SignerMiddleware<Arc<Provider<RetryClient<Http>>>, LocalWallet>;

@ -41,7 +42,6 @@ impl MinerConfig {

        rpc_endpoint_url: String,
        mine_address: Address,
        flow_address: Address,
        cpu_percentage: u64,
        iter_batch: usize,
        context_query_seconds: u64,

@ -49,6 +49,7 @@ impl MinerConfig {

        rate_limit_retries: u32,
        timeout_retries: u32,
        initial_backoff: u64,
        submission_config: SubmitConfig,
    ) -> Option<MinerConfig> {
        miner_key.map(|miner_key| MinerConfig {
            miner_id,

@ -56,7 +57,6 @@ impl MinerConfig {

            rpc_endpoint_url,
            mine_address,
            flow_address,
            cpu_percentage,
            iter_batch,
            shard_config,

@ -64,6 +64,7 @@ impl MinerConfig {

            rate_limit_retries,
            timeout_retries,
            initial_backoff,
            submission_config,
        })
    }
@ -1,4 +1,4 @@

use contract_interface::pora_mine::MineContext;
use ethereum_types::{H256, U256};
use rand::{self, Rng};
use std::time;

@ -35,16 +35,23 @@ pub struct PoraService {

#[derive(Debug, Clone, PartialEq, Eq)]
pub(super) struct PoraPuzzle {
    context: MineContext,
    pora_target: U256,
    max_shards: u64,
    subtask_digest: H256,
}

impl PoraPuzzle {
    pub fn new(
        context: MineContext,
        pora_target: U256,
        max_shards: u64,
        subtask_digest: H256,
    ) -> Self {
        Self {
            context,
            pora_target,
            max_shards,
            subtask_digest,
        }
    }
|
|||||||
miner_id: &self.miner_id,
|
miner_id: &self.miner_id,
|
||||||
mine_range_config: &self.mine_range,
|
mine_range_config: &self.mine_range,
|
||||||
context: &puzzle.context,
|
context: &puzzle.context,
|
||||||
target_quality: &puzzle.target_quality,
|
subtask_digest: &puzzle.subtask_digest,
|
||||||
|
pora_target: &puzzle.pora_target,
|
||||||
loader: &*self.loader,
|
loader: &*self.loader,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
@ -57,7 +57,7 @@ pub(crate) async fn check_and_request_miner_id(

        }
        (None, None) => {
            let beneficiary = provider.address();
            let id = request_miner_id(config, &mine_contract, beneficiary).await?;
            set_miner_id(store, &id)
                .await
                .map_err(|e| format!("set miner id on db corrupt: {:?}", e))?;

@ -86,6 +86,7 @@ async fn check_miner_id(

}

async fn request_miner_id(
    config: &MinerConfig,
    mine_contract: &PoraMine<MineServiceMiddleware>,
    beneficiary: Address,
) -> Result<H256, String> {

@ -94,16 +95,13 @@ async fn request_miner_id(

    let submission_call: ContractCall<_, _> =
        mine_contract.request_miner_id(beneficiary, 0).legacy();

    let receipt = contract_wrapper::submit_with_retry(
        submission_call,
        &config.submission_config,
        mine_contract.client().clone(),
    )
    .await
    .map_err(|e| format!("Fail to submit miner id request: {:?}", e))?;

    let first_log = receipt
        .logs
@ -2,8 +2,9 @@ use super::metrics::*;

use crate::recall_range::RecallRange;
use crate::{MineRangeConfig, PoraLoader};
use blake2::{Blake2b512, Digest};
use contract_interface::pora_mine::MineContext;
use ethereum_types::{H256, U256};
use ethers::utils::keccak256;
use lighthouse_metrics::inc_counter;
use storage::log_store::MineLoadChunk;
use tiny_keccak::{Hasher, Keccak};

@ -24,7 +25,8 @@ pub(crate) struct Miner<'a> {

    pub range: RecallRange,
    pub miner_id: &'a H256,
    pub context: &'a MineContext,
    pub subtask_digest: &'a H256,
    pub pora_target: &'a U256,
    pub loader: &'a dyn PoraLoader,
    pub mine_range_config: &'a MineRangeConfig,
}
|
|||||||
.range
|
.range
|
||||||
.difficulty_scale_x64(self.context.flow_length.as_u64());
|
.difficulty_scale_x64(self.context.flow_length.as_u64());
|
||||||
|
|
||||||
if quality <= (self.target_quality / difficulty_scale_x64) << 64 {
|
if quality <= (self.pora_target / difficulty_scale_x64) << 64 {
|
||||||
debug!(
|
debug!(
|
||||||
"Find a PoRA valid answer, quality: {}, target_quality {}, scale {:.3}",
|
"Find a PoRA valid answer, quality: {}, pora_target {}, scale {:.3}",
|
||||||
U256::MAX / quality,
|
U256::MAX / quality,
|
||||||
U256::MAX / self.target_quality,
|
U256::MAX / self.pora_target,
|
||||||
difficulty_scale_x64.as_u128() as f64 / u64::MAX as f64
|
difficulty_scale_x64.as_u128() as f64 / u64::MAX as f64
|
||||||
);
|
);
|
||||||
inc_counter(&HIT_COUNT);
|
inc_counter(&HIT_COUNT);
|
||||||
@ -138,7 +140,7 @@ impl<'a> Miner<'a> {
|
|||||||
let mut hasher = Blake2b512::new();
|
let mut hasher = Blake2b512::new();
|
||||||
hasher.update(self.miner_id);
|
hasher.update(self.miner_id);
|
||||||
hasher.update(nonce);
|
hasher.update(nonce);
|
||||||
hasher.update(self.context.digest);
|
hasher.update(self.subtask_digest);
|
||||||
hasher.update(self.range.digest());
|
hasher.update(self.range.digest());
|
||||||
hasher.finalize().into()
|
hasher.finalize().into()
|
||||||
};
|
};
|
||||||
@ -148,7 +150,11 @@ impl<'a> Miner<'a> {
|
|||||||
let mut scratch_pad =
|
let mut scratch_pad =
|
||||||
[[0u8; BLAKE2B_OUTPUT_BYTES]; BYTES_PER_SCRATCHPAD / BLAKE2B_OUTPUT_BYTES];
|
[[0u8; BLAKE2B_OUTPUT_BYTES]; BYTES_PER_SCRATCHPAD / BLAKE2B_OUTPUT_BYTES];
|
||||||
for scratch_pad_cell in scratch_pad.iter_mut() {
|
for scratch_pad_cell in scratch_pad.iter_mut() {
|
||||||
digest = Blake2b512::new().chain_update(digest).finalize().into();
|
let output0 = keccak256(digest);
|
||||||
|
digest[..32].copy_from_slice(&output0);
|
||||||
|
let output1 = keccak256(digest);
|
||||||
|
digest[32..].copy_from_slice(&output1);
|
||||||
|
|
||||||
*scratch_pad_cell = digest;
|
*scratch_pad_cell = digest;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -87,7 +87,7 @@ impl Sealer {

    async fn update_flow_length(&mut self) -> Result<()> {
        let recent_context = self.flow_contract.make_context_with_result().call().await?;
        debug!("Recent context is {:?}", recent_context);

        let recent_flow_length = recent_context.flow_length.as_u64();
        if self.last_context_flow_length < recent_flow_length {
@ -46,6 +46,7 @@ impl MineService {

            msg_recv.resubscribe(),
            provider.clone(),
            &config,
            miner_id,
        );

        let mine_answer_receiver = PoraService::spawn(
@ -1,13 +1,11 @@

use contract_interface::PoraAnswer;
use contract_interface::{PoraMine, ZgsFlow};
use contract_wrapper::SubmitConfig;
use ethers::contract::ContractCall;
use ethers::prelude::{Http, Provider, RetryClient};
use hex::ToHex;
use shared_types::FlowRangeProof;
use std::sync::Arc;
use storage::H256;
use storage_async::Store;
use task_executor::TaskExecutor;

@ -19,15 +17,13 @@ use crate::watcher::MineContextMessage;

use zgs_spec::{BYTES_PER_SEAL, SECTORS_PER_SEAL};

pub struct Submitter {
    mine_answer_receiver: mpsc::UnboundedReceiver<AnswerWithoutProof>,
    mine_context_receiver: broadcast::Receiver<MineContextMessage>,
    mine_contract: PoraMine<MineServiceMiddleware>,
    flow_contract: ZgsFlow<Provider<RetryClient<Http>>>,
    store: Arc<Store>,
    config: SubmitConfig,
}

impl Submitter {

@ -41,8 +37,7 @@ impl Submitter {

        config: &MinerConfig,
    ) {
        let mine_contract = PoraMine::new(config.mine_address, signing_provider);
        let flow_contract = ZgsFlow::new(config.flow_address, provider.clone());

        let submitter = Submitter {
            mine_answer_receiver,

@ -50,7 +45,7 @@ impl Submitter {

            mine_contract,
            flow_contract,
            store,
            config: config.submission_config,
        };
        executor.spawn(
            async move { Box::pin(submitter.start()).await },

@ -134,11 +129,7 @@ impl Submitter {

        };
        trace!("submit_answer: answer={:?}", answer);

        let submission_call: ContractCall<_, _> = self.mine_contract.submit(answer).legacy();

        if let Some(calldata) = submission_call.calldata() {
            debug!(

@ -153,27 +144,13 @@ impl Submitter {

            submission_call.estimate_gas().await
        );

        contract_wrapper::submit_with_retry(
            submission_call,
            &self.config,
            self.mine_contract.client().clone(),
        )
        .await
        .map_err(|e| format!("Failed to submit mine answer: {:?}", e))?;

        Ok(())
    }
@ -1,6 +1,6 @@

#![allow(unused)]

use contract_interface::{zgs_flow::MineContext, PoraMine, WorkerContext, ZgsFlow};
use ethereum_types::{Address, H256, U256};
use ethers::{
    contract::Contract,

@ -28,6 +28,8 @@ lazy_static! {

        H256::from_str("c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470").unwrap();
}

const PORA_VERSION: u64 = 1;

pub struct MineContextWatcher {
    provider: Arc<Provider<RetryClient<Http>>>,
    flow_contract: ZgsFlow<Provider<RetryClient<Http>>>,

@ -36,6 +38,7 @@ pub struct MineContextWatcher {

    mine_context_sender: broadcast::Sender<MineContextMessage>,
    last_report: MineContextMessage,
    query_interval: Duration,
    miner_id: H256,

    msg_recv: broadcast::Receiver<MinerMessage>,
}

@ -46,6 +49,7 @@ impl MineContextWatcher {

        msg_recv: broadcast::Receiver<MinerMessage>,
        provider: Arc<Provider<RetryClient<Http>>>,
        config: &MinerConfig,
        miner_id: H256,
    ) -> broadcast::Receiver<MineContextMessage> {
        let mine_contract = PoraMine::new(config.mine_address, provider.clone());
        let flow_contract = ZgsFlow::new(config.flow_address, provider.clone());

@ -60,6 +64,7 @@ impl MineContextWatcher {

            msg_recv,
            last_report: None,
            query_interval: config.context_query_interval,
            miner_id,
        };
        executor.spawn(
            async move { Box::pin(watcher.start()).await },

@ -105,28 +110,14 @@ impl MineContextWatcher {

    }

    async fn query_recent_context(&mut self) -> Result<(), String> {
        let report = self.fetch_pora_puzzle().await?;

        if report == self.last_report {
            return Ok(());
        }

        debug!("Update pora puzzle: {:?}", report);

        self.mine_context_sender
            .send(report.clone())
            .map_err(|e| format!("Failed to send out the most recent mine context: {:?}", e))?;

@ -134,4 +125,41 @@ impl MineContextWatcher {

        Ok(())
    }

    async fn fetch_pora_puzzle(&self) -> Result<Option<PoraPuzzle>, String> {
        let pora_version = self
            .mine_contract
            .pora_version()
            .call()
            .await
            .map_err(|e| format!("Failed to query mining version: {:?}", e))?;

        if pora_version != PORA_VERSION {
            return Ok(None);
        }

        let miner_id = self.miner_id.0;
        let WorkerContext {
            context,
            pora_target,
            subtask_digest,
            max_shards,
        } = self
            .mine_contract
            .compute_worker_context(miner_id)
            .call()
            .await
            .map_err(|e| format!("Failed to query mining context: {:?}", e))?;

        if pora_target.is_zero() || context.digest == EMPTY_HASH.0 {
            return Ok(None);
        }

        Ok(Some(PoraPuzzle::new(
            context,
            pora_target,
            max_shards,
            H256(subtask_digest),
        )))
    }
}
@ -13,141 +13,34 @@ use tokio_util::time::delay_queue::{DelayQueue, Key};

/// messages are ignored. This behaviour can be changed using `GossipCacheBuilder::default_timeout`
/// to apply the same delay to every kind. Individual timeouts for specific kinds can be set and
/// will overwrite the default_timeout if present.
#[derive(Default)]
pub struct GossipCache {
    /// Expire timeouts for each topic-msg pair.
    expirations: DelayQueue<(GossipTopic, Vec<u8>)>,
    /// Messages cached for each topic.
    topic_msgs: HashMap<GossipTopic, HashMap<Vec<u8>, Key>>,
    default_timeout: Option<Duration>,
    /// Timeout for pubsub messages.
    timeouts: HashMap<GossipKind, Duration>,
}

impl GossipCache {
    #[cfg(test)]
    pub fn new_with_default_timeout(timeout: Duration) -> Self {
        Self {
            default_timeout: Some(timeout),
            ..Default::default()
        }
    }

    // Insert a message to be sent later.
    pub fn insert(&mut self, topic: GossipTopic, data: Vec<u8>) {
        let expire_timeout = self
            .timeouts
            .get(topic.kind())
            .cloned()
            .or(self.default_timeout);

        let expire_timeout = match expire_timeout {
            Some(expire_timeout) => expire_timeout,

@ -221,9 +114,7 @@ mod tests {

    #[tokio::test]
    async fn test_stream() {
        let mut cache = GossipCache::new_with_default_timeout(Duration::from_millis(300));
        let test_topic =
            GossipTopic::new(GossipKind::Example, crate::types::GossipEncoding::SSZSnappy);
        cache.insert(test_topic, vec![]);
@ -6,7 +6,6 @@ use crate::peer_manager::{
|
|||||||
ConnectionDirection, PeerManager, PeerManagerEvent,
|
ConnectionDirection, PeerManager, PeerManagerEvent,
|
||||||
};
|
};
|
||||||
use crate::rpc::methods::DataByHashRequest;
|
use crate::rpc::methods::DataByHashRequest;
|
||||||
use crate::rpc::methods::FileAnnouncement;
|
|
||||||
use crate::rpc::methods::GetChunksRequest;
|
use crate::rpc::methods::GetChunksRequest;
|
||||||
use crate::rpc::*;
|
use crate::rpc::*;
|
||||||
use crate::service::Context as ServiceContext;
|
use crate::service::Context as ServiceContext;
|
||||||
@ -32,7 +31,7 @@ use libp2p::{
|
|||||||
},
|
},
|
||||||
NetworkBehaviour, PeerId,
|
NetworkBehaviour, PeerId,
|
||||||
};
|
};
|
||||||
use shared_types::ChunkArrayWithProof;
|
use shared_types::{ChunkArrayWithProof, ShardedFile};
|
||||||
use std::{
|
use std::{
|
||||||
collections::VecDeque,
|
collections::VecDeque,
|
||||||
sync::Arc,
|
sync::Arc,
|
||||||
@ -236,6 +235,9 @@ impl<AppReqId: ReqId> Behaviour<AppReqId> {
|
|||||||
params
|
params
 .topics
 .insert(get_hash(GossipKind::NewFile), TopicScoreParams::default());
+params
+.topics
+.insert(get_hash(GossipKind::AskFile), TopicScoreParams::default());
 params
 .topics
 .insert(get_hash(GossipKind::FindFile), TopicScoreParams::default());

@@ -270,12 +272,10 @@ impl<AppReqId: ReqId> Behaviour<AppReqId> {
 ..config.peer_manager
 };
 
-let slot_duration = std::time::Duration::from_secs(12);
+// let slot_duration = std::time::Duration::from_secs(12);
 // let slot_duration = std::time::Duration::from_secs(ctx.chain_spec.seconds_per_slot);
 
-let gossip_cache = GossipCache::builder()
-.example_timeout(slot_duration) // TODO
-.build();
+let gossip_cache = GossipCache::default();
 
 Ok(Behaviour {
 // Sub-behaviours

@@ -390,7 +390,7 @@ impl<AppReqId: ReqId> Behaviour<AppReqId> {
 .gossipsub
 .publish(topic.clone().into(), message_data.clone())
 {
-warn!(error = ?e, "Could not publish message");
+warn!(error = ?e, topic = ?topic.kind(), "Failed to publish message");
 
 // add to metrics
 if let Some(v) = metrics::get_int_gauge(

@@ -547,8 +547,8 @@ impl<AppReqId: ReqId> Behaviour<AppReqId> {
 Request::DataByHash { .. } => {
 metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["data_by_hash"])
 }
-Request::AnnounceFile { .. } => {
-metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["announce_file"])
+Request::AnswerFile { .. } => {
+metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["answer_file"])
 }
 Request::GetChunks { .. } => {
 metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["get_chunks"])

@@ -740,7 +740,7 @@ where
 &error,
 ConnectionDirection::Outgoing,
 );
-// inform failures of requests comming outside the behaviour
+// inform failures of requests coming outside the behaviour
 if let RequestId::Application(id) = id {
 self.add_event(BehaviourEvent::RPCFailed { peer_id, id });
 }

@@ -780,8 +780,8 @@ where
 InboundRequest::DataByHash(req) => {
 self.propagate_request(peer_request_id, peer_id, Request::DataByHash(req))
 }
-InboundRequest::AnnounceFile(req) => {
-self.propagate_request(peer_request_id, peer_id, Request::AnnounceFile(req))
+InboundRequest::AnswerFile(req) => {
+self.propagate_request(peer_request_id, peer_id, Request::AnswerFile(req))
 }
 InboundRequest::GetChunks(req) => {
 self.propagate_request(peer_request_id, peer_id, Request::GetChunks(req))

@@ -997,8 +997,8 @@ pub enum Request {
 Status(StatusMessage),
 /// A data by hash request.
 DataByHash(DataByHashRequest),
-/// An AnnounceFile message.
-AnnounceFile(FileAnnouncement),
+/// An AnswerFile message.
+AnswerFile(ShardedFile),
 /// A GetChunks request.
 GetChunks(GetChunksRequest),
 }

@@ -1008,7 +1008,7 @@ impl std::convert::From<Request> for OutboundRequest {
 match req {
 Request::Status(s) => OutboundRequest::Status(s),
 Request::DataByHash(r) => OutboundRequest::DataByHash(r),
-Request::AnnounceFile(r) => OutboundRequest::AnnounceFile(r),
+Request::AnswerFile(r) => OutboundRequest::AnswerFile(r),
 Request::GetChunks(r) => OutboundRequest::GetChunks(r),
 }
 }

@@ -103,7 +103,7 @@ pub struct Config {
 /// Subscribe to all subnets for the duration of the runtime.
 pub subscribe_all_subnets: bool,
 
-/// Import/aggregate all attestations recieved on subscribed subnets for the duration of the
+/// Import/aggregate all attestations received on subscribed subnets for the duration of the
 /// runtime.
 pub import_all_attestations: bool,
 

@@ -133,6 +133,9 @@ pub struct Config {
 /// Whether to disable network identity in ENR.
 /// This is for test purpose only.
 pub disable_enr_network_id: bool,
 
+/// Whether to allow find chunks from peers.
+pub find_chunks_enabled: bool,
 }
 
 impl Default for Config {

@@ -214,6 +217,7 @@ impl Default for Config {
 peer_db: Default::default(),
 peer_manager: Default::default(),
 disable_enr_network_id: false,
+find_chunks_enabled: false,
 }
 }
 }
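The new `find_chunks_enabled` flag defaults to `false`, so chunk lookup gossip stays off unless a node opts in. A minimal sketch of opting in from code, not part of the diff above (only the `Config` field comes from the diff; the surrounding setup is assumed):

```rust
// Sketch only: enable chunk-level gossip on this node's network config.
let mut config = Config::default();
config.find_chunks_enabled = true; // the service will then also subscribe FindChunks/AnnounceChunks
```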
@@ -290,32 +294,6 @@ impl From<u8> for NetworkLoad {
 
 /// Return a Lighthouse specific `GossipsubConfig` where the `message_id_fn` depends on the current fork.
 pub fn gossipsub_config(network_load: u8) -> GossipsubConfig {
-// The function used to generate a gossipsub message id
-// We use the first 8 bytes of SHA256(data) for content addressing
-let fast_gossip_message_id =
-|message: &RawGossipsubMessage| FastMessageId::from(&Sha256::digest(&message.data)[..8]);
-fn prefix(prefix: [u8; 4], message: &GossipsubMessage) -> Vec<u8> {
-let topic_bytes = message.topic.as_str().as_bytes();
-
-// according to: https://github.com/ethereum/consensus-specs/blob/dev/specs/merge/p2p-interface.md#the-gossip-domain-gossipsub
-// the derivation of the message-id remains the same in the merge
-let topic_len_bytes = topic_bytes.len().to_le_bytes();
-let mut vec = Vec::with_capacity(
-prefix.len() + topic_len_bytes.len() + topic_bytes.len() + message.data.len(),
-);
-vec.extend_from_slice(&prefix);
-vec.extend_from_slice(&topic_len_bytes);
-vec.extend_from_slice(topic_bytes);
-vec.extend_from_slice(&message.data);
-vec
-}
-
-let gossip_message_id = move |message: &GossipsubMessage| {
-MessageId::from(
-&Sha256::digest(prefix(MESSAGE_DOMAIN_VALID_SNAPPY, message).as_slice())[..20],
-)
-};
-
 let load = NetworkLoad::from(network_load);
 
 GossipsubConfigBuilder::default()

@@ -364,3 +342,41 @@ fn is_global(addr: &std::net::Ipv4Addr) -> bool {
 // Make sure the address is not in 0.0.0.0/8
 && addr.octets()[0] != 0
 }
+
+fn fast_gossip_message_id(message: &RawGossipsubMessage) -> FastMessageId {
+// source | topic | data | nonce
+let topic_bytes = message.topic.as_str().as_bytes();
+let mut buf = Vec::with_capacity(64 + topic_bytes.len() + message.data.len() + 8);
+
+if let Some(peer_id) = message.source {
+buf.extend_from_slice(&peer_id.to_bytes());
+}
+
+buf.extend_from_slice(&topic_bytes);
+buf.extend_from_slice(&message.data);
+
+if let Some(nonce) = message.sequence_number {
+buf.extend_from_slice(&nonce.to_le_bytes());
+}
+
+FastMessageId::from(&Sha256::digest(&buf)[..8])
+}
+
+fn gossip_message_id(message: &GossipsubMessage) -> MessageId {
+// prefix | source | topic | data | nonce
+let topic_bytes = message.topic.as_str().as_bytes();
+let mut vec = Vec::with_capacity(
+MESSAGE_DOMAIN_VALID_SNAPPY.len() + 64 + topic_bytes.len() + message.data.len() + 8,
+);
+vec.extend_from_slice(&MESSAGE_DOMAIN_VALID_SNAPPY);
+if let Some(peer_id) = message.source {
+vec.extend_from_slice(&peer_id.to_bytes());
+}
+vec.extend_from_slice(topic_bytes);
+vec.extend_from_slice(&message.data);
+if let Some(nonce) = message.sequence_number {
+vec.extend_from_slice(&nonce.to_le_bytes());
+}
+
+MessageId::from(&Sha256::digest(&vec)[..20])
+}
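The two free functions above replace the closures that previously lived inside `gossipsub_config`, and they now mix the message source and sequence number into the id. A hedged sketch of how they would typically be registered, assuming the gossipsub builder exposes `message_id_fn`/`fast_message_id_fn` as in upstream gossipsub (the exact wiring is not shown in this diff):

```rust
// Sketch only: plug the free functions into the gossipsub config builder.
let gossipsub_config = GossipsubConfigBuilder::default()
    .fast_message_id_fn(fast_gossip_message_id) // cheap de-duplication id
    .message_id_fn(gossip_message_id)           // canonical message id
    .build()
    .expect("valid gossipsub config");
```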
@@ -84,7 +84,7 @@ enum EventStream {
 InActive,
 }
 
-/// The main discovery service. This can be disabled via CLI arguements. When disabled the
+/// The main discovery service. This can be disabled via CLI arguments. When disabled the
 /// underlying processes are not started, but this struct still maintains our current ENR.
 pub struct Discovery {
 /// A collection of seen live ENRs for quick lookup and to map peer-id's to ENRs.

@@ -96,7 +96,12 @@ pub use service::{load_private_key, Context, Libp2pEvent, Service, NETWORK_KEY_F
 /// Defines the current P2P protocol version.
 /// - v1: Broadcast FindFile & AnnounceFile messages in the whole network, which caused network too heavey.
 /// - v2: Publish NewFile to neighbors only and announce file via RPC message.
-pub const PROTOCOL_VERSION: [u8; 3] = [0, 2, 0];
+/// - v3: Add shard config in Status message.
+/// - v4: Refactor pubsub messages.
+pub const PROTOCOL_VERSION_V1: [u8; 3] = [0, 1, 1];
+pub const PROTOCOL_VERSION_V2: [u8; 3] = [0, 2, 1];
+pub const PROTOCOL_VERSION_V3: [u8; 3] = [0, 3, 0];
+pub const PROTOCOL_VERSION_V4: [u8; 3] = [0, 4, 0];
 
 /// Application level requests sent to the network.
 #[derive(Debug, Clone, Copy)]

@@ -149,6 +154,8 @@ pub enum NetworkMessage {
 },
 /// Start dialing a new peer.
 DialPeer { address: Multiaddr, peer_id: PeerId },
+/// Disconnect a peer.
+DisconnectPeer { peer_id: PeerId },
 /// Notify that new file stored in db.
 AnnounceLocalFile { tx_id: TxID },
 /// Called if a known external TCP socket address has been updated.

@@ -164,5 +171,5 @@ pub type NetworkSender = channel::metrics::Sender<NetworkMessage>;
 pub type NetworkReceiver = channel::metrics::Receiver<NetworkMessage>;
 
 pub fn new_network_channel() -> (NetworkSender, NetworkReceiver) {
-channel::metrics::unbounded_channel("network")
+channel::metrics::unbounded_channel("network_channel")
 }

@@ -1,6 +1,7 @@
-use std::time::Duration;
+use std::{fmt::Debug, sync::Arc, time::Duration};
 
 use duration_str::deserialize_duration;
+use libp2p::PeerId;
 use serde::{Deserialize, Serialize};
 
 /// The time in seconds between re-status's peers.

@@ -16,7 +17,7 @@ pub const DEFAULT_PING_INTERVAL_INBOUND: u64 = 20;
 pub const DEFAULT_TARGET_PEERS: usize = 50;
 
 /// Configurations for the PeerManager.
-#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
+#[derive(Debug, Clone, Serialize, Deserialize)]
 #[serde(default)]
 pub struct Config {
 /* Peer count related configurations */

@@ -40,6 +41,9 @@ pub struct Config {
 pub ping_interval_inbound: u64,
 /// Interval between PING events for peers dialed by us.
 pub ping_interval_outbound: u64,
 
+#[serde(skip)]
+pub filters: Filters,
 }
 
 impl Default for Config {

@@ -52,6 +56,29 @@ impl Default for Config {
 status_interval: DEFAULT_STATUS_INTERVAL,
 ping_interval_inbound: DEFAULT_PING_INTERVAL_INBOUND,
 ping_interval_outbound: DEFAULT_PING_INTERVAL_OUTBOUND,
+filters: Default::default(),
 }
 }
 }
+
+#[derive(Clone)]
+pub struct Filters {
+/// Decide whether to dial to specified peer.
+pub dial_peer_filter: Option<Arc<dyn Fn(&PeerId) -> bool + Sync + Send + 'static>>,
+}
+
+impl Default for Filters {
+fn default() -> Self {
+Filters {
+dial_peer_filter: None,
+}
+}
+}
+
+impl Debug for Filters {
+fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+f.debug_struct("Filters")
+.field("dial_peer_filter", &self.dial_peer_filter.is_some())
+.finish()
+}
+}
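The new `dial_peer_filter` hook lets an embedder veto peers before they are dialed; the peer manager clones it and applies it when filtering discovered peers (see the `@@ -277` hunk further down). A hedged sketch of wiring it up, not part of the diff (the allowlist and the helper function are assumptions; only the `Config`/`Filters` fields come from the diff):

```rust
use std::{collections::HashSet, sync::Arc};
use libp2p::PeerId;

// Sketch only: restrict outbound dials to an assumed allowlist of peer ids.
fn config_with_allowlist(allowlist: HashSet<PeerId>) -> Config {
    let allowlist = Arc::new(allowlist);
    let mut cfg = Config::default();
    cfg.filters.dial_peer_filter =
        Some(Arc::new(move |peer_id: &PeerId| allowlist.contains(peer_id)));
    cfg
}
```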
@@ -69,6 +69,8 @@ pub struct PeerManager {
 discovery_enabled: bool,
 /// Keeps track if the current instance is reporting metrics or not.
 metrics_enabled: bool,
 
+filters: config::Filters,
 }
 
 /// The events that the `PeerManager` outputs (requests).

@@ -108,6 +110,7 @@ impl PeerManager {
 status_interval,
 ping_interval_inbound,
 ping_interval_outbound,
+filters,
 } = cfg;
 
 // Set up the peer manager heartbeat interval

@@ -123,6 +126,7 @@ impl PeerManager {
 heartbeat,
 discovery_enabled,
 metrics_enabled,
+filters,
 })
 }
 

@@ -277,6 +281,10 @@ impl PeerManager {
 }
 }
 
+if let Some(dial_peer_filter) = self.filters.dial_peer_filter.clone() {
+to_dial_peers.retain(|peer_id| dial_peer_filter(peer_id));
+}
+
 // Queue another discovery if we need to
 self.maintain_peer_count(to_dial_peers.len());
 

@@ -457,7 +465,7 @@ impl PeerManager {
 Protocol::Goodbye => PeerAction::LowToleranceError,
 Protocol::Status => PeerAction::LowToleranceError,
 Protocol::DataByHash => PeerAction::MidToleranceError,
-Protocol::AnnounceFile => PeerAction::MidToleranceError,
+Protocol::AnswerFile => PeerAction::MidToleranceError,
 Protocol::GetChunks => PeerAction::MidToleranceError,
 },
 },

@@ -472,7 +480,7 @@ impl PeerManager {
 Protocol::Goodbye => return,
 Protocol::Status => PeerAction::LowToleranceError,
 Protocol::DataByHash => return,
-Protocol::AnnounceFile => return,
+Protocol::AnswerFile => return,
 Protocol::GetChunks => return,
 }
 }

@@ -487,7 +495,7 @@ impl PeerManager {
 Protocol::Goodbye => return,
 Protocol::Status => return,
 Protocol::DataByHash => PeerAction::MidToleranceError,
-Protocol::AnnounceFile => PeerAction::MidToleranceError,
+Protocol::AnswerFile => PeerAction::MidToleranceError,
 Protocol::GetChunks => PeerAction::MidToleranceError,
 },
 },

@@ -693,7 +701,7 @@ impl PeerManager {
 }
 
 // Gracefully disconnects a peer without banning them.
-fn disconnect_peer(&mut self, peer_id: PeerId, reason: GoodbyeReason) {
+pub fn disconnect_peer(&mut self, peer_id: PeerId, reason: GoodbyeReason) {
 self.events
 .push(PeerManagerEvent::DisconnectPeer(peer_id, reason));
 self.network_globals

@@ -134,7 +134,7 @@ impl NetworkBehaviour for PeerManager {
 BanResult::NotBanned => {}
 }
 
-// Count dialing peers in the limit if the peer dialied us.
+// Count dialing peers in the limit if the peer dialed us.
 let count_dialing = endpoint.is_listener();
 // Check the connection limits
 if self.peer_limit_reached(count_dialing)

@@ -35,7 +35,7 @@ pub struct PeerDBConfig {
 pub allowed_negative_gossipsub_factor: f32,
 /// The time we allow peers to be in the dialing state in our PeerDb before we revert them to a disconnected state.
 #[serde(deserialize_with = "deserialize_duration")]
-pub dail_timeout: Duration,
+pub dial_timeout: Duration,
 }
 
 impl Default for PeerDBConfig {

@@ -45,7 +45,7 @@ impl Default for PeerDBConfig {
 max_banned_peers: 1000,
 banned_peers_per_ip_threshold: 5,
 allowed_negative_gossipsub_factor: 0.1,
-dail_timeout: Duration::from_secs(15),
+dial_timeout: Duration::from_secs(15),
 }
 }
 }

@@ -339,7 +339,7 @@ impl PeerDB {
 .iter()
 .filter_map(|(peer_id, info)| {
 if let PeerConnectionStatus::Dialing { since } = info.connection_status() {
-if (*since) + self.config.dail_timeout < std::time::Instant::now() {
+if (*since) + self.config.dial_timeout < std::time::Instant::now() {
 return Some(*peer_id);
 }
 }

@@ -19,7 +19,7 @@ pub struct Client {
 
 #[derive(Clone, Copy, Debug, Serialize, PartialEq, Eq, AsRefStr, IntoStaticStr, EnumIter)]
 pub enum ClientKind {
-/// An Zgs node.
+/// A Zgs node.
 Zgs,
 /// An unknown client.
 Unknown,

@@ -5,7 +5,7 @@ use crate::rpc::{
 };
 use crate::rpc::{InboundRequest, OutboundRequest, RPCCodedResponse, RPCResponse};
 use libp2p::bytes::BytesMut;
-use shared_types::ChunkArrayWithProof;
+use shared_types::{ChunkArrayWithProof, ShardedFile};
 use snap::read::FrameDecoder;
 use snap::write::FrameEncoder;
 use ssz::{Decode, Encode};

@@ -159,7 +159,7 @@ impl Encoder<OutboundRequest> for SSZSnappyOutboundCodec {
 OutboundRequest::Goodbye(req) => req.as_ssz_bytes(),
 OutboundRequest::Ping(req) => req.as_ssz_bytes(),
 OutboundRequest::DataByHash(req) => req.hashes.as_ssz_bytes(),
-OutboundRequest::AnnounceFile(req) => req.as_ssz_bytes(),
+OutboundRequest::AnswerFile(req) => req.as_ssz_bytes(),
 OutboundRequest::GetChunks(req) => req.as_ssz_bytes(),
 };
 // SSZ encoded bytes should be within `max_packet_size`

@@ -347,8 +347,8 @@ fn handle_v1_request(
 Protocol::DataByHash => Ok(Some(InboundRequest::DataByHash(DataByHashRequest {
 hashes: VariableList::from_ssz_bytes(decoded_buffer)?,
 }))),
-Protocol::AnnounceFile => Ok(Some(InboundRequest::AnnounceFile(
-FileAnnouncement::from_ssz_bytes(decoded_buffer)?,
+Protocol::AnswerFile => Ok(Some(InboundRequest::AnswerFile(
+ShardedFile::from_ssz_bytes(decoded_buffer)?,
 ))),
 Protocol::GetChunks => Ok(Some(InboundRequest::GetChunks(
 GetChunksRequest::from_ssz_bytes(decoded_buffer)?,

@@ -377,9 +377,9 @@ fn handle_v1_response(
 Protocol::DataByHash => Ok(Some(RPCResponse::DataByHash(Box::new(
 ZgsData::from_ssz_bytes(decoded_buffer)?,
 )))),
-// This case should be unreachable as `AnnounceFile` has no response.
-Protocol::AnnounceFile => Err(RPCError::InvalidData(
-"AnnounceFile RPC message has no valid response".to_string(),
+// This case should be unreachable as `AnswerFile` has no response.
+Protocol::AnswerFile => Err(RPCError::InvalidData(
+"AnswerFile RPC message has no valid response".to_string(),
 )),
 Protocol::GetChunks => Ok(Some(RPCResponse::Chunks(
 ChunkArrayWithProof::from_ssz_bytes(decoded_buffer)?,

@@ -399,9 +399,7 @@ mod tests {
 use std::io::Write;
 
 fn status_message() -> StatusMessage {
-StatusMessage {
-data: Default::default(),
-}
+Default::default()
 }
 
 fn ping_message() -> Ping {

@@ -570,10 +568,7 @@ mod tests {
 assert_eq!(stream_identifier.len(), 10);
 
 // Status message is 84 bytes uncompressed. `max_compressed_len` is 32 + 84 + 84/6 = 130.
-let status_message_bytes = StatusMessage {
-data: Default::default(),
-}
-.as_ssz_bytes();
+let status_message_bytes = StatusMessage::default().as_ssz_bytes();
 
 let mut uvi_codec: Uvi<usize> = Uvi::default();
 let mut dst = BytesMut::with_capacity(1024);

@@ -69,9 +69,13 @@ impl ToString for ErrorType {
 /* Requests */
 
 /// The STATUS request/response handshake message.
-#[derive(Encode, Decode, Clone, Debug, PartialEq, Eq)]
+#[derive(Encode, Decode, Clone, Debug, PartialEq, Eq, Default)]
 pub struct StatusMessage {
 pub data: NetworkIdentity,
+
+// shard config
+pub num_shard: usize,
+pub shard_id: usize,
 }
 
 /// The PING request/response message.
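With `Default` now derived and the shard fields added, a handshake message can be built from defaults with only the shard configuration filled in. A small illustrative sketch, not part of the diff (the numeric values are made up):

```rust
// Sketch only: a Status handshake carrying this node's shard config.
let status = StatusMessage {
    num_shard: 4, // example values, not from the diff
    shard_id: 1,
    ..Default::default()
};
```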
@@ -178,14 +182,6 @@ pub struct DataByHashRequest {
 pub hashes: VariableList<Hash256, MaxRequestBlocks>,
 }
 
-// The message of `AnnounceFile` RPC message.
-#[derive(Encode, Decode, Clone, Debug, PartialEq, Eq)]
-pub struct FileAnnouncement {
-pub tx_id: TxID,
-pub num_shard: usize,
-pub shard_id: usize,
-}
-
 /// Request a chunk array from a peer.
 #[derive(Encode, Decode, Clone, Debug, PartialEq, Eq)]
 pub struct GetChunksRequest {

@@ -33,7 +33,7 @@ mod handler;
 pub mod methods;
 mod outbound;
 mod protocol;
-mod rate_limiter;
+pub mod rate_limiter;
 
 /// Composite trait for a request id.
 pub trait ReqId: Send + 'static + std::fmt::Debug + Copy + Clone {}

@@ -118,7 +118,7 @@ impl<Id: ReqId> RPC<Id> {
 .n_every(Protocol::Status, 5, Duration::from_secs(15))
 .one_every(Protocol::Goodbye, Duration::from_secs(10))
 .n_every(Protocol::DataByHash, 128, Duration::from_secs(10))
-.n_every(Protocol::AnnounceFile, 256, Duration::from_secs(10))
+.n_every(Protocol::AnswerFile, 256, Duration::from_secs(10))
 .n_every(Protocol::GetChunks, 4096, Duration::from_secs(10))
 .build()
 .expect("Configuration parameters are valid");

@@ -12,6 +12,7 @@ use futures::future::BoxFuture;
 use futures::prelude::{AsyncRead, AsyncWrite};
 use futures::{FutureExt, SinkExt};
 use libp2p::core::{OutboundUpgrade, UpgradeInfo};
+use shared_types::ShardedFile;
 use tokio_util::{
 codec::Framed,
 compat::{Compat, FuturesAsyncReadCompatExt},

@@ -34,7 +35,7 @@ pub enum OutboundRequest {
 Goodbye(GoodbyeReason),
 Ping(Ping),
 DataByHash(DataByHashRequest),
-AnnounceFile(FileAnnouncement),
+AnswerFile(ShardedFile),
 GetChunks(GetChunksRequest),
 }
 

@@ -73,8 +74,8 @@ impl OutboundRequest {
 Version::V1,
 Encoding::SSZSnappy,
 )],
-OutboundRequest::AnnounceFile(_) => vec![ProtocolId::new(
-Protocol::AnnounceFile,
+OutboundRequest::AnswerFile(_) => vec![ProtocolId::new(
+Protocol::AnswerFile,
 Version::V1,
 Encoding::SSZSnappy,
 )],

@@ -95,7 +96,7 @@ impl OutboundRequest {
 OutboundRequest::Goodbye(_) => 0,
 OutboundRequest::Ping(_) => 1,
 OutboundRequest::DataByHash(req) => req.hashes.len() as u64,
-OutboundRequest::AnnounceFile(_) => 0,
+OutboundRequest::AnswerFile(_) => 0,
 OutboundRequest::GetChunks(_) => 1,
 }
 }

@@ -107,7 +108,7 @@ impl OutboundRequest {
 OutboundRequest::Goodbye(_) => Protocol::Goodbye,
 OutboundRequest::Ping(_) => Protocol::Ping,
 OutboundRequest::DataByHash(_) => Protocol::DataByHash,
-OutboundRequest::AnnounceFile(_) => Protocol::AnnounceFile,
+OutboundRequest::AnswerFile(_) => Protocol::AnswerFile,
 OutboundRequest::GetChunks(_) => Protocol::GetChunks,
 }
 }

@@ -122,7 +123,7 @@ impl OutboundRequest {
 OutboundRequest::Status(_) => unreachable!(),
 OutboundRequest::Goodbye(_) => unreachable!(),
 OutboundRequest::Ping(_) => unreachable!(),
-OutboundRequest::AnnounceFile(_) => unreachable!(),
+OutboundRequest::AnswerFile(_) => unreachable!(),
 OutboundRequest::GetChunks(_) => unreachable!(),
 }
 }

@@ -179,8 +180,8 @@ impl std::fmt::Display for OutboundRequest {
 OutboundRequest::DataByHash(req) => {
 write!(f, "Data by hash: {:?}", req)
 }
-OutboundRequest::AnnounceFile(req) => {
-write!(f, "AnnounceFile: {:?}", req)
+OutboundRequest::AnswerFile(req) => {
+write!(f, "AnswerFile: {:?}", req)
 }
 OutboundRequest::GetChunks(req) => {
 write!(f, "GetChunks: {:?}", req)

@@ -8,7 +8,7 @@ use futures::future::BoxFuture;
 use futures::prelude::{AsyncRead, AsyncWrite};
 use futures::{FutureExt, StreamExt};
 use libp2p::core::{InboundUpgrade, ProtocolName, UpgradeInfo};
-use shared_types::{ChunkArray, ChunkArrayWithProof, FlowRangeProof};
+use shared_types::{ChunkArray, ChunkArrayWithProof, FlowRangeProof, ShardedFile};
 use ssz::Encode;
 use ssz_types::VariableList;
 use std::io;

@@ -91,8 +91,8 @@ pub enum Protocol {
 /// TODO
 DataByHash,
 
-/// The file announce protocol.
-AnnounceFile,
+/// The file answer protocol.
+AnswerFile,
 /// The Chunk sync protocol.
 GetChunks,
 }

@@ -104,7 +104,7 @@ pub enum Version {
 V1,
 }
 
-/// RPC Encondings supported.
+/// RPC Encodings supported.
 #[derive(Debug, Clone, PartialEq, Eq)]
 pub enum Encoding {
 SSZSnappy,

@@ -117,7 +117,7 @@ impl std::fmt::Display for Protocol {
 Protocol::Goodbye => "goodbye",
 Protocol::Ping => "ping",
 Protocol::DataByHash => "data_by_hash",
-Protocol::AnnounceFile => "announce_file",
+Protocol::AnswerFile => "answer_file",
 Protocol::GetChunks => "get_chunks",
 };
 f.write_str(repr)

@@ -158,7 +158,7 @@ impl UpgradeInfo for RPCProtocol {
 ProtocolId::new(Protocol::Goodbye, Version::V1, Encoding::SSZSnappy),
 ProtocolId::new(Protocol::Ping, Version::V1, Encoding::SSZSnappy),
 ProtocolId::new(Protocol::DataByHash, Version::V1, Encoding::SSZSnappy),
-ProtocolId::new(Protocol::AnnounceFile, Version::V1, Encoding::SSZSnappy),
+ProtocolId::new(Protocol::AnswerFile, Version::V1, Encoding::SSZSnappy),
 ProtocolId::new(Protocol::GetChunks, Version::V1, Encoding::SSZSnappy),
 ]
 }

@@ -220,9 +220,9 @@ impl ProtocolId {
 // TODO
 RpcLimits::new(1, *DATA_BY_HASH_REQUEST_MAX)
 }
-Protocol::AnnounceFile => RpcLimits::new(
-<FileAnnouncement as Encode>::ssz_fixed_len(),
-<FileAnnouncement as Encode>::ssz_fixed_len(),
+Protocol::AnswerFile => RpcLimits::new(
+<ShardedFile as Encode>::ssz_fixed_len(),
+<ShardedFile as Encode>::ssz_fixed_len(),
 ),
 Protocol::GetChunks => RpcLimits::new(
 <GetChunksRequest as Encode>::ssz_fixed_len(),

@@ -251,7 +251,7 @@ impl ProtocolId {
 <ZgsData as Encode>::ssz_fixed_len(),
 ),
 
-Protocol::AnnounceFile => RpcLimits::new(0, 0), // AnnounceFile request has no response
+Protocol::AnswerFile => RpcLimits::new(0, 0), // AnswerFile request has no response
 Protocol::GetChunks => RpcLimits::new(*CHUNKS_RESPONSE_MIN, *CHUNKS_RESPONSE_MAX),
 }
 }

@@ -334,7 +334,7 @@ pub enum InboundRequest {
 Goodbye(GoodbyeReason),
 Ping(Ping),
 DataByHash(DataByHashRequest),
-AnnounceFile(FileAnnouncement),
+AnswerFile(ShardedFile),
 GetChunks(GetChunksRequest),
 }
 

@@ -373,8 +373,8 @@ impl InboundRequest {
 Version::V1,
 Encoding::SSZSnappy,
 )],
-InboundRequest::AnnounceFile(_) => vec![ProtocolId::new(
-Protocol::AnnounceFile,
+InboundRequest::AnswerFile(_) => vec![ProtocolId::new(
+Protocol::AnswerFile,
 Version::V1,
 Encoding::SSZSnappy,
 )],

@@ -395,7 +395,7 @@ impl InboundRequest {
 InboundRequest::Goodbye(_) => 0,
 InboundRequest::DataByHash(req) => req.hashes.len() as u64,
 InboundRequest::Ping(_) => 1,
-InboundRequest::AnnounceFile(_) => 0,
+InboundRequest::AnswerFile(_) => 0,
 InboundRequest::GetChunks(_) => 1,
 }
 }

@@ -407,7 +407,7 @@ impl InboundRequest {
 InboundRequest::Goodbye(_) => Protocol::Goodbye,
 InboundRequest::Ping(_) => Protocol::Ping,
 InboundRequest::DataByHash(_) => Protocol::DataByHash,
-InboundRequest::AnnounceFile(_) => Protocol::AnnounceFile,
+InboundRequest::AnswerFile(_) => Protocol::AnswerFile,
 InboundRequest::GetChunks(_) => Protocol::GetChunks,
 }
 }

@@ -422,7 +422,7 @@ impl InboundRequest {
 InboundRequest::Status(_) => unreachable!(),
 InboundRequest::Goodbye(_) => unreachable!(),
 InboundRequest::Ping(_) => unreachable!(),
-InboundRequest::AnnounceFile(_) => unreachable!(),
+InboundRequest::AnswerFile(_) => unreachable!(),
 InboundRequest::GetChunks(_) => unreachable!(),
 }
 }

@@ -541,8 +541,8 @@ impl std::fmt::Display for InboundRequest {
 InboundRequest::DataByHash(req) => {
 write!(f, "Data by hash: {:?}", req)
 }
-InboundRequest::AnnounceFile(req) => {
-write!(f, "Announce File: {:?}", req)
+InboundRequest::AnswerFile(req) => {
+write!(f, "Answer File: {:?}", req)
 }
 InboundRequest::GetChunks(req) => {
 write!(f, "Get Chunks: {:?}", req)

@@ -54,6 +54,22 @@ pub struct Quota {
 max_tokens: u64,
 }
 
+impl Quota {
+pub fn one_every(period: Duration) -> Self {
+Self {
+replenish_all_every: period,
+max_tokens: 1,
+}
+}
+
+pub fn n_every(n: u64, period: Duration) -> Self {
+Self {
+replenish_all_every: period,
+max_tokens: n,
+}
+}
+}
+
 /// Manages rate limiting of requests per peer, with differentiated rates per protocol.
 pub struct RPCRateLimiter {
 /// Interval to prune peers for which their timer ran out.
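With the module made `pub` and the two constructors added, quotas can be built directly instead of filling `Quota` literals by hand; the builder's `one_every`/`n_every` methods now delegate to them (see the `@@ -122` hunk below). A small sketch using the same limits configured for this node's RPC:

```rust
use std::time::Duration;

// Sketch only: quotas equivalent to `.one_every(Protocol::Goodbye, ...)` and
// `.n_every(Protocol::GetChunks, 4096, ...)` in the RPC setup shown earlier.
let goodbye_quota = Quota::one_every(Duration::from_secs(10));
let get_chunks_quota = Quota::n_every(4096, Duration::from_secs(10));
```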
@@ -68,8 +84,8 @@ pub struct RPCRateLimiter {
 status_rl: Limiter<PeerId>,
 /// DataByHash rate limiter.
 data_by_hash_rl: Limiter<PeerId>,
-/// AnnounceFile rate limiter.
-announce_file_rl: Limiter<PeerId>,
+/// AnswerFile rate limiter.
+answer_file_rl: Limiter<PeerId>,
 /// GetChunks rate limiter.
 get_chunks_rl: Limiter<PeerId>,
 }

@@ -93,8 +109,8 @@ pub struct RPCRateLimiterBuilder {
 status_quota: Option<Quota>,
 /// Quota for the DataByHash protocol.
 data_by_hash_quota: Option<Quota>,
-/// Quota for the AnnounceFile protocol.
-announce_file_quota: Option<Quota>,
+/// Quota for the AnswerFile protocol.
+answer_file_quota: Option<Quota>,
 /// Quota for the GetChunks protocol.
 get_chunks_quota: Option<Quota>,
 }

@@ -113,7 +129,7 @@ impl RPCRateLimiterBuilder {
 Protocol::Status => self.status_quota = q,
 Protocol::Goodbye => self.goodbye_quota = q,
 Protocol::DataByHash => self.data_by_hash_quota = q,
-Protocol::AnnounceFile => self.announce_file_quota = q,
+Protocol::AnswerFile => self.answer_file_quota = q,
 Protocol::GetChunks => self.get_chunks_quota = q,
 }
 self

@@ -122,24 +138,12 @@ impl RPCRateLimiterBuilder {
 /// Allow one token every `time_period` to be used for this `protocol`.
 /// This produces a hard limit.
 pub fn one_every(self, protocol: Protocol, time_period: Duration) -> Self {
-self.set_quota(
-protocol,
-Quota {
-replenish_all_every: time_period,
-max_tokens: 1,
-},
-)
+self.set_quota(protocol, Quota::one_every(time_period))
 }
 
 /// Allow `n` tokens to be use used every `time_period` for this `protocol`.
 pub fn n_every(self, protocol: Protocol, n: u64, time_period: Duration) -> Self {
-self.set_quota(
-protocol,
-Quota {
-max_tokens: n,
-replenish_all_every: time_period,
-},
-)
+self.set_quota(protocol, Quota::n_every(n, time_period))
 }
 
 pub fn build(self) -> Result<RPCRateLimiter, &'static str> {

@@ -150,9 +154,9 @@ impl RPCRateLimiterBuilder {
 let data_by_hash_quota = self
 .data_by_hash_quota
 .ok_or("DataByHash quota not specified")?;
-let announce_file_quota = self
-.announce_file_quota
-.ok_or("AnnounceFile quota not specified")?;
+let answer_file_quota = self
+.answer_file_quota
+.ok_or("AnswerFile quota not specified")?;
 let get_chunks_quota = self
 .get_chunks_quota
 .ok_or("GetChunks quota not specified")?;

@@ -162,7 +166,7 @@ impl RPCRateLimiterBuilder {
 let status_rl = Limiter::from_quota(status_quota)?;
 let goodbye_rl = Limiter::from_quota(goodbye_quota)?;
 let data_by_hash_rl = Limiter::from_quota(data_by_hash_quota)?;
-let announce_file_rl = Limiter::from_quota(announce_file_quota)?;
+let answer_file_rl = Limiter::from_quota(answer_file_quota)?;
 let get_chunks_rl = Limiter::from_quota(get_chunks_quota)?;
 
 // check for peers to prune every 30 seconds, starting in 30 seconds

@@ -175,7 +179,7 @@ impl RPCRateLimiterBuilder {
 status_rl,
 goodbye_rl,
 data_by_hash_rl,
-announce_file_rl,
+answer_file_rl,
 get_chunks_rl,
 init_time: Instant::now(),
 })

@@ -220,7 +224,7 @@ impl RPCRateLimiter {
 Protocol::Status => &mut self.status_rl,
 Protocol::Goodbye => &mut self.goodbye_rl,
 Protocol::DataByHash => &mut self.data_by_hash_rl,
-Protocol::AnnounceFile => &mut self.announce_file_rl,
+Protocol::AnswerFile => &mut self.answer_file_rl,
 Protocol::GetChunks => &mut self.get_chunks_rl,
 };
 check(limiter)

@@ -243,7 +243,19 @@ impl<AppReqId: ReqId> Service<AppReqId> {
 // }
 // }
 
-for topic_kind in &crate::types::CORE_TOPICS {
+let mut topics = vec![
+GossipKind::NewFile,
+GossipKind::AskFile,
+GossipKind::FindFile,
+GossipKind::AnnounceFile,
+GossipKind::AnnounceShardConfig,
+];
+if config.find_chunks_enabled {
+topics.push(GossipKind::FindChunks);
+topics.push(GossipKind::AnnounceChunks);
+}
+
+for topic_kind in topics {
 if swarm.behaviour_mut().subscribe_kind(topic_kind.clone()) {
 subscribed_topics.push(topic_kind.clone());
 } else {

@@ -7,8 +7,7 @@ pub type Enr = discv5::enr::Enr<discv5::enr::CombinedKey>;
 
 pub use globals::NetworkGlobals;
 pub use pubsub::{
-AnnounceChunks, AnnounceFile, AnnounceShardConfig, FindChunks, FindFile, HasSignature, NewFile,
-PubsubMessage, SignedAnnounceChunks, SignedAnnounceFile, SignedAnnounceShardConfig,
-SignedMessage, SnappyTransform,
+AnnounceChunks, AnnounceFile, FindChunks, FindFile, HasSignature, PubsubMessage,
+SignedAnnounceFile, SignedMessage, SnappyTransform, TimedMessage,
 };
-pub use topics::{GossipEncoding, GossipKind, GossipTopic, CORE_TOPICS};
+pub use topics::{GossipEncoding, GossipKind, GossipTopic};

@@ -6,7 +6,7 @@ use libp2p::{
 gossipsub::{DataTransform, GossipsubMessage, RawGossipsubMessage},
 Multiaddr, PeerId,
 };
-use shared_types::TxID;
+use shared_types::{timestamp_now, ShardConfig, ShardedFile, TxID};
 use snap::raw::{decompress_len, Decoder, Encoder};
 use ssz::{Decode, Encode};
 use ssz_derive::{Decode, Encode};

@@ -114,23 +114,10 @@ impl ssz::Decode for WrappedPeerId {
 }
 }
 
-/// Published when file uploaded or completed to sync from other peers.
-#[derive(Debug, Clone, PartialEq, Eq, Encode, Decode)]
-pub struct NewFile {
-pub tx_id: TxID,
-pub num_shard: usize,
-pub shard_id: usize,
-pub timestamp: u32,
-}
-
 #[derive(Debug, Clone, PartialEq, Eq, Encode, Decode)]
 pub struct FindFile {
 pub tx_id: TxID,
-pub num_shard: usize,
-pub shard_id: usize,
-/// Indicates whether publish to neighboar nodes only.
-pub neighbors_only: bool,
-pub timestamp: u32,
+pub maybe_shard_config: Option<ShardConfig>,
 }
 
 #[derive(Debug, Clone, PartialEq, Eq, Encode, Decode)]

@@ -138,17 +125,14 @@ pub struct FindChunks {
 pub tx_id: TxID,
 pub index_start: u64, // inclusive
 pub index_end: u64, // exclusive
-pub timestamp: u32,
 }
 
 #[derive(Debug, Clone, PartialEq, Eq, Hash, Encode, Decode)]
 pub struct AnnounceFile {
 pub tx_ids: Vec<TxID>,
-pub num_shard: usize,
-pub shard_id: usize,
+pub shard_config: ShardConfig,
 pub peer_id: WrappedPeerId,
 pub at: WrappedMultiaddr,
-pub timestamp: u32,
 }
 
 #[derive(Debug, Clone, PartialEq, Eq, Hash, Encode, Decode)]

@@ -158,18 +142,31 @@ pub struct AnnounceChunks {
 pub index_end: u64, // exclusive
 pub peer_id: WrappedPeerId,
 pub at: WrappedMultiaddr,
-pub timestamp: u32,
 }
 
 #[derive(Debug, Clone, PartialEq, Eq, Hash, Encode, Decode)]
-pub struct AnnounceShardConfig {
-pub num_shard: usize,
-pub shard_id: usize,
-pub peer_id: WrappedPeerId,
-pub at: WrappedMultiaddr,
+pub struct TimedMessage<T: Encode + Decode> {
+pub inner: T,
 pub timestamp: u32,
 }
 
+impl<T: Encode + Decode> Deref for TimedMessage<T> {
+type Target = T;
+
+fn deref(&self) -> &Self::Target {
+&self.inner
+}
+}
+
+impl<T: Encode + Decode> From<T> for TimedMessage<T> {
+fn from(value: T) -> Self {
+Self {
+inner: value,
+timestamp: timestamp_now(),
+}
+}
+}
+
 #[derive(Debug, Clone, PartialEq, Eq, Hash, Encode, Decode)]
 pub struct SignedMessage<T: Encode + Decode> {
 pub inner: T,
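The generic `TimedMessage<T>` wrapper replaces the per-message `timestamp` fields: converting a payload into it stamps the current time via `timestamp_now()`, and `Deref` keeps the inner fields reachable. A minimal sketch, not part of the diff (only the wrapper and its impls come from the diff; the helper function is illustrative):

```rust
use shared_types::ShardedFile;

// Sketch only: wrap a payload for gossip; the From impl fills in `timestamp_now()`.
fn wrap_for_gossip(file: ShardedFile) -> TimedMessage<ShardedFile> {
    file.into() // inner fields stay accessible through Deref<Target = ShardedFile>
}
```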
@ -209,21 +206,26 @@ impl<T: Encode + Decode> HasSignature for SignedMessage<T> {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub type SignedAnnounceFile = SignedMessage<AnnounceFile>;
|
pub type SignedAnnounceFile = SignedMessage<TimedMessage<AnnounceFile>>;
|
||||||
pub type SignedAnnounceShardConfig = SignedMessage<AnnounceShardConfig>;
|
|
||||||
pub type SignedAnnounceChunks = SignedMessage<AnnounceChunks>;
|
|
||||||
|
|
||||||
type SignedAnnounceFiles = Vec<SignedAnnounceFile>;
|
type SignedAnnounceFiles = Vec<SignedAnnounceFile>;
|
||||||
|
|
||||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||||
pub enum PubsubMessage {
|
pub enum PubsubMessage {
|
||||||
ExampleMessage(u64),
|
ExampleMessage(u64),
|
||||||
NewFile(NewFile),
|
/// Published to neighbors when new file uploaded or completed to sync file.
|
||||||
FindFile(FindFile),
|
NewFile(TimedMessage<ShardedFile>),
|
||||||
FindChunks(FindChunks),
|
/// Published to neighbors for file sync, and answered by `AnswerFile` RPC.
|
||||||
|
AskFile(TimedMessage<ShardedFile>),
|
||||||
|
/// Published to network to find specified file.
|
||||||
|
FindFile(TimedMessage<FindFile>),
|
||||||
|
/// Published to network to find specified chunks.
|
||||||
|
FindChunks(TimedMessage<FindChunks>),
|
||||||
|
/// Published to network to announce file.
|
||||||
AnnounceFile(Vec<SignedAnnounceFile>),
|
AnnounceFile(Vec<SignedAnnounceFile>),
|
||||||
AnnounceShardConfig(SignedAnnounceShardConfig),
|
/// Published to network to announce shard config.
|
||||||
AnnounceChunks(SignedAnnounceChunks),
|
AnnounceShardConfig(TimedMessage<ShardConfig>),
|
||||||
|
/// Published to network to announce chunks.
|
||||||
|
AnnounceChunks(TimedMessage<AnnounceChunks>),
|
||||||
}
|
}
|
||||||
|
|
||||||
// Implements the `DataTransform` trait of gossipsub to employ snappy compression
|
// Implements the `DataTransform` trait of gossipsub to employ snappy compression
|
||||||
@ -298,6 +300,7 @@ impl PubsubMessage {
|
|||||||
match self {
|
match self {
|
||||||
PubsubMessage::ExampleMessage(_) => GossipKind::Example,
|
PubsubMessage::ExampleMessage(_) => GossipKind::Example,
|
||||||
PubsubMessage::NewFile(_) => GossipKind::NewFile,
|
PubsubMessage::NewFile(_) => GossipKind::NewFile,
|
||||||
|
PubsubMessage::AskFile(_) => GossipKind::AskFile,
|
||||||
PubsubMessage::FindFile(_) => GossipKind::FindFile,
|
PubsubMessage::FindFile(_) => GossipKind::FindFile,
|
||||||
PubsubMessage::FindChunks(_) => GossipKind::FindChunks,
|
PubsubMessage::FindChunks(_) => GossipKind::FindChunks,
|
||||||
PubsubMessage::AnnounceFile(_) => GossipKind::AnnounceFile,
|
PubsubMessage::AnnounceFile(_) => GossipKind::AnnounceFile,
|
||||||
@ -325,24 +328,31 @@ impl PubsubMessage {
|
|||||||
u64::from_ssz_bytes(data).map_err(|e| format!("{:?}", e))?,
|
u64::from_ssz_bytes(data).map_err(|e| format!("{:?}", e))?,
|
||||||
)),
|
)),
|
||||||
GossipKind::NewFile => Ok(PubsubMessage::NewFile(
|
GossipKind::NewFile => Ok(PubsubMessage::NewFile(
|
||||||
NewFile::from_ssz_bytes(data).map_err(|e| format!("{:?}", e))?,
|
TimedMessage::<ShardedFile>::from_ssz_bytes(data)
|
||||||
|
.map_err(|e| format!("{:?}", e))?,
|
||||||
|
)),
|
||||||
|
GossipKind::AskFile => Ok(PubsubMessage::AskFile(
|
||||||
|
TimedMessage::<ShardedFile>::from_ssz_bytes(data)
|
||||||
|
.map_err(|e| format!("{:?}", e))?,
|
||||||
)),
|
)),
|
||||||
GossipKind::FindFile => Ok(PubsubMessage::FindFile(
|
GossipKind::FindFile => Ok(PubsubMessage::FindFile(
|
||||||
FindFile::from_ssz_bytes(data).map_err(|e| format!("{:?}", e))?,
|
TimedMessage::<FindFile>::from_ssz_bytes(data)
|
||||||
|
.map_err(|e| format!("{:?}", e))?,
|
||||||
)),
|
)),
|
||||||
GossipKind::FindChunks => Ok(PubsubMessage::FindChunks(
|
GossipKind::FindChunks => Ok(PubsubMessage::FindChunks(
|
||||||
FindChunks::from_ssz_bytes(data).map_err(|e| format!("{:?}", e))?,
|
TimedMessage::<FindChunks>::from_ssz_bytes(data)
|
||||||
|
.map_err(|e| format!("{:?}", e))?,
|
||||||
)),
|
)),
|
||||||
GossipKind::AnnounceFile => Ok(PubsubMessage::AnnounceFile(
|
GossipKind::AnnounceFile => Ok(PubsubMessage::AnnounceFile(
|
||||||
SignedAnnounceFiles::from_ssz_bytes(data)
|
SignedAnnounceFiles::from_ssz_bytes(data)
|
||||||
.map_err(|e| format!("{:?}", e))?,
|
.map_err(|e| format!("{:?}", e))?,
|
||||||
)),
|
)),
|
||||||
GossipKind::AnnounceChunks => Ok(PubsubMessage::AnnounceChunks(
|
GossipKind::AnnounceChunks => Ok(PubsubMessage::AnnounceChunks(
|
||||||
SignedAnnounceChunks::from_ssz_bytes(data)
|
TimedMessage::<AnnounceChunks>::from_ssz_bytes(data)
|
||||||
.map_err(|e| format!("{:?}", e))?,
|
.map_err(|e| format!("{:?}", e))?,
|
||||||
)),
|
)),
|
||||||
GossipKind::AnnounceShardConfig => Ok(PubsubMessage::AnnounceShardConfig(
|
GossipKind::AnnounceShardConfig => Ok(PubsubMessage::AnnounceShardConfig(
|
||||||
SignedAnnounceShardConfig::from_ssz_bytes(data)
|
TimedMessage::<ShardConfig>::from_ssz_bytes(data)
|
||||||
.map_err(|e| format!("{:?}", e))?,
|
.map_err(|e| format!("{:?}", e))?,
|
||||||
)),
|
)),
|
||||||
}
|
}
|
||||||
@ -360,6 +370,7 @@ impl PubsubMessage {
|
|||||||
match &self {
|
match &self {
|
||||||
PubsubMessage::ExampleMessage(data) => data.as_ssz_bytes(),
|
PubsubMessage::ExampleMessage(data) => data.as_ssz_bytes(),
|
||||||
PubsubMessage::NewFile(data) => data.as_ssz_bytes(),
|
PubsubMessage::NewFile(data) => data.as_ssz_bytes(),
|
||||||
|
PubsubMessage::AskFile(data) => data.as_ssz_bytes(),
|
||||||
PubsubMessage::FindFile(data) => data.as_ssz_bytes(),
|
PubsubMessage::FindFile(data) => data.as_ssz_bytes(),
|
||||||
PubsubMessage::FindChunks(data) => data.as_ssz_bytes(),
|
PubsubMessage::FindChunks(data) => data.as_ssz_bytes(),
|
||||||
PubsubMessage::AnnounceFile(data) => data.as_ssz_bytes(),
|
PubsubMessage::AnnounceFile(data) => data.as_ssz_bytes(),
|
||||||
@ -378,6 +389,9 @@ impl std::fmt::Display for PubsubMessage {
|
|||||||
PubsubMessage::NewFile(msg) => {
|
PubsubMessage::NewFile(msg) => {
|
||||||
write!(f, "NewFile message: {:?}", msg)
|
write!(f, "NewFile message: {:?}", msg)
|
||||||
}
|
}
|
||||||
|
PubsubMessage::AskFile(msg) => {
|
||||||
|
write!(f, "AskFile message: {:?}", msg)
|
||||||
|
}
|
||||||
PubsubMessage::FindFile(msg) => {
|
PubsubMessage::FindFile(msg) => {
|
||||||
write!(f, "FindFile message: {:?}", msg)
|
write!(f, "FindFile message: {:?}", msg)
|
||||||
}
|
}
|
||||||
|
@ -8,20 +8,13 @@ use strum::AsRefStr;
 pub const TOPIC_PREFIX: &str = "eth2";
 pub const SSZ_SNAPPY_ENCODING_POSTFIX: &str = "ssz_snappy";
 pub const EXAMPLE_TOPIC: &str = "example";
-pub const NEW_FILE_TOPIC: &str = "new_file";
-pub const FIND_FILE_TOPIC: &str = "find_file";
-pub const FIND_CHUNKS_TOPIC: &str = "find_chunks";
-pub const ANNOUNCE_FILE_TOPIC: &str = "announce_file";
-pub const ANNOUNCE_CHUNKS_TOPIC: &str = "announce_chunks";
-pub const ANNOUNCE_SHARD_CONFIG_TOPIC: &str = "announce_shard_config";
-pub const CORE_TOPICS: [GossipKind; 5] = [
-    GossipKind::NewFile,
-    GossipKind::FindFile,
-    GossipKind::FindChunks,
-    GossipKind::AnnounceFile,
-    GossipKind::AnnounceChunks,
-];
+pub const NEW_FILE_TOPIC: &str = "new_file_v2";
+pub const ASK_FILE_TOPIC: &str = "ask_file";
+pub const FIND_FILE_TOPIC: &str = "find_file_v2";
+pub const FIND_CHUNKS_TOPIC: &str = "find_chunks_v2";
+pub const ANNOUNCE_FILE_TOPIC: &str = "announce_file_v2";
+pub const ANNOUNCE_CHUNKS_TOPIC: &str = "announce_chunks_v2";
+pub const ANNOUNCE_SHARD_CONFIG_TOPIC: &str = "announce_shard_config_v2";

 /// A gossipsub topic which encapsulates the type of messages that should be sent and received over
 /// the pubsub protocol and the way the messages should be encoded.
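For orientation, an illustration that is not part of the diff: the kind constants above are joined with TOPIC_PREFIX and SSZ_SNAPPY_ENCODING_POSTFIX to form the wire-level topic id (the exact join format is an assumption based on the topic_parts[2] lookup further down this file), so the renamed constants move upgraded nodes onto fresh "_v2" topics that older peers never subscribe to:

// Illustrative only: how a full gossipsub topic string is assumed to be composed.
fn topic_string(kind: &str) -> String {
    format!("/{}/{}/{}", "eth2", kind, "ssz_snappy")
}

fn main() {
    // e.g. the FindFile topic after this change:
    assert_eq!(topic_string("find_file_v2"), "/eth2/find_file_v2/ssz_snappy");
}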
@ -40,6 +33,7 @@ pub struct GossipTopic {
 pub enum GossipKind {
     Example,
     NewFile,
+    AskFile,
     FindFile,
     FindChunks,
     AnnounceFile,
@ -81,6 +75,7 @@ impl GossipTopic {
         let kind = match topic_parts[2] {
             EXAMPLE_TOPIC => GossipKind::Example,
             NEW_FILE_TOPIC => GossipKind::NewFile,
+            ASK_FILE_TOPIC => GossipKind::AskFile,
             FIND_FILE_TOPIC => GossipKind::FindFile,
             FIND_CHUNKS_TOPIC => GossipKind::FindChunks,
             ANNOUNCE_FILE_TOPIC => GossipKind::AnnounceFile,
@ -111,6 +106,7 @@ impl From<GossipTopic> for String {
         let kind = match topic.kind {
             GossipKind::Example => EXAMPLE_TOPIC,
             GossipKind::NewFile => NEW_FILE_TOPIC,
+            GossipKind::AskFile => ASK_FILE_TOPIC,
             GossipKind::FindFile => FIND_FILE_TOPIC,
             GossipKind::FindChunks => FIND_CHUNKS_TOPIC,
             GossipKind::AnnounceFile => ANNOUNCE_FILE_TOPIC,
@ -131,6 +127,7 @@ impl std::fmt::Display for GossipTopic {
         let kind = match self.kind {
             GossipKind::Example => EXAMPLE_TOPIC,
             GossipKind::NewFile => NEW_FILE_TOPIC,
+            GossipKind::AskFile => ASK_FILE_TOPIC,
             GossipKind::FindFile => FIND_FILE_TOPIC,
             GossipKind::FindChunks => FIND_CHUNKS_TOPIC,
             GossipKind::AnnounceFile => ANNOUNCE_FILE_TOPIC,
@ -54,7 +54,7 @@ impl Service {
     struct Ev(PeerManagerEvent);
     impl From<void::Void> for Ev {
         fn from(_: void::Void) -> Self {
-            unreachable!("No events are emmited")
+            unreachable!("No events are emitted")
         }
     }
     impl From<PeerManagerEvent> for Ev {
@ -23,14 +23,10 @@ fn test_status_rpc() {
         let (mut sender, mut receiver) = common::build_node_pair(Arc::downgrade(&rt)).await;

         // Dummy STATUS RPC message
-        let rpc_request = Request::Status(StatusMessage {
-            data: Default::default(),
-        });
+        let rpc_request = Request::Status(Default::default());

         // Dummy STATUS RPC message
-        let rpc_response = Response::Status(StatusMessage {
-            data: Default::default(),
-        });
+        let rpc_response = Response::Status(Default::default());

         // build the sender future
         let sender_future = async {
@ -276,7 +276,7 @@ impl Pruner {
     }
 }

-async fn get_shard_config(store: &Store) -> Result<Option<ShardConfig>> {
+pub async fn get_shard_config(store: &Store) -> Result<Option<ShardConfig>> {
     store
         .get_config_decoded(&SHARD_CONFIG_KEY, DATA_DB_KEY)
         .await
@ -5,6 +5,7 @@ mod batcher;
 mod libp2p_event_handler;
 mod metrics;
 mod peer_manager;
+mod rate_limit;
 mod service;

 use duration_str::deserialize_duration;
@ -5,19 +5,18 @@ use std::{ops::Neg, sync::Arc};
 use chunk_pool::ChunkPoolMessage;
 use file_location_cache::FileLocationCache;
 use network::multiaddr::Protocol;
-use network::rpc::methods::FileAnnouncement;
-use network::types::{AnnounceShardConfig, NewFile, SignedAnnounceShardConfig};
+use network::types::TimedMessage;
 use network::{
     rpc::StatusMessage,
     types::{
-        AnnounceChunks, AnnounceFile, FindChunks, FindFile, HasSignature, SignedAnnounceChunks,
-        SignedAnnounceFile, SignedMessage,
+        AnnounceChunks, AnnounceFile, FindChunks, FindFile, HasSignature, SignedAnnounceFile,
+        SignedMessage,
     },
     Keypair, MessageAcceptance, MessageId, NetworkGlobals, NetworkMessage, PeerId, PeerRequestId,
     PublicKey, PubsubMessage, Request, RequestId, Response,
 };
 use network::{Multiaddr, NetworkSender, PeerAction, ReportSource};
-use shared_types::{bytes_to_chunks, timestamp_now, NetworkIdentity, TxID};
+use shared_types::{bytes_to_chunks, timestamp_now, NetworkIdentity, ShardedFile, TxID};
 use storage::config::ShardConfig;
 use storage_async::Store;
 use sync::{SyncMessage, SyncSender};
@ -25,36 +24,64 @@ use tokio::sync::mpsc::UnboundedSender;
 use tokio::sync::{mpsc, RwLock};

 use crate::batcher::Batcher;
-use crate::metrics;
+use crate::metrics::{self, PubsubMsgHandleMetrics};
 use crate::peer_manager::PeerManager;
 use crate::Config;

 lazy_static::lazy_static! {
-    /// Timeout to publish NewFile message to neighbor nodes.
-    pub static ref NEW_FILE_TIMEOUT: chrono::Duration = chrono::Duration::seconds(30);
-    /// Timeout to publish FindFile message to neighbor nodes.
-    pub static ref FIND_FILE_NEIGHBORS_TIMEOUT: chrono::Duration = chrono::Duration::seconds(30);
-    /// Timeout to publish FindFile message in the whole network.
-    pub static ref FIND_FILE_TIMEOUT: chrono::Duration = chrono::Duration::minutes(5);
-    pub static ref ANNOUNCE_FILE_TIMEOUT: chrono::Duration = chrono::Duration::minutes(5);
-    pub static ref ANNOUNCE_SHARD_CONFIG_TIMEOUT: chrono::Duration = chrono::Duration::minutes(5);
+    /// Timeout to publish message to neighbor nodes.
+    pub static ref PUBSUB_TIMEOUT_NEIGHBORS: chrono::Duration = chrono::Duration::seconds(30);
+    /// Timeout to publish message to network.
+    pub static ref PUBSUB_TIMEOUT_NETWORK: chrono::Duration = chrono::Duration::minutes(5);
     pub static ref TOLERABLE_DRIFT: chrono::Duration = chrono::Duration::seconds(10);
 }

-fn duration_since(timestamp: u32, metric: Arc<dyn ::metrics::Histogram>) -> chrono::Duration {
+fn duration_since(timestamp: u32, latency_ms: Arc<dyn ::metrics::Histogram>) -> chrono::Duration {
     let timestamp = i64::from(timestamp);
     let timestamp = chrono::DateTime::from_timestamp(timestamp, 0).expect("should fit");
     let now = chrono::Utc::now();
     let duration = now.signed_duration_since(timestamp);

-    let num_secs = duration.num_seconds();
-    if num_secs > 0 {
-        metric.update(num_secs as u64);
+    let num_millis = duration.num_milliseconds();
+    if num_millis > 0 {
+        latency_ms.update(num_millis as u64);
     }

     duration
 }

+impl PubsubMsgHandleMetrics {
+    pub fn verify_timestamp(
+        &self,
+        from: PeerId,
+        timestamp: u32,
+        timeout: chrono::Duration,
+        sender: Option<&NetworkSender>,
+    ) -> bool {
+        self.qps.mark(1);
+
+        let d = duration_since(timestamp, self.latency_ms.clone());
+        if d >= TOLERABLE_DRIFT.neg() && d <= timeout {
+            return true;
+        }
+
+        debug!(%from, ?timestamp, ?d, topic=%self.topic_name, "Ignore out of date pubsub message");
+
+        self.timeout.mark(1);
+
+        if let Some(sender) = sender {
+            let _ = sender.send(NetworkMessage::ReportPeer {
+                peer_id: from,
+                action: PeerAction::LowToleranceError,
+                source: ReportSource::Gossipsub,
+                msg: "Received out of date pubsub message",
+            });
+        }
+
+        false
+    }
+}

 fn peer_id_to_public_key(peer_id: &PeerId) -> Result<PublicKey, String> {
     // A libp2p peer id byte representation should be 2 length bytes + 4 protobuf bytes + compressed pk bytes
     // if generated from a PublicKey with Identity multihash.
@ -147,7 +174,7 @@ impl Libp2pEventHandler {
         }
     }

-    fn send_to_network(&self, message: NetworkMessage) {
+    pub fn send_to_network(&self, message: NetworkMessage) {
         self.network_send.send(message).unwrap_or_else(|err| {
             warn!(%err, "Could not send message to the network service");
         });
@ -172,8 +199,11 @@ impl Libp2pEventHandler {
     }

     pub fn send_status(&self, peer_id: PeerId) {
+        let shard_config = self.store.get_store().get_shard_config();
         let status_message = StatusMessage {
             data: self.network_globals.network_id(),
+            num_shard: shard_config.num_shard,
+            shard_id: shard_config.shard_id,
         };
         debug!(%peer_id, ?status_message, "Sending Status request");

@ -191,7 +221,6 @@ impl Libp2pEventHandler {

         if outgoing {
             self.send_status(peer_id);
-            self.send_to_sync(SyncMessage::PeerConnected { peer_id });
             metrics::LIBP2P_HANDLE_PEER_CONNECTED_OUTGOING.mark(1);
         } else {
             metrics::LIBP2P_HANDLE_PEER_CONNECTED_INCOMING.mark(1);
@ -225,25 +254,19 @@ impl Libp2pEventHandler {
                 });
                 metrics::LIBP2P_HANDLE_GET_CHUNKS_REQUEST.mark(1);
             }
-            Request::AnnounceFile(announcement) => {
-                match ShardConfig::new(announcement.shard_id, announcement.num_shard) {
-                    Ok(v) => {
-                        self.file_location_cache.insert_peer_config(peer_id, v);
-
-                        self.send_to_sync(SyncMessage::AnnounceFile {
-                            peer_id,
-                            request_id,
-                            announcement,
-                        });
-                    }
-                    Err(_) => self.send_to_network(NetworkMessage::ReportPeer {
-                        peer_id,
-                        action: PeerAction::Fatal,
-                        source: ReportSource::RPC,
-                        msg: "Invalid shard config in AnnounceFile RPC message",
-                    }),
-                }
-            }
+            Request::AnswerFile(file) => match ShardConfig::try_from(file.shard_config) {
+                Ok(v) => {
+                    self.file_location_cache.insert_peer_config(peer_id, v);
+
+                    self.send_to_sync(SyncMessage::AnswerFile { peer_id, file });
+                }
+                Err(_) => self.send_to_network(NetworkMessage::ReportPeer {
+                    peer_id,
+                    action: PeerAction::Fatal,
+                    source: ReportSource::RPC,
+                    msg: "Invalid shard config in AnswerFile RPC message",
+                }),
+            },
             Request::DataByHash(_) => {
                 // ignore
             }
@ -254,8 +277,11 @@ impl Libp2pEventHandler {
             debug!(%peer_id, ?status, "Received Status request");

             let network_id = self.network_globals.network_id();
+            let shard_config = self.store.get_store().get_shard_config();
             let status_message = StatusMessage {
                 data: network_id.clone(),
+                num_shard: shard_config.num_shard,
+                shard_id: shard_config.shard_id,
             };
             debug!(%peer_id, ?status_message, "Sending Status response");

@ -264,12 +290,18 @@ impl Libp2pEventHandler {
                 id: request_id,
                 response: Response::Status(status_message),
             });
-            self.on_status_message(peer_id, status, network_id);
+            if self.verify_status_message(peer_id, status, network_id, &shard_config) {
+                self.send_to_sync(SyncMessage::PeerConnected { peer_id });
+            }
         }

     fn on_status_response(&self, peer_id: PeerId, status: StatusMessage) {
         let network_id = self.network_globals.network_id();
-        self.on_status_message(peer_id, status, network_id);
+        let shard_config = self.store.get_store().get_shard_config();
+        if self.verify_status_message(peer_id, status, network_id, &shard_config) {
+            self.send_to_sync(SyncMessage::PeerConnected { peer_id });
+        }
     }

     pub async fn on_rpc_response(
|
|||||||
|
|
||||||
match message {
|
match message {
|
||||||
PubsubMessage::ExampleMessage(_) => MessageAcceptance::Ignore,
|
PubsubMessage::ExampleMessage(_) => MessageAcceptance::Ignore,
|
||||||
PubsubMessage::NewFile(msg) => {
|
PubsubMessage::NewFile(msg) => self.on_new_file(propagation_source, msg).await,
|
||||||
metrics::LIBP2P_HANDLE_PUBSUB_NEW_FILE.mark(1);
|
PubsubMessage::AskFile(msg) => self.on_ask_file(propagation_source, msg).await,
|
||||||
self.on_new_file(propagation_source, msg).await
|
PubsubMessage::FindFile(msg) => self.on_find_file(propagation_source, msg).await,
|
||||||
}
|
PubsubMessage::FindChunks(msg) => self.on_find_chunks(propagation_source, msg).await,
|
||||||
PubsubMessage::FindFile(msg) => {
|
|
||||||
metrics::LIBP2P_HANDLE_PUBSUB_FIND_FILE.mark(1);
|
|
||||||
self.on_find_file(propagation_source, msg).await
|
|
||||||
}
|
|
||||||
PubsubMessage::FindChunks(msg) => {
|
|
||||||
metrics::LIBP2P_HANDLE_PUBSUB_FIND_CHUNKS.mark(1);
|
|
||||||
self.on_find_chunks(msg).await
|
|
||||||
}
|
|
||||||
PubsubMessage::AnnounceFile(msgs) => {
|
PubsubMessage::AnnounceFile(msgs) => {
|
||||||
metrics::LIBP2P_HANDLE_PUBSUB_ANNOUNCE_FILE.mark(1);
|
metrics::LIBP2P_HANDLE_PUBSUB_ANNOUNCE_FILE.mark(1);
|
||||||
|
|
||||||
@ -366,38 +390,27 @@ impl Libp2pEventHandler {
|
|||||||
|
|
||||||
MessageAcceptance::Accept
|
MessageAcceptance::Accept
|
||||||
}
|
}
|
||||||
PubsubMessage::AnnounceChunks(msg) => {
|
PubsubMessage::AnnounceChunks(msg) => self.on_announce_chunks(propagation_source, msg),
|
||||||
metrics::LIBP2P_HANDLE_PUBSUB_ANNOUNCE_CHUNKS.mark(1);
|
|
||||||
self.on_announce_chunks(propagation_source, msg)
|
|
||||||
}
|
|
||||||
PubsubMessage::AnnounceShardConfig(msg) => {
|
PubsubMessage::AnnounceShardConfig(msg) => {
|
||||||
metrics::LIBP2P_HANDLE_PUBSUB_ANNOUNCE_SHARD.mark(1);
|
self.on_announce_shard_config(propagation_source, source, msg)
|
||||||
self.on_announce_shard_config(propagation_source, msg)
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Handle NewFile pubsub message `msg` that published by `from` peer.
|
/// Handle NewFile pubsub message `msg` that published by `from` peer.
|
||||||
async fn on_new_file(&self, from: PeerId, msg: NewFile) -> MessageAcceptance {
|
async fn on_new_file(&self, from: PeerId, msg: TimedMessage<ShardedFile>) -> MessageAcceptance {
|
||||||
// verify timestamp
|
// verify timestamp
|
||||||
let d = duration_since(
|
if !metrics::LIBP2P_HANDLE_PUBSUB_NEW_FILE.verify_timestamp(
|
||||||
|
from,
|
||||||
msg.timestamp,
|
msg.timestamp,
|
||||||
metrics::LIBP2P_HANDLE_PUBSUB_NEW_FILE_LATENCY.clone(),
|
*PUBSUB_TIMEOUT_NEIGHBORS,
|
||||||
);
|
Some(&self.network_send),
|
||||||
if d < TOLERABLE_DRIFT.neg() || d > *NEW_FILE_TIMEOUT {
|
) {
|
||||||
debug!(?d, ?msg, "Invalid timestamp, ignoring NewFile message");
|
|
||||||
metrics::LIBP2P_HANDLE_PUBSUB_NEW_FILE_TIMEOUT.mark(1);
|
|
||||||
self.send_to_network(NetworkMessage::ReportPeer {
|
|
||||||
peer_id: from,
|
|
||||||
action: PeerAction::LowToleranceError,
|
|
||||||
source: ReportSource::Gossipsub,
|
|
||||||
msg: "Received out of date NewFile message",
|
|
||||||
});
|
|
||||||
return MessageAcceptance::Ignore;
|
return MessageAcceptance::Ignore;
|
||||||
}
|
}
|
||||||
|
|
||||||
// verify announced shard config
|
// verify announced shard config
|
||||||
let announced_shard_config = match ShardConfig::new(msg.shard_id, msg.num_shard) {
|
let announced_shard_config = match ShardConfig::try_from(msg.shard_config) {
|
||||||
Ok(v) => v,
|
Ok(v) => v,
|
||||||
Err(_) => return MessageAcceptance::Reject,
|
Err(_) => return MessageAcceptance::Reject,
|
||||||
};
|
};
|
||||||
@ -408,28 +421,65 @@ impl Libp2pEventHandler {
|
|||||||
return MessageAcceptance::Ignore;
|
return MessageAcceptance::Ignore;
|
||||||
}
|
}
|
||||||
|
|
||||||
// ignore if already exists
|
// ignore if already pruned or exists
|
||||||
match self.store.check_tx_completed(msg.tx_id.seq).await {
|
match self.store.get_store().get_tx_status(msg.tx_id.seq) {
|
||||||
Ok(true) => return MessageAcceptance::Ignore,
|
Ok(Some(_)) => return MessageAcceptance::Ignore,
|
||||||
Ok(false) => {}
|
Ok(None) => {}
|
||||||
Err(err) => {
|
Err(err) => {
|
||||||
warn!(?err, tx_seq = %msg.tx_id.seq, "Failed to check tx completed");
|
warn!(?err, tx_seq = %msg.tx_id.seq, "Failed to get tx status");
|
||||||
return MessageAcceptance::Ignore;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// ignore if already pruned
|
|
||||||
match self.store.check_tx_pruned(msg.tx_id.seq).await {
|
|
||||||
Ok(true) => return MessageAcceptance::Ignore,
|
|
||||||
Ok(false) => {}
|
|
||||||
Err(err) => {
|
|
||||||
warn!(?err, tx_seq = %msg.tx_id.seq, "Failed to check tx pruned");
|
|
||||||
return MessageAcceptance::Ignore;
|
return MessageAcceptance::Ignore;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// notify sync layer to handle in advance
|
// notify sync layer to handle in advance
|
||||||
self.send_to_sync(SyncMessage::NewFile { from, msg });
|
self.send_to_sync(SyncMessage::NewFile {
|
||||||
|
from,
|
||||||
|
file: msg.inner,
|
||||||
|
});
|
||||||
|
|
||||||
|
MessageAcceptance::Ignore
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn on_ask_file(&self, from: PeerId, msg: TimedMessage<ShardedFile>) -> MessageAcceptance {
|
||||||
|
// verify timestamp
|
||||||
|
if !metrics::LIBP2P_HANDLE_PUBSUB_ASK_FILE.verify_timestamp(
|
||||||
|
from,
|
||||||
|
msg.timestamp,
|
||||||
|
*PUBSUB_TIMEOUT_NEIGHBORS,
|
||||||
|
Some(&self.network_send),
|
||||||
|
) {
|
||||||
|
return MessageAcceptance::Ignore;
|
||||||
|
}
|
||||||
|
|
||||||
|
// verify announced shard config
|
||||||
|
let announced_shard_config = match ShardConfig::try_from(msg.shard_config) {
|
||||||
|
Ok(v) => v,
|
||||||
|
Err(_) => return MessageAcceptance::Reject,
|
||||||
|
};
|
||||||
|
|
||||||
|
// handle on shard config mismatch
|
||||||
|
let my_shard_config = self.store.get_store().get_shard_config();
|
||||||
|
if !my_shard_config.intersect(&announced_shard_config) {
|
||||||
|
return MessageAcceptance::Ignore;
|
||||||
|
}
|
||||||
|
|
||||||
|
// check if we have it
|
||||||
|
if matches!(self.store.check_tx_completed(msg.tx_id.seq).await, Ok(true)) {
|
||||||
|
if let Ok(Some(tx)) = self.store.get_tx_by_seq_number(msg.tx_id.seq).await {
|
||||||
|
if tx.id() == msg.tx_id {
|
||||||
|
trace!(?msg.tx_id, "Found file locally, responding to FindFile query");
|
||||||
|
|
||||||
|
self.send_to_network(NetworkMessage::SendRequest {
|
||||||
|
peer_id: from,
|
||||||
|
request: Request::AnswerFile(ShardedFile {
|
||||||
|
tx_id: msg.tx_id,
|
||||||
|
shard_config: my_shard_config.into(),
|
||||||
|
}),
|
||||||
|
request_id: RequestId::Router(Instant::now()),
|
||||||
|
});
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
MessageAcceptance::Ignore
|
MessageAcceptance::Ignore
|
||||||
}
|
}
|
||||||
@ -520,12 +570,13 @@ impl Libp2pEventHandler {
         let timestamp = timestamp_now();
         let shard_config = self.store.get_store().get_shard_config();

-        let msg = AnnounceFile {
-            tx_ids,
-            num_shard: shard_config.num_shard,
-            shard_id: shard_config.shard_id,
-            peer_id: peer_id.into(),
-            at: addr.into(),
+        let msg = TimedMessage {
+            inner: AnnounceFile {
+                tx_ids,
+                shard_config: shard_config.into(),
+                peer_id: peer_id.into(),
+                at: addr.into(),
+            },
             timestamp,
         };

@ -542,110 +593,44 @@ impl Libp2pEventHandler {
         Some(signed)
     }

-    pub async fn construct_announce_shard_config_message(
-        &self,
-        shard_config: ShardConfig,
-    ) -> Option<PubsubMessage> {
-        let peer_id = *self.network_globals.peer_id.read();
-        let addr = self.construct_announced_ip().await?;
-        let timestamp = timestamp_now();
-
-        let msg = AnnounceShardConfig {
-            num_shard: shard_config.num_shard,
-            shard_id: shard_config.shard_id,
-            peer_id: peer_id.into(),
-            at: addr.into(),
-            timestamp,
-        };
-
-        let mut signed = match SignedMessage::sign_message(msg, &self.local_keypair) {
-            Ok(signed) => signed,
-            Err(e) => {
-                error!(%e, "Failed to sign AnnounceShardConfig message");
-                return None;
-            }
-        };
-
-        signed.resend_timestamp = timestamp;
-
-        Some(PubsubMessage::AnnounceShardConfig(signed))
-    }
-
-    async fn on_find_file(&self, from: PeerId, msg: FindFile) -> MessageAcceptance {
-        let FindFile {
-            tx_id, timestamp, ..
-        } = msg;
-
+    async fn on_find_file(&self, from: PeerId, msg: TimedMessage<FindFile>) -> MessageAcceptance {
         // verify timestamp
-        let d = duration_since(
-            timestamp,
-            metrics::LIBP2P_HANDLE_PUBSUB_FIND_FILE_LATENCY.clone(),
-        );
-        let timeout = if msg.neighbors_only {
-            *FIND_FILE_NEIGHBORS_TIMEOUT
-        } else {
-            *FIND_FILE_TIMEOUT
-        };
-        if d < TOLERABLE_DRIFT.neg() || d > timeout {
-            debug!(%timestamp, ?d, "Invalid timestamp, ignoring FindFile message");
-            metrics::LIBP2P_HANDLE_PUBSUB_FIND_FILE_TIMEOUT.mark(1);
-            if msg.neighbors_only {
-                self.send_to_network(NetworkMessage::ReportPeer {
-                    peer_id: from,
-                    action: PeerAction::LowToleranceError,
-                    source: ReportSource::Gossipsub,
-                    msg: "Received out of date FindFile message",
-                });
-            }
+        if !metrics::LIBP2P_HANDLE_PUBSUB_FIND_FILE.verify_timestamp(
+            from,
+            msg.timestamp,
+            *PUBSUB_TIMEOUT_NETWORK,
+            None,
+        ) {
             return MessageAcceptance::Ignore;
         }

-        // verify announced shard config
-        let announced_shard_config = match ShardConfig::new(msg.shard_id, msg.num_shard) {
-            Ok(v) => v,
-            Err(_) => return MessageAcceptance::Reject,
-        };
-
-        // handle on shard config mismatch
-        let my_shard_config = self.store.get_store().get_shard_config();
-        if !my_shard_config.intersect(&announced_shard_config) {
-            return if msg.neighbors_only {
-                MessageAcceptance::Ignore
-            } else {
-                MessageAcceptance::Accept
-            };
-        }
+        // verify announced shard config if specified
+        if let Some(shard_config) = msg.maybe_shard_config {
+            let announced_shard_config = match ShardConfig::try_from(shard_config) {
+                Ok(v) => v,
+                Err(_) => return MessageAcceptance::Reject,
+            };
+
+            // forward FIND_FILE to the network if shard config mismatch
+            let my_shard_config = self.store.get_store().get_shard_config();
+            if !my_shard_config.intersect(&announced_shard_config) {
+                return MessageAcceptance::Accept;
+            }
+        }

         // check if we have it
+        let tx_id = msg.tx_id;
         if matches!(self.store.check_tx_completed(tx_id.seq).await, Ok(true)) {
             if let Ok(Some(tx)) = self.store.get_tx_by_seq_number(tx_id.seq).await {
                 if tx.id() == tx_id {
                     trace!(?tx_id, "Found file locally, responding to FindFile query");
-
-                    if msg.neighbors_only {
-                        // announce file via RPC to avoid flooding pubsub message
-                        self.send_to_network(NetworkMessage::SendRequest {
-                            peer_id: from,
-                            request: Request::AnnounceFile(FileAnnouncement {
-                                tx_id,
-                                num_shard: my_shard_config.num_shard,
-                                shard_id: my_shard_config.shard_id,
-                            }),
-                            request_id: RequestId::Router(Instant::now()),
-                        });
-                    } else if self.publish_file(tx_id).await.is_some() {
-                        metrics::LIBP2P_HANDLE_PUBSUB_FIND_FILE_STORE.mark(1);
-                        return MessageAcceptance::Ignore;
-                    }
+                    self.publish_file(tx_id).await;
+                    metrics::LIBP2P_HANDLE_PUBSUB_FIND_FILE_STORE.mark(1);
+                    return MessageAcceptance::Ignore;
                 }
             }
         }

-        // do not forward to whole network if only find file from neighbor nodes
-        if msg.neighbors_only {
-            return MessageAcceptance::Ignore;
-        }
-
         // try from cache
         if let Some(mut msg) = self.file_location_cache.get_one(tx_id) {
             trace!(?tx_id, "Found file in cache, responding to FindFile query");
@ -671,7 +656,6 @@ impl Libp2pEventHandler {
     ) -> Option<PubsubMessage> {
         let peer_id = *self.network_globals.peer_id.read();
         let addr = self.construct_announced_ip().await?;
-        let timestamp = timestamp_now();

         let msg = AnnounceChunks {
             tx_id,
@ -679,39 +663,32 @@ impl Libp2pEventHandler {
             index_end,
             peer_id: peer_id.into(),
             at: addr.into(),
-            timestamp,
         };

-        let mut signed = match SignedMessage::sign_message(msg, &self.local_keypair) {
-            Ok(signed) => signed,
-            Err(e) => {
-                error!(%tx_id.seq, %e, "Failed to sign AnnounceChunks message");
-                return None;
-            }
-        };
-
-        signed.resend_timestamp = timestamp;
-
-        Some(PubsubMessage::AnnounceChunks(signed))
+        Some(PubsubMessage::AnnounceChunks(msg.into()))
     }

-    async fn on_find_chunks(&self, msg: FindChunks) -> MessageAcceptance {
+    async fn on_find_chunks(
+        &self,
+        propagation_source: PeerId,
+        msg: TimedMessage<FindChunks>,
+    ) -> MessageAcceptance {
+        // verify timestamp
+        if !metrics::LIBP2P_HANDLE_PUBSUB_FIND_CHUNKS.verify_timestamp(
+            propagation_source,
+            msg.timestamp,
+            *PUBSUB_TIMEOUT_NETWORK,
+            None,
+        ) {
+            return MessageAcceptance::Ignore;
+        }
+
         // validate message
         if msg.index_start >= msg.index_end {
             debug!(?msg, "Invalid chunk index range");
             return MessageAcceptance::Reject;
         }

-        // verify timestamp
-        let d = duration_since(
-            msg.timestamp,
-            metrics::LIBP2P_HANDLE_PUBSUB_FIND_CHUNKS_LATENCY.clone(),
-        );
-        if d < TOLERABLE_DRIFT.neg() || d > *FIND_FILE_TIMEOUT {
-            debug!(%msg.timestamp, ?d, "Invalid timestamp, ignoring FindChunks message");
-            return MessageAcceptance::Ignore;
-        }
-
         // check if we have specified chunks even file not finalized yet
         // validate end index
         let tx = match self.store.get_tx_by_seq_number(msg.tx_id.seq).await {
@ -824,7 +801,7 @@ impl Libp2pEventHandler {
         }

         // verify announced shard config
-        let announced_shard_config = match ShardConfig::new(msg.shard_id, msg.num_shard) {
+        let announced_shard_config = match ShardConfig::try_from(msg.shard_config) {
             Ok(v) => v,
             Err(_) => return MessageAcceptance::Reject,
         };
@ -834,8 +811,8 @@ impl Libp2pEventHandler {
             msg.resend_timestamp,
             metrics::LIBP2P_HANDLE_PUBSUB_ANNOUNCE_FILE_LATENCY.clone(),
         );
-        if d < TOLERABLE_DRIFT.neg() || d > *ANNOUNCE_FILE_TIMEOUT {
-            debug!(%msg.resend_timestamp, ?d, "Invalid resend timestamp, ignoring AnnounceFile message");
+        if d < TOLERABLE_DRIFT.neg() || d > *PUBSUB_TIMEOUT_NETWORK {
+            debug!(?d, %propagation_source, "Invalid resend timestamp, ignoring AnnounceFile message");
             metrics::LIBP2P_HANDLE_PUBSUB_ANNOUNCE_FILE_TIMEOUT.mark(1);
             return MessageAcceptance::Ignore;
         }
@ -861,51 +838,33 @@ impl Libp2pEventHandler {
     fn on_announce_shard_config(
         &self,
         propagation_source: PeerId,
-        msg: SignedAnnounceShardConfig,
+        source: PeerId,
+        msg: TimedMessage<shared_types::ShardConfig>,
     ) -> MessageAcceptance {
-        // verify message signature
-        if !verify_signature(&msg, &msg.peer_id, propagation_source) {
-            return MessageAcceptance::Reject;
-        }
-
-        // verify public ip address if required
-        let addr = msg.at.clone().into();
-        if !self.config.private_ip_enabled && !Self::contains_public_ip(&addr) {
-            return MessageAcceptance::Reject;
-        }
-
-        // verify announced ip address if required
-        if !self.config.private_ip_enabled
-            && self.config.check_announced_ip
-            && !self.verify_announced_address(&msg.peer_id, &addr)
-        {
-            return MessageAcceptance::Reject;
-        }
-
-        // propagate gossip to peers
-        let d = duration_since(
-            msg.resend_timestamp,
-            metrics::LIBP2P_HANDLE_PUBSUB_ANNOUNCE_SHARD_LATENCY.clone(),
-        );
-        if d < TOLERABLE_DRIFT.neg() || d > *ANNOUNCE_SHARD_CONFIG_TIMEOUT {
-            debug!(%msg.resend_timestamp, ?d, "Invalid resend timestamp, ignoring AnnounceShardConfig message");
+        // validate timestamp
+        if !metrics::LIBP2P_HANDLE_PUBSUB_ANNOUNCE_SHARD.verify_timestamp(
+            propagation_source,
+            msg.timestamp,
+            *PUBSUB_TIMEOUT_NETWORK,
+            None,
+        ) {
             return MessageAcceptance::Ignore;
         }

-        let shard_config = ShardConfig {
-            shard_id: msg.shard_id,
-            num_shard: msg.num_shard,
-        };
-        // notify sync layer
-        self.send_to_sync(SyncMessage::AnnounceShardConfig {
-            shard_config,
-            peer_id: msg.peer_id.clone().into(),
-            addr,
-        });
+        let shard_config = match ShardConfig::try_from(msg.inner) {
+            Ok(v) => v,
+            Err(_) => return MessageAcceptance::Reject,
+        };

         // insert message to cache
         self.file_location_cache
-            .insert_peer_config(msg.peer_id.clone().into(), shard_config);
+            .insert_peer_config(source, shard_config);
+
+        // notify sync layer
+        self.send_to_sync(SyncMessage::AnnounceShardConfig {
+            shard_config,
+            peer_id: source,
+        });

         MessageAcceptance::Accept
     }
@ -913,11 +872,16 @@ impl Libp2pEventHandler {
     fn on_announce_chunks(
         &self,
         propagation_source: PeerId,
-        msg: SignedAnnounceChunks,
+        msg: TimedMessage<AnnounceChunks>,
     ) -> MessageAcceptance {
-        // verify message signature
-        if !verify_signature(&msg, &msg.peer_id, propagation_source) {
-            return MessageAcceptance::Reject;
+        // verify timestamp
+        if !metrics::LIBP2P_HANDLE_PUBSUB_ANNOUNCE_CHUNKS.verify_timestamp(
+            propagation_source,
+            msg.timestamp,
+            *PUBSUB_TIMEOUT_NETWORK,
+            None,
+        ) {
+            return MessageAcceptance::Ignore;
         }

         // verify public ip address if required
@ -934,56 +898,73 @@ impl Libp2pEventHandler {
             return MessageAcceptance::Reject;
         }

-        // propagate gossip to peers
-        let d = duration_since(
-            msg.resend_timestamp,
-            metrics::LIBP2P_HANDLE_PUBSUB_ANNOUNCE_CHUNKS_LATENCY.clone(),
-        );
-        if d < TOLERABLE_DRIFT.neg() || d > *ANNOUNCE_FILE_TIMEOUT {
-            debug!(%msg.resend_timestamp, ?d, "Invalid resend timestamp, ignoring AnnounceChunks message");
-            return MessageAcceptance::Ignore;
-        }
-
         // notify sync layer
         self.send_to_sync(SyncMessage::AnnounceChunksGossip { msg: msg.inner });

         MessageAcceptance::Accept
     }

-    fn on_status_message(
+    fn verify_status_message(
         &self,
         peer_id: PeerId,
         status: StatusMessage,
         network_id: NetworkIdentity,
-    ) {
+        shard_config: &ShardConfig,
+    ) -> bool {
         if status.data != network_id {
             warn!(%peer_id, ?network_id, ?status.data, "Report peer with incompatible network id");
             self.send_to_network(NetworkMessage::ReportPeer {
                 peer_id,
                 action: PeerAction::Fatal,
-                source: ReportSource::Gossipsub,
+                source: ReportSource::RPC,
                 msg: "Incompatible network id in StatusMessage",
-            })
+            });
+            return false;
+        }
+
+        let peer_shard_config = match ShardConfig::new(status.shard_id, status.num_shard) {
+            Ok(v) => v,
+            Err(err) => {
+                warn!(%peer_id, ?status, ?err, "Report peer with invalid shard config");
+                self.send_to_network(NetworkMessage::ReportPeer {
+                    peer_id,
+                    action: PeerAction::Fatal,
+                    source: ReportSource::RPC,
+                    msg: "Invalid shard config in StatusMessage",
+                });
+                return false;
+            }
+        };
+
+        self.file_location_cache
+            .insert_peer_config(peer_id, peer_shard_config);
+
+        if !peer_shard_config.intersect(shard_config) {
+            info!(%peer_id, ?shard_config, ?status, "Report peer with mismatched shard config");
+            self.send_to_network(NetworkMessage::ReportPeer {
+                peer_id,
+                action: PeerAction::LowToleranceError,
+                source: ReportSource::RPC,
+                msg: "Shard config mismatch in StatusMessage",
+            });
+            self.send_to_network(NetworkMessage::DisconnectPeer { peer_id });
+            return false;
+        }
+
+        true
+    }
+
+    async fn publish_file(&self, tx_id: TxID) {
+        if let Some(batch) = self.file_batcher.write().await.add(tx_id) {
+            if let Some(announcement) = self.construct_announce_file_message(batch).await {
+                self.publish_announcement(announcement).await;
+            }
         }
     }

-    async fn publish_file(&self, tx_id: TxID) -> Option<bool> {
-        match self.file_batcher.write().await.add(tx_id) {
-            Some(batch) => {
-                let announcement = self.construct_announce_file_message(batch).await?;
-                Some(self.publish_announcement(announcement).await)
-            }
-            None => Some(false),
-        }
-    }
-
-    async fn publish_announcement(&self, announcement: SignedAnnounceFile) -> bool {
-        match self.announcement_batcher.write().await.add(announcement) {
-            Some(batch) => {
-                self.publish(PubsubMessage::AnnounceFile(batch));
-                true
-            }
-            None => false,
-        }
+    async fn publish_announcement(&self, announcement: SignedAnnounceFile) {
+        if let Some(batch) = self.announcement_batcher.write().await.add(announcement) {
+            self.publish(PubsubMessage::AnnounceFile(batch));
+        }
     }

@ -1184,10 +1165,7 @@ mod tests {

         assert_eq!(handler.peers.read().await.size(), 1);
         ctx.assert_status_request(alice);
-        assert!(matches!(
-            ctx.sync_recv.try_recv(),
-            Ok(Notification(SyncMessage::PeerConnected {peer_id})) if peer_id == alice
-        ));
+        assert!(matches!(ctx.sync_recv.try_recv(), Err(TryRecvError::Empty)));
     }

     #[tokio::test]
@ -1216,6 +1194,8 @@ mod tests {
         let req_id = (ConnectionId::new(4), SubstreamId(12));
         let request = Request::Status(StatusMessage {
             data: Default::default(),
+            num_shard: 1,
+            shard_id: 0,
         });
         handler.on_rpc_request(alice, req_id, request).await;

@ -1337,11 +1317,11 @@ mod tests {
     ) -> MessageAcceptance {
         let (alice, bob) = (PeerId::random(), PeerId::random());
         let id = MessageId::new(b"dummy message");
-        let message = PubsubMessage::FindFile(FindFile {
-            tx_id,
-            num_shard: 1,
-            shard_id: 0,
-            neighbors_only: false,
+        let message = PubsubMessage::FindFile(TimedMessage {
+            inner: FindFile {
+                tx_id,
+                maybe_shard_config: None,
+            },
             timestamp,
         });
         handler.on_pubsub_message(alice, bob, &id, message).await
@ -1365,7 +1345,7 @@ mod tests {
         let result = handle_find_file_msg(
             &handler,
             TxID::random_hash(412),
-            timestamp_now() - 10 - FIND_FILE_TIMEOUT.num_seconds() as u32,
+            timestamp_now() - 10 - PUBSUB_TIMEOUT_NETWORK.num_seconds() as u32,
         )
         .await;
         assert!(matches!(result, MessageAcceptance::Ignore));
@ -1430,7 +1410,7 @@ mod tests {
             .await
             .unwrap();
         let malicious_addr: Multiaddr = "/ip4/127.0.0.38/tcp/30000".parse().unwrap();
-        file.inner.at = malicious_addr.into();
+        file.inner.inner.at = malicious_addr.into();
         let message = PubsubMessage::AnnounceFile(vec![file]);

         // failed to verify signature
@ -2,6 +2,30 @@ use std::sync::Arc;

 use metrics::{register_meter, register_meter_with_group, Histogram, Meter, Sample};

+pub struct PubsubMsgHandleMetrics {
+    pub(crate) topic_name: &'static str,
+    pub(crate) qps: Arc<dyn Meter>,
+    pub(crate) latency_ms: Arc<dyn Histogram>,
+    pub(crate) timeout: Arc<dyn Meter>,
+}
+
+impl PubsubMsgHandleMetrics {
+    pub fn new(topic_name: &'static str) -> Self {
+        let group_name = format!("router_libp2p_handle_pubsub_{}", topic_name);
+
+        Self {
+            topic_name,
+            qps: register_meter_with_group(group_name.as_str(), "qps"),
+            latency_ms: Sample::ExpDecay(0.015).register_with_group(
+                group_name.as_str(),
+                "latency_ms",
+                1024,
+            ),
+            timeout: register_meter_with_group(group_name.as_str(), "timeout"),
+        }
+    }
+}
+
 lazy_static::lazy_static! {
     // service
     pub static ref SERVICE_ROUTE_NETWORK_MESSAGE: Arc<dyn Meter> = register_meter("router_service_route_network_message");
@ -11,16 +35,14 @@ lazy_static::lazy_static! {
     pub static ref SERVICE_ROUTE_NETWORK_MESSAGE_PUBLISH: Arc<dyn Meter> = register_meter("router_service_route_network_message_publish");
     pub static ref SERVICE_ROUTE_NETWORK_MESSAGE_REPORT_PEER: Arc<dyn Meter> = register_meter("router_service_route_network_message_report_peer");
     pub static ref SERVICE_ROUTE_NETWORK_MESSAGE_GOODBYE_PEER: Arc<dyn Meter> = register_meter("router_service_route_network_message_goodbye_peer");
-    pub static ref SERVICE_ROUTE_NETWORK_MESSAGE_DAIL_PEER: Arc<dyn Meter> = register_meter_with_group("router_service_route_network_message_dail_peer", "all");
-    pub static ref SERVICE_ROUTE_NETWORK_MESSAGE_DAIL_PEER_ALREADY: Arc<dyn Meter> = register_meter_with_group("router_service_route_network_message_dail_peer", "already");
-    pub static ref SERVICE_ROUTE_NETWORK_MESSAGE_DAIL_PEER_NEW_OK: Arc<dyn Meter> = register_meter_with_group("router_service_route_network_message_dail_peer", "ok");
-    pub static ref SERVICE_ROUTE_NETWORK_MESSAGE_DAIL_PEER_NEW_FAIL: Arc<dyn Meter> = register_meter_with_group("router_service_route_network_message_dail_peer", "fail");
+    pub static ref SERVICE_ROUTE_NETWORK_MESSAGE_DIAL_PEER: Arc<dyn Meter> = register_meter_with_group("router_service_route_network_message_dial_peer", "all");
+    pub static ref SERVICE_ROUTE_NETWORK_MESSAGE_DIAL_PEER_ALREADY: Arc<dyn Meter> = register_meter_with_group("router_service_route_network_message_dial_peer", "already");
+    pub static ref SERVICE_ROUTE_NETWORK_MESSAGE_DIAL_PEER_NEW_OK: Arc<dyn Meter> = register_meter_with_group("router_service_route_network_message_dial_peer", "ok");
+    pub static ref SERVICE_ROUTE_NETWORK_MESSAGE_DIAL_PEER_NEW_FAIL: Arc<dyn Meter> = register_meter_with_group("router_service_route_network_message_dial_peer", "fail");
     pub static ref SERVICE_ROUTE_NETWORK_MESSAGE_ANNOUNCE_LOCAL_FILE: Arc<dyn Meter> = register_meter("router_service_route_network_message_announce_local_file");
     pub static ref SERVICE_ROUTE_NETWORK_MESSAGE_UPNP: Arc<dyn Meter> = register_meter("router_service_route_network_message_upnp");

     pub static ref SERVICE_EXPIRED_PEERS: Arc<dyn Histogram> = Sample::ExpDecay(0.015).register("router_service_expired_peers", 1024);
-    pub static ref SERVICE_EXPIRED_PEERS_DISCONNECT_OK: Arc<dyn Histogram> = Sample::ExpDecay(0.015).register("router_service_expired_peers_disconnect_ok", 1024);
-    pub static ref SERVICE_EXPIRED_PEERS_DISCONNECT_FAIL: Arc<dyn Histogram> = Sample::ExpDecay(0.015).register("router_service_expired_peers_disconnect_fail", 1024);

     // libp2p_event_handler

@ -44,34 +66,25 @@ lazy_static::lazy_static! {
|
|||||||
pub static ref LIBP2P_HANDLE_RESPONSE_ERROR: Arc<dyn Meter> = register_meter_with_group("router_libp2p_handle_response_error", "qps");
|
pub static ref LIBP2P_HANDLE_RESPONSE_ERROR: Arc<dyn Meter> = register_meter_with_group("router_libp2p_handle_response_error", "qps");
|
||||||
pub static ref LIBP2P_HANDLE_RESPONSE_ERROR_LATENCY: Arc<dyn Histogram> = Sample::ExpDecay(0.015).register_with_group("router_libp2p_handle_response_error", "latency", 1024);
|
pub static ref LIBP2P_HANDLE_RESPONSE_ERROR_LATENCY: Arc<dyn Histogram> = Sample::ExpDecay(0.015).register_with_group("router_libp2p_handle_response_error", "latency", 1024);
|
||||||
|
|
||||||
// libp2p_event_handler: new file
|
// libp2p_event_handler: pubsub messages
|
||||||
pub static ref LIBP2P_HANDLE_PUBSUB_NEW_FILE: Arc<dyn Meter> = register_meter_with_group("router_libp2p_handle_pubsub_new_file", "qps");
|
pub static ref LIBP2P_HANDLE_PUBSUB_NEW_FILE: PubsubMsgHandleMetrics = PubsubMsgHandleMetrics::new("new_file");
|
||||||
pub static ref LIBP2P_HANDLE_PUBSUB_NEW_FILE_LATENCY: Arc<dyn Histogram> = Sample::ExpDecay(0.015).register_with_group("router_libp2p_handle_pubsub_new_file", "latency", 1024);
|
pub static ref LIBP2P_HANDLE_PUBSUB_ASK_FILE: PubsubMsgHandleMetrics = PubsubMsgHandleMetrics::new("ask_file");
|
||||||
pub static ref LIBP2P_HANDLE_PUBSUB_NEW_FILE_TIMEOUT: Arc<dyn Meter> = register_meter_with_group("router_libp2p_handle_pubsub_new_file", "timeout");
|
pub static ref LIBP2P_HANDLE_PUBSUB_FIND_CHUNKS: PubsubMsgHandleMetrics = PubsubMsgHandleMetrics::new("find_chunks");
|
||||||
|
pub static ref LIBP2P_HANDLE_PUBSUB_ANNOUNCE_CHUNKS: PubsubMsgHandleMetrics = PubsubMsgHandleMetrics::new("announce_chunks");
|
||||||
|
pub static ref LIBP2P_HANDLE_PUBSUB_ANNOUNCE_SHARD: PubsubMsgHandleMetrics = PubsubMsgHandleMetrics::new("announce_shard");
|
||||||
|
|
||||||
// libp2p_event_handler: find & announce file
|
// libp2p_event_handler: find & announce file
|
||||||
pub static ref LIBP2P_HANDLE_PUBSUB_FIND_FILE: Arc<dyn Meter> = register_meter_with_group("router_libp2p_handle_pubsub_find_file", "qps");
|
pub static ref LIBP2P_HANDLE_PUBSUB_FIND_FILE: PubsubMsgHandleMetrics = PubsubMsgHandleMetrics::new("find_file");
|
||||||
pub static ref LIBP2P_HANDLE_PUBSUB_FIND_FILE_LATENCY: Arc<dyn Histogram> = Sample::ExpDecay(0.015).register_with_group("router_libp2p_handle_pubsub_find_file", "latency", 1024);
|
|
||||||
pub static ref LIBP2P_HANDLE_PUBSUB_FIND_FILE_TIMEOUT: Arc<dyn Meter> = register_meter_with_group("router_libp2p_handle_pubsub_find_file", "timeout");
|
|
||||||
pub static ref LIBP2P_HANDLE_PUBSUB_FIND_FILE_STORE: Arc<dyn Meter> = register_meter_with_group("router_libp2p_handle_pubsub_find_file", "store");
|
pub static ref LIBP2P_HANDLE_PUBSUB_FIND_FILE_STORE: Arc<dyn Meter> = register_meter_with_group("router_libp2p_handle_pubsub_find_file", "store");
|
||||||
pub static ref LIBP2P_HANDLE_PUBSUB_FIND_FILE_CACHE: Arc<dyn Meter> = register_meter_with_group("router_libp2p_handle_pubsub_find_file", "cache");
|
pub static ref LIBP2P_HANDLE_PUBSUB_FIND_FILE_CACHE: Arc<dyn Meter> = register_meter_with_group("router_libp2p_handle_pubsub_find_file", "cache");
|
||||||
    pub static ref LIBP2P_HANDLE_PUBSUB_FIND_FILE_FORWARD: Arc<dyn Meter> = register_meter_with_group("router_libp2p_handle_pubsub_find_file", "forward");

    pub static ref LIBP2P_HANDLE_PUBSUB_ANNOUNCE_FILE: Arc<dyn Meter> = register_meter_with_group("router_libp2p_handle_pubsub_announce_file", "qps");
    pub static ref LIBP2P_HANDLE_PUBSUB_ANNOUNCE_FILE_LATENCY: Arc<dyn Histogram> = Sample::ExpDecay(0.015).register_with_group("router_libp2p_handle_pubsub_announce_file", "latency", 1024);
    pub static ref LIBP2P_HANDLE_PUBSUB_ANNOUNCE_FILE_TIMEOUT: Arc<dyn Meter> = register_meter_with_group("router_libp2p_handle_pubsub_announce_file", "timeout");
    pub static ref LIBP2P_HANDLE_PUBSUB_ANNOUNCE_FILE_ANNOUNCEMENTS: Arc<dyn Meter> = register_meter_with_group("router_libp2p_handle_pubsub_announce_file", "announcements");
    pub static ref LIBP2P_HANDLE_PUBSUB_ANNOUNCE_FILE_FILES: Arc<dyn Meter> = register_meter_with_group("router_libp2p_handle_pubsub_announce_file", "files");

    // libp2p_event_handler: find & announce chunks
    pub static ref LIBP2P_HANDLE_PUBSUB_FIND_CHUNKS: Arc<dyn Meter> = register_meter_with_group("router_libp2p_handle_pubsub_find_chunks", "qps");
    pub static ref LIBP2P_HANDLE_PUBSUB_FIND_CHUNKS_LATENCY: Arc<dyn Histogram> = Sample::ExpDecay(0.015).register_with_group("router_libp2p_handle_pubsub_find_chunks", "latency", 1024);
    pub static ref LIBP2P_HANDLE_PUBSUB_ANNOUNCE_CHUNKS: Arc<dyn Meter> = register_meter_with_group("router_libp2p_handle_pubsub_announce_chunks", "qps");
    pub static ref LIBP2P_HANDLE_PUBSUB_ANNOUNCE_CHUNKS_LATENCY: Arc<dyn Histogram> = Sample::ExpDecay(0.015).register_with_group("router_libp2p_handle_pubsub_announce_chunks", "latency", 1024);

    // libp2p_event_handler: announce shard config
    pub static ref LIBP2P_HANDLE_PUBSUB_ANNOUNCE_SHARD: Arc<dyn Meter> = register_meter_with_group("router_libp2p_handle_pubsub_announce_shard", "qps");
    pub static ref LIBP2P_HANDLE_PUBSUB_ANNOUNCE_SHARD_LATENCY: Arc<dyn Histogram> = Sample::ExpDecay(0.015).register_with_group("router_libp2p_handle_pubsub_announce_shard", "latency", 1024);

    // libp2p_event_handler: verify IP address
    pub static ref LIBP2P_VERIFY_ANNOUNCED_IP: Arc<dyn Meter> = register_meter("router_libp2p_verify_announced_ip");
    pub static ref LIBP2P_VERIFY_ANNOUNCED_IP_UNSEEN: Arc<dyn Meter> = register_meter("router_libp2p_verify_announced_ip_unseen");
68	node/router/src/rate_limit.rs	Normal file
@ -0,0 +1,68 @@
use std::{
    collections::HashMap,
    time::{Duration, Instant},
};

use network::{
    rpc::rate_limiter::{Limiter, Quota, RateLimitedErr},
    types::GossipKind,
    PeerId, PubsubMessage,
};

pub struct PubsubRateLimiter {
    init_time: Instant,
    limiters: Limiter<PeerId>,
    limiters_by_topic: HashMap<GossipKind, Limiter<PeerId>>,
}

impl PubsubRateLimiter {
    pub fn new(n: u64, period: Duration) -> Result<Self, String> {
        Ok(Self {
            init_time: Instant::now(),
            limiters: Limiter::from_quota(Quota::n_every(n, period))?,
            limiters_by_topic: Default::default(),
        })
    }

    pub fn limit_by_topic(
        mut self,
        kind: GossipKind,
        n: u64,
        period: Duration,
    ) -> Result<Self, String> {
        let limiter = Limiter::from_quota(Quota::n_every(n, period))?;
        self.limiters_by_topic.insert(kind, limiter);
        Ok(self)
    }

    pub fn allows(
        &mut self,
        peer_id: &PeerId,
        msg: &PubsubMessage,
    ) -> Result<(), (Option<GossipKind>, RateLimitedErr)> {
        let time_since_start = self.init_time.elapsed();

        if let Err(err) = self.limiters.allows(time_since_start, peer_id, 1) {
            return Err((None, err));
        }

        let kind = msg.kind();
        if let Some(limiter) = self.limiters_by_topic.get_mut(&kind) {
            if let Err(err) = limiter.allows(time_since_start, peer_id, 1) {
                return Err((Some(kind), err));
            }
        }

        Ok(())
    }

    pub fn prune(&mut self) {
        let time_since_start = self.init_time.elapsed();

        self.limiters.prune(time_since_start);

        for limiter in self.limiters_by_topic.values_mut() {
            limiter.prune(time_since_start);
        }
    }
}
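For orientation, a minimal sketch of how this limiter is meant to be driven; the quotas and the FindFile topic below are illustrative placeholders, not the values the router actually uses (those appear in the RouterService diff further down):

use std::time::Duration;
use network::{types::GossipKind, PeerId, PubsubMessage};

// Returns false when either the global or the per-topic quota is exhausted.
fn gate(limiter: &mut PubsubRateLimiter, peer: &PeerId, msg: &PubsubMessage) -> bool {
    limiter.allows(peer, msg).is_ok()
}

// Builder-style construction: one global quota plus an optional per-topic override.
fn build() -> Result<PubsubRateLimiter, String> {
    PubsubRateLimiter::new(100, Duration::from_secs(10))?
        .limit_by_topic(GossipKind::FindFile, 10, Duration::from_secs(10))
}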
@ -1,17 +1,22 @@
use crate::metrics;
use crate::rate_limit::PubsubRateLimiter;
use crate::Config;
use crate::{libp2p_event_handler::Libp2pEventHandler, peer_manager::PeerManager};
use chunk_pool::ChunkPoolMessage;
use file_location_cache::FileLocationCache;
use futures::{channel::mpsc::Sender, prelude::*};
use miner::MinerMessage;
use network::rpc::GoodbyeReason;
use network::types::GossipKind;
use network::{
    types::NewFile, BehaviourEvent, Keypair, Libp2pEvent, NetworkGlobals, NetworkMessage,
    NetworkReceiver, NetworkSender, PubsubMessage, RequestId, Service as LibP2PService, Swarm,
    BehaviourEvent, Keypair, Libp2pEvent, NetworkGlobals, NetworkMessage, NetworkReceiver,
    NetworkSender, PubsubMessage, RequestId, Service as LibP2PService, Swarm,
};
use network::{MessageAcceptance, PeerAction, PeerId, ReportSource};
use pruner::PrunerMessage;
use shared_types::timestamp_now;
use shared_types::ShardedFile;
use std::sync::Arc;
use std::time::Duration;
use storage::log_store::Store as LogStore;
use storage_async::Store;
use sync::{SyncMessage, SyncSender};
@ -47,6 +52,8 @@ pub struct RouterService {
    upnp_mappings: (Option<u16>, Option<u16>),

    store: Arc<dyn LogStore>,

    pubsub_rate_limiter: PubsubRateLimiter,
}

impl RouterService {
@ -65,9 +72,19 @@ impl RouterService {
        file_location_cache: Arc<FileLocationCache>,
        local_keypair: Keypair,
        config: Config,
    ) {
    ) -> Result<(), String> {
        let peers = Arc::new(RwLock::new(PeerManager::new(config.clone())));

        let pubsub_rate_limiter = PubsubRateLimiter::new(100, Duration::from_secs(10))?
            .limit_by_topic(GossipKind::Example, 10, Duration::from_secs(10))?
            .limit_by_topic(GossipKind::NewFile, 50, Duration::from_secs(10))?
            .limit_by_topic(GossipKind::AskFile, 50, Duration::from_secs(10))?
            .limit_by_topic(GossipKind::FindFile, 10, Duration::from_secs(10))?
            .limit_by_topic(GossipKind::AnnounceFile, 10, Duration::from_secs(10))?
            .limit_by_topic(GossipKind::FindChunks, 10, Duration::from_secs(10))?
            .limit_by_topic(GossipKind::AnnounceChunks, 10, Duration::from_secs(10))?
            .limit_by_topic(GossipKind::AnnounceShardConfig, 50, Duration::from_secs(10))?;

        // create the network service and spawn the task
        let router = RouterService {
            config: config.clone(),
@ -89,17 +106,21 @@ impl RouterService {
            ),
            upnp_mappings: (None, None),
            store,
            pubsub_rate_limiter,
        };

        // spawn service
        let shutdown_sender = executor.shutdown_sender();

        executor.spawn(router.main(shutdown_sender), "router");

        Ok(())
    }

    async fn main(mut self, mut shutdown_sender: Sender<ShutdownReason>) {
        let mut heartbeat_service = interval(self.config.heartbeat_interval);
        let mut heartbeat_batcher = interval(self.config.batcher_timeout);
        let mut heartbeat_rate_limiter = interval(Duration::from_secs(30));

        loop {
            tokio::select! {
@ -116,6 +137,8 @@ impl RouterService {

                // heartbeat for expire file batcher
                _ = heartbeat_batcher.tick() => self.libp2p_event_handler.expire_batcher().await,

                _ = heartbeat_rate_limiter.tick() => self.pubsub_rate_limiter.prune(),
            }
        }
    }
@ -190,10 +213,24 @@ impl RouterService {
                message,
                ..
            } => {
                let result = self
                    .libp2p_event_handler
                    .on_pubsub_message(propagation_source, source, &id, message)
                    .await;
                let result = if let Err((rate_limit_kind, _)) = self
                    .pubsub_rate_limiter
                    .allows(&propagation_source, &message)
                {
                    warn!(%propagation_source, kind=?message.kind(), ?rate_limit_kind, "Pubsub message rate limited");
                    self.libp2p_event_handler
                        .send_to_network(NetworkMessage::ReportPeer {
                            peer_id: propagation_source,
                            action: PeerAction::LowToleranceError,
                            source: ReportSource::Gossipsub,
                            msg: "Pubsub message rate limited",
                        });
                    MessageAcceptance::Reject
                } else {
                    self.libp2p_event_handler
                        .on_pubsub_message(propagation_source, source, &id, message)
                        .await
                };

                self.libp2p
                    .swarm
@ -309,37 +346,39 @@ impl RouterService {
                metrics::SERVICE_ROUTE_NETWORK_MESSAGE_GOODBYE_PEER.mark(1);
            }
            NetworkMessage::DialPeer { address, peer_id } => {
                metrics::SERVICE_ROUTE_NETWORK_MESSAGE_DAIL_PEER.mark(1);
                metrics::SERVICE_ROUTE_NETWORK_MESSAGE_DIAL_PEER.mark(1);

                if self.libp2p.swarm.is_connected(&peer_id) {
                    self.libp2p_event_handler
                        .send_to_sync(SyncMessage::PeerConnected { peer_id });
                    metrics::SERVICE_ROUTE_NETWORK_MESSAGE_DAIL_PEER_ALREADY.mark(1);
                    metrics::SERVICE_ROUTE_NETWORK_MESSAGE_DIAL_PEER_ALREADY.mark(1);
                } else {
                    match Swarm::dial(&mut self.libp2p.swarm, address.clone()) {
                        Ok(()) => {
                            debug!(%address, "Dialing libp2p peer");
                            metrics::SERVICE_ROUTE_NETWORK_MESSAGE_DAIL_PEER_NEW_OK.mark(1);
                            metrics::SERVICE_ROUTE_NETWORK_MESSAGE_DIAL_PEER_NEW_OK.mark(1);
                        }
                        Err(err) => {
                            info!(%address, error = ?err, "Failed to dial peer");
                            self.libp2p_event_handler
                                .send_to_sync(SyncMessage::DailFailed { peer_id, err });
                                .send_to_sync(SyncMessage::DialFailed { peer_id, err });
                            metrics::SERVICE_ROUTE_NETWORK_MESSAGE_DAIL_PEER_NEW_FAIL.mark(1);
                            metrics::SERVICE_ROUTE_NETWORK_MESSAGE_DIAL_PEER_NEW_FAIL.mark(1);
                        }
                    };
                }
            }
            NetworkMessage::DisconnectPeer { peer_id } => {
                self.disconnect_peer(peer_id);
            }
            NetworkMessage::AnnounceLocalFile { tx_id } => {
                let shard_config = self.store.get_shard_config();
                let msg = PubsubMessage::NewFile(NewFile {
                    tx_id,
                    num_shard: shard_config.num_shard,
                    shard_id: shard_config.shard_id,
                    timestamp: timestamp_now(),
                });
                let new_file = ShardedFile {
                    tx_id,
                    shard_config: self.store.get_shard_config().into(),
                };
                let msg = PubsubMessage::NewFile(new_file.into());
                self.libp2p.swarm.behaviour_mut().publish(vec![msg]);
                metrics::SERVICE_ROUTE_NETWORK_MESSAGE_ANNOUNCE_LOCAL_FILE.mark(1);
                debug!(?new_file, "Publish NewFile message");
            }
            NetworkMessage::UPnPMappingEstablished {
                tcp_socket,
@ -379,13 +418,10 @@ impl RouterService {
            PrunerMessage::ChangeShardConfig(shard_config) => {
                self.libp2p_event_handler
                    .send_to_chunk_pool(ChunkPoolMessage::ChangeShardConfig(shard_config));
                if let Some(msg) = self
                    .libp2p_event_handler
                    .construct_announce_shard_config_message(shard_config)
                    .await
                {
                    self.libp2p_event_handler.publish(msg)
                }
                let shard_config = shared_types::ShardConfig::from(shard_config);
                self.libp2p_event_handler
                    .publish(PubsubMessage::AnnounceShardConfig(shard_config.into()));
            }
        }
    }
@ -399,24 +435,16 @@ impl RouterService {
            debug!(%num_expired_peers, "Heartbeat, remove expired peers")
        }

        let mut num_succeeded = 0;
        let mut num_failed = 0;
        for peer_id in expired_peers {
            // async operation, once peer disconnected, swarm event `PeerDisconnected`
            // will be polled to handle in advance.
            match self.libp2p.swarm.disconnect_peer_id(peer_id) {
                Ok(_) => {
                    debug!(%peer_id, "Peer expired and disconnect it");
                    num_succeeded += 1;
                }
                Err(_) => {
                    debug!(%peer_id, "Peer expired but failed to disconnect");
                    num_failed += 1;
                }
            }
        }
        metrics::SERVICE_EXPIRED_PEERS_DISCONNECT_OK.update(num_succeeded);
        metrics::SERVICE_EXPIRED_PEERS_DISCONNECT_FAIL.update(num_failed);
        for peer_id in expired_peers {
            self.disconnect_peer(peer_id);
        }
    }

    fn disconnect_peer(&mut self, peer_id: PeerId) {
        let pm = self.libp2p.swarm.behaviour_mut().peer_manager_mut();
        if pm.is_connected(&peer_id) {
            pm.disconnect_peer(peer_id, GoodbyeReason::IrrelevantNetwork);
        }
    }
}
@ -27,3 +27,4 @@ merkle_light = { path = "../../common/merkle_light" }
merkle_tree = { path = "../../common/merkle_tree"}
futures-channel = "^0.3"
metrics = { workspace = true }
parking_lot = "0.12.3"
@ -18,7 +18,6 @@ pub struct RpcServerImpl {

#[async_trait]
impl RpcServer for RpcServerImpl {
    #[tracing::instrument(skip(self), err)]
    async fn find_file(&self, tx_seq: u64) -> RpcResult<()> {
        info!("admin_findFile({tx_seq})");

@ -39,7 +38,6 @@ impl RpcServer for RpcServerImpl {
        }
    }

    #[tracing::instrument(skip(self), err)]
    async fn shutdown(&self) -> RpcResult<()> {
        info!("admin_shutdown()");

@ -51,7 +49,6 @@ impl RpcServer for RpcServerImpl {
            .map_err(|e| error::internal_error(format!("Failed to send shutdown command: {:?}", e)))
    }

    #[tracing::instrument(skip(self), err)]
    async fn start_sync_file(&self, tx_seq: u64) -> RpcResult<()> {
        info!("admin_startSyncFile({tx_seq})");

@ -72,7 +69,6 @@ impl RpcServer for RpcServerImpl {
        }
    }

    #[tracing::instrument(skip(self), err)]
    async fn start_sync_chunks(
        &self,
        tx_seq: u64,
@ -102,7 +98,6 @@ impl RpcServer for RpcServerImpl {
        }
    }

    #[tracing::instrument(skip(self), err)]
    async fn terminate_sync(&self, tx_seq: u64) -> RpcResult<bool> {
        info!("admin_terminateSync({tx_seq})");

@ -131,7 +126,6 @@ impl RpcServer for RpcServerImpl {
        }
    }

    #[tracing::instrument(skip(self), err)]
    async fn get_sync_status(&self, tx_seq: u64) -> RpcResult<String> {
        info!("admin_getSyncStatus({tx_seq})");

@ -148,7 +142,6 @@ impl RpcServer for RpcServerImpl {
        }
    }

    #[tracing::instrument(skip(self), err)]
    async fn get_sync_info(&self, tx_seq: Option<u64>) -> RpcResult<HashMap<u64, FileSyncInfo>> {
        info!(?tx_seq, "admin_getSyncInfo()");

@ -163,7 +156,6 @@ impl RpcServer for RpcServerImpl {
        }
    }

    #[tracing::instrument(skip(self), err)]
    async fn get_network_info(&self) -> RpcResult<NetworkInfo> {
        info!("admin_getNetworkInfo()");
@ -6,6 +6,7 @@ extern crate miner as zgs_miner;
mod admin;
mod config;
mod error;
mod middleware;
mod miner;
pub mod types;
mod zgs;
@ -77,8 +78,10 @@ pub async fn run_server(
    Ok(handles)
}

fn server_builder(ctx: Context) -> HttpServerBuilder {
    HttpServerBuilder::default().max_request_body_size(ctx.config.max_request_body_size)
fn server_builder(ctx: Context) -> HttpServerBuilder<middleware::Metrics> {
    HttpServerBuilder::default()
        .max_request_body_size(ctx.config.max_request_body_size)
        .set_middleware(middleware::Metrics::default())
}

/// Run a single RPC server for all namespace RPCs.
50	node/rpc/src/middleware.rs	Normal file
@ -0,0 +1,50 @@
use std::{collections::HashMap, sync::Arc, time::Instant};

use jsonrpsee::core::middleware::Middleware;
use metrics::{register_meter_with_group, Histogram, Meter, Sample};
use parking_lot::RwLock;

struct RpcMetric {
    qps: Arc<dyn Meter>,
    latency: Arc<dyn Histogram>,
}

impl RpcMetric {
    fn new(method_name: &String) -> Self {
        let group = format!("rpc_{}", method_name);

        Self {
            qps: register_meter_with_group(group.as_str(), "qps"),
            latency: Sample::ExpDecay(0.015).register_with_group(group.as_str(), "latency", 1024),
        }
    }
}

#[derive(Clone, Default)]
pub struct Metrics {
    metrics_by_method: Arc<RwLock<HashMap<String, RpcMetric>>>,
}

impl Middleware for Metrics {
    type Instant = Instant;

    fn on_request(&self) -> Self::Instant {
        Instant::now()
    }

    fn on_call(&self, name: &str) {
        let mut metrics_by_method = self.metrics_by_method.write();
        let entry = metrics_by_method
            .entry(name.to_string())
            .or_insert_with_key(RpcMetric::new);
        entry.qps.mark(1);
    }

    fn on_result(&self, name: &str, _success: bool, started_at: Self::Instant) {
        let mut metrics_by_method = self.metrics_by_method.write();
        let entry = metrics_by_method
            .entry(name.to_string())
            .or_insert_with_key(RpcMetric::new);
        entry.latency.update_since(started_at);
    }
}
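A short, hedged note on the metric naming this middleware produces: per RPC method it registers one meter and one histogram under the group derived from the method name, as in the sketch below (the concrete method string "zgs_getFileInfo" is taken from the log lines elsewhere in this diff and is only an example).

// Illustrative only: for a call to "zgs_getFileInfo" the middleware above registers
// two series in the group "rpc_zgs_getFileInfo":
//   - "qps"     (Meter, marked once per call in `on_call`)
//   - "latency" (exp-decay Histogram, updated in `on_result`)
fn metric_group_for(method: &str) -> String {
    format!("rpc_{}", method)
}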
@ -63,7 +63,11 @@ pub trait Rpc {
    async fn check_file_finalized(&self, tx_seq_or_root: TxSeqOrRoot) -> RpcResult<Option<bool>>;

    #[method(name = "getFileInfo")]
    async fn get_file_info(&self, data_root: DataRoot) -> RpcResult<Option<FileInfo>>;
    async fn get_file_info(
        &self,
        data_root: DataRoot,
        need_available: bool,
    ) -> RpcResult<Option<FileInfo>>;

    #[method(name = "getFileInfoByTxSeq")]
    async fn get_file_info_by_tx_seq(&self, tx_seq: u64) -> RpcResult<Option<FileInfo>>;
@ -17,7 +17,6 @@ pub struct RpcServerImpl {

#[async_trait]
impl RpcServer for RpcServerImpl {
    #[tracing::instrument(skip(self), err)]
    async fn get_status(&self) -> RpcResult<Status> {
        info!("zgs_getStatus()");
        let sync_progress = self
@ -96,7 +95,7 @@ impl RpcServer for RpcServerImpl {
        let tx_seq = try_option!(
            self.ctx
                .log_store
                .get_tx_seq_by_data_root(&data_root)
                .get_tx_seq_by_data_root(&data_root, true)
                .await?
        );

@ -122,7 +121,12 @@ impl RpcServer for RpcServerImpl {
    ) -> RpcResult<Option<SegmentWithProof>> {
        info!(%data_root, %index, "zgs_downloadSegmentWithProof");

        let tx = try_option!(self.ctx.log_store.get_tx_by_data_root(&data_root).await?);
        let tx = try_option!(
            self.ctx
                .log_store
                .get_tx_by_data_root(&data_root, true)
                .await?
        );

        self.get_segment_with_proof_by_tx(tx, index).await
    }
@ -145,7 +149,12 @@ impl RpcServer for RpcServerImpl {
        let seq = match tx_seq_or_root {
            TxSeqOrRoot::TxSeq(v) => v,
            TxSeqOrRoot::Root(v) => {
                try_option!(self.ctx.log_store.get_tx_seq_by_data_root(&v).await?)
                try_option!(
                    self.ctx
                        .log_store
                        .get_tx_seq_by_data_root(&v, false)
                        .await?
                )
            }
        };

@ -164,10 +173,19 @@ impl RpcServer for RpcServerImpl {
        }
    }

    async fn get_file_info(&self, data_root: DataRoot) -> RpcResult<Option<FileInfo>> {
    async fn get_file_info(
        &self,
        data_root: DataRoot,
        need_available: bool,
    ) -> RpcResult<Option<FileInfo>> {
        debug!(%data_root, "zgs_getFileInfo");

        let tx = try_option!(self.ctx.log_store.get_tx_by_data_root(&data_root).await?);
        let tx = try_option!(
            self.ctx
                .log_store
                .get_tx_by_data_root(&data_root, need_available)
                .await?
        );

        Ok(Some(self.get_file_info_by_tx(tx).await?))
    }
@ -246,12 +264,7 @@ impl RpcServerImpl {
    }

    async fn get_file_info_by_tx(&self, tx: Transaction) -> RpcResult<FileInfo> {
        let (finalized, pruned) = match self
            .ctx
            .log_store
            .get_store()
            .get_tx_status(TxSeqOrRoot::TxSeq(tx.seq))?
        {
        let (finalized, pruned) = match self.ctx.log_store.get_store().get_tx_status(tx.seq)? {
            Some(TxStatus::Finalized) => (true, false),
            Some(TxStatus::Pruned) => (false, true),
            None => (false, false),
@ -294,7 +307,7 @@ impl RpcServerImpl {
        let maybe_tx = self
            .ctx
            .log_store
            .get_tx_by_data_root(&segment.root)
            .get_tx_by_data_root(&segment.root, false)
            .await?;

        self.put_segment_with_maybe_tx(segment, maybe_tx).await
@ -9,13 +9,11 @@ use merkle_light::merkle::MerkleTree;
use merkle_light::proof::Proof as RawFileProof;
use merkle_light::{hash::Algorithm, merkle::next_pow2};
use merkle_tree::RawLeafSha3Algorithm;
use serde::de::Visitor;
use serde::{Deserialize, Serialize};
use ssz::Encode;
use ssz_derive::{Decode as DeriveDecode, Encode as DeriveEncode};
use std::fmt;
use std::hash::Hasher;
use std::str::FromStr;
use tiny_keccak::{Hasher as KeccakHasher, Keccak};
use tracing::debug;

@ -398,82 +396,60 @@ pub struct ProtocolVersion {
    pub build: u8,
}

#[derive(Debug)]
#[derive(Debug, Serialize, Deserialize)]
#[serde(untagged)]
pub enum TxSeqOrRoot {
    TxSeq(u64),
    Root(DataRoot),
}

impl Serialize for TxSeqOrRoot {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        match self {
            TxSeqOrRoot::TxSeq(seq) => seq.serialize(serializer),
            TxSeqOrRoot::Root(root) => root.serialize(serializer),
        }
    }
}

impl<'a> Deserialize<'a> for TxSeqOrRoot {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::Deserializer<'a>,
    {
        deserializer.deserialize_any(TxSeqOrRootVisitor)
    }
}

struct TxSeqOrRootVisitor;

impl<'a> Visitor<'a> for TxSeqOrRootVisitor {
    type Value = TxSeqOrRoot;

    fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
        write!(formatter, "an u64 integer or a hex64 value")
    }

    fn visit_u64<E>(self, v: u64) -> Result<Self::Value, E>
    where
        E: serde::de::Error,
    {
        Ok(TxSeqOrRoot::TxSeq(v))
    }

    fn visit_str<E>(self, v: &str) -> Result<Self::Value, E>
    where
        E: serde::de::Error,
    {
        let root: H256 = H256::from_str(v).map_err(E::custom)?;
        Ok(TxSeqOrRoot::Root(root))
    }
}

#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, DeriveEncode, DeriveDecode)]
pub struct ShardConfig {
    pub num_shard: usize,
    pub shard_id: usize,
}

impl Default for ShardConfig {
    fn default() -> Self {
        ShardConfig {
            num_shard: 1,
            shard_id: 0,
        }
    }
}

#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, DeriveEncode, DeriveDecode)]
pub struct ShardedFile {
    pub tx_id: TxID,
    pub shard_config: ShardConfig,
}

#[cfg(test)]
mod tests {
    use std::str::FromStr;

    use super::*;

    #[test]
    fn test_tx_seq_or_root_serde() {
        // serialize tx seq
        // serialize tx seq as number
        let tx_seq = TxSeqOrRoot::TxSeq(666);
        assert_eq!(serde_json::to_string(&tx_seq).unwrap(), "666".to_string());

        // serialize root
        // serialize root as quoted string
        let hash_str = "0xa906f46f8b9f15908dbee7adc5492ff30779c3abe114ccdb7079ecdcb72eb855";
        let hash_quoted = format!("\"{}\"", hash_str);
        let hash = H256::from_str(hash_str).unwrap();
        let root = TxSeqOrRoot::Root(hash);
        assert_eq!(serde_json::to_string(&root).unwrap(), hash_quoted);

        // deserialize tx seq
        // deserialize tx seq from number
        assert!(matches!(
            serde_json::from_str::<TxSeqOrRoot>("777").unwrap(),
            TxSeqOrRoot::TxSeq(777)
        ));

        // deserialize root
        // deserialize root from quoted string
        assert!(matches!(
            serde_json::from_str::<TxSeqOrRoot>(hash_quoted.as_str()).unwrap(),
            TxSeqOrRoot::Root(v) if v == hash,
|
|||||||
use super::{Client, RuntimeContext};
|
use super::{Client, RuntimeContext};
|
||||||
use chunk_pool::{ChunkPoolMessage, Config as ChunkPoolConfig, MemoryChunkPool};
|
use chunk_pool::{Config as ChunkPoolConfig, MemoryChunkPool};
|
||||||
use file_location_cache::FileLocationCache;
|
use file_location_cache::FileLocationCache;
|
||||||
use log_entry_sync::{LogSyncConfig, LogSyncEvent, LogSyncManager};
|
use log_entry_sync::{LogSyncConfig, LogSyncEvent, LogSyncManager};
|
||||||
use miner::{MineService, MinerConfig, MinerMessage, ShardConfig};
|
use miner::{MineService, MinerConfig, MinerMessage, ShardConfig};
|
||||||
@ -7,7 +7,7 @@ use network::{
|
|||||||
self, new_network_channel, Keypair, NetworkConfig, NetworkGlobals, NetworkReceiver,
|
self, new_network_channel, Keypair, NetworkConfig, NetworkGlobals, NetworkReceiver,
|
||||||
NetworkSender, RequestId, Service as LibP2PService,
|
NetworkSender, RequestId, Service as LibP2PService,
|
||||||
};
|
};
|
||||||
use pruner::{Pruner, PrunerConfig, PrunerMessage};
|
use pruner::{get_shard_config, Pruner, PrunerConfig, PrunerMessage};
|
||||||
use router::RouterService;
|
use router::RouterService;
|
||||||
use rpc::RPCConfig;
|
use rpc::RPCConfig;
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
@ -54,7 +54,7 @@ struct PrunerComponents {
|
|||||||
}
|
}
|
||||||
|
|
||||||
struct ChunkPoolComponents {
|
struct ChunkPoolComponents {
|
||||||
send: mpsc::UnboundedSender<ChunkPoolMessage>,
|
chunk_pool: Arc<MemoryChunkPool>,
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Builds a `Client` instance.
|
/// Builds a `Client` instance.
|
||||||
@ -134,11 +134,21 @@ impl ClientBuilder {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Starts the networking stack.
|
/// Starts the networking stack.
|
||||||
pub async fn with_network(mut self, config: &NetworkConfig) -> Result<Self, String> {
|
pub async fn with_network(mut self, mut config: NetworkConfig) -> Result<Self, String> {
|
||||||
let executor = require!("network", self, runtime_context).clone().executor;
|
let executor = require!("network", self, runtime_context).clone().executor;
|
||||||
|
let store = require!("network", self, store).clone();
|
||||||
|
let file_location_cache = require!("network", self, file_location_cache).clone();
|
||||||
|
|
||||||
|
// only dial to peers that shard config matched
|
||||||
|
config.peer_manager.filters.dial_peer_filter = Some(Arc::new(move |peer_id| {
|
||||||
|
match file_location_cache.get_peer_config(peer_id) {
|
||||||
|
Some(v) => store.get_shard_config().intersect(&v),
|
||||||
|
None => true,
|
||||||
|
}
|
||||||
|
}));
|
||||||
|
|
||||||
// construct the libp2p service context
|
// construct the libp2p service context
|
||||||
let service_context = network::Context { config };
|
let service_context = network::Context { config: &config };
|
||||||
|
|
||||||
// construct communication channel
|
// construct communication channel
|
||||||
let (send, recv) = new_network_channel();
|
let (send, recv) = new_network_channel();
|
||||||
@ -193,7 +203,7 @@ impl ClientBuilder {
|
|||||||
if let Some(config) = config {
|
if let Some(config) = config {
|
||||||
let executor = require!("miner", self, runtime_context).clone().executor;
|
let executor = require!("miner", self, runtime_context).clone().executor;
|
||||||
let network_send = require!("miner", self, network).send.clone();
|
let network_send = require!("miner", self, network).send.clone();
|
||||||
let store = self.async_store.as_ref().unwrap().clone();
|
let store = require!("miner", self, async_store).clone();
|
||||||
|
|
||||||
let send = MineService::spawn(executor, network_send, config, store).await?;
|
let send = MineService::spawn(executor, network_send, config, store).await?;
|
||||||
self.miner = Some(MinerComponents { send });
|
self.miner = Some(MinerComponents { send });
|
||||||
@ -215,7 +225,11 @@ impl ClientBuilder {
|
|||||||
Ok(self)
|
Ok(self)
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn with_shard(self, config: ShardConfig) -> Result<Self, String> {
|
pub async fn with_shard(self, mut config: ShardConfig) -> Result<Self, String> {
|
||||||
|
let store = require!("shard", self, async_store).clone();
|
||||||
|
if let Some(stored_config) = get_shard_config(store.as_ref()).await.unwrap_or(None) {
|
||||||
|
config = stored_config;
|
||||||
|
}
|
||||||
self.async_store
|
self.async_store
|
||||||
.as_ref()
|
.as_ref()
|
||||||
.unwrap()
|
.unwrap()
|
||||||
@ -230,7 +244,7 @@ impl ClientBuilder {
|
|||||||
let executor = require!("router", self, runtime_context).clone().executor;
|
let executor = require!("router", self, runtime_context).clone().executor;
|
||||||
let sync_send = require!("router", self, sync).send.clone(); // note: we can make this optional in the future
|
let sync_send = require!("router", self, sync).send.clone(); // note: we can make this optional in the future
|
||||||
let miner_send = self.miner.as_ref().map(|x| x.send.clone());
|
let miner_send = self.miner.as_ref().map(|x| x.send.clone());
|
||||||
let chunk_pool_send = require!("router", self, chunk_pool).send.clone();
|
let chunk_pool_send = require!("router", self, chunk_pool).chunk_pool.sender();
|
||||||
let store = require!("router", self, store).clone();
|
let store = require!("router", self, store).clone();
|
||||||
let file_location_cache = require!("router", self, file_location_cache).clone();
|
let file_location_cache = require!("router", self, file_location_cache).clone();
|
||||||
|
|
||||||
@ -255,16 +269,12 @@ impl ClientBuilder {
|
|||||||
file_location_cache,
|
file_location_cache,
|
||||||
network.keypair.clone(),
|
network.keypair.clone(),
|
||||||
router_config,
|
router_config,
|
||||||
);
|
)?;
|
||||||
|
|
||||||
Ok(self)
|
Ok(self)
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn with_rpc(
|
pub async fn with_rpc(self, rpc_config: RPCConfig) -> Result<Self, String> {
|
||||||
mut self,
|
|
||||||
rpc_config: RPCConfig,
|
|
||||||
chunk_pool_config: ChunkPoolConfig,
|
|
||||||
) -> Result<Self, String> {
|
|
||||||
if !rpc_config.enabled {
|
if !rpc_config.enabled {
|
||||||
return Ok(self);
|
return Ok(self);
|
||||||
}
|
}
|
||||||
@ -273,16 +283,9 @@ impl ClientBuilder {
|
|||||||
let async_store = require!("rpc", self, async_store).clone();
|
let async_store = require!("rpc", self, async_store).clone();
|
||||||
let network_send = require!("rpc", self, network).send.clone();
|
let network_send = require!("rpc", self, network).send.clone();
|
||||||
let mine_send = self.miner.as_ref().map(|x| x.send.clone());
|
let mine_send = self.miner.as_ref().map(|x| x.send.clone());
|
||||||
let synced_tx_recv = require!("rpc", self, log_sync).send.subscribe();
|
|
||||||
let file_location_cache = require!("rpc", self, file_location_cache).clone();
|
let file_location_cache = require!("rpc", self, file_location_cache).clone();
|
||||||
|
let chunk_pool = require!("rpc", self, chunk_pool).chunk_pool.clone();
|
||||||
|
|
||||||
let (chunk_pool, chunk_pool_handler) =
|
|
||||||
chunk_pool::unbounded(chunk_pool_config, async_store.clone(), network_send.clone());
|
|
||||||
let chunk_pool_components = ChunkPoolComponents {
|
|
||||||
send: chunk_pool.sender(),
|
|
||||||
};
|
|
||||||
|
|
||||||
let chunk_pool_clone = chunk_pool.clone();
|
|
||||||
let ctx = rpc::Context {
|
let ctx = rpc::Context {
|
||||||
config: rpc_config,
|
config: rpc_config,
|
||||||
file_location_cache,
|
file_location_cache,
|
||||||
@ -295,7 +298,7 @@ impl ClientBuilder {
|
|||||||
mine_service_sender: mine_send,
|
mine_service_sender: mine_send,
|
||||||
};
|
};
|
||||||
|
|
||||||
let (rpc_handle, maybe_admin_rpc_handle) = rpc::run_server(ctx.clone())
|
let (rpc_handle, maybe_admin_rpc_handle) = rpc::run_server(ctx)
|
||||||
.await
|
.await
|
||||||
.map_err(|e| format!("Unable to start HTTP RPC server: {:?}", e))?;
|
.map_err(|e| format!("Unable to start HTTP RPC server: {:?}", e))?;
|
||||||
|
|
||||||
@ -303,13 +306,29 @@ impl ClientBuilder {
|
|||||||
if let Some(admin_rpc_handle) = maybe_admin_rpc_handle {
|
if let Some(admin_rpc_handle) = maybe_admin_rpc_handle {
|
||||||
executor.spawn(admin_rpc_handle, "rpc_admin");
|
executor.spawn(admin_rpc_handle, "rpc_admin");
|
||||||
}
|
}
|
||||||
|
|
||||||
|
Ok(self)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn with_chunk_pool(
|
||||||
|
mut self,
|
||||||
|
chunk_pool_config: ChunkPoolConfig,
|
||||||
|
) -> Result<Self, String> {
|
||||||
|
let executor = require!("rpc", self, runtime_context).clone().executor;
|
||||||
|
let async_store = require!("rpc", self, async_store).clone();
|
||||||
|
let network_send = require!("rpc", self, network).send.clone();
|
||||||
|
let synced_tx_recv = require!("rpc", self, log_sync).send.subscribe();
|
||||||
|
|
||||||
|
let (chunk_pool, chunk_pool_handler) =
|
||||||
|
chunk_pool::unbounded(chunk_pool_config, async_store.clone(), network_send.clone());
|
||||||
|
|
||||||
executor.spawn(chunk_pool_handler.run(), "chunk_pool_handler");
|
executor.spawn(chunk_pool_handler.run(), "chunk_pool_handler");
|
||||||
executor.spawn(
|
executor.spawn(
|
||||||
MemoryChunkPool::monitor_log_entry(chunk_pool_clone, synced_tx_recv),
|
MemoryChunkPool::monitor_log_entry(chunk_pool.clone(), synced_tx_recv),
|
||||||
"chunk_pool_log_monitor",
|
"chunk_pool_log_monitor",
|
||||||
);
|
);
|
||||||
|
|
||||||
self.chunk_pool = Some(chunk_pool_components);
|
self.chunk_pool = Some(ChunkPoolComponents { chunk_pool });
|
||||||
|
|
||||||
Ok(self)
|
Ok(self)
|
||||||
}
|
}
|
||||||
|
@ -1,4 +1,4 @@
//! This crate aims to provide a common set of tools that can be used to create a "environment" to
//! This crate aims to provide a common set of tools that can be used to create an "environment" to
//! run Zgs services. This allows for the unification of creating tokio runtimes, etc.
//!
//! The idea is that the main thread creates an `Environment`, which is then used to spawn a
@ -1,7 +1,7 @@
#![allow(clippy::field_reassign_with_default)]

use crate::ZgsConfig;
use ethereum_types::{H256, U256};
use ethereum_types::H256;
use ethers::prelude::{Http, Middleware, Provider};
use log_entry_sync::{CacheConfig, ContractAddress, LogSyncConfig};
use miner::MinerConfig;
@ -43,9 +43,9 @@ impl ZgsConfig {
            chain_id,
            flow_address,
            p2p_protocol_version: ProtocolVersion {
                major: network::PROTOCOL_VERSION[0],
                major: network::PROTOCOL_VERSION_V4[0],
                minor: network::PROTOCOL_VERSION[1],
                minor: network::PROTOCOL_VERSION_V4[1],
                build: network::PROTOCOL_VERSION[2],
                build: network::PROTOCOL_VERSION_V4[2],
            },
        };
        network_config.network_id = local_network_id.clone();
@ -105,8 +105,9 @@ impl ZgsConfig {
        network_config.private = self.network_private;

        network_config.peer_db = self.network_peer_db;
        network_config.peer_manager = self.network_peer_manager;
        network_config.peer_manager = self.network_peer_manager.clone();
        network_config.disable_enr_network_id = self.discv5_disable_enr_network_id;
        network_config.find_chunks_enabled = self.network_find_chunks_enabled;

        Ok(network_config)
    }
@ -178,7 +179,6 @@ impl ZgsConfig {
        } else {
            None
        };
        let submission_gas = self.miner_submission_gas.map(U256::from);
        let cpu_percentage = self.miner_cpu_percentage;
        let iter_batch = self.mine_iter_batch_size;
        let context_query_seconds = self.mine_context_query_seconds;
@ -191,7 +191,6 @@ impl ZgsConfig {
            self.blockchain_rpc_endpoint.clone(),
            mine_address,
            flow_address,
            submission_gas,
            cpu_percentage,
            iter_batch,
            context_query_seconds,
@ -199,6 +198,7 @@ impl ZgsConfig {
            self.rate_limit_retries,
            self.timeout_retries,
            self.initial_backoff,
            self.submission_config,
        ))
    }
@ -19,6 +19,7 @@ build_config! {
    (network_libp2p_nodes, (Vec<String>), vec![])
    (network_private, (bool), false)
    (network_disable_discovery, (bool), false)
    (network_find_chunks_enabled, (bool), false)

    // discv5
    (discv5_request_timeout_secs, (u64), 5)
@ -73,7 +74,6 @@ build_config! {
    (mine_contract_address, (String), "".to_string())
    (miner_id, (Option<String>), None)
    (miner_key, (Option<String>), None)
    (miner_submission_gas, (Option<u64>), None)
    (miner_cpu_percentage, (u64), 100)
    (mine_iter_batch_size, (usize), 100)
    (reward_contract_address, (String), "".to_string())
@ -105,6 +105,9 @@ pub struct ZgsConfig {
    // rpc config, configured by [rpc] section by `config` crate.
    pub rpc: rpc::RPCConfig,

    // submission config, configured by [submission_config] section by `config` crate.
    pub submission_config: contract_wrapper::SubmitConfig,

    // metrics config, configured by [metrics] section by `config` crate.
    pub metrics: metrics::MetricsConfiguration,
}
@ -1,33 +1,41 @@
use task_executor::TaskExecutor;
use tracing::Level;
use tracing_log::AsLog;
use tracing_subscriber::EnvFilter;
use tracing_subscriber::layer::SubscriberExt;
use tracing_subscriber::util::SubscriberInitExt;
use tracing_subscriber::{EnvFilter, Layer};

const LOG_RELOAD_PERIOD_SEC: u64 = 30;

pub fn configure(log_level_file: &str, log_directory: &str, executor: TaskExecutor) {
    let file_appender = tracing_appender::rolling::daily(log_directory, "zgs.log");
    let (non_blocking, guard) = tracing_appender::non_blocking(file_appender);
    let builder = tracing_subscriber::fmt()
        .with_max_level(Level::TRACE)
        .with_env_filter(EnvFilter::default())
        .with_writer(non_blocking)
        .with_ansi(false)
        // .with_file(true)
        // .with_line_number(true)
        // .with_thread_names(true)
        .with_filter_reloading();

    let handle = builder.reload_handle();
    builder.init();

    let level_file = log_level_file.trim_end().to_string();

    // load config synchronously
    let mut config = std::fs::read_to_string(&level_file)
        .unwrap_or_default()
        .trim_end()
        .to_string();
    let _ = handle.reload(&config);
    let filter = EnvFilter::try_new(config.clone()).expect("invalid log level");
    let (filter, reload_handle) = tracing_subscriber::reload::Layer::new(filter);

    let fmt_layer = tracing_subscriber::fmt::layer()
        .with_writer(non_blocking)
        .with_ansi(false)
        .compact()
        .with_filter(filter);
    // .with_file(true)
    // .with_line_number(true)
    // .with_thread_names(true)
    let subscriber = tracing_subscriber::registry().with(fmt_layer);
    #[cfg(feature = "tokio-console")]
    {
        subscriber.with(console_subscriber::spawn()).init();
    }
    #[cfg(not(feature = "tokio-console"))]
    {
        subscriber.init();
    }

    // periodically check for config changes
    executor.spawn(
@ -57,8 +65,11 @@ pub fn configure(log_level_file: &str, log_directory: &str, executor: TaskExecut

                println!("Updating log config to {:?}", new_config);

                match handle.reload(&new_config) {
                match reload_handle.reload(&new_config) {
                    Ok(()) => config = new_config,
                    Ok(()) => {
                        rust_log::set_max_level(tracing_core::LevelFilter::current().as_log());
                        config = new_config
                    }
                    Err(e) => {
                        println!("Failed to load new config: {:?}", e);
                    }
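With this rework the reload handle swaps in a fresh EnvFilter, so the watched log-level file is expected to hold an EnvFilter directive string. A minimal sketch, assuming illustrative directives (the actual defaults live in the node's config files, not here):

use tracing_subscriber::EnvFilter;

// Illustrative only: parse a directive string such as "info,sync=debug,router=trace"
// the same way the reload path above validates a new value before applying it.
fn parse_level(contents: &str) -> Result<EnvFilter, String> {
    EnvFilter::try_new(contents.trim_end()).map_err(|e| format!("invalid log level: {e}"))
}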
@ -14,6 +14,7 @@ async fn start_node(context: RuntimeContext, config: ZgsConfig) -> Result<Client
    let network_config = config.network_config().await?;
    let storage_config = config.storage_config()?;
    let log_sync_config = config.log_sync_config()?;
    let chunk_pool_config = config.chunk_pool_config()?;
    let miner_config = config.mine_config()?;
    let router_config = config.router_config(&network_config)?;
    let pruner_config = config.pruner_config()?;
@ -22,20 +23,22 @@ async fn start_node(context: RuntimeContext, config: ZgsConfig) -> Result<Client
    ClientBuilder::default()
        .with_runtime_context(context)
        .with_rocksdb_store(&storage_config)?
        .with_shard(shard_config)
        .await?
        .with_log_sync(log_sync_config)
        .await?
        .with_file_location_cache(config.file_location_cache)
        .with_network(&network_config)
        .with_network(network_config)
        .await?
        .with_chunk_pool(chunk_pool_config)
        .await?
        .with_sync(config.sync)
        .await?
        .with_miner(miner_config)
        .await?
        .with_shard(shard_config)
        .await?
        .with_pruner(pruner_config)
        .await?
        .with_rpc(config.rpc, config.chunk_pool_config()?)
        .with_rpc(config.rpc)
        .await?
        .with_router(router_config)?
        .build()
@ -2,7 +2,6 @@
extern crate tracing;

use anyhow::bail;
use backtrace::Backtrace;
use shared_types::{
    Chunk, ChunkArray, ChunkArrayWithProof, DataRoot, FlowProof, FlowRangeProof, Transaction,
};
@ -60,15 +59,23 @@ impl Store {
    delegate!(fn get_proof_at_root(root: Option<DataRoot>, index: u64, length: u64) -> Result<FlowRangeProof>);
    delegate!(fn get_context() -> Result<(DataRoot, u64)>);

    pub async fn get_tx_seq_by_data_root(&self, data_root: &DataRoot) -> Result<Option<u64>> {
    pub async fn get_tx_seq_by_data_root(
        &self,
        data_root: &DataRoot,
        need_available: bool,
    ) -> Result<Option<u64>> {
        let root = *data_root;
        self.spawn(move |store| store.get_tx_seq_by_data_root(&root))
        self.spawn(move |store| store.get_tx_seq_by_data_root(&root, need_available))
            .await
    }

    pub async fn get_tx_by_data_root(&self, data_root: &DataRoot) -> Result<Option<Transaction>> {
    pub async fn get_tx_by_data_root(
        &self,
        data_root: &DataRoot,
        need_available: bool,
    ) -> Result<Option<Transaction>> {
        let root = *data_root;
        self.spawn(move |store| store.get_tx_by_data_root(&root))
        self.spawn(move |store| store.get_tx_by_data_root(&root, need_available))
            .await
    }

@ -140,9 +147,6 @@ impl Store {
    {
        let store = self.store.clone();
        let (tx, rx) = oneshot::channel();
        let mut backtrace = Backtrace::new();
        let frames = backtrace.frames().to_vec();
        backtrace = frames.into();

        self.executor.spawn_blocking(
            move || {
@ -150,7 +154,6 @@ impl Store {
                let res = f(&*store);

                if tx.send(res).is_err() {
                    warn!("Backtrace: {:?}", backtrace);
                    error!("Unable to complete async storage operation: the receiver dropped");
                }
            },
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
impl TryFrom<shared_types::ShardConfig> for ShardConfig {
|
||||||
|
type Error = String;
|
||||||
|
|
||||||
|
fn try_from(value: shared_types::ShardConfig) -> Result<Self, Self::Error> {
|
||||||
|
Self::new(value.shard_id, value.num_shard)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl From<ShardConfig> for shared_types::ShardConfig {
|
||||||
|
fn from(value: ShardConfig) -> Self {
|
||||||
|
Self {
|
||||||
|
num_shard: value.num_shard,
|
||||||
|
shard_id: value.shard_id,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
impl ShardConfig {
|
impl ShardConfig {
|
||||||
pub fn new(id: usize, num: usize) -> Result<Self, String> {
|
pub fn new(id: usize, num: usize) -> Result<Self, String> {
|
||||||
let config = ShardConfig {
|
let config = ShardConfig {
|
||||||
|
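A minimal sketch of what these conversions enable: round-tripping between the storage-side ShardConfig above and the wire-level shared_types::ShardConfig introduced earlier in this diff. The comment about validation is an assumption based on ShardConfig::new returning a Result, not a statement of its exact rules.

// Illustrative only.
fn to_wire(cfg: ShardConfig) -> shared_types::ShardConfig {
    shared_types::ShardConfig::from(cfg)
}

fn from_wire(cfg: shared_types::ShardConfig) -> Result<ShardConfig, String> {
    // Fails when the incoming pair does not satisfy ShardConfig::new's validation.
    ShardConfig::try_from(cfg)
}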
@ -128,6 +128,12 @@ impl DataRange for Subtree {
    }
}

impl Default for EntryBatchData {
    fn default() -> Self {
        Self::new()
    }
}

impl EntryBatchData {
    pub fn new() -> Self {
        EntryBatchData::Incomplete(IncompleteData {
|
|||||||
};
|
};
|
||||||
|
|
||||||
use super::SealAnswer;
|
use super::SealAnswer;
|
||||||
use chunk_data::EntryBatchData;
|
pub use chunk_data::EntryBatchData;
|
||||||
use seal::SealInfo;
|
use seal::SealInfo;
|
||||||
|
|
||||||
#[derive(Debug, Encode, Decode, Deserialize, Serialize)]
|
#[derive(Debug, Encode, Decode, Deserialize, Serialize)]
|
||||||
|
@ -21,7 +21,6 @@ use rayon::prelude::ParallelSlice;
|
|||||||
use shared_types::{
|
use shared_types::{
|
||||||
bytes_to_chunks, compute_padded_chunk_size, compute_segment_size, Chunk, ChunkArray,
|
bytes_to_chunks, compute_padded_chunk_size, compute_segment_size, Chunk, ChunkArray,
|
||||||
ChunkArrayWithProof, ChunkWithProof, DataRoot, FlowProof, FlowRangeProof, Merkle, Transaction,
|
ChunkArrayWithProof, ChunkWithProof, DataRoot, FlowProof, FlowRangeProof, Merkle, Transaction,
|
||||||
TxSeqOrRoot,
|
|
||||||
};
|
};
|
||||||
use std::cmp::Ordering;
|
use std::cmp::Ordering;
|
||||||
|
|
||||||
@ -512,7 +511,7 @@ impl LogStoreChunkRead for LogManager {
|
|||||||
index_start: usize,
|
index_start: usize,
|
||||||
index_end: usize,
|
index_end: usize,
|
||||||
) -> crate::error::Result<Option<ChunkArray>> {
|
) -> crate::error::Result<Option<ChunkArray>> {
|
||||||
let tx_seq = try_option!(self.get_tx_seq_by_data_root(data_root)?);
|
let tx_seq = try_option!(self.get_tx_seq_by_data_root(data_root, true)?);
|
||||||
self.get_chunks_by_tx_and_index_range(tx_seq, index_start, index_end)
|
self.get_chunks_by_tx_and_index_range(tx_seq, index_start, index_end)
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -537,8 +536,30 @@ impl LogStoreRead for LogManager {
|
|||||||
self.tx_store.get_tx_by_seq_number(seq)
|
self.tx_store.get_tx_by_seq_number(seq)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn get_tx_seq_by_data_root(&self, data_root: &DataRoot) -> crate::error::Result<Option<u64>> {
|
fn get_tx_seq_by_data_root(
|
||||||
self.tx_store.get_first_tx_seq_by_data_root(data_root)
|
&self,
|
||||||
|
data_root: &DataRoot,
|
||||||
|
need_available: bool,
|
||||||
|
) -> crate::error::Result<Option<u64>> {
|
||||||
|
let seq_list = self.tx_store.get_tx_seq_list_by_data_root(data_root)?;
|
||||||
|
let mut available_seq = None;
|
||||||
|
for tx_seq in &seq_list {
|
||||||
|
if self.tx_store.check_tx_completed(*tx_seq)? {
|
||||||
|
// Return the first finalized tx if possible.
|
||||||
|
return Ok(Some(*tx_seq));
|
||||||
|
}
|
||||||
|
if need_available
|
||||||
|
&& available_seq.is_none()
|
||||||
|
&& !self.tx_store.check_tx_pruned(*tx_seq)?
|
||||||
|
{
|
||||||
|
available_seq = Some(*tx_seq);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if need_available {
|
||||||
|
return Ok(available_seq);
|
||||||
|
}
|
||||||
|
// No tx is finalized, return the first one.
|
||||||
|
Ok(seq_list.first().cloned())
|
||||||
}
|
}
|
||||||
|
|
||||||
fn get_chunk_with_proof_by_tx_and_index(
|
fn get_chunk_with_proof_by_tx_and_index(
|
||||||
@ -582,14 +603,7 @@ impl LogStoreRead for LogManager {
|
|||||||
}))
|
}))
|
||||||
}
|
}
|
||||||
|
|
||||||
fn get_tx_status(&self, tx_seq_or_data_root: TxSeqOrRoot) -> Result<Option<TxStatus>> {
|
fn get_tx_status(&self, tx_seq: u64) -> Result<Option<TxStatus>> {
|
||||||
let tx_seq = match tx_seq_or_data_root {
|
|
||||||
TxSeqOrRoot::TxSeq(v) => v,
|
|
||||||
TxSeqOrRoot::Root(root) => {
|
|
||||||
try_option!(self.tx_store.get_first_tx_seq_by_data_root(&root)?)
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
self.tx_store.get_tx_status(tx_seq)
|
self.tx_store.get_tx_status(tx_seq)
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -700,7 +714,7 @@ impl LogManager {
|
|||||||
data_db_source: Arc<dyn ZgsKeyValueDB>,
|
data_db_source: Arc<dyn ZgsKeyValueDB>,
|
||||||
config: LogConfig,
|
config: LogConfig,
|
||||||
) -> Result<Self> {
|
) -> Result<Self> {
|
||||||
let tx_store = TransactionStore::new(data_db_source.clone())?;
|
let tx_store = TransactionStore::new(flow_db_source.clone(), data_db_source.clone())?;
|
||||||
let flow_db = Arc::new(FlowDBStore::new(flow_db_source.clone()));
|
let flow_db = Arc::new(FlowDBStore::new(flow_db_source.clone()));
|
||||||
let data_db = Arc::new(FlowDBStore::new(data_db_source.clone()));
|
let data_db = Arc::new(FlowDBStore::new(data_db_source.clone()));
|
||||||
let flow_store = Arc::new(FlowStore::new(
|
let flow_store = Arc::new(FlowStore::new(
|
||||||
@ -1157,6 +1171,7 @@ impl LogManager {
|
|||||||
.get_tx_by_seq_number(from_tx_seq)?
|
.get_tx_by_seq_number(from_tx_seq)?
|
||||||
.ok_or_else(|| anyhow!("from tx missing"))?;
|
.ok_or_else(|| anyhow!("from tx missing"))?;
|
||||||
let mut to_tx_offset_list = Vec::with_capacity(to_tx_seq_list.len());
|
let mut to_tx_offset_list = Vec::with_capacity(to_tx_seq_list.len());
|
||||||
|
|
||||||
for seq in to_tx_seq_list {
|
for seq in to_tx_seq_list {
|
||||||
// No need to copy data for completed tx.
|
// No need to copy data for completed tx.
|
||||||
if self.check_tx_completed(seq)? {
|
if self.check_tx_completed(seq)? {
|
||||||
|
@ -38,4 +38,6 @@ lazy_static::lazy_static! {
|
|||||||
pub static ref FINALIZE_TX_WITH_HASH: Arc<dyn Timer> = register_timer("log_store_log_manager_finalize_tx_with_hash");
|
pub static ref FINALIZE_TX_WITH_HASH: Arc<dyn Timer> = register_timer("log_store_log_manager_finalize_tx_with_hash");
|
||||||
|
|
||||||
pub static ref DATA_TO_MERKLE_LEAVES_SIZE: Arc<dyn Gauge<usize>> = GaugeUsize::register("log_store_data_to_merkle_leaves_size");
|
pub static ref DATA_TO_MERKLE_LEAVES_SIZE: Arc<dyn Gauge<usize>> = GaugeUsize::register("log_store_data_to_merkle_leaves_size");
|
||||||
|
|
||||||
|
pub static ref TX_BY_SEQ_NUMBER: Arc<dyn Timer> = register_timer("log_store_tx_store_get_tx_by_seq_number");
|
||||||
}
|
}
|
||||||
|
@ -4,7 +4,7 @@ use ethereum_types::H256;
|
|||||||
use flow_store::PadPair;
|
use flow_store::PadPair;
|
||||||
use shared_types::{
|
use shared_types::{
|
||||||
Chunk, ChunkArray, ChunkArrayWithProof, ChunkWithProof, DataRoot, FlowProof, FlowRangeProof,
|
Chunk, ChunkArray, ChunkArrayWithProof, ChunkWithProof, DataRoot, FlowProof, FlowRangeProof,
|
||||||
Transaction, TxSeqOrRoot,
|
Transaction,
|
||||||
};
|
};
|
||||||
use zgs_spec::{BYTES_PER_SEAL, SEALS_PER_LOAD};
|
use zgs_spec::{BYTES_PER_SEAL, SEALS_PER_LOAD};
|
||||||
|
|
||||||
@ -14,7 +14,7 @@ use self::tx_store::{BlockHashAndSubmissionIndex, TxStatus};
|
|||||||
|
|
||||||
pub mod config;
|
pub mod config;
|
||||||
mod flow_store;
|
mod flow_store;
|
||||||
mod load_chunk;
|
pub mod load_chunk;
|
||||||
pub mod log_manager;
|
pub mod log_manager;
|
||||||
mod metrics;
|
mod metrics;
|
||||||
mod seal_task_manager;
|
mod seal_task_manager;
|
||||||
@ -31,10 +31,22 @@ pub trait LogStoreRead: LogStoreChunkRead {
|
|||||||
fn get_tx_by_seq_number(&self, seq: u64) -> Result<Option<Transaction>>;
|
fn get_tx_by_seq_number(&self, seq: u64) -> Result<Option<Transaction>>;
|
||||||
|
|
||||||
/// Get a transaction by the data root of its data.
|
/// Get a transaction by the data root of its data.
|
||||||
fn get_tx_seq_by_data_root(&self, data_root: &DataRoot) -> Result<Option<u64>>;
|
/// If all txs are not finalized, return the first one if need available is false.
|
||||||
|
/// Otherwise, return the first finalized tx.
|
||||||
|
fn get_tx_seq_by_data_root(
|
||||||
|
&self,
|
||||||
|
data_root: &DataRoot,
|
||||||
|
need_available: bool,
|
||||||
|
) -> Result<Option<u64>>;
|
||||||
|
|
||||||
fn get_tx_by_data_root(&self, data_root: &DataRoot) -> Result<Option<Transaction>> {
|
/// If all txs are not finalized, return the first one if need available is false.
|
||||||
match self.get_tx_seq_by_data_root(data_root)? {
|
/// Otherwise, return the first finalized tx.
|
||||||
|
fn get_tx_by_data_root(
|
||||||
|
&self,
|
||||||
|
data_root: &DataRoot,
|
||||||
|
need_available: bool,
|
||||||
|
) -> Result<Option<Transaction>> {
|
||||||
|
match self.get_tx_seq_by_data_root(data_root, need_available)? {
|
||||||
Some(seq) => self.get_tx_by_seq_number(seq),
|
Some(seq) => self.get_tx_by_seq_number(seq),
|
||||||
None => Ok(None),
|
None => Ok(None),
|
||||||
}
|
}
|
||||||
@ -58,7 +70,7 @@ pub trait LogStoreRead: LogStoreChunkRead {
|
|||||||
|
|
||||||
fn check_tx_pruned(&self, tx_seq: u64) -> Result<bool>;
|
fn check_tx_pruned(&self, tx_seq: u64) -> Result<bool>;
|
||||||
|
|
||||||
fn get_tx_status(&self, tx_seq_or_data_root: TxSeqOrRoot) -> Result<Option<TxStatus>>;
|
fn get_tx_status(&self, tx_seq: u64) -> Result<Option<TxStatus>>;
|
||||||
|
|
||||||
fn next_tx_seq(&self) -> u64;
|
fn next_tx_seq(&self) -> u64;
|
||||||
|
|
||||||
|
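The two lookup methods above now take a need_available flag. A minimal caller-side sketch, illustrative only: it assumes a value implementing the LogStoreRead trait shown in this hunk and the crate's Result alias.

    // Prefer a finalized tx for this data root; with need_available = true the
    // lookup falls back to the first tx that has not been pruned.
    fn resolve_tx(store: &dyn LogStoreRead, root: &DataRoot) -> Result<Option<Transaction>> {
        store.get_tx_by_data_root(root, true)
    }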
@@ -23,6 +23,7 @@ const LOG_SYNC_PROGRESS_KEY: &str = "log_sync_progress";
 const NEXT_TX_KEY: &str = "next_tx_seq";
 const LOG_LATEST_BLOCK_NUMBER_KEY: &str = "log_latest_block_number_key";
 
+#[derive(Debug)]
 pub enum TxStatus {
 Finalized,
 Pruned,

@@ -56,19 +57,24 @@ pub struct BlockHashAndSubmissionIndex {
 }
 
 pub struct TransactionStore {
-kvdb: Arc<dyn ZgsKeyValueDB>,
+flow_kvdb: Arc<dyn ZgsKeyValueDB>,
+data_kvdb: Arc<dyn ZgsKeyValueDB>,
 /// This is always updated before writing the database to ensure no intermediate states.
 next_tx_seq: AtomicU64,
 }
 
 impl TransactionStore {
-pub fn new(kvdb: Arc<dyn ZgsKeyValueDB>) -> Result<Self> {
-let next_tx_seq = kvdb
+pub fn new(
+flow_kvdb: Arc<dyn ZgsKeyValueDB>,
+data_kvdb: Arc<dyn ZgsKeyValueDB>,
+) -> Result<Self> {
+let next_tx_seq = flow_kvdb
 .get(COL_TX, NEXT_TX_KEY.as_bytes())?
 .map(|a| decode_tx_seq(&a))
 .unwrap_or(Ok(0))?;
 Ok(Self {
-kvdb,
+flow_kvdb,
+data_kvdb,
 next_tx_seq: AtomicU64::new(next_tx_seq),
 })
 }

@@ -85,7 +91,7 @@ impl TransactionStore {
 return Ok(old_tx_seq_list);
 }
 
-let mut db_tx = self.kvdb.transaction();
+let mut db_tx = self.flow_kvdb.transaction();
 if !tx.data.is_empty() {
 tx.size = tx.data.len() as u64;
 let mut padded_data = tx.data.clone();

@@ -112,32 +118,35 @@ impl TransactionStore {
 &new_tx_seq_list.as_ssz_bytes(),
 );
 self.next_tx_seq.store(tx.seq + 1, Ordering::SeqCst);
-self.kvdb.write(db_tx)?;
+self.flow_kvdb.write(db_tx)?;
 metrics::TX_STORE_PUT.update_since(start_time);
 Ok(old_tx_seq_list)
 }
 
 pub fn get_tx_by_seq_number(&self, seq: u64) -> Result<Option<Transaction>> {
+let start_time = Instant::now();
 if seq >= self.next_tx_seq() {
 return Ok(None);
 }
-let value = try_option!(self.kvdb.get(COL_TX, &seq.to_be_bytes())?);
+let value = try_option!(self.flow_kvdb.get(COL_TX, &seq.to_be_bytes())?);
 let tx = Transaction::from_ssz_bytes(&value).map_err(Error::from)?;
+metrics::TX_BY_SEQ_NUMBER.update_since(start_time);
 Ok(Some(tx))
 }
 
 pub fn remove_tx_after(&self, min_seq: u64) -> Result<Vec<Transaction>> {
 let mut removed_txs = Vec::new();
 let max_seq = self.next_tx_seq();
-let mut db_tx = self.kvdb.transaction();
+let mut flow_db_tx = self.flow_kvdb.transaction();
+let mut data_db_tx = self.data_kvdb.transaction();
 let mut modified_merkle_root_map = HashMap::new();
 for seq in min_seq..max_seq {
 let Some(tx) = self.get_tx_by_seq_number(seq)? else {
 error!(?seq, ?max_seq, "Transaction missing before the end");
 break;
 };
-db_tx.delete(COL_TX, &seq.to_be_bytes());
-db_tx.delete(COL_TX_COMPLETED, &seq.to_be_bytes());
+flow_db_tx.delete(COL_TX, &seq.to_be_bytes());
+data_db_tx.delete(COL_TX_COMPLETED, &seq.to_be_bytes());
 // We only remove tx when the blockchain reorgs.
 // If a tx is reverted, all data after it will also be reverted, so we call remove
 // all indices after it.

@@ -152,24 +161,25 @@ impl TransactionStore {
 }
 for (merkle_root, tx_seq_list) in modified_merkle_root_map {
 if tx_seq_list.is_empty() {
-db_tx.delete(COL_TX_DATA_ROOT_INDEX, merkle_root.as_bytes());
+flow_db_tx.delete(COL_TX_DATA_ROOT_INDEX, merkle_root.as_bytes());
 } else {
-db_tx.put(
+flow_db_tx.put(
 COL_TX_DATA_ROOT_INDEX,
 merkle_root.as_bytes(),
 &tx_seq_list.as_ssz_bytes(),
 );
 }
 }
-db_tx.put(COL_TX, NEXT_TX_KEY.as_bytes(), &min_seq.to_be_bytes());
+flow_db_tx.put(COL_TX, NEXT_TX_KEY.as_bytes(), &min_seq.to_be_bytes());
 self.next_tx_seq.store(min_seq, Ordering::SeqCst);
-self.kvdb.write(db_tx)?;
+self.data_kvdb.write(data_db_tx)?;
+self.flow_kvdb.write(flow_db_tx)?;
 Ok(removed_txs)
 }
 
 pub fn get_tx_seq_list_by_data_root(&self, data_root: &DataRoot) -> Result<Vec<u64>> {
 let value = match self
-.kvdb
+.flow_kvdb
 .get(COL_TX_DATA_ROOT_INDEX, data_root.as_bytes())?
 {
 Some(v) => v,

@@ -178,17 +188,9 @@ impl TransactionStore {
 Ok(Vec::<u64>::from_ssz_bytes(&value).map_err(Error::from)?)
 }
 
-pub fn get_first_tx_seq_by_data_root(&self, data_root: &DataRoot) -> Result<Option<u64>> {
-let value = try_option!(self
-.kvdb
-.get(COL_TX_DATA_ROOT_INDEX, data_root.as_bytes())?);
-let seq_list = Vec::<u64>::from_ssz_bytes(&value).map_err(Error::from)?;
-Ok(seq_list.first().cloned())
-}
-
 #[instrument(skip(self))]
 pub fn finalize_tx(&self, tx_seq: u64) -> Result<()> {
-Ok(self.kvdb.put(
+Ok(self.data_kvdb.put(
 COL_TX_COMPLETED,
 &tx_seq.to_be_bytes(),
 &[TxStatus::Finalized.into()],

@@ -197,7 +199,7 @@ impl TransactionStore {
 
 #[instrument(skip(self))]
 pub fn prune_tx(&self, tx_seq: u64) -> Result<()> {
-Ok(self.kvdb.put(
+Ok(self.data_kvdb.put(
 COL_TX_COMPLETED,
 &tx_seq.to_be_bytes(),
 &[TxStatus::Pruned.into()],

@@ -205,7 +207,9 @@ impl TransactionStore {
 }
 
 pub fn get_tx_status(&self, tx_seq: u64) -> Result<Option<TxStatus>> {
-let value = try_option!(self.kvdb.get(COL_TX_COMPLETED, &tx_seq.to_be_bytes())?);
+let value = try_option!(self
+.data_kvdb
+.get(COL_TX_COMPLETED, &tx_seq.to_be_bytes())?);
 match value.first() {
 Some(v) => Ok(Some(TxStatus::try_from(*v)?)),
 None => Ok(None),

@@ -244,14 +248,14 @@ impl TransactionStore {
 (progress.1, p).as_ssz_bytes(),
 ));
 }
-Ok(self.kvdb.puts(items)?)
+Ok(self.flow_kvdb.puts(items)?)
 }
 
 #[instrument(skip(self))]
 pub fn get_progress(&self) -> Result<Option<(u64, H256)>> {
 Ok(Some(
 <(u64, H256)>::from_ssz_bytes(&try_option!(self
-.kvdb
+.flow_kvdb
 .get(COL_MISC, LOG_SYNC_PROGRESS_KEY.as_bytes())?))
 .map_err(Error::from)?,
 ))

@@ -259,7 +263,7 @@ impl TransactionStore {
 
 #[instrument(skip(self))]
 pub fn put_log_latest_block_number(&self, block_number: u64) -> Result<()> {
-Ok(self.kvdb.put(
+Ok(self.flow_kvdb.put(
 COL_MISC,
 LOG_LATEST_BLOCK_NUMBER_KEY.as_bytes(),
 &block_number.as_ssz_bytes(),

@@ -270,7 +274,7 @@ impl TransactionStore {
 pub fn get_log_latest_block_number(&self) -> Result<Option<u64>> {
 Ok(Some(
 <u64>::from_ssz_bytes(&try_option!(self
-.kvdb
+.flow_kvdb
 .get(COL_MISC, LOG_LATEST_BLOCK_NUMBER_KEY.as_bytes())?))
 .map_err(Error::from)?,
 ))

@@ -282,7 +286,7 @@ impl TransactionStore {
 ) -> Result<Option<(H256, Option<u64>)>> {
 Ok(Some(
 <(H256, Option<u64>)>::from_ssz_bytes(&try_option!(self
-.kvdb
+.flow_kvdb
 .get(COL_BLOCK_PROGRESS, &block_number.to_be_bytes())?))
 .map_err(Error::from)?,
 ))

@@ -290,7 +294,7 @@ impl TransactionStore {
 
 pub fn get_block_hashes(&self) -> Result<Vec<(u64, BlockHashAndSubmissionIndex)>> {
 let mut block_numbers = vec![];
-for r in self.kvdb.iter(COL_BLOCK_PROGRESS) {
+for r in self.flow_kvdb.iter(COL_BLOCK_PROGRESS) {
 let (key, val) = r?;
 let block_number =
 u64::from_be_bytes(key.as_ref().try_into().map_err(|e| anyhow!("{:?}", e))?);

@@ -310,7 +314,7 @@ impl TransactionStore {
 
 pub fn delete_block_hash_by_number(&self, block_number: u64) -> Result<()> {
 Ok(self
-.kvdb
+.flow_kvdb
 .delete(COL_BLOCK_PROGRESS, &block_number.to_be_bytes())?)
 }
 

@@ -1,4 +1,7 @@
-use crate::{controllers::SyncState, SyncRequest, SyncResponse, SyncSender};
+use crate::{
+controllers::{FailureReason, SyncState},
+SyncRequest, SyncResponse, SyncSender,
+};
 use anyhow::{bail, Result};
 use serde::{Deserialize, Serialize};
 use std::{collections::HashSet, fmt::Debug, sync::Arc, time::Duration};

@@ -84,14 +87,12 @@ impl Batcher {
 }
 
 async fn poll_tx(&self, tx_seq: u64) -> Result<Option<SyncResult>> {
-// file already exists
-if self.store.check_tx_completed(tx_seq).await?
-|| self.store.check_tx_pruned(tx_seq).await?
-{
-// File may be finalized during file sync, e.g. user uploaded file via RPC.
-// In this case, just terminate the file sync.
-let num_terminated = self.terminate_file_sync(tx_seq, false).await;
-info!(%tx_seq, %num_terminated, "Terminate file sync due to file already finalized in db");
+// file already finalized or even pruned
+if let Some(tx_status) = self.store.get_store().get_tx_status(tx_seq)? {
+let num_terminated: usize = self.terminate_file_sync(tx_seq, false).await;
+if num_terminated > 0 {
+info!(%tx_seq, %num_terminated, ?tx_status, "Terminate file sync due to file already completed in db");
+}
 return Ok(Some(SyncResult::Completed));
 }
 

@@ -128,7 +129,10 @@ impl Batcher {
 "Failed to sync file and terminate the failed file sync"
 );
 self.terminate_file_sync(tx_seq, false).await;
-Ok(Some(SyncResult::Failed))
+match reason {
+FailureReason::TimeoutFindFile => Ok(Some(SyncResult::Timeout)),
+_ => Ok(Some(SyncResult::Failed)),
+}
 }
 
 // finding peers timeout
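A small illustrative sketch of the result mapping introduced in the last hunk, assuming the FailureReason and SyncResult enums from this crate: a find-file timeout is now reported as a Timeout result rather than a generic failure.

    let result = match reason {
        FailureReason::TimeoutFindFile => SyncResult::Timeout,
        _ => SyncResult::Failed,
    };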
@@ -1,6 +1,6 @@
-use super::{batcher::Batcher, sync_store::SyncStore};
+use super::{batcher::Batcher, metrics::RandomBatcherMetrics, sync_store::SyncStore};
 use crate::{
-auto_sync::{batcher::SyncResult, metrics, sync_store::Queue},
+auto_sync::{batcher::SyncResult, sync_store::Queue},
 Config, SyncSender,
 };
 use anyhow::Result;

@@ -15,26 +15,33 @@ use tokio::time::sleep;
 #[derive(Debug, Serialize, Deserialize)]
 #[serde(rename_all = "camelCase")]
 pub struct RandomBatcherState {
+pub name: String,
 pub tasks: Vec<u64>,
 pub pending_txs: usize,
 pub ready_txs: usize,
+pub cached_ready_txs: usize,
 }
 
 #[derive(Clone)]
 pub struct RandomBatcher {
+name: String,
 config: Config,
 batcher: Batcher,
 sync_store: Arc<SyncStore>,
+metrics: Arc<RandomBatcherMetrics>,
 }
 
 impl RandomBatcher {
 pub fn new(
+name: String,
 config: Config,
 store: Store,
 sync_send: SyncSender,
 sync_store: Arc<SyncStore>,
+metrics: Arc<RandomBatcherMetrics>,
 ) -> Self {
 Self {
+name,
 config,
 batcher: Batcher::new(
 config.max_random_workers,

@@ -43,21 +50,24 @@ impl RandomBatcher {
 sync_send,
 ),
 sync_store,
+metrics,
 }
 }
 
 pub async fn get_state(&self) -> Result<RandomBatcherState> {
-let (pending_txs, ready_txs) = self.sync_store.stat().await?;
+let (pending_txs, ready_txs, cached_ready_txs) = self.sync_store.stat().await?;
 
 Ok(RandomBatcherState {
+name: self.name.clone(),
 tasks: self.batcher.tasks().await,
 pending_txs,
 ready_txs,
+cached_ready_txs,
 })
 }
 
 pub async fn start(mut self, catched_up: Arc<AtomicBool>) {
-info!("Start to sync files");
+info!("Start to sync files, state = {:?}", self.get_state().await);
 
 // wait for log entry sync catched up
 while !catched_up.load(Ordering::Relaxed) {

@@ -67,9 +77,8 @@ impl RandomBatcher {
 
 loop {
 if let Ok(state) = self.get_state().await {
-metrics::RANDOM_STATE_TXS_SYNCING.update(state.tasks.len() as u64);
-metrics::RANDOM_STATE_TXS_READY.update(state.ready_txs as u64);
-metrics::RANDOM_STATE_TXS_PENDING.update(state.pending_txs as u64);
+self.metrics
+.update_state(state.ready_txs, state.pending_txs);
 }
 
 match self.sync_once().await {

@@ -101,11 +110,7 @@ impl RandomBatcher {
 };
 
 debug!(%tx_seq, ?sync_result, "Completed to sync file, state = {:?}", self.get_state().await);
-match sync_result {
-SyncResult::Completed => metrics::RANDOM_SYNC_RESULT_COMPLETED.mark(1),
-SyncResult::Failed => metrics::RANDOM_SYNC_RESULT_FAILED.inc(1),
-SyncResult::Timeout => metrics::RANDOM_SYNC_RESULT_TIMEOUT.inc(1),
-}
+self.metrics.update_result(sync_result);
 
 if matches!(sync_result, SyncResult::Completed) {
 self.sync_store.remove(tx_seq).await?;

node/sync/src/auto_sync/historical_tx_writer.rs (new file, 103 lines)
@@ -0,0 +1,103 @@
+use std::sync::{
+atomic::{AtomicU64, Ordering},
+Arc,
+};
+
+use anyhow::Result;
+use serde::{Deserialize, Serialize};
+use storage::log_store::log_manager::DATA_DB_KEY;
+use storage_async::Store;
+use tokio::time::sleep;
+
+use crate::Config;
+
+use super::sync_store::{Queue, SyncStore};
+
+const KEY_NEXT_TX_SEQ: &str = "sync.manager.historical.next_tx_seq";
+
+#[derive(Debug, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct HistoricalTxWriterState {
+pub next_tx_seq: u64,
+pub pending_txs: usize,
+pub ready_txs: usize,
+}
+
+pub struct HistoricalTxWriter {
+config: Config,
+store: Store,
+sync_store: Arc<SyncStore>,
+next_tx_seq: Arc<AtomicU64>,
+}
+
+impl HistoricalTxWriter {
+pub async fn new(config: Config, store: Store, sync_store: Arc<SyncStore>) -> Result<Self> {
+let next_tx_seq = store
+.get_config_decoded(&KEY_NEXT_TX_SEQ, DATA_DB_KEY)
+.await?;
+
+Ok(Self {
+config,
+store,
+sync_store,
+next_tx_seq: Arc::new(AtomicU64::new(next_tx_seq.unwrap_or(0))),
+})
+}
+
+pub async fn get_state(&self) -> Result<HistoricalTxWriterState> {
+let (pending_txs, ready_txs, _) = self.sync_store.stat().await?;
+
+Ok(HistoricalTxWriterState {
+next_tx_seq: self.next_tx_seq.load(Ordering::Relaxed),
+pending_txs,
+ready_txs,
+})
+}
+
+pub async fn start(mut self) {
+info!(
+"Start to write historical files into sync store, state = {:?}",
+self.get_state().await
+);
+
+loop {
+match self.write_once().await {
+Ok(true) => {}
+Ok(false) => {
+trace!(
+"There is no tx to write in sync store, state = {:?}",
+self.get_state().await
+);
+sleep(self.config.auto_sync_idle_interval).await;
+}
+Err(err) => {
+warn!(%err, "Failed to write tx once, state = {:?}", self.get_state().await);
+sleep(self.config.auto_sync_error_interval).await;
+}
+}
+}
+}
+
+async fn write_once(&mut self) -> Result<bool> {
+let mut next_tx_seq = self.next_tx_seq.load(Ordering::Relaxed);
+
+// no tx to write in sync store
+if next_tx_seq >= self.store.get_store().next_tx_seq() {
+return Ok(false);
+}
+
+// write tx in sync store if not finalized or pruned
+if self.store.get_store().get_tx_status(next_tx_seq)?.is_none() {
+self.sync_store.insert(next_tx_seq, Queue::Ready).await?;
+}
+
+// move forward
+next_tx_seq += 1;
+self.store
+.set_config_encoded(&KEY_NEXT_TX_SEQ, &next_tx_seq, DATA_DB_KEY)
+.await?;
+self.next_tx_seq.store(next_tx_seq, Ordering::Relaxed);
+
+Ok(true)
+}
+}
@@ -18,6 +18,8 @@ use crate::{Config, SyncSender};
 use super::{
 batcher_random::RandomBatcher,
 batcher_serial::SerialBatcher,
+historical_tx_writer::HistoricalTxWriter,
+metrics,
 sync_store::{Queue, SyncStore},
 };
 

@@ -44,11 +46,12 @@ impl AutoSyncManager {
 // use v2 db to avoid reading v1 files that announced from the whole network instead of neighbors
 Arc::new(SyncStore::new_with_name(
 store.clone(),
+config.ready_txs_cache_cap,
 "pendingv2",
 "readyv2",
 ))
 } else {
-Arc::new(SyncStore::new(store.clone()))
+Arc::new(SyncStore::new(store.clone(), 0))
 };
 let catched_up = Arc::new(AtomicBool::new(false));
 

@@ -76,7 +79,14 @@ impl AutoSyncManager {
 };
 
 // sync randomly
-let random = RandomBatcher::new(config, store, sync_send, sync_store);
+let random = RandomBatcher::new(
+"random".into(),
+config,
+store.clone(),
+sync_send.clone(),
+sync_store,
+metrics::RANDOM_ANNOUNCED.clone(),
+);
 executor.spawn(random.clone().start(catched_up.clone()), "auto_sync_random");
 
 // handle on catched up notification

@@ -85,6 +95,34 @@ impl AutoSyncManager {
 "auto_sync_wait_for_catchup",
 );
 
+// sync randomly for files without NewFile announcement
+if config.neighbors_only {
+let historical_sync_store = Arc::new(SyncStore::new_with_name(
+store.clone(),
+0,
+"pendingv2_historical",
+"readyv2_historical",
+));
+
+let writer =
+HistoricalTxWriter::new(config, store.clone(), historical_sync_store.clone())
+.await?;
+executor.spawn(writer.start(), "auto_sync_historical_writer");
+
+let random_historical = RandomBatcher::new(
+"random_historical".into(),
+config,
+store,
+sync_send,
+historical_sync_store,
+metrics::RANDOM_HISTORICAL.clone(),
+);
+executor.spawn(
+random_historical.start(catched_up.clone()),
+"auto_sync_random_historical",
+);
+}
+
 Ok(Self {
 serial,
 random,

@@ -1,6 +1,46 @@
 use std::sync::Arc;
 
-use metrics::{register_meter, Counter, CounterUsize, Gauge, GaugeUsize, Histogram, Meter, Sample};
+use metrics::{
+register_meter, register_meter_with_group, Counter, CounterUsize, Gauge, GaugeUsize, Histogram,
+Meter, Sample,
+};
+
+use super::batcher::SyncResult;
+
+#[derive(Clone)]
+pub struct RandomBatcherMetrics {
+pub ready_txs: Arc<dyn Gauge<usize>>,
+pub pending_txs: Arc<dyn Gauge<usize>>,
+
+pub completed_qps: Arc<dyn Meter>,
+pub failed_qps: Arc<dyn Meter>,
+pub timeout_qps: Arc<dyn Meter>,
+}
+
+impl RandomBatcherMetrics {
+pub fn new(group_name: &str) -> Self {
+Self {
+ready_txs: GaugeUsize::register_with_group(group_name, "ready_txs"),
+pending_txs: GaugeUsize::register_with_group(group_name, "pending_txs"),
+completed_qps: register_meter_with_group(group_name, "completed_qps"),
+failed_qps: register_meter_with_group(group_name, "failed_qps"),
+timeout_qps: register_meter_with_group(group_name, "timeout_qps"),
+}
+}
+
+pub fn update_state(&self, ready_txs: usize, pending_txs: usize) {
+self.ready_txs.update(ready_txs);
+self.pending_txs.update(pending_txs);
+}
+
+pub fn update_result(&self, result: SyncResult) {
+match result {
+SyncResult::Completed => self.completed_qps.mark(1),
+SyncResult::Failed => self.failed_qps.mark(1),
+SyncResult::Timeout => self.timeout_qps.mark(1),
+}
+}
+}
+
 lazy_static::lazy_static! {
 // sequential auto sync

@@ -14,11 +54,6 @@ lazy_static::lazy_static! {
 pub static ref SEQUENTIAL_SYNC_RESULT_TIMEOUT: Arc<dyn Counter<usize>> = CounterUsize::register("sync_auto_sequential_sync_result_timeout");
 
 // random auto sync
-pub static ref RANDOM_STATE_TXS_SYNCING: Arc<dyn Histogram> = Sample::ExpDecay(0.015).register("sync_auto_random_state_txs_syncing", 1024);
-pub static ref RANDOM_STATE_TXS_READY: Arc<dyn Histogram> = Sample::ExpDecay(0.015).register("sync_auto_random_state_txs_ready", 1024);
-pub static ref RANDOM_STATE_TXS_PENDING: Arc<dyn Histogram> = Sample::ExpDecay(0.015).register("sync_auto_random_state_txs_pending", 1024);
-
-pub static ref RANDOM_SYNC_RESULT_COMPLETED: Arc<dyn Meter> = register_meter("sync_auto_random_sync_result_completed");
-pub static ref RANDOM_SYNC_RESULT_FAILED: Arc<dyn Counter<usize>> = CounterUsize::register("sync_auto_random_sync_result_failed");
-pub static ref RANDOM_SYNC_RESULT_TIMEOUT: Arc<dyn Counter<usize>> = CounterUsize::register("sync_auto_random_sync_result_timeout");
+pub static ref RANDOM_ANNOUNCED: Arc<RandomBatcherMetrics> = Arc::new(RandomBatcherMetrics::new("sync_auto_random_announced"));
+pub static ref RANDOM_HISTORICAL: Arc<RandomBatcherMetrics> = Arc::new(RandomBatcherMetrics::new("sync_auto_random_historical"));
 }
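For reference, a minimal sketch of how one of the new metric groups is driven; the group name here is a placeholder, while the real groups are the RANDOM_ANNOUNCED and RANDOM_HISTORICAL statics registered above.

    let group = Arc::new(RandomBatcherMetrics::new("sync_auto_random_example"));
    group.update_state(3, 10); // ready_txs, pending_txs
    group.update_result(SyncResult::Completed);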
@@ -1,6 +1,7 @@
 mod batcher;
 pub mod batcher_random;
 pub mod batcher_serial;
+mod historical_tx_writer;
 pub mod manager;
 mod metrics;
 pub mod sync_store;

@@ -1,4 +1,4 @@
-use super::tx_store::TxStore;
+use super::tx_store::{CachedTxStore, TxStore};
 use anyhow::Result;
 use std::sync::Arc;
 use storage::log_store::{

@@ -33,35 +33,36 @@ pub struct SyncStore {
 
 /// Ready transactions to sync with high priority since announcement
 /// already received from other peers.
-ready_txs: TxStore,
+ready_txs: CachedTxStore,
 }
 
 impl SyncStore {
-pub fn new(store: Store) -> Self {
-Self {
-store: Arc::new(RwLock::new(store)),
-pending_txs: TxStore::new("pending"),
-ready_txs: TxStore::new("ready"),
-}
+pub fn new(store: Store, ready_txs_cache_cap: usize) -> Self {
+Self::new_with_name(store, ready_txs_cache_cap, "pending", "ready")
 }
 
-pub fn new_with_name(store: Store, pending: &'static str, ready: &'static str) -> Self {
+pub fn new_with_name(
+store: Store,
+ready_txs_cache_cap: usize,
+pending: &'static str,
+ready: &'static str,
+) -> Self {
 Self {
 store: Arc::new(RwLock::new(store)),
 pending_txs: TxStore::new(pending),
-ready_txs: TxStore::new(ready),
+ready_txs: CachedTxStore::new(ready, ready_txs_cache_cap),
 }
 }
 
 /// Returns the number of pending txs and ready txs.
-pub async fn stat(&self) -> Result<(usize, usize)> {
+pub async fn stat(&self) -> Result<(usize, usize, usize)> {
 let async_store = self.store.read().await;
 let store = async_store.get_store();
 
 let num_pending_txs = self.pending_txs.count(store)?;
-let num_ready_txs = self.ready_txs.count(store)?;
+let (num_ready_txs, num_cached_ready_txs) = self.ready_txs.count(store).await?;
 
-Ok((num_pending_txs, num_ready_txs))
+Ok((num_pending_txs, num_ready_txs, num_cached_ready_txs))
 }
 
 pub async fn get_tx_seq_range(&self) -> Result<(Option<u64>, Option<u64>)> {

@@ -112,7 +113,7 @@ impl SyncStore {
 
 match queue {
 Queue::Ready => {
-if !self.ready_txs.add(store, Some(&mut tx), tx_seq)? {
+if !self.ready_txs.add(store, Some(&mut tx), tx_seq).await? {
 return Ok(InsertResult::AlreadyExists);
 }
 

@@ -130,7 +131,7 @@ impl SyncStore {
 return Ok(InsertResult::AlreadyExists);
 }
 
-let removed = self.ready_txs.remove(store, Some(&mut tx), tx_seq)?;
+let removed = self.ready_txs.remove(store, Some(&mut tx), tx_seq).await?;
 store.exec_configs(tx, DATA_DB_KEY)?;
 
 if removed {

@@ -152,7 +153,7 @@ impl SyncStore {
 return Ok(false);
 }
 
-let added = self.ready_txs.add(store, Some(&mut tx), tx_seq)?;
+let added = self.ready_txs.add(store, Some(&mut tx), tx_seq).await?;
 
 store.exec_configs(tx, DATA_DB_KEY)?;
 

@@ -164,7 +165,7 @@ impl SyncStore {
 let store = async_store.get_store();
 
 // try to find a tx in ready queue with high priority
-if let Some(val) = self.ready_txs.random(store)? {
+if let Some(val) = self.ready_txs.random(store).await? {
 return Ok(Some(val));
 }
 

@@ -177,7 +178,7 @@ impl SyncStore {
 let store = async_store.get_store();
 
 // removed in ready queue
-if self.ready_txs.remove(store, None, tx_seq)? {
+if self.ready_txs.remove(store, None, tx_seq).await? {
 return Ok(Some(Queue::Ready));
 }
 

@@ -199,7 +200,7 @@ mod tests {
 #[tokio::test]
 async fn test_tx_seq_range() {
 let runtime = TestStoreRuntime::default();
-let store = SyncStore::new(runtime.store.clone());
+let store = SyncStore::new(runtime.store.clone(), 0);
 
 // check values by default
 assert_eq!(store.get_tx_seq_range().await.unwrap(), (None, None));

@@ -215,7 +216,7 @@ mod tests {
 #[tokio::test]
 async fn test_insert() {
 let runtime = TestStoreRuntime::default();
-let store = SyncStore::new(runtime.store.clone());
+let store = SyncStore::new(runtime.store.clone(), 0);
 
 assert_eq!(store.contains(1).await.unwrap(), None);
 assert_eq!(store.insert(1, Pending).await.unwrap(), NewAdded);

@@ -234,7 +235,7 @@ mod tests {
 #[tokio::test]
 async fn test_upgrade() {
 let runtime = TestStoreRuntime::default();
-let store = SyncStore::new(runtime.store.clone());
+let store = SyncStore::new(runtime.store.clone(), 0);
 
 // cannot upgrade by default
 assert!(!store.upgrade(3).await.unwrap());

@@ -253,7 +254,7 @@ mod tests {
 #[tokio::test]
 async fn test_random() {
 let runtime = TestStoreRuntime::default();
-let store = SyncStore::new(runtime.store.clone());
+let store = SyncStore::new(runtime.store.clone(), 0);
 
 // no tx by default
 assert_eq!(store.random().await.unwrap(), None);

@@ -273,7 +274,7 @@ mod tests {
 #[tokio::test]
 async fn test_remove() {
 let runtime = TestStoreRuntime::default();
-let store = SyncStore::new(runtime.store.clone());
+let store = SyncStore::new(runtime.store.clone(), 0);
 
 // cannot remove by default
 assert_eq!(store.remove(1).await.unwrap(), None);

@@ -1,8 +1,12 @@
+use std::collections::HashSet;
+
 use anyhow::Result;
+use rand::seq::IteratorRandom;
 use rand::Rng;
 use storage::log_store::config::{ConfigTx, ConfigurableExt};
 use storage::log_store::log_manager::DATA_DB_KEY;
 use storage::log_store::Store;
+use tokio::sync::RwLock;
 
 /// TxStore is used to store pending transactions that to be synchronized in advance.
 ///

@@ -138,6 +142,99 @@ impl TxStore {
 }
 }
 
+/// Cache the recent inserted tx in memory for random pick with priority.
+pub struct CachedTxStore {
+tx_store: TxStore,
+cache_cap: usize,
+cache: RwLock<HashSet<u64>>,
+}
+
+impl CachedTxStore {
+pub fn new(name: &'static str, cache_cap: usize) -> Self {
+Self {
+tx_store: TxStore::new(name),
+cache_cap,
+cache: Default::default(),
+}
+}
+
+pub fn has(&self, store: &dyn Store, tx_seq: u64) -> Result<bool> {
+self.tx_store.has(store, tx_seq)
+}
+
+pub async fn count(&self, store: &dyn Store) -> Result<(usize, usize)> {
+if self.cache_cap == 0 {
+return Ok((self.tx_store.count(store)?, 0));
+}
+
+let cache = self.cache.read().await;
+
+Ok((self.tx_store.count(store)?, cache.len()))
+}
+
+pub async fn add(
+&self,
+store: &dyn Store,
+db_tx: Option<&mut ConfigTx>,
+tx_seq: u64,
+) -> Result<bool> {
+if self.cache_cap == 0 {
+return self.tx_store.add(store, db_tx, tx_seq);
+}
+
+let mut cache = self.cache.write().await;
+
+let added = self.tx_store.add(store, db_tx, tx_seq)?;
+
+if added {
+cache.insert(tx_seq);
+
+if cache.len() > self.cache_cap {
+if let Some(popped) = cache.iter().choose(&mut rand::thread_rng()).cloned() {
+cache.remove(&popped);
+}
+}
+}
+
+Ok(added)
+}
+
+pub async fn random(&self, store: &dyn Store) -> Result<Option<u64>> {
+if self.cache_cap == 0 {
+return self.tx_store.random(store);
+}
+
+let cache = self.cache.read().await;
+
+if let Some(v) = cache.iter().choose(&mut rand::thread_rng()).cloned() {
+return Ok(Some(v));
+}
+
+self.tx_store.random(store)
+}
+
+pub async fn remove(
+&self,
+store: &dyn Store,
+db_tx: Option<&mut ConfigTx>,
+tx_seq: u64,
+) -> Result<bool> {
+if self.cache_cap == 0 {
+return self.tx_store.remove(store, db_tx, tx_seq);
+}
+
+let mut cache: tokio::sync::RwLockWriteGuard<'_, HashSet<u64>> = self.cache.write().await;
+
+let removed = self.tx_store.remove(store, db_tx, tx_seq)?;
+
+if removed {
+cache.remove(&tx_seq);
+}
+
+Ok(removed)
+}
+}
+
 #[cfg(test)]
 mod tests {
 use crate::test_util::tests::TestStoreRuntime;
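A brief usage sketch for the cache-backed store added above; it is illustrative only, where store stands for any &dyn Store and the tx sequence number is arbitrary. With cache_cap == 0 every call falls straight through to the underlying TxStore, which is how the non-neighbors configuration keeps the old behaviour.

    let ready_txs = CachedTxStore::new("ready", 1024);
    let added = ready_txs.add(store, None, 42).await?;
    let picked = ready_txs.random(store).await?; // prefers a recently cached tx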
@ -194,7 +194,7 @@ impl SyncPeers {
|
|||||||
ctx.report_peer(
|
ctx.report_peer(
|
||||||
*peer_id,
|
*peer_id,
|
||||||
PeerAction::LowToleranceError,
|
PeerAction::LowToleranceError,
|
||||||
"Dail timeout",
|
"Dial timeout",
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -10,7 +10,7 @@ use network::{
|
|||||||
PeerAction, PeerId, PubsubMessage, SyncId as RequestId,
|
PeerAction, PeerId, PubsubMessage, SyncId as RequestId,
|
||||||
};
|
};
|
||||||
use rand::Rng;
|
use rand::Rng;
|
||||||
use shared_types::{timestamp_now, ChunkArrayWithProof, TxID, CHUNK_SIZE};
|
use shared_types::{ChunkArrayWithProof, ShardedFile, TxID, CHUNK_SIZE};
|
||||||
use ssz::Encode;
|
use ssz::Encode;
|
||||||
use std::{sync::Arc, time::Instant};
|
use std::{sync::Arc, time::Instant};
|
||||||
use storage::log_store::log_manager::{sector_to_segment, segment_to_sector, PORA_CHUNK_SIZE};
|
use storage::log_store::log_manager::{sector_to_segment, segment_to_sector, PORA_CHUNK_SIZE};
|
||||||
@ -211,28 +211,42 @@ impl SerialSyncController {
|
|||||||
fn do_publish_find_file(&self) {
|
fn do_publish_find_file(&self) {
|
||||||
let shard_config = self.store.get_store().get_shard_config();
|
let shard_config = self.store.get_store().get_shard_config();
|
||||||
|
|
||||||
self.ctx.publish(PubsubMessage::FindFile(FindFile {
|
let msg = if self.config.neighbors_only {
|
||||||
tx_id: self.tx_id,
|
PubsubMessage::AskFile(
|
||||||
num_shard: shard_config.num_shard,
|
ShardedFile {
|
||||||
shard_id: shard_config.shard_id,
|
tx_id: self.tx_id,
|
||||||
neighbors_only: self.config.neighbors_only,
|
shard_config: shard_config.into(),
|
||||||
timestamp: timestamp_now(),
|
}
|
||||||
}));
|
.into(),
|
||||||
|
)
|
||||||
|
} else {
|
||||||
|
PubsubMessage::FindFile(
|
||||||
|
FindFile {
|
||||||
|
tx_id: self.tx_id,
|
||||||
|
maybe_shard_config: Some(shard_config.into()),
|
||||||
|
}
|
||||||
|
.into(),
|
||||||
|
)
|
||||||
|
};
|
||||||
|
|
||||||
|
self.ctx.publish(msg);
|
||||||
}
|
}
|
||||||
|
|
||||||
fn publish_find_chunks(&self) {
|
fn publish_find_chunks(&self) {
|
||||||
self.ctx.publish(PubsubMessage::FindChunks(FindChunks {
|
self.ctx.publish(PubsubMessage::FindChunks(
|
||||||
tx_id: self.tx_id,
|
FindChunks {
|
||||||
index_start: self.goal.index_start,
|
tx_id: self.tx_id,
|
||||||
index_end: self.goal.index_end,
|
index_start: self.goal.index_start,
|
||||||
timestamp: timestamp_now(),
|
index_end: self.goal.index_end,
|
||||||
}));
|
}
|
||||||
|
.into(),
|
||||||
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Dial to peers in `Found` state, so that `Connecting` or `Connected` peers cover
|
/// Dial to peers in `Found` state, so that `Connecting` or `Connected` peers cover
|
||||||
/// data in all shards.
|
/// data in all shards.
|
||||||
fn try_connect(&mut self) {
|
fn try_connect(&mut self) {
|
||||||
let mut num_peers_dailed = 0;
|
let mut num_peers_dialed = 0;
|
||||||
|
|
||||||
// select a random peer
|
// select a random peer
|
||||||
while !self
|
while !self
|
||||||
@ -256,10 +270,10 @@ impl SerialSyncController {
|
|||||||
self.peers
|
self.peers
|
||||||
.update_state(&peer_id, PeerState::Found, PeerState::Connecting);
|
.update_state(&peer_id, PeerState::Found, PeerState::Connecting);
|
||||||
|
|
||||||
num_peers_dailed += 1;
|
num_peers_dialed += 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
info!(%self.tx_seq, %num_peers_dailed, "Connecting peers");
|
info!(%self.tx_seq, %num_peers_dialed, "Connecting peers");
|
||||||
|
|
||||||
self.state = SyncState::ConnectingPeers {
|
self.state = SyncState::ConnectingPeers {
|
||||||
origin: self.since,
|
origin: self.since,
|
||||||
@ -358,14 +372,14 @@ impl SerialSyncController {
|
|||||||
.update_state_force(&peer_id, PeerState::Connected);
|
.update_state_force(&peer_id, PeerState::Connected);
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn on_dail_failed(&mut self, peer_id: PeerId, err: &DialError) {
|
pub fn on_dial_failed(&mut self, peer_id: PeerId, err: &DialError) {
|
||||||
match err {
|
match err {
|
||||||
DialError::ConnectionLimit(_) => {
|
DialError::ConnectionLimit(_) => {
|
||||||
if let Some(true) =
|
if let Some(true) =
|
||||||
self.peers
|
self.peers
|
||||||
.update_state(&peer_id, PeerState::Connecting, PeerState::Found)
|
.update_state(&peer_id, PeerState::Connecting, PeerState::Found)
|
||||||
{
|
{
|
||||||
info!(%self.tx_seq, %peer_id, "Failed to dail peer due to outgoing connection limitation");
|
info!(%self.tx_seq, %peer_id, "Failed to dial peer due to outgoing connection limitation");
|
||||||
self.state = SyncState::AwaitingOutgoingConnection {
|
self.state = SyncState::AwaitingOutgoingConnection {
|
||||||
since: Instant::now().into(),
|
since: Instant::now().into(),
|
||||||
};
|
};
|
||||||
@ -377,7 +391,7 @@ impl SerialSyncController {
|
|||||||
PeerState::Connecting,
|
PeerState::Connecting,
|
||||||
PeerState::Disconnected,
|
PeerState::Disconnected,
|
||||||
) {
|
) {
|
||||||
info!(%self.tx_seq, %peer_id, %err, "Failed to dail peer");
|
info!(%self.tx_seq, %peer_id, %err, "Failed to dial peer");
|
||||||
self.state = SyncState::Idle;
|
self.state = SyncState::Idle;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -663,7 +677,7 @@ impl SerialSyncController {
|
|||||||
} else {
|
} else {
|
||||||
// FindFile timeout
|
// FindFile timeout
|
||||||
if since.elapsed() >= self.config.peer_find_timeout {
|
if since.elapsed() >= self.config.peer_find_timeout {
|
||||||
if self.config.neighbors_only {
|
if self.goal.is_all_chunks() && self.config.neighbors_only {
|
||||||
self.state = SyncState::Failed {
|
self.state = SyncState::Failed {
|
||||||
reason: FailureReason::TimeoutFindFile,
|
reason: FailureReason::TimeoutFindFile,
|
||||||
};
|
};
|
||||||
@ -1680,7 +1694,10 @@ mod tests {
|
|||||||
let file_location_cache = create_file_location_cache(peer_id, vec![tx_id]);
|
let file_location_cache = create_file_location_cache(peer_id, vec![tx_id]);
|
||||||
|
|
||||||
let controller = SerialSyncController::new(
|
let controller = SerialSyncController::new(
|
||||||
Config::default(),
|
Config {
|
||||||
|
neighbors_only: false,
|
||||||
|
..Default::default()
|
||||||
|
},
|
||||||
tx_id,
|
tx_id,
|
||||||
0,
|
0,
|
||||||
FileSyncGoal::new_file(num_chunks as u64),
|
FileSyncGoal::new_file(num_chunks as u64),
|
||||||
|
@@ -62,25 +62,26 @@ pub struct Config {
     pub sequential_find_peer_timeout: Duration,
     #[serde(deserialize_with = "deserialize_duration")]
     pub random_find_peer_timeout: Duration,
+    pub ready_txs_cache_cap: usize,
 }

 impl Default for Config {
     fn default() -> Self {
         Self {
             // sync service config
-            neighbors_only: false,
-            heartbeat_interval: Duration::from_secs(5),
+            neighbors_only: true,
+            heartbeat_interval: Duration::from_secs(3),
             auto_sync_enabled: false,
-            max_sync_files: 8,
+            max_sync_files: 16,
             sync_file_by_rpc_enabled: true,
             sync_file_on_announcement_enabled: false,

             // serial sync config
             max_chunks_to_request: 2 * 1024,
-            max_request_failures: 5,
+            max_request_failures: 3,
             peer_connect_timeout: Duration::from_secs(15),
             peer_disconnect_timeout: Duration::from_secs(15),
-            peer_find_timeout: Duration::from_secs(120),
+            peer_find_timeout: Duration::from_secs(5),
             peer_chunks_download_timeout: Duration::from_secs(15),
             peer_wait_outgoing_connection_timeout: Duration::from_secs(10),
             peer_next_chunks_request_wait_timeout: Duration::from_secs(3),
@@ -91,9 +92,10 @@ impl Default for Config {
             auto_sync_idle_interval: Duration::from_secs(3),
             auto_sync_error_interval: Duration::from_secs(10),
             max_sequential_workers: 0,
-            max_random_workers: 2,
-            sequential_find_peer_timeout: Duration::from_secs(60),
-            random_find_peer_timeout: Duration::from_secs(500),
+            max_random_workers: 8,
+            sequential_find_peer_timeout: Duration::from_secs(5),
+            random_find_peer_timeout: Duration::from_secs(5),
+            ready_txs_cache_cap: 1_000_000,
         }
     }
 }
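The `Duration` fields above are deserialized via `#[serde(deserialize_with = "deserialize_duration")]`, and the TOML files later in this diff supply them as strings such as "15s" or "5s". A minimal sketch of what such a helper commonly looks like, assuming the `serde` and `humantime` crates; the repository's actual implementation may differ:

use serde::{Deserialize, Deserializer};
use std::time::Duration;

// Illustrative only: turn a human-readable string like "5s" or "120s" into a Duration.
fn deserialize_duration<'de, D>(deserializer: D) -> Result<Duration, D::Error>
where
    D: Deserializer<'de>,
{
    let s = String::deserialize(deserializer)?;
    humantime::parse_duration(&s).map_err(serde::de::Error::custom)
}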
@@ -8,13 +8,12 @@ use anyhow::{anyhow, bail, Result};
 use file_location_cache::FileLocationCache;
 use libp2p::swarm::DialError;
 use log_entry_sync::LogSyncEvent;
-use network::rpc::methods::FileAnnouncement;
-use network::types::{AnnounceChunks, FindFile, NewFile};
+use network::types::{AnnounceChunks, FindFile};
 use network::{
     rpc::GetChunksRequest, rpc::RPCResponseErrorCode, Multiaddr, NetworkMessage, NetworkSender,
     PeerId, PeerRequestId, PubsubMessage, SyncId as RequestId,
 };
-use shared_types::{bytes_to_chunks, timestamp_now, ChunkArrayWithProof, Transaction, TxID};
+use shared_types::{bytes_to_chunks, ChunkArrayWithProof, ShardedFile, Transaction, TxID};
 use std::sync::atomic::Ordering;
 use std::{
     cmp,
@@ -33,7 +32,7 @@ pub type SyncReceiver = channel::Receiver<SyncMessage, SyncRequest, SyncResponse

 #[derive(Debug)]
 pub enum SyncMessage {
-    DailFailed {
+    DialFailed {
         peer_id: PeerId,
         err: DialError,
     },
@@ -65,19 +64,17 @@ pub enum SyncMessage {
     AnnounceShardConfig {
         shard_config: ShardConfig,
         peer_id: PeerId,
-        addr: Multiaddr,
     },
     AnnounceChunksGossip {
         msg: AnnounceChunks,
     },
     NewFile {
         from: PeerId,
-        msg: NewFile,
+        file: ShardedFile,
     },
-    AnnounceFile {
+    AnswerFile {
         peer_id: PeerId,
-        request_id: PeerRequestId,
-        announcement: FileAnnouncement,
+        file: ShardedFile,
     },
 }

@@ -227,8 +224,8 @@ impl SyncService {
         trace!("Sync received message {:?}", msg);

         match msg {
-            SyncMessage::DailFailed { peer_id, err } => {
-                self.on_dail_failed(peer_id, err);
+            SyncMessage::DialFailed { peer_id, err } => {
+                self.on_dial_failed(peer_id, err);
             }
             SyncMessage::PeerConnected { peer_id } => {
                 self.on_peer_connected(peer_id);
@@ -274,12 +271,8 @@ impl SyncService {
             SyncMessage::AnnounceShardConfig { .. } => {
                 // FIXME: Check if controllers need to be reset?
             }
-            SyncMessage::NewFile { from, msg } => self.on_new_file_gossip(from, msg).await,
-            SyncMessage::AnnounceFile {
-                peer_id,
-                announcement,
-                ..
-            } => self.on_announce_file(peer_id, announcement).await,
+            SyncMessage::NewFile { from, file } => self.on_new_file_gossip(from, file).await,
+            SyncMessage::AnswerFile { peer_id, file } => self.on_answer_file(peer_id, file).await,
         }
     }

@@ -369,11 +362,11 @@ impl SyncService {
         }
     }

-    fn on_dail_failed(&mut self, peer_id: PeerId, err: DialError) {
-        info!(%peer_id, ?err, "Dail to peer failed");
+    fn on_dial_failed(&mut self, peer_id: PeerId, err: DialError) {
+        info!(%peer_id, ?err, "Dial to peer failed");

         for controller in self.controllers.values_mut() {
-            controller.on_dail_failed(peer_id, &err);
+            controller.on_dial_failed(peer_id, &err);
             controller.transition();
         }
     }
@@ -585,9 +578,7 @@ impl SyncService {

     async fn on_find_file(&mut self, tx_seq: u64) -> Result<()> {
         // file already exists
-        if self.store.check_tx_completed(tx_seq).await?
-            || self.store.check_tx_pruned(tx_seq).await?
-        {
+        if self.store.get_store().get_tx_status(tx_seq)?.is_some() {
             return Ok(());
         }
         // broadcast find file
@@ -595,14 +586,13 @@ impl SyncService {
             Some(tx) => tx,
             None => bail!("Transaction not found"),
         };
-        let shard_config = self.store.get_store().get_shard_config();
-        self.ctx.publish(PubsubMessage::FindFile(FindFile {
-            tx_id: tx.id(),
-            num_shard: shard_config.num_shard,
-            shard_id: shard_config.shard_id,
-            neighbors_only: false,
-            timestamp: timestamp_now(),
-        }));
+        self.ctx.publish(PubsubMessage::FindFile(
+            FindFile {
+                tx_id: tx.id(),
+                maybe_shard_config: None,
+            }
+            .into(),
+        ));
         Ok(())
     }

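The replacement code above drops the inline `timestamp: timestamp_now()` field and converts the `FindFile` payload with `.into()` before publishing, which suggests the timestamp (and any other envelope data) is now attached during the conversion into the pubsub wire type. A self-contained, hypothetical sketch of that pattern; the type and field names below are assumptions, not the repository's definitions:

use std::time::{SystemTime, UNIX_EPOCH};

// Hypothetical payload and wire-envelope types for illustration only.
struct FindFile {
    tx_seq: u64,
}

struct TimedMessage {
    payload: FindFile,
    timestamp: u64, // seconds since the Unix epoch, filled in by the conversion
}

impl From<FindFile> for TimedMessage {
    fn from(payload: FindFile) -> Self {
        let timestamp = SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .expect("system clock before Unix epoch")
            .as_secs();
        TimedMessage { payload, timestamp }
    }
}

fn main() {
    // Callers build the bare payload and let `.into()` stamp the envelope.
    let msg: TimedMessage = FindFile { tx_seq: 7 }.into();
    println!("tx_seq={} timestamp={}", msg.payload.tx_seq, msg.timestamp);
}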
@@ -651,10 +641,8 @@ impl SyncService {
         };

         // file already exists
-        if self.store.check_tx_completed(tx_seq).await?
-            || self.store.check_tx_pruned(tx_seq).await?
-        {
-            bail!("File already exists");
+        if let Some(status) = self.store.get_store().get_tx_status(tx_seq)? {
+            bail!("File already exists [{:?}]", status);
         }

         let (index_start, index_end, all_chunks) = match maybe_range {
@@ -729,21 +717,12 @@ impl SyncService {
             return;
         }

-        // File already exists and ignore the AnnounceFile message
-        match self.store.check_tx_completed(tx_seq).await {
-            Ok(true) => return,
-            Ok(false) => {}
+        // File already exists or pruned, just ignore the AnnounceFile message
+        match self.store.get_store().get_tx_status(tx_seq) {
+            Ok(Some(_)) => return,
+            Ok(None) => {}
             Err(err) => {
-                error!(%tx_seq, %err, "Failed to check if file finalized");
-                return;
-            }
-        }
-
-        match self.store.check_tx_pruned(tx_seq).await {
-            Ok(true) => return,
-            Ok(false) => {}
-            Err(err) => {
-                error!(%tx_seq, %err, "Failed to check if file pruned");
+                error!(%tx_seq, %err, "Failed to get tx status");
                 return;
             }
         }
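This hunk and the two before it replace the separate `check_tx_completed` / `check_tx_pruned` calls with a single `get_tx_status` lookup, so any recorded status means the file needs no further sync. A self-contained sketch of that collapse; the enum variants and function below are illustrative assumptions, not the actual store API:

// Hypothetical status enum standing in for whatever `get_tx_status` returns.
#[derive(Debug, Clone, Copy)]
enum TxStatus {
    Finalized,
    Pruned,
}

// One lookup answers both questions the old code asked separately.
fn should_skip_sync(status: Option<TxStatus>) -> bool {
    status.is_some()
}

fn main() {
    assert!(should_skip_sync(Some(TxStatus::Finalized)));
    assert!(should_skip_sync(Some(TxStatus::Pruned)));
    assert!(!should_skip_sync(None));
    println!("a single status lookup replaces the completed/pruned pair");
}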
@@ -774,27 +753,25 @@ impl SyncService {
     }

     /// Handle on `NewFile` gossip message received.
-    async fn on_new_file_gossip(&mut self, from: PeerId, msg: NewFile) {
-        debug!(%from, ?msg, "Received NewFile gossip");
+    async fn on_new_file_gossip(&mut self, from: PeerId, file: ShardedFile) {
+        debug!(%from, ?file, "Received NewFile gossip");

-        if let Some(controller) = self.controllers.get_mut(&msg.tx_id.seq) {
+        if let Some(controller) = self.controllers.get_mut(&file.tx_id.seq) {
             // Notify new peer announced if file already in sync
-            if let Ok(shard_config) = ShardConfig::new(msg.shard_id, msg.num_shard) {
+            if let Ok(shard_config) = ShardConfig::try_from(file.shard_config) {
                 controller.on_peer_announced(from, shard_config);
                 controller.transition();
             }
         } else if let Some(manager) = &self.auto_sync_manager {
-            let _ = manager.new_file_send.send(msg.tx_id.seq);
+            let _ = manager.new_file_send.send(file.tx_id.seq);
         }
     }

-    /// Handle on `AnnounceFile` RPC message received.
-    async fn on_announce_file(&mut self, from: PeerId, announcement: FileAnnouncement) {
+    /// Handle on `AnswerFile` RPC message received.
+    async fn on_answer_file(&mut self, from: PeerId, file: ShardedFile) {
         // Notify new peer announced if file already in sync
-        if let Some(controller) = self.controllers.get_mut(&announcement.tx_id.seq) {
-            if let Ok(shard_config) =
-                ShardConfig::new(announcement.shard_id, announcement.num_shard)
-            {
+        if let Some(controller) = self.controllers.get_mut(&file.tx_id.seq) {
+            if let Ok(shard_config) = ShardConfig::try_from(file.shard_config) {
                 controller.on_peer_announced(from, shard_config);
                 controller.transition();
             }
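Both handlers above now validate the announced shard configuration with `ShardConfig::try_from(file.shard_config)` rather than `ShardConfig::new(shard_id, num_shard)`. A hedged sketch of what such a fallible conversion typically enforces; the wire type, field names, and the power-of-two rule (taken from the config comment "the shard number is 2^n" later in this diff) are assumptions for illustration only:

// Hypothetical wire-format shard config as carried inside a ShardedFile.
#[derive(Clone, Copy, Debug)]
struct WireShardConfig {
    num_shard: u64,
    shard_id: u64,
}

// Validated form used by the sync controllers.
#[derive(Clone, Copy, Debug)]
struct ShardConfig {
    num_shard: u64,
    shard_id: u64,
}

impl TryFrom<WireShardConfig> for ShardConfig {
    type Error = String;

    fn try_from(wire: WireShardConfig) -> Result<Self, Self::Error> {
        // Reject malformed gossip/RPC payloads before they reach a controller.
        if !wire.num_shard.is_power_of_two() {
            return Err(format!("num_shard {} is not a power of two", wire.num_shard));
        }
        if wire.shard_id >= wire.num_shard {
            return Err(format!("shard_id {} out of range", wire.shard_id));
        }
        Ok(ShardConfig {
            num_shard: wire.num_shard,
            shard_id: wire.shard_id,
        })
    }
}

fn main() {
    assert!(ShardConfig::try_from(WireShardConfig { num_shard: 4, shard_id: 1 }).is_ok());
    assert!(ShardConfig::try_from(WireShardConfig { num_shard: 3, shard_id: 1 }).is_err());
    assert!(ShardConfig::try_from(WireShardConfig { num_shard: 4, shard_id: 7 }).is_err());
}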
@@ -964,8 +941,14 @@ mod tests {
        }

        async fn spawn_sync_service(&mut self, with_peer_store: bool) -> SyncSender {
-            self.spawn_sync_service_with_config(with_peer_store, Config::default())
-                .await
+            self.spawn_sync_service_with_config(
+                with_peer_store,
+                Config {
+                    neighbors_only: false,
+                    ..Default::default()
+                },
+            )
+            .await
        }

        async fn spawn_sync_service_with_config(
@@ -1,10 +1,10 @@
 jsonrpcclient==4.0.3
 pyyaml==6.0.1
-pysha3==1.0.2
-coincurve==18.0.0
-eth-utils==3.0.0
+safe-pysha3==1.0.4
+coincurve==20.0.0
+eth-utils==5.1.0
 py-ecc==7.0.0
-web3==6.14.0
+web3==7.5.0
 eth_tester
 cffi==1.16.0
-rtoml==0.10.0
+rtoml==0.11.0
@@ -176,7 +176,7 @@ mine_contract_address = "0x1785c8683b3c527618eFfF78d876d9dCB4b70285"
 # If this limit is reached, the node will update its `shard_position`
 # and store only half data.
 #
-db_max_num_sectors = 1000000000
+db_max_num_sectors = 4000000000

 # The format is <shard_id>/<shard_number>, where the shard number is 2^n.
 # This only applies if there is no stored shard config in db.
@@ -232,27 +232,23 @@ batcher_announcement_capacity = 100
 # all files, and sufficient disk space is required.
 auto_sync_enabled = true

-# Indicates whether to sync file from neighbor nodes only. This is to avoid flooding file
-# announcements in the whole network, which leads to high latency or even timeout to sync files.
-neighbors_only = true
-
 # Maximum number of files in sync from other peers simultaneously.
-# max_sync_files = 8
+# max_sync_files = 16

 # Enable to start a file sync via RPC (e.g. `admin_startSyncFile`).
 # sync_file_by_rpc_enabled = true

-# Maximum number of continous failures to terminate a file sync.
-# max_request_failures = 5
+# Maximum number of continuous failures to terminate a file sync.
+# max_request_failures = 3

-# Timeout to dail peers.
+# Timeout to dial peers.
 # peer_connect_timeout = "15s"

 # Timeout to disconnect peers.
 # peer_disconnect_timeout = "15s"

 # Timeout to find peers via FIND_FILE P2P pubsub message.
-# peer_find_timeout = "120s"
+# peer_find_timeout = "5s"

 # Timeout to download data from remote peer.
 # peer_chunks_download_timeout = "15s"
@@ -265,13 +261,13 @@ neighbors_only = true
 # max_sequential_workers = 0

 # Maximum threads to sync files randomly.
-# max_random_workers = 2
+# max_random_workers = 8

 # Timeout to terminate a file sync in sequence.
-# sequential_find_peer_timeout = "60s"
+# sequential_find_peer_timeout = "5s"

 # Timeout to terminate a file sync randomly.
-# random_find_peer_timeout = "500s"
+# random_find_peer_timeout = "5s"

 #######################################################################
 ### File Location Cache Options ###
@@ -188,7 +188,7 @@ mine_contract_address = "0x6815F41019255e00D6F34aAB8397a6Af5b6D806f"
 # If this limit is reached, the node will update its `shard_position`
 # and store only half data.
 #
-db_max_num_sectors = 1000000000
+db_max_num_sectors = 4000000000

 # The format is <shard_id>/<shard_number>, where the shard number is 2^n.
 # This only applies if there is no stored shard config in db.
@@ -244,27 +244,23 @@ batcher_announcement_capacity = 100
 # all files, and sufficient disk space is required.
 auto_sync_enabled = true

-# Indicates whether to sync file from neighbor nodes only. This is to avoid flooding file
-# announcements in the whole network, which leads to high latency or even timeout to sync files.
-neighbors_only = true
-
 # Maximum number of files in sync from other peers simultaneously.
-# max_sync_files = 8
+# max_sync_files = 16

 # Enable to start a file sync via RPC (e.g. `admin_startSyncFile`).
 # sync_file_by_rpc_enabled = true

-# Maximum number of continous failures to terminate a file sync.
-# max_request_failures = 5
+# Maximum number of continuous failures to terminate a file sync.
+# max_request_failures = 3

-# Timeout to dail peers.
+# Timeout to dial peers.
 # peer_connect_timeout = "15s"

 # Timeout to disconnect peers.
 # peer_disconnect_timeout = "15s"

 # Timeout to find peers via FIND_FILE P2P pubsub message.
-# peer_find_timeout = "120s"
+# peer_find_timeout = "5s"

 # Timeout to download data from remote peer.
 # peer_chunks_download_timeout = "15s"
@@ -277,13 +273,13 @@ neighbors_only = true
 # max_sequential_workers = 0

 # Maximum threads to sync files randomly.
-# max_random_workers = 2
+# max_random_workers = 8

 # Timeout to terminate a file sync in sequence.
-# sequential_find_peer_timeout = "60s"
+# sequential_find_peer_timeout = "5s"

 # Timeout to terminate a file sync randomly.
-# random_find_peer_timeout = "500s"
+# random_find_peer_timeout = "5s"

 #######################################################################
 ### File Location Cache Options ###
Some files were not shown because too many files have changed in this diff.