Mirror of https://github.com/0glabs/0g-storage-node.git
Synced 2025-04-04 15:35:18 +00:00

Compare commits (34 commits)
Commit SHAs in this range:

74074dfa2f, 12538e4b6c, 2fd8ffc2ea, 4cf45149cb, cfe4b45c41, d43a616b56,
898350e271, 6a26c336e7, a915766840, 3d9aa8c940, 538afb00e1, 7ad3f717b4,
26cc19b92d, a3335eed82, 2272b5dbfd, 760d4b4a53, 91680f2e33, c9bca86add,
93f587c407, 1f71aadeec, 656a092cf8, 8014f51b6d, b0a9a415f7, bc6bcf857c,
d15ef5ba3d, 9ce215b919, 40d435597a, bb74143ddc, 52f700c86e, 4e5b14c0a3,
3024771fb1, a56876eb1a, 64120399f1, 8790fe1d66
.github/actions/setup-rust/action.yml (vendored, 4 changes)

@@ -2,11 +2,11 @@ name: Setup Rust (cache & toolchain)
 runs:
   using: composite
   steps:
-    - name: Install toolchain 1.75.0
+    - name: Install toolchain 1.78.0
       uses: actions-rs/toolchain@v1
       with:
         profile: minimal
-        toolchain: 1.75.0
+        toolchain: 1.78.0
         components: rustfmt, clippy

     - uses: Swatinem/rust-cache@v2
.github/workflows/tests.yml (vendored, 5 changes)

@@ -45,6 +45,11 @@ jobs:
           python-version: '3.9'
           cache: 'pip'

+      - name: Set up Go
+        uses: actions/setup-go@v4
+        with:
+          go-version: '1.22'
+
       - name: Install dependencies
         run: |
           python -m pip install --upgrade pip
Cargo.lock (generated, 404 changes)

@@ -453,6 +453,28 @@ dependencies = [
  "trust-dns-resolver",
 ]
 
+[[package]]
+name = "async-stream"
+version = "0.3.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0b5a71a6f37880a80d1d7f19efd781e4b5de42c88f0722cc13bcb6cc2cfe8476"
+dependencies = [
+ "async-stream-impl",
+ "futures-core",
+ "pin-project-lite 0.2.14",
+]
+
+[[package]]
+name = "async-stream-impl"
+version = "0.3.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 2.0.68",
+]
+
 [[package]]
 name = "async-task"
 version = "4.7.1"
@@ -506,7 +528,7 @@ version = "0.16.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "fdb8867f378f33f78a811a8eb9bf108ad99430d7aad43315dd9319c827ef6247"
 dependencies = [
- "http",
+ "http 0.2.12",
  "log",
  "url",
  "wildmatch",
@@ -529,6 +551,53 @@ version = "1.3.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0"
 
+[[package]]
+name = "axum"
+version = "0.7.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3a6c9af12842a67734c9a2e355436e5d03b22383ed60cf13cd0c18fbfe3dcbcf"
+dependencies = [
+ "async-trait",
+ "axum-core",
+ "bytes",
+ "futures-util",
+ "http 1.2.0",
+ "http-body 1.0.1",
+ "http-body-util",
+ "itoa",
+ "matchit",
+ "memchr",
+ "mime",
+ "percent-encoding",
+ "pin-project-lite 0.2.14",
+ "rustversion",
+ "serde",
+ "sync_wrapper 1.0.2",
+ "tower",
+ "tower-layer",
+ "tower-service",
+]
+
+[[package]]
+name = "axum-core"
+version = "0.4.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "09f2bd6146b97ae3359fa0cc6d6b376d9539582c7b4220f041a33ec24c226199"
+dependencies = [
+ "async-trait",
+ "bytes",
+ "futures-util",
+ "http 1.2.0",
+ "http-body 1.0.1",
+ "http-body-util",
+ "mime",
+ "pin-project-lite 0.2.14",
+ "rustversion",
+ "sync_wrapper 1.0.2",
+ "tower-layer",
+ "tower-service",
+]
+
 [[package]]
 name = "backtrace"
 version = "0.3.73"
@@ -568,6 +637,12 @@ version = "0.21.7"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567"
 
+[[package]]
+name = "base64"
+version = "0.22.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6"
+
 [[package]]
 name = "base64ct"
 version = "1.6.0"
@@ -1104,6 +1179,45 @@ dependencies = [
  "yaml-rust",
 ]
 
+[[package]]
+name = "console-api"
+version = "0.8.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8030735ecb0d128428b64cd379809817e620a40e5001c54465b99ec5feec2857"
+dependencies = [
+ "futures-core",
+ "prost 0.13.4",
+ "prost-types 0.13.4",
+ "tonic",
+ "tracing-core",
+]
+
+[[package]]
+name = "console-subscriber"
+version = "0.4.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6539aa9c6a4cd31f4b1c040f860a1eac9aa80e7df6b05d506a6e7179936d6a01"
+dependencies = [
+ "console-api",
+ "crossbeam-channel",
+ "crossbeam-utils",
+ "futures-task",
+ "hdrhistogram",
+ "humantime",
+ "hyper-util",
+ "prost 0.13.4",
+ "prost-types 0.13.4",
+ "serde",
+ "serde_json",
+ "thread_local",
+ "tokio",
+ "tokio-stream",
+ "tonic",
+ "tracing",
+ "tracing-core",
+ "tracing-subscriber",
+]
+
 [[package]]
 name = "const-hex"
 version = "1.12.0"
@@ -1157,6 +1271,17 @@ dependencies = [
  "serde_json",
 ]
 
+[[package]]
+name = "contract-wrapper"
+version = "0.1.0"
+dependencies = [
+ "ethers",
+ "serde",
+ "serde_json",
+ "tokio",
+ "tracing",
+]
+
 [[package]]
 name = "convert_case"
 version = "0.6.0"
@@ -2286,7 +2411,7 @@ dependencies = [
  "futures-timer",
  "futures-util",
  "hashers",
- "http",
+ "http 0.2.12",
  "instant",
  "jsonwebtoken",
  "once_cell",
@@ -2905,7 +3030,26 @@ dependencies = [
  "futures-core",
  "futures-sink",
  "futures-util",
- "http",
+ "http 0.2.12",
  "indexmap 2.2.6",
  "slab",
  "tokio",
  "tokio-util 0.7.11",
  "tracing",
 ]
 
+[[package]]
+name = "h2"
+version = "0.4.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ccae279728d634d083c00f6099cb58f01cc99c145b84b8be2f6c74618d79922e"
+dependencies = [
+ "atomic-waker",
+ "bytes",
+ "fnv",
+ "futures-core",
+ "futures-sink",
+ "http 1.2.0",
+ "indexmap 2.2.6",
+ "slab",
+ "tokio",
@@ -3004,6 +3148,19 @@ dependencies = [
  "tokio-util 0.6.10",
 ]
 
+[[package]]
+name = "hdrhistogram"
+version = "7.5.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "765c9198f173dd59ce26ff9f95ef0aafd0a0fe01fb9d72841bc5066a4c06511d"
+dependencies = [
+ "base64 0.21.7",
+ "byteorder",
+ "flate2",
+ "nom",
+ "num-traits",
+]
+
 [[package]]
 name = "heck"
 version = "0.3.3"
@@ -3125,6 +3282,17 @@ dependencies = [
  "itoa",
 ]
 
+[[package]]
+name = "http"
+version = "1.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f16ca2af56261c99fba8bac40a10251ce8188205a4c448fbb745a2e4daa76fea"
+dependencies = [
+ "bytes",
+ "fnv",
+ "itoa",
+]
+
 [[package]]
 name = "http-body"
 version = "0.4.6"
@@ -3132,7 +3300,30 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2"
 dependencies = [
  "bytes",
- "http",
+ "http 0.2.12",
  "pin-project-lite 0.2.14",
 ]
 
+[[package]]
+name = "http-body"
+version = "1.0.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184"
+dependencies = [
+ "bytes",
+ "http 1.2.0",
+]
+
+[[package]]
+name = "http-body-util"
+version = "0.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "793429d76616a256bcb62c2a2ec2bed781c8307e797e2598c50010f2bee2544f"
+dependencies = [
+ "bytes",
+ "futures-util",
+ "http 1.2.0",
+ "http-body 1.0.1",
+ "pin-project-lite 0.2.14",
+]
+
@@ -3164,9 +3355,9 @@ dependencies = [
  "futures-channel",
  "futures-core",
  "futures-util",
- "h2",
- "http",
- "http-body",
+ "h2 0.3.26",
+ "http 0.2.12",
+ "http-body 0.4.6",
  "httparse",
  "httpdate",
  "itoa",
@@ -3178,14 +3369,35 @@ dependencies = [
  "want",
 ]
 
+[[package]]
+name = "hyper"
+version = "1.5.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "256fb8d4bd6413123cc9d91832d78325c48ff41677595be797d90f42969beae0"
+dependencies = [
+ "bytes",
+ "futures-channel",
+ "futures-util",
+ "h2 0.4.7",
+ "http 1.2.0",
+ "http-body 1.0.1",
+ "httparse",
+ "httpdate",
+ "itoa",
+ "pin-project-lite 0.2.14",
+ "smallvec",
+ "tokio",
+ "want",
+]
+
 [[package]]
 name = "hyper-rustls"
 version = "0.23.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "1788965e61b367cd03a62950836d5cd41560c3577d90e40e0819373194d1661c"
 dependencies = [
- "http",
- "hyper",
+ "http 0.2.12",
+ "hyper 0.14.29",
  "log",
  "rustls 0.20.9",
  "rustls-native-certs",
@@ -3201,8 +3413,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "ec3efd23720e2049821a693cbc7e65ea87c72f1c58ff2f9522ff332b1491e590"
 dependencies = [
  "futures-util",
- "http",
- "hyper",
+ "http 0.2.12",
+ "hyper 0.14.29",
  "rustls 0.21.12",
  "tokio",
  "tokio-rustls 0.24.1",
@@ -3216,12 +3428,25 @@ checksum = "6eea26c5d0b6ab9d72219f65000af310f042a740926f7b2fa3553e774036e2e7"
 dependencies = [
  "derive_builder",
  "dns-lookup",
- "hyper",
+ "hyper 0.14.29",
  "tokio",
  "tower-service",
  "tracing",
 ]
 
+[[package]]
+name = "hyper-timeout"
+version = "0.5.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2b90d566bffbce6a75bd8b09a05aa8c2cb1fabb6cb348f8840c9e4c90a0d83b0"
+dependencies = [
+ "hyper 1.5.2",
+ "hyper-util",
+ "pin-project-lite 0.2.14",
+ "tokio",
+ "tower-service",
+]
+
 [[package]]
 name = "hyper-tls"
 version = "0.5.0"
@@ -3229,12 +3454,31 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905"
 dependencies = [
  "bytes",
- "hyper",
+ "hyper 0.14.29",
  "native-tls",
  "tokio",
  "tokio-native-tls",
 ]
 
+[[package]]
+name = "hyper-util"
+version = "0.1.10"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "df2dcfbe0677734ab2f3ffa7fa7bfd4706bfdc1ef393f2ee30184aed67e631b4"
+dependencies = [
+ "bytes",
+ "futures-channel",
+ "futures-util",
+ "http 1.2.0",
+ "http-body 1.0.1",
+ "hyper 1.5.2",
+ "pin-project-lite 0.2.14",
+ "socket2 0.5.7",
+ "tokio",
+ "tower-service",
+ "tracing",
+]
+
 [[package]]
 name = "iana-time-zone"
 version = "0.1.60"
@@ -3586,7 +3830,7 @@ dependencies = [
  "futures-timer",
  "futures-util",
  "gloo-net",
- "http",
+ "http 0.2.12",
  "jsonrpsee-core",
  "jsonrpsee-types",
  "pin-project 1.1.5",
@@ -3615,7 +3859,7 @@ dependencies = [
  "futures-timer",
  "futures-util",
  "globset",
- "hyper",
+ "hyper 0.14.29",
  "jsonrpsee-types",
  "lazy_static",
  "parking_lot 0.12.3",
@@ -3638,7 +3882,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "5fc1d8c0e4f455c47df21f8a29f4bbbcb75eb71bfee919b92e92502b48358392"
 dependencies = [
  "async-trait",
- "hyper",
+ "hyper 0.14.29",
  "hyper-rustls 0.23.2",
  "jsonrpsee-core",
  "jsonrpsee-types",
@@ -3658,7 +3902,7 @@ checksum = "bdd69efeb3ce2cba767f126872f4eeb4624038a29098e75d77608b2b4345ad03"
 dependencies = [
  "futures-channel",
  "futures-util",
- "hyper",
+ "hyper 0.14.29",
  "jsonrpsee-core",
  "jsonrpsee-types",
  "serde",
@@ -4713,6 +4957,12 @@ version = "0.1.10"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "2532096657941c2fea9c289d370a250971c689d4f143798ff67113ec042024a5"
 
+[[package]]
+name = "matchit"
+version = "0.7.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94"
+
 [[package]]
 name = "md-5"
 version = "0.10.6"
@@ -4778,6 +5028,7 @@ dependencies = [
  "async-trait",
  "blake2",
  "contract-interface",
+ "contract-wrapper",
  "ethereum-types 0.14.1",
  "ethers",
  "hex",
@@ -6025,6 +6276,16 @@ dependencies = [
  "prost-derive 0.10.1",
 ]
 
+[[package]]
+name = "prost"
+version = "0.13.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2c0fef6c4230e4ccf618a35c59d7ede15dea37de8427500f50aff708806e42ec"
+dependencies = [
+ "bytes",
+ "prost-derive 0.13.4",
+]
+
 [[package]]
 name = "prost-build"
 version = "0.9.0"
@@ -6106,6 +6367,19 @@ dependencies = [
  "syn 1.0.109",
 ]
 
+[[package]]
+name = "prost-derive"
+version = "0.13.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "157c5a9d7ea5c2ed2d9fb8f495b64759f7816c7eaea54ba3978f0d63000162e3"
+dependencies = [
+ "anyhow",
+ "itertools 0.13.0",
+ "proc-macro2",
+ "quote",
+ "syn 2.0.68",
+]
+
 [[package]]
 name = "prost-types"
 version = "0.9.0"
@@ -6126,6 +6400,15 @@ dependencies = [
  "prost 0.10.4",
 ]
 
+[[package]]
+name = "prost-types"
+version = "0.13.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cc2f1e56baa61e93533aebc21af4d2134b70f66275e0fcdf3cbe43d77ff7e8fc"
+dependencies = [
+ "prost 0.13.4",
+]
+
 [[package]]
 name = "protobuf"
 version = "2.28.0"
@@ -6159,8 +6442,8 @@ dependencies = [
  "dns-lookup",
  "futures-core",
  "futures-util",
- "http",
- "hyper",
+ "http 0.2.12",
+ "hyper 0.14.29",
  "hyper-system-resolver",
  "pin-project-lite 0.2.14",
  "thiserror",
@@ -6403,10 +6686,10 @@ dependencies = [
  "encoding_rs",
  "futures-core",
  "futures-util",
- "h2",
- "http",
- "http-body",
- "hyper",
+ "h2 0.3.26",
+ "http 0.2.12",
+ "http-body 0.4.6",
+ "hyper 0.14.29",
  "hyper-rustls 0.24.2",
  "hyper-tls",
  "ipnet",
@@ -6422,7 +6705,7 @@ dependencies = [
  "serde",
  "serde_json",
  "serde_urlencoded",
- "sync_wrapper",
+ "sync_wrapper 0.1.2",
  "system-configuration",
  "tokio",
  "tokio-native-tls",
@@ -7497,6 +7780,12 @@ version = "0.1.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160"
 
+[[package]]
+name = "sync_wrapper"
+version = "1.0.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0bf256ce5efdfa370213c1dabab5935a12e49f2c58d15e9eac2870d3b4f27263"
+
 [[package]]
 name = "synstructure"
 version = "0.12.6"
@@ -7729,6 +8018,7 @@ dependencies = [
  "signal-hook-registry",
  "socket2 0.5.7",
  "tokio-macros",
+ "tracing",
  "windows-sys 0.48.0",
 ]
 
@@ -7786,9 +8076,9 @@ dependencies = [
 
 [[package]]
 name = "tokio-stream"
-version = "0.1.15"
+version = "0.1.17"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "267ac89e0bec6e691e5813911606935d77c476ff49024f98abcea3e7b15e37af"
+checksum = "eca58d7bba4a75707817a2c44174253f9236b2d5fbd055602e9d5c07c139a047"
 dependencies = [
  "futures-core",
  "pin-project-lite 0.2.14",
@@ -7895,6 +8185,62 @@ dependencies = [
  "winnow 0.6.13",
 ]
 
+[[package]]
+name = "tonic"
+version = "0.12.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "877c5b330756d856ffcc4553ab34a5684481ade925ecc54bcd1bf02b1d0d4d52"
+dependencies = [
+ "async-stream",
+ "async-trait",
+ "axum",
+ "base64 0.22.1",
+ "bytes",
+ "h2 0.4.7",
+ "http 1.2.0",
+ "http-body 1.0.1",
+ "http-body-util",
+ "hyper 1.5.2",
+ "hyper-timeout",
+ "hyper-util",
+ "percent-encoding",
+ "pin-project 1.1.5",
+ "prost 0.13.4",
+ "socket2 0.5.7",
+ "tokio",
+ "tokio-stream",
+ "tower",
+ "tower-layer",
+ "tower-service",
+ "tracing",
+]
+
+[[package]]
+name = "tower"
+version = "0.4.13"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c"
+dependencies = [
+ "futures-core",
+ "futures-util",
+ "indexmap 1.9.3",
+ "pin-project 1.1.5",
+ "pin-project-lite 0.2.14",
+ "rand 0.8.5",
+ "slab",
+ "tokio",
+ "tokio-util 0.7.11",
+ "tower-layer",
+ "tower-service",
+ "tracing",
+]
+
+[[package]]
+name = "tower-layer"
+version = "0.3.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "121c2a6cda46980bb0fcd1647ffaf6cd3fc79a013de288782836f6df9c48780e"
+
 [[package]]
 name = "tower-service"
 version = "0.3.2"
@@ -8124,7 +8470,7 @@ dependencies = [
  "byteorder",
  "bytes",
  "data-encoding",
- "http",
+ "http 0.2.12",
  "httparse",
  "log",
  "rand 0.8.5",
@@ -8916,12 +9262,14 @@ dependencies = [
 
 [[package]]
 name = "zgs_node"
-version = "0.8.2"
+version = "0.3.0"
 dependencies = [
  "anyhow",
  "chunk_pool",
  "clap",
  "config",
+ "console-subscriber",
+ "contract-wrapper",
  "ctrlc",
  "duration-str",
  "error-chain",
@@ -16,7 +16,7 @@ Across the two lanes, 0G Storage supports the following features:
 * **General Purpose Design**: Supports atomic transactions, mutable key-value stores, and archive log systems, enabling a wide range of applications with various data types.
 * **Validated Incentivization**: Utilizes the PoRA (Proof of Random Access) mining algorithm to mitigate the data outsourcing issue and to ensure rewards are distributed to nodes who contribute to the storage network.
 
-For in-depth technical details about 0G Storage, please read our [Intro to 0G Storage](https://docs.0g.ai/og-storage).
+For in-depth technical details about 0G Storage, please read our [Intro to 0G Storage](https://docs.0g.ai/0g-storage).
 
 ## Documentation
 
common/contract-wrapper/Cargo.toml (new file, 17 lines)

@@ -0,0 +1,17 @@
[package]
name = "contract-wrapper"
version = "0.1.0"
edition = "2021"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
tokio = { version = "1.28", features = ["macros"] }
ethers = "2.0"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
tracing = "0.1.35"
# or `tracing` if you prefer

[features]
dev = []
common/contract-wrapper/src/lib.rs (new file, 204 lines)

@@ -0,0 +1,204 @@
use ethers::{
    abi::Detokenize,
    contract::ContractCall,
    providers::{Middleware, ProviderError},
    types::{TransactionReceipt, U256},
};
use serde::Deserialize;
use std::{sync::Arc, time::Duration};
use tokio::time::sleep;
use tracing::{debug, info};

/// The result of a single submission attempt.
#[derive(Debug)]
pub enum SubmissionAction {
    Success(TransactionReceipt),
    /// Generic "retry" signal, but we still need to know if it's "mempool/timeout" or something else.
    /// We'll parse the error string or have a separate reason in a real app.
    Retry(String),
    Error(String),
}

/// Configuration for submission retries, gas price, etc.
#[derive(Clone, Copy, Debug, Deserialize)]
pub struct SubmitConfig {
    /// If `Some`, use this gas price for the first attempt.
    /// If `None`, fetch the current network gas price.
    pub(crate) initial_gas_price: Option<U256>,
    /// If `Some`, clamp increased gas price to this limit.
    /// If `None`, do not bump gas for mempool/timeout errors.
    pub(crate) max_gas_price: Option<U256>,
    /// Gas limit of the transaction.
    pub(crate) max_gas: Option<U256>,
    /// Factor by which to multiply the gas price on each mempool/timeout error.
    /// E.g. if factor=11 => a 10% bump => new_gas = (gas * factor) / 10.
    pub(crate) gas_increase_factor: Option<u64>,
    /// The maximum number of gas bumps (for mempool/timeout). If `max_gas_price` is set,
    /// we typically rely on clamping. But you can still cap the number of bumps if you want.
    pub(crate) max_retries: Option<usize>,
    /// Seconds to wait between attempts.
    pub(crate) interval_secs: Option<u64>,
}

const DEFAULT_INTERVAL_SECS: u64 = 2;
const DEFAULT_MAX_RETRIES: usize = 5;

impl Default for SubmitConfig {
    fn default() -> Self {
        Self {
            initial_gas_price: None,
            max_gas_price: None,
            max_gas: None,
            gas_increase_factor: Some(11), // implies 10% bump if we do (gas*11)/10
            max_retries: Some(DEFAULT_MAX_RETRIES),
            interval_secs: Some(DEFAULT_INTERVAL_SECS),
        }
    }
}

/// A simple function to detect if the retry is from a mempool or timeout error.
/// Right now, we rely on `submit_once` returning `SubmissionAction::Retry` for ANY error
/// that is "retryable," so we must parse the error string from `submit_once`, or
/// store that string. Another approach is to return an enum with a reason from `submit_once`.
fn is_mempool_or_timeout_error(error_str: String) -> bool {
    let lower = error_str.to_lowercase();
    lower.contains("mempool") || lower.contains("timeout")
}

/// A function that performs a single submission attempt:
/// - Sends the transaction
/// - Awaits the receipt with limited internal retries
/// - Returns a `SubmissionAction` indicating success, retry, or error.
pub async fn submit_once<M, T>(call: ContractCall<M, T>) -> SubmissionAction
where
    M: Middleware + 'static,
    T: Detokenize,
{
    let pending_tx = match call.send().await {
        Ok(tx) => tx,
        Err(e) => {
            let msg = e.to_string();
            if is_mempool_or_timeout_error(msg.clone()) {
                return SubmissionAction::Retry(format!("mempool/timeout: {:?}", e));
            }

            debug!("Error sending transaction: {:?}", msg);
            return SubmissionAction::Error(format!("Transaction failed: {}", msg));
        }
    };

    debug!("Signed tx hash: {:?}", pending_tx.tx_hash());

    let receipt_result = pending_tx.await;
    match receipt_result {
        Ok(Some(receipt)) => {
            info!("Transaction mined, receipt: {:?}", receipt);
            SubmissionAction::Success(receipt)
        }
        Ok(None) => {
            debug!("Transaction probably timed out; retrying");
            SubmissionAction::Retry("timeout, receipt is none".to_string())
        }
        Err(ProviderError::HTTPError(e)) => {
            debug!("HTTP error retrieving receipt: {:?}", e);
            SubmissionAction::Retry(format!("http error: {:?}", e))
        }
        Err(e) => SubmissionAction::Error(format!("Transaction unrecoverable: {:?}", e)),
    }
}

/// Increase gas price using integer arithmetic: (gp * factor_num) / factor_den
fn increase_gas_price_u256(gp: U256, factor_num: u64, factor_den: u64) -> U256 {
    let num = U256::from(factor_num);
    let den = U256::from(factor_den);
    gp.checked_mul(num).unwrap_or(U256::MAX) / den
}

/// A higher-level function that wraps `submit_once` in a gas-price-adjustment loop,
/// plus a global timeout, plus distinct behavior for mempool/timeout vs other errors.
pub async fn submit_with_retry<M, T>(
    mut call: ContractCall<M, T>,
    config: &SubmitConfig,
    middleware: Arc<M>,
) -> Result<TransactionReceipt, String>
where
    M: Middleware + 'static,
    T: Detokenize,
{
    if let Some(max_gas) = config.max_gas {
        call = call.gas(max_gas);
    }
    let mut gas_price = if let Some(gp) = config.initial_gas_price {
        gp
    } else {
        middleware
            .get_gas_price()
            .await
            .map_err(|e| format!("Failed to fetch gas price: {:?}", e))?
    };

    // If no factor is set, default to 11 => 10% bump
    let factor_num = config.gas_increase_factor.unwrap_or(11);
    let factor_den = 10u64;

    // Two counters: one for gas bumps, one for non-gas retries
    let mut non_gas_retries = 0;
    let max_retries = config.max_retries.unwrap_or(DEFAULT_MAX_RETRIES);

    loop {
        // Set gas price on the call
        call = call.gas_price(gas_price);

        match submit_once(call.clone()).await {
            SubmissionAction::Success(receipt) => {
                return Ok(receipt);
            }
            SubmissionAction::Retry(error_str) => {
                // We need to figure out if it's "mempool/timeout" or some other reason.
                // Right now, we don't have the error string from `submit_once` easily,
                // so let's assume we store it or we do a separate function that returns it.
                // In reality, you'd likely return `SubmissionAction::Retry(reason_str)` from `submit_once`.
                if is_mempool_or_timeout_error(error_str.clone()) {
                    // Mempool/timeout error
                    if let Some(max_gp) = config.max_gas_price {
                        if gas_price >= max_gp {
                            return Err(format!(
                                "Exceeded max gas price: {}, with error msg: {}",
                                max_gp, error_str
                            ));
                        }
                        // Bump the gas
                        let new_price = increase_gas_price_u256(gas_price, factor_num, factor_den);
                        gas_price = std::cmp::min(new_price, max_gp);
                        debug!("Bumping gas price to {}", gas_price);
                    } else {
                        // No maxGasPrice => we do NOT bump => fail
                        return Err(
                            "Mempool/timeout error, no maxGasPrice set => aborting".to_string()
                        );
                    }
                } else {
                    // Non-gas error => increment non_gas_retries
                    non_gas_retries += 1;
                    if non_gas_retries > max_retries {
                        return Err(format!("Exceeded non-gas retries: {}", max_retries));
                    }
                    debug!(
                        "Non-gas retry #{} (same gas price: {})",
                        non_gas_retries, gas_price
                    );
                }
            }
            SubmissionAction::Error(e) => {
                return Err(e);
            }
        }

        // Sleep between attempts
        sleep(Duration::from_secs(
            config.interval_secs.unwrap_or(DEFAULT_INTERVAL_SECS),
        ))
        .await;
    }
}
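To make the intended call pattern of the new crate concrete, here is a minimal usage sketch. It mirrors how the miner code later in this diff drives the wrapper (see the `request_miner_id` and submitter hunks below); the `PoraMine` binding and `PoraAnswer` value are borrowed from the repo's `contract-interface` crate as used there, while the `submit_answer` helper itself is illustrative, not part of `contract-wrapper`.

```rust
use contract_interface::{PoraAnswer, PoraMine};
use contract_wrapper::{submit_with_retry, SubmitConfig};
use ethers::contract::ContractCall;
use ethers::providers::Middleware;

/// Sketch only: push a PoRA answer through the retry wrapper, letting the
/// config drive gas bumping, the retry cap, and the sleep interval.
async fn submit_answer<M: Middleware + 'static>(
    mine_contract: &PoraMine<M>,
    answer: PoraAnswer,
    config: &SubmitConfig,
) -> Result<(), String> {
    // Same call construction as the submitter hunk later in this diff.
    let call: ContractCall<_, _> = mine_contract.submit(answer).legacy();
    // The middleware handle comes from the contract itself, as in request_miner_id.
    let receipt = submit_with_retry(call, config, mine_contract.client().clone()).await?;
    tracing::info!("PoRA answer mined in block {:?}", receipt.block_number);
    Ok(())
}
```

With `SubmitConfig::default()`, a mempool or timeout error is never gas-bumped (no `max_gas_price` is set), so in practice callers are expected to deserialize a config that caps the bump with `max_gas_price`.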
@@ -7,7 +7,7 @@
 //! block processing time).
 //! - `IncCounter`: used to represent an ideally ever-growing, never-shrinking integer (e.g.,
 //!   number of block processing requests).
-//! - `IntGauge`: used to represent an varying integer (e.g., number of attestations per block).
+//! - `IntGauge`: used to represent a varying integer (e.g., number of attestations per block).
 //!
 //! ## Important
 //!
@@ -5,7 +5,7 @@ pub const TB: usize = 1024 * GB;
 
 pub const BYTES_PER_SECTOR: usize = 256;
 pub const BYTES_PER_SEAL: usize = 4 * KB;
-pub const BYTES_PER_SCRATCHPAD: usize = 64 * KB;
+pub const BYTES_PER_SCRATCHPAD: usize = 16 * KB;
 pub const BYTES_PER_LOAD: usize = 256 * KB;
 pub const BYTES_PER_PRICING: usize = 8 * GB;
 pub const BYTES_PER_MAX_MINING_RANGE: usize = 8 * TB;
@@ -11,7 +11,7 @@ pub fn unused_tcp_port() -> Result<u16, String> {
     unused_port(Transport::Tcp)
 }
 
-/// A convenience function for `unused_port(Transport::Tcp)`.
+/// A convenience function for `unused_port(Transport::Udp)`.
 pub fn unused_udp_port() -> Result<u16, String> {
     unused_port(Transport::Udp)
 }
@@ -4,7 +4,7 @@
 
 ### Checks
 
-* [ ] I've made sure the lint is passing in this PR.
+* [ ] I've made sure the linter is passing in this PR.
 * [ ] I've made sure the tests are passing. Note that there might be a few flaky tests, in that case, please comment that they are not relevant.
 * [ ] Testing Strategy
   * [ ] Unit tests
@@ -4,15 +4,15 @@
 
 The ZeroGravity system consists of a data availability layer (0G DA) on top of a decentralized storage system (0G Storage). There is a separate consensus network that is part of both the 0G DA and the 0G Storage. For 0G Storage, the consensus is responsible for determining the ordering of the uploaded data blocks, realizing the storage mining verification and the corresponding incentive mechanism through smart contracts.
 
-Figure 1 illustrates the architecture of the 0G system. When a data block enters the 0G DA, it is first erasure coded and organized into multiple consecutive chunks through erasure coding. The merkle root as a commitment of the encoded data block is then submitted to the consensus layer to keep the order of the data entering the system. The chunks are then dispersed to different storage nodes in 0G Storage where the data may be further replicated to other nodes depending on the storage fee that the user pays. The storage nodes periodically participate the mining process by interacting with the consensus network to accrue rewards from the system.
+Figure 1 illustrates the architecture of the 0G system. When a data block enters the 0G DA, it is first erasure coded and organized into multiple consecutive chunks through erasure coding. The merkle root as a commitment of the encoded data block is then submitted to the consensus layer to keep the order of the data entering the system. The chunks are then dispersed to different storage nodes in 0G Storage where the data may be further replicated to other nodes depending on the storage fee that the user pays. The storage nodes periodically participate in the mining process by interacting with the consensus network to accrue rewards from the system.
 
-<figure><img src="../../.gitbook/assets/zg-storage-architecture.png" alt=""><figcaption><p>Figure 1. The Architecture of 0G System</p></figcaption></figure>
+<figure><img src="../.gitbook/assets/zg-storage-architecture.png" alt=""><figcaption><p>Figure 1. The Architecture of 0G System</p></figcaption></figure>
 
 ## 0G Storage
 
 0G Storage employs a layered design to support different types of decentralized applications. Figure 2 shows the overview of the full stack layers of 0G Storage.
 
-<figure><img src="../../.gitbook/assets/zg-storage-layer.png" alt=""><figcaption><p>Figure 2. Full Stack Solution of 0G Storage</p></figcaption></figure>
+<figure><img src="../.gitbook/assets/zg-storage-layer.png" alt=""><figcaption><p>Figure 2. Full Stack Solution of 0G Storage</p></figcaption></figure>
 
 The lowest is a log layer which is a decentralized system. It consists of multiple storage nodes that form a storage network. The network has a built-in incentive mechanism to reward data storage. The ordering of the uploaded data is guaranteed by a sequencing mechanism to provide log-based semantics and abstraction. This layer is used to store unstructured raw data for permanent persistency.
 
@@ -1,6 +1,6 @@
 # Mining Reward
 
-0G Storage creates pricing segments every 8 GB of data chunks over the data flow. Each pricing segment is associated with an Endowment Pool and a Reward Pool. The Endowment Pool collects the storage endowments of all the data chunks belongs to this pricing segment and releases a fixed ratio of balance to the Reward Pool every second. The rate of reward release is set to 4% per year.
+0G Storage creates pricing segments every 8 GB of data chunks over the data flow. Each pricing segment is associated with an Endowment Pool and a Reward Pool. The Endowment Pool collects the storage endowments of all the data chunks belonging to this pricing segment and releases a fixed ratio of balance to the Reward Pool every second. The rate of reward release is set to 4% per year.
 
 The mining reward is paid to miners for providing data service. Miners receive the mining reward when they submit the first legitimate PoRA for a mining epoch to the 0G Storage contract.
 
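As a back-of-the-envelope check on the 4% figure above (our own arithmetic, not part of the diff): one year is $$365 \times 86{,}400 = 31{,}536{,}000$$ seconds, so a linear per-second release ratio consistent with 4% per year is $$0.04 / 31{,}536{,}000 \approx 1.27 \times 10^{-9}$$, while per-second compounding would give $$1 - 0.96^{1/31{,}536{,}000} \approx 1.29 \times 10^{-9}$$. Which of the two forms the contract actually uses is not specified on this page.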
@@ -2,7 +2,7 @@
 
 The ZeroGravity network adopts a Proof of Random Access (PoRA) mechanism to incentivize miners to store data. By requiring miners to answer randomly produced queries to archived data chunks, the PoRA mechanism establishes the relation between mining proof generation power and data storage. Miners answer the queries repeatedly and compute an output digest for each loaded chunk until they find a digest that satisfies the mining difficulty (i.e., has enough leading zeros). PoRA will stress the miners' disk I/O and reduce their capability to respond to user queries. So 0G Storage adopts intermittent mining, in which a mining epoch starts with a block generation at a specific block height on the host chain and stops when a valid PoRA is submitted to the 0G Storage contract.
 
-In a strawman design, a PoRA iteration consists of a computing stage and a loading stage. In the computing stage, a miner computes a random recall position (the universal offset in the flow) based on an arbitrary picked random nonce and a mining status read from the host chain. In the loading stage, a miner loads the archived data chunks at the given recall position, and computes output digest by hashing the tuple of mining status and the data chunks. If the output digest satisfies the target difficulty, the miner can construct a legitimate PoRA consists of the chosen random nonce, the loaded data chunk and the proof for the correctness of data chunk to the mining contract.
+In a strawman design, a PoRA iteration consists of a computing stage and a loading stage. In the computing stage, a miner computes a random recall position (the universal offset in the flow) based on an arbitrarily picked random nonce and a mining status read from the host chain. In the loading stage, a miner loads the archived data chunks at the given recall position, and computes an output digest by hashing the tuple of the mining status and the data chunks. If the output digest satisfies the target difficulty, the miner can construct a legitimate PoRA, which consists of the chosen random nonce, the loaded data chunk and the proof for the correctness of the data chunk to the mining contract.
 
 ## Fairness
 
@@ -30,4 +30,4 @@ Precisely, the mining process has the following steps:
 6. For each piece $$\overrightarrow{v}$$, compute the Blake2b hash of the tuple ($$\mathsf{miner\_id}$$, $$\mathsf{nonce}$$, $$\mathsf{context\_digest}$$, $$\mathsf{start\_position}$$, $$\mathsf{mine\_length}$$, $$\overrightarrow{v}$$).
 7. If one of the Blake2b hash outputs is smaller than a target value, the miner finds a legitimate PoRA solution.
 
-<figure><img src="../../../.gitbook/assets/zg-storage-algorithm.png" alt=""><figcaption><p>Figure 1. Recall Position and Scratchpad Computation</p></figcaption></figure>
+<figure><img src="../../.gitbook/assets/zg-storage-algorithm.png" alt=""><figcaption><p>Figure 1. Recall Position and Scratchpad Computation</p></figcaption></figure>
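To make steps 6 and 7 above concrete, here is a schematic sketch of the acceptance check in Rust. It is illustrative only: the byte encodings of the tuple fields are our assumptions, and the node's real miner (see the `pora` hunks later in this diff) hashes a richer mining state.

```rust
use blake2::{Blake2b512, Digest};
use ethereum_types::U256;

/// Schematic check: hash the tuple from step 6 with Blake2b and accept the
/// nonce when the digest, read as an integer, falls below the target (step 7).
fn is_valid_pora_solution(
    miner_id: &[u8; 32],
    nonce: &[u8; 32],
    context_digest: &[u8; 32],
    start_position: u64,
    mine_length: u64,
    piece: &[u8], // one sealed data piece: the vector v from step 6
    target: U256,
) -> bool {
    let mut hasher = Blake2b512::new();
    hasher.update(miner_id);
    hasher.update(nonce);
    hasher.update(context_digest);
    hasher.update(start_position.to_be_bytes()); // encoding assumed here
    hasher.update(mine_length.to_be_bytes());    // encoding assumed here
    hasher.update(piece);
    let digest = hasher.finalize();
    // Interpret the leading 32 bytes of the 64-byte Blake2b digest as a
    // big-endian integer; smaller than the target means a legitimate solution.
    U256::from_big_endian(&digest[..32]) < target
}
```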
@@ -5,12 +5,12 @@
 0G Storage is the storage layer for the ZeroGravity data availability (DA) system. The 0G Storage layer holds three important features:
 
 * Built-in - It is natively built into the ZeroGravity DA system for data storage and retrieval.
-* General purpose - It is designed to support atomic transactions, mutable kv stores as well as archive log systems to enable wide range of applications with various data types.
+* General purpose - It is designed to support atomic transactions, mutable kv stores as well as archive log systems to enable a wide range of applications with various data types.
 * Incentive - Instead of being just a decentralized database, 0G Storage introduces the PoRA mining algorithm to incentivize storage network participants.
 
 ## Integration
 
-We provide a [SDK](https://github.com/0glabs/0g-js-storage-sdk) for users to easily integrate 0G Storage in their applications with the following features:
+We provide an [SDK](https://github.com/0glabs/0g-ts-sdk) for users to easily integrate 0G Storage in their applications with the following features:
 
 * File Merkle Tree Class
 * Flow Contract Types
@@ -22,7 +22,7 @@ We provide a [SDK](https://github.com/0glabs/0g-js-storage-sdk) for users to eas
 
 ## Deployment
 
-Please refer to [Deployment](../0G%20Storage/doc/install.md) page for detailed steps to compile and start a 0G Storage node.
+Please refer to the [Deployment](run.md) page for detailed steps to compile and start a 0G Storage node.
 
 ## Test
 
@@ -6,4 +6,4 @@ A user-defined function will be used to deserialize the raw content in the log e
 
 When a new key-value node joins the network, it connects to the log layer and replays the log entries from head to tail to construct the latest state of the key-value store. During the log replay, an application-specific key-value node can skip irrelevant log entries that do not contain the stream IDs it cares about.
 
-<figure><img src="../../.gitbook/assets/zg-storage-log.png" alt=""><figcaption><p>Figure 1. Decentralized K-V Store</p></figcaption></figure>
+<figure><img src="../.gitbook/assets/zg-storage-log.png" alt=""><figcaption><p>Figure 1. Decentralized K-V Store</p></figcaption></figure>
@@ -5,7 +5,7 @@
 ## Prerequisites
 
 - Requires Python version 3.8, 3.9 or 3.10; higher versions are not guaranteed to work (e.g., `pysha3` may fail to install).
-- Installs dependencies under root folder: `pip3 install -r requirements.txt`
+- Install dependencies under the root folder: `pip3 install -r requirements.txt`
 
 ## Install Blockchain Nodes
 
@@ -19,7 +19,7 @@ The blockchain node binaries will be compiled or downloaded from github to `test
 
 ## Run Tests
 
-Changes to the `tests` folder and run the following command to run all tests:
+Change to the `tests` folder and run the following command to run all tests:
 
 ```
 python test_all.py
@@ -4,7 +4,7 @@
 
 ### Setup Environment
 
-Install dependencies Node.js, yarn, hardhat.
+Install the dependencies: Node.js, yarn, hardhat.
 
 - Linux
 
@@ -8,8 +8,8 @@ When an application server linking with the 0G Storage key-value runtime starts
 
 When an application server with the key-value runtime encounters the commit record while replaying the log, it identifies a conflict window consisting of all the log entries between the start log position of the transaction and the position of the commit record. The log entries in the conflict window therefore contain the key-value operations concurrent with the transaction submitting the commit record. The runtime further detects whether these concurrent operations contain updates on the keys belonging to the read set of the transaction. If yes, the transaction is aborted; otherwise, it is committed successfully.
 
-<figure><img src="../../.gitbook/assets/zg-storage-transaction.png" alt=""><figcaption><p>Figure 1. Transaction Processing on 0G K-V Store</p></figcaption></figure>
+<figure><img src="../.gitbook/assets/zg-storage-transaction.png" alt=""><figcaption><p>Figure 1. Transaction Processing on 0G K-V Store</p></figcaption></figure>
 
 ## Concurrent Assumption
 
-This transaction model assumes that the transaction participants are collaborative and will honestly compose the commit record with the correct content. Although this assumption in a decentralized environment is too strong, it is still achievable for specific applications. For example, for an application like Google Docs, a user normally shares the access to others who can be trusted. In case this assumption cannot hold, the code of the transaction can be stored in the ZeroGravity log and some mechanism of verifiable computation like zero-knowledge proof or hardware with trust execution environment (TEE) can be employed by the transaction executors to detect the validity of the commit record.
+This transaction model assumes that the transaction participants are collaborative and will honestly compose the commit record with the correct content. Although this assumption in a decentralized environment is too strong, it is still achievable for specific applications. For example, for an application like Google Docs, a user normally shares access only with others who can be trusted. In case this assumption cannot hold, the code of the transaction can be stored in the ZeroGravity log and some mechanism of verifiable computation like zero-knowledge proof or hardware with a trusted execution environment (TEE) can be employed by the transaction executors to detect the validity of the commit record.
@@ -1,6 +1,6 @@
 [package]
 name = "zgs_node"
-version = "0.8.2"
+version = "0.3.0"
 edition = "2021"
 
 [dependencies]
@@ -42,8 +42,13 @@ metrics = { workspace = true }
 rust-log = { package = "log", version = "0.4.22" }
 tracing-core = "0.1.32"
 tracing-log = "0.2.0"
+console-subscriber = { version = "0.4.1", optional = true }
+contract-wrapper = { path = "../common/contract-wrapper" }
 
 [dependencies.libp2p]
 version = "0.45.1"
 default-features = true
 features = ["websocket", "identify", "mplex", "yamux", "noise", "gossipsub", "dns-tokio", "tcp-tokio", "plaintext", "secp256k1"]
+
+[features]
+tokio-console = ["console-subscriber"]
@@ -13,7 +13,7 @@ enum SlotStatus {
 }
 
 /// Sliding window is used to control the concurrent uploading process of a file.
-/// Bounded window allows segments to be uploaded concurrenly, while having a capacity
+/// Bounded window allows segments to be uploaded concurrently, while having a capacity
 /// limit on writing threads per file. Meanwhile, the left_boundary field records
 /// how many segments have been uploaded.
 struct CtrlWindow {
@@ -165,7 +165,7 @@ impl ChunkPoolWriteCtrl {
 
         if file_ctrl.total_segments != total_segments {
             bail!(
-                "file size in segment doesn't match with file size declared in previous segment. Previous total segments:{}, current total segments:{}s",
+                "file size in segment doesn't match with file size declared in previous segment. Previous total segments:{}, current total segments:{}",
                 file_ctrl.total_segments,
                 total_segments
            );
@@ -358,7 +358,7 @@ mod tests {
     }
 
     #[test]
-    fn test_annoucement_cache_peek_priority() {
+    fn test_announcement_cache_peek_priority() {
         let mut cache = AnnouncementCache::new(100, 3600);
         let now = timestamp_now();
 
@@ -382,7 +382,7 @@ mod tests {
     }
 
     #[test]
-    fn test_annoucement_cache_pop_len() {
+    fn test_announcement_cache_pop_len() {
         let mut cache = AnnouncementCache::new(100, 3600);
         let now = timestamp_now();
 
@@ -404,7 +404,7 @@ mod tests {
     }
 
     #[test]
-    fn test_annoucement_cache_garbage_collect() {
+    fn test_announcement_cache_garbage_collect() {
         let mut cache = AnnouncementCache::new(100, 3600);
         let now = timestamp_now();
 
@@ -422,7 +422,7 @@ mod tests {
     }
 
     #[test]
-    fn test_annoucement_cache_insert_gc() {
+    fn test_announcement_cache_insert_gc() {
         let mut cache = AnnouncementCache::new(100, 3600);
         let now = timestamp_now();
 
@@ -438,7 +438,7 @@ mod tests {
     }
 
     #[test]
-    fn test_annoucement_cache_insert_ignore_older() {
+    fn test_announcement_cache_insert_ignore_older() {
         let mut cache = AnnouncementCache::new(100, 3600);
         let now = timestamp_now();
 
@@ -461,7 +461,7 @@ mod tests {
     }
 
     #[test]
-    fn test_annoucement_cache_insert_overwrite() {
+    fn test_announcement_cache_insert_overwrite() {
         let mut cache = AnnouncementCache::new(100, 3600);
         let now = timestamp_now();
 
@@ -479,7 +479,7 @@ mod tests {
     }
 
     #[test]
-    fn test_annoucement_cache_insert_cap_exceeded() {
+    fn test_announcement_cache_insert_cap_exceeded() {
         let mut cache = AnnouncementCache::new(3, 3600);
         let now = timestamp_now();
 
@@ -499,7 +499,7 @@ mod tests {
     }
 
     #[test]
-    fn test_annoucement_cache_random() {
+    fn test_announcement_cache_random() {
         let mut cache = AnnouncementCache::new(100, 3600);
         let now = timestamp_now();
 
@@ -515,7 +515,7 @@ mod tests {
     }
 
     #[test]
-    fn test_annoucement_cache_all() {
+    fn test_announcement_cache_all() {
         let mut cache = AnnouncementCache::new(100, 3600);
         let now = timestamp_now();
@@ -20,7 +20,7 @@ pub struct LogSyncConfig {
     // blockchain provider retry params
     // the number of retries after a connection times out
     pub rate_limit_retries: u32,
-    // the nubmer of retries for rate limited responses
+    // the number of retries for rate limited responses
    pub timeout_retries: u32,
    // the duration to wait before retry, in ms
    pub initial_backoff: u64,
@@ -10,6 +10,7 @@ zgs_spec = { path = "../../common/spec" }
 zgs_seal = { path = "../../common/zgs_seal" }
 task_executor = { path = "../../common/task_executor" }
 contract-interface = { path = "../../common/contract-interface" }
+contract-wrapper = { path = "../../common/contract-wrapper" }
 lighthouse_metrics = { path = "../../common/lighthouse_metrics" }
 ethereum-types = "0.14"
 tokio = { version = "1.19.2", features = ["full"] }
@@ -2,7 +2,8 @@ use std::str::FromStr;
 use std::sync::Arc;
 use std::time::Duration;
 
-use ethereum_types::{Address, H256, U256};
+use contract_wrapper::SubmitConfig;
+use ethereum_types::{Address, H256};
 use ethers::core::k256::SecretKey;
 use ethers::middleware::SignerMiddleware;
 use ethers::providers::Http;
@@ -21,7 +22,6 @@ pub struct MinerConfig {
     pub(crate) rpc_endpoint_url: String,
     pub(crate) mine_address: Address,
     pub(crate) flow_address: Address,
-    pub(crate) submission_gas: Option<U256>,
     pub(crate) cpu_percentage: u64,
     pub(crate) iter_batch: usize,
     pub(crate) shard_config: ShardConfig,
@@ -29,6 +29,7 @@ pub struct MinerConfig {
     pub(crate) rate_limit_retries: u32,
     pub(crate) timeout_retries: u32,
     pub(crate) initial_backoff: u64,
+    pub(crate) submission_config: SubmitConfig,
 }
 
 pub type MineServiceMiddleware = SignerMiddleware<Arc<Provider<RetryClient<Http>>>, LocalWallet>;
@@ -41,7 +42,6 @@ impl MinerConfig {
         rpc_endpoint_url: String,
         mine_address: Address,
         flow_address: Address,
-        submission_gas: Option<U256>,
         cpu_percentage: u64,
         iter_batch: usize,
         context_query_seconds: u64,
@@ -49,6 +49,7 @@ impl MinerConfig {
         rate_limit_retries: u32,
         timeout_retries: u32,
         initial_backoff: u64,
+        submission_config: SubmitConfig,
     ) -> Option<MinerConfig> {
         miner_key.map(|miner_key| MinerConfig {
             miner_id,
@@ -56,7 +57,6 @@ impl MinerConfig {
             rpc_endpoint_url,
             mine_address,
             flow_address,
-            submission_gas,
             cpu_percentage,
             iter_batch,
             shard_config,
@@ -64,6 +64,7 @@ impl MinerConfig {
             rate_limit_retries,
             timeout_retries,
             initial_backoff,
+            submission_config,
         })
     }
@@ -1,4 +1,4 @@
-use contract_interface::zgs_flow::MineContext;
+use contract_interface::pora_mine::MineContext;
 use ethereum_types::{H256, U256};
 use rand::{self, Rng};
 use std::time;
@@ -35,16 +35,23 @@ pub struct PoraService {
 #[derive(Debug, Clone, PartialEq, Eq)]
 pub(super) struct PoraPuzzle {
     context: MineContext,
-    target_quality: U256,
+    pora_target: U256,
     max_shards: u64,
+    subtask_digest: H256,
 }
 
 impl PoraPuzzle {
-    pub fn new(context: MineContext, target_quality: U256, max_shards: u64) -> Self {
+    pub fn new(
+        context: MineContext,
+        pora_target: U256,
+        max_shards: u64,
+        subtask_digest: H256,
+    ) -> Self {
         Self {
            context,
-            target_quality,
+            pora_target,
            max_shards,
+            subtask_digest,
        }
    }
@@ -255,7 +262,8 @@ impl PoraService {
             miner_id: &self.miner_id,
             mine_range_config: &self.mine_range,
             context: &puzzle.context,
-            target_quality: &puzzle.target_quality,
+            subtask_digest: &puzzle.subtask_digest,
+            pora_target: &puzzle.pora_target,
             loader: &*self.loader,
        })
    }
@@ -57,7 +57,7 @@ pub(crate) async fn check_and_request_miner_id(
         }
         (None, None) => {
             let beneficiary = provider.address();
-            let id = request_miner_id(&mine_contract, beneficiary).await?;
+            let id = request_miner_id(config, &mine_contract, beneficiary).await?;
             set_miner_id(store, &id)
                 .await
                 .map_err(|e| format!("set miner id on db corrupt: {:?}", e))?;
@@ -86,6 +86,7 @@ async fn check_miner_id(
 }
 
 async fn request_miner_id(
+    config: &MinerConfig,
     mine_contract: &PoraMine<MineServiceMiddleware>,
     beneficiary: Address,
 ) -> Result<H256, String> {
@@ -94,16 +95,13 @@ async fn request_miner_id(
     let submission_call: ContractCall<_, _> =
         mine_contract.request_miner_id(beneficiary, 0).legacy();
 
-    let pending_tx = submission_call
-        .send()
-        .await
-        .map_err(|e| format!("Fail to request miner id: {:?}", e))?;
-
-    let receipt = pending_tx
-        .retries(3)
-        .await
-        .map_err(|e| format!("Fail to execute mine answer transaction: {:?}", e))?
-        .ok_or("Request miner id transaction dropped after 3 retries")?;
+    let receipt = contract_wrapper::submit_with_retry(
+        submission_call,
+        &config.submission_config,
+        mine_contract.client().clone(),
+    )
+    .await
+    .map_err(|e| format!("Fail to submit miner id request: {:?}", e))?;
 
     let first_log = receipt
         .logs
@@ -2,8 +2,9 @@ use super::metrics::*;
 use crate::recall_range::RecallRange;
 use crate::{MineRangeConfig, PoraLoader};
 use blake2::{Blake2b512, Digest};
-use contract_interface::zgs_flow::MineContext;
+use contract_interface::pora_mine::MineContext;
 use ethereum_types::{H256, U256};
+use ethers::utils::keccak256;
 use lighthouse_metrics::inc_counter;
 use storage::log_store::MineLoadChunk;
 use tiny_keccak::{Hasher, Keccak};
@@ -24,7 +25,8 @@ pub(crate) struct Miner<'a> {
     pub range: RecallRange,
     pub miner_id: &'a H256,
     pub context: &'a MineContext,
-    pub target_quality: &'a U256,
+    pub subtask_digest: &'a H256,
+    pub pora_target: &'a U256,
     pub loader: &'a dyn PoraLoader,
     pub mine_range_config: &'a MineRangeConfig,
 }
@@ -106,11 +108,11 @@ impl<'a> Miner<'a> {
             .range
             .difficulty_scale_x64(self.context.flow_length.as_u64());
 
-        if quality <= (self.target_quality / difficulty_scale_x64) << 64 {
+        if quality <= (self.pora_target / difficulty_scale_x64) << 64 {
             debug!(
-                "Find a PoRA valid answer, quality: {}, target_quality {}, scale {:.3}",
+                "Find a PoRA valid answer, quality: {}, pora_target {}, scale {:.3}",
                 U256::MAX / quality,
-                U256::MAX / self.target_quality,
+                U256::MAX / self.pora_target,
                 difficulty_scale_x64.as_u128() as f64 / u64::MAX as f64
             );
             inc_counter(&HIT_COUNT);
@@ -138,7 +140,7 @@ impl<'a> Miner<'a> {
             let mut hasher = Blake2b512::new();
             hasher.update(self.miner_id);
             hasher.update(nonce);
-            hasher.update(self.context.digest);
+            hasher.update(self.subtask_digest);
             hasher.update(self.range.digest());
             hasher.finalize().into()
         };
@@ -148,7 +150,11 @@ impl<'a> Miner<'a> {
         let mut scratch_pad =
             [[0u8; BLAKE2B_OUTPUT_BYTES]; BYTES_PER_SCRATCHPAD / BLAKE2B_OUTPUT_BYTES];
         for scratch_pad_cell in scratch_pad.iter_mut() {
-            digest = Blake2b512::new().chain_update(digest).finalize().into();
+            let output0 = keccak256(digest);
+            digest[..32].copy_from_slice(&output0);
+            let output1 = keccak256(digest);
+            digest[32..].copy_from_slice(&output1);
 
             *scratch_pad_cell = digest;
        }
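The revised scratchpad rule above can be read in isolation as follows. This is a distilled sketch of the hunk (the `fill_scratchpad` wrapper is ours), using the same `ethers::utils::keccak256` helper the file now imports; with `BYTES_PER_SCRATCHPAD` lowered to 16 KB (see the spec hunk earlier) and 64-byte cells, the loop fills 16384 / 64 = 256 cells.

```rust
use ethers::utils::keccak256;

/// Distilled form of the new scratchpad loop: each 64-byte cell is produced by
/// two keccak256 passes over the running digest, first rewriting its front
/// half, then its back half, so every cell depends on the one before it.
fn fill_scratchpad(mut digest: [u8; 64], cells: usize) -> Vec<[u8; 64]> {
    let mut pad = Vec::with_capacity(cells);
    for _ in 0..cells {
        let output0 = keccak256(digest);      // first pass over all 64 bytes
        digest[..32].copy_from_slice(&output0);
        let output1 = keccak256(digest);      // second pass over the updated digest
        digest[32..].copy_from_slice(&output1);
        pad.push(digest);
    }
    pad
}
```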
@@ -87,7 +87,7 @@ impl Sealer {
 
     async fn update_flow_length(&mut self) -> Result<()> {
         let recent_context = self.flow_contract.make_context_with_result().call().await?;
-        debug!(target: "seal", "Recent context is {:?}", recent_context);
+        debug!("Recent context is {:?}", recent_context);
 
         let recent_flow_length = recent_context.flow_length.as_u64();
         if self.last_context_flow_length < recent_flow_length {
@ -46,6 +46,7 @@ impl MineService {
|
||||
msg_recv.resubscribe(),
|
||||
provider.clone(),
|
||||
&config,
|
||||
miner_id,
|
||||
);
|
||||
|
||||
let mine_answer_receiver = PoraService::spawn(
|
||||
|
@ -1,13 +1,11 @@
use contract_interface::PoraAnswer;
use contract_interface::{PoraMine, ZgsFlow};
use ethereum_types::U256;
use contract_wrapper::SubmitConfig;
use ethers::contract::ContractCall;
use ethers::prelude::{Http, Provider, RetryClient};
use ethers::providers::PendingTransaction;
use hex::ToHex;
use shared_types::FlowRangeProof;
use std::sync::Arc;
use std::time::Duration;
use storage::H256;
use storage_async::Store;
use task_executor::TaskExecutor;
@ -19,15 +17,13 @@ use crate::watcher::MineContextMessage;

use zgs_spec::{BYTES_PER_SEAL, SECTORS_PER_SEAL};

const SUBMISSION_RETRIES: usize = 15;

pub struct Submitter {
mine_answer_receiver: mpsc::UnboundedReceiver<AnswerWithoutProof>,
mine_context_receiver: broadcast::Receiver<MineContextMessage>,
mine_contract: PoraMine<MineServiceMiddleware>,
flow_contract: ZgsFlow<Provider<RetryClient<Http>>>,
default_gas_limit: Option<U256>,
store: Arc<Store>,
config: SubmitConfig,
}

impl Submitter {
@ -41,8 +37,7 @@ impl Submitter {
config: &MinerConfig,
) {
let mine_contract = PoraMine::new(config.mine_address, signing_provider);
let flow_contract = ZgsFlow::new(config.flow_address, provider);
let default_gas_limit = config.submission_gas;
let flow_contract = ZgsFlow::new(config.flow_address, provider.clone());

let submitter = Submitter {
mine_answer_receiver,
@ -50,7 +45,7 @@ impl Submitter {
mine_contract,
flow_contract,
store,
default_gas_limit,
config: config.submission_config,
};
executor.spawn(
async move { Box::pin(submitter.start()).await },
@ -134,11 +129,7 @@ impl Submitter {
};
trace!("submit_answer: answer={:?}", answer);

let mut submission_call: ContractCall<_, _> = self.mine_contract.submit(answer).legacy();

if let Some(gas_limit) = self.default_gas_limit {
submission_call = submission_call.gas(gas_limit);
}
let submission_call: ContractCall<_, _> = self.mine_contract.submit(answer).legacy();

if let Some(calldata) = submission_call.calldata() {
debug!(
@ -153,27 +144,13 @@ impl Submitter {
submission_call.estimate_gas().await
);

let pending_transaction: PendingTransaction<'_, _> = submission_call
.send()
.await
.map_err(|e| format!("Fail to send PoRA submission transaction: {:?}", e))?;

debug!(
"Signed submission transaction hash: {:?}",
pending_transaction.tx_hash()
);

let receipt = pending_transaction
.retries(SUBMISSION_RETRIES)
.interval(Duration::from_secs(2))
.await
.map_err(|e| format!("Fail to execute PoRA submission transaction: {:?}", e))?
.ok_or(format!(
"PoRA submission transaction dropped after {} retries",
SUBMISSION_RETRIES
))?;

info!("Submit PoRA success, receipt: {:?}", receipt);
contract_wrapper::submit_with_retry(
submission_call,
&self.config,
self.mine_contract.client().clone(),
)
.await
.map_err(|e| format!("Failed to submit mine answer: {:?}", e))?;

Ok(())
}

@ -1,6 +1,6 @@
#![allow(unused)]

use contract_interface::{zgs_flow::MineContext, PoraMine, ZgsFlow};
use contract_interface::{zgs_flow::MineContext, PoraMine, WorkerContext, ZgsFlow};
use ethereum_types::{Address, H256, U256};
use ethers::{
contract::Contract,
@ -28,6 +28,8 @@ lazy_static! {
H256::from_str("c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470").unwrap();
}

const PORA_VERSION: u64 = 1;

pub struct MineContextWatcher {
provider: Arc<Provider<RetryClient<Http>>>,
flow_contract: ZgsFlow<Provider<RetryClient<Http>>>,
@ -36,6 +38,7 @@ pub struct MineContextWatcher {
mine_context_sender: broadcast::Sender<MineContextMessage>,
last_report: MineContextMessage,
query_interval: Duration,
miner_id: H256,

msg_recv: broadcast::Receiver<MinerMessage>,
}
@ -46,6 +49,7 @@ impl MineContextWatcher {
msg_recv: broadcast::Receiver<MinerMessage>,
provider: Arc<Provider<RetryClient<Http>>>,
config: &MinerConfig,
miner_id: H256,
) -> broadcast::Receiver<MineContextMessage> {
let mine_contract = PoraMine::new(config.mine_address, provider.clone());
let flow_contract = ZgsFlow::new(config.flow_address, provider.clone());
@ -60,6 +64,7 @@ impl MineContextWatcher {
msg_recv,
last_report: None,
query_interval: config.context_query_interval,
miner_id,
};
executor.spawn(
async move { Box::pin(watcher.start()).await },
@ -105,28 +110,14 @@ impl MineContextWatcher {
}

async fn query_recent_context(&mut self) -> Result<(), String> {
let context_call = self.flow_contract.make_context_with_result();
let valid_call = self.mine_contract.can_submit();
let quality_call = self.mine_contract.pora_target();
let shards_call = self.mine_contract.max_shards();

let (context, can_submit, quality, max_shards) = try_join!(
context_call.call(),
valid_call.call(),
quality_call.call(),
shards_call.call()
)
.map_err(|e| format!("Failed to query mining context: {:?}", e))?;
let report = if can_submit && context.digest != EMPTY_HASH.0 {
Some(PoraPuzzle::new(context, quality, max_shards))
} else {
None
};
let report = self.fetch_pora_puzzle().await?;

if report == self.last_report {
return Ok(());
}

debug!("Update pora puzzle: {:?}", report);

self.mine_context_sender
.send(report.clone())
.map_err(|e| format!("Failed to send out the most recent mine context: {:?}", e))?;
@ -134,4 +125,41 @@ impl MineContextWatcher {

Ok(())
}

async fn fetch_pora_puzzle(&self) -> Result<Option<PoraPuzzle>, String> {
let pora_version = self
.mine_contract
.pora_version()
.call()
.await
.map_err(|e| format!("Failed to query mining version: {:?}", e))?;

if pora_version != PORA_VERSION {
return Ok(None);
}

let miner_id = self.miner_id.0;
let WorkerContext {
context,
pora_target,
subtask_digest,
max_shards,
} = self
.mine_contract
.compute_worker_context(miner_id)
.call()
.await
.map_err(|e| format!("Failed to query mining context: {:?}", e))?;

if pora_target.is_zero() || context.digest == EMPTY_HASH.0 {
return Ok(None);
}

Ok(Some(PoraPuzzle::new(
context,
pora_target,
max_shards,
H256(subtask_digest),
)))
}
}

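The reworked watcher above polls `compute_worker_context` and only broadcasts when the puzzle actually changes. A stripped-down sketch of that poll, dedup, and broadcast shape, with simplified types (`Puzzle` stands in for `PoraPuzzle` and `fetch` for the contract queries):

use tokio::sync::broadcast;
use tokio::time::{sleep, Duration};

#[derive(Clone, Debug, PartialEq)]
struct Puzzle(u64); // stand-in for PoraPuzzle

// Poll a puzzle source on a fixed interval and notify subscribers only when
// the report differs from the last one sent, mirroring the early return on
// `report == self.last_report` in the diff.
async fn watch(
    mut fetch: impl FnMut() -> Option<Puzzle>,
    sender: broadcast::Sender<Option<Puzzle>>,
    interval: Duration,
) {
    let mut last: Option<Puzzle> = None;
    loop {
        let report = fetch();
        if report != last {
            // A send error only means there is no live subscriber yet.
            let _ = sender.send(report.clone());
            last = report;
        }
        sleep(interval).await;
    }
}
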
@ -740,7 +740,7 @@ where
&error,
ConnectionDirection::Outgoing,
);
// inform failures of requests comming outside the behaviour
// inform failures of requests coming outside the behaviour
if let RequestId::Application(id) = id {
self.add_event(BehaviourEvent::RPCFailed { peer_id, id });
}

@ -103,7 +103,7 @@ pub struct Config {
/// Subscribe to all subnets for the duration of the runtime.
pub subscribe_all_subnets: bool,

/// Import/aggregate all attestations recieved on subscribed subnets for the duration of the
/// Import/aggregate all attestations received on subscribed subnets for the duration of the
/// runtime.
pub import_all_attestations: bool,

@ -84,7 +84,7 @@ enum EventStream {
InActive,
}

/// The main discovery service. This can be disabled via CLI arguements. When disabled the
/// The main discovery service. This can be disabled via CLI arguments. When disabled the
/// underlying processes are not started, but this struct still maintains our current ENR.
pub struct Discovery {
/// A collection of seen live ENRs for quick lookup and to map peer-id's to ENRs.

@ -134,7 +134,7 @@ impl NetworkBehaviour for PeerManager {
BanResult::NotBanned => {}
}

// Count dialing peers in the limit if the peer dialied us.
// Count dialing peers in the limit if the peer dialed us.
let count_dialing = endpoint.is_listener();
// Check the connection limits
if self.peer_limit_reached(count_dialing)

@ -19,7 +19,7 @@ pub struct Client {

#[derive(Clone, Copy, Debug, Serialize, PartialEq, Eq, AsRefStr, IntoStaticStr, EnumIter)]
pub enum ClientKind {
/// An Zgs node.
/// A Zgs node.
Zgs,
/// An unknown client.
Unknown,

@ -104,7 +104,7 @@ pub enum Version {
V1,
}

/// RPC Encondings supported.
/// RPC Encodings supported.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum Encoding {
SSZSnappy,

@ -54,7 +54,7 @@ impl Service {
struct Ev(PeerManagerEvent);
impl From<void::Void> for Ev {
fn from(_: void::Void) -> Self {
unreachable!("No events are emmited")
unreachable!("No events are emitted")
}
}
impl From<PeerManagerEvent> for Ev {

@ -276,7 +276,7 @@ impl Pruner {
}
}

async fn get_shard_config(store: &Store) -> Result<Option<ShardConfig>> {
pub async fn get_shard_config(store: &Store) -> Result<Option<ShardConfig>> {
store
.get_config_decoded(&SHARD_CONFIG_KEY, DATA_DB_KEY)
.await

@ -18,7 +18,6 @@ pub struct RpcServerImpl {

#[async_trait]
impl RpcServer for RpcServerImpl {
#[tracing::instrument(skip(self), err)]
async fn find_file(&self, tx_seq: u64) -> RpcResult<()> {
info!("admin_findFile({tx_seq})");

@ -39,7 +38,6 @@ impl RpcServer for RpcServerImpl {
}
}

#[tracing::instrument(skip(self), err)]
async fn shutdown(&self) -> RpcResult<()> {
info!("admin_shutdown()");

@ -51,7 +49,6 @@ impl RpcServer for RpcServerImpl {
.map_err(|e| error::internal_error(format!("Failed to send shutdown command: {:?}", e)))
}

#[tracing::instrument(skip(self), err)]
async fn start_sync_file(&self, tx_seq: u64) -> RpcResult<()> {
info!("admin_startSyncFile({tx_seq})");

@ -72,7 +69,6 @@ impl RpcServer for RpcServerImpl {
}
}

#[tracing::instrument(skip(self), err)]
async fn start_sync_chunks(
&self,
tx_seq: u64,
@ -102,7 +98,6 @@ impl RpcServer for RpcServerImpl {
}
}

#[tracing::instrument(skip(self), err)]
async fn terminate_sync(&self, tx_seq: u64) -> RpcResult<bool> {
info!("admin_terminateSync({tx_seq})");

@ -131,7 +126,6 @@ impl RpcServer for RpcServerImpl {
}
}

#[tracing::instrument(skip(self), err)]
async fn get_sync_status(&self, tx_seq: u64) -> RpcResult<String> {
info!("admin_getSyncStatus({tx_seq})");

@ -148,7 +142,6 @@ impl RpcServer for RpcServerImpl {
}
}

#[tracing::instrument(skip(self), err)]
async fn get_sync_info(&self, tx_seq: Option<u64>) -> RpcResult<HashMap<u64, FileSyncInfo>> {
info!(?tx_seq, "admin_getSyncInfo()");

@ -163,7 +156,6 @@ impl RpcServer for RpcServerImpl {
}
}

#[tracing::instrument(skip(self), err)]
async fn get_network_info(&self) -> RpcResult<NetworkInfo> {
info!("admin_getNetworkInfo()");

@ -63,7 +63,11 @@ pub trait Rpc {
async fn check_file_finalized(&self, tx_seq_or_root: TxSeqOrRoot) -> RpcResult<Option<bool>>;

#[method(name = "getFileInfo")]
async fn get_file_info(&self, data_root: DataRoot) -> RpcResult<Option<FileInfo>>;
async fn get_file_info(
&self,
data_root: DataRoot,
need_available: bool,
) -> RpcResult<Option<FileInfo>>;

#[method(name = "getFileInfoByTxSeq")]
async fn get_file_info_by_tx_seq(&self, tx_seq: u64) -> RpcResult<Option<FileInfo>>;

@ -17,7 +17,6 @@ pub struct RpcServerImpl {

#[async_trait]
impl RpcServer for RpcServerImpl {
#[tracing::instrument(skip(self), err)]
async fn get_status(&self) -> RpcResult<Status> {
info!("zgs_getStatus()");
let sync_progress = self
@ -96,7 +95,7 @@ impl RpcServer for RpcServerImpl {
let tx_seq = try_option!(
self.ctx
.log_store
.get_tx_seq_by_data_root(&data_root)
.get_tx_seq_by_data_root(&data_root, true)
.await?
);

@ -122,7 +121,12 @@ impl RpcServer for RpcServerImpl {
) -> RpcResult<Option<SegmentWithProof>> {
info!(%data_root, %index, "zgs_downloadSegmentWithProof");

let tx = try_option!(self.ctx.log_store.get_tx_by_data_root(&data_root).await?);
let tx = try_option!(
self.ctx
.log_store
.get_tx_by_data_root(&data_root, true)
.await?
);

self.get_segment_with_proof_by_tx(tx, index).await
}
@ -145,7 +149,12 @@ impl RpcServer for RpcServerImpl {
let seq = match tx_seq_or_root {
TxSeqOrRoot::TxSeq(v) => v,
TxSeqOrRoot::Root(v) => {
try_option!(self.ctx.log_store.get_tx_seq_by_data_root(&v).await?)
try_option!(
self.ctx
.log_store
.get_tx_seq_by_data_root(&v, false)
.await?
)
}
};

@ -164,10 +173,19 @@ impl RpcServer for RpcServerImpl {
}
}

async fn get_file_info(&self, data_root: DataRoot) -> RpcResult<Option<FileInfo>> {
async fn get_file_info(
&self,
data_root: DataRoot,
need_available: bool,
) -> RpcResult<Option<FileInfo>> {
debug!(%data_root, "zgs_getFileInfo");

let tx = try_option!(self.ctx.log_store.get_tx_by_data_root(&data_root).await?);
let tx = try_option!(
self.ctx
.log_store
.get_tx_by_data_root(&data_root, need_available)
.await?
);

Ok(Some(self.get_file_info_by_tx(tx).await?))
}
@ -289,7 +307,7 @@ impl RpcServerImpl {
let maybe_tx = self
.ctx
.log_store
.get_tx_by_data_root(&segment.root)
.get_tx_by_data_root(&segment.root, false)
.await?;

self.put_segment_with_maybe_tx(segment, maybe_tx).await

@ -7,7 +7,7 @@ use network::{
self, new_network_channel, Keypair, NetworkConfig, NetworkGlobals, NetworkReceiver,
NetworkSender, RequestId, Service as LibP2PService,
};
use pruner::{Pruner, PrunerConfig, PrunerMessage};
use pruner::{get_shard_config, Pruner, PrunerConfig, PrunerMessage};
use router::RouterService;
use rpc::RPCConfig;
use std::sync::Arc;
@ -203,7 +203,7 @@ impl ClientBuilder {
if let Some(config) = config {
let executor = require!("miner", self, runtime_context).clone().executor;
let network_send = require!("miner", self, network).send.clone();
let store = self.async_store.as_ref().unwrap().clone();
let store = require!("miner", self, async_store).clone();

let send = MineService::spawn(executor, network_send, config, store).await?;
self.miner = Some(MinerComponents { send });
@ -225,7 +225,11 @@ impl ClientBuilder {
Ok(self)
}

pub async fn with_shard(self, config: ShardConfig) -> Result<Self, String> {
pub async fn with_shard(self, mut config: ShardConfig) -> Result<Self, String> {
let store = require!("shard", self, async_store).clone();
if let Some(stored_config) = get_shard_config(store.as_ref()).await.unwrap_or(None) {
config = stored_config;
}
self.async_store
.as_ref()
.unwrap()

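The `with_shard` change gives a previously persisted shard config precedence over the one supplied at startup, so a node keeps its sharding decision across restarts. The rule in isolation (a sketch with a stand-in type):

#[derive(Clone, Copy, Debug)]
struct ShardConfig; // stand-in for the real type

// A stored config, when present, wins over the CLI/config-file value.
fn effective_shard_config(cli: ShardConfig, stored: Option<ShardConfig>) -> ShardConfig {
    stored.unwrap_or(cli)
}
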
@ -1,4 +1,4 @@
//! This crate aims to provide a common set of tools that can be used to create a "environment" to
//! This crate aims to provide a common set of tools that can be used to create an "environment" to
//! run Zgs services. This allows for the unification of creating tokio runtimes, etc.
//!
//! The idea is that the main thread creates an `Environment`, which is then used to spawn a

@ -1,7 +1,7 @@
#![allow(clippy::field_reassign_with_default)]

use crate::ZgsConfig;
use ethereum_types::{H256, U256};
use ethereum_types::H256;
use ethers::prelude::{Http, Middleware, Provider};
use log_entry_sync::{CacheConfig, ContractAddress, LogSyncConfig};
use miner::MinerConfig;
@ -179,7 +179,6 @@ impl ZgsConfig {
} else {
None
};
let submission_gas = self.miner_submission_gas.map(U256::from);
let cpu_percentage = self.miner_cpu_percentage;
let iter_batch = self.mine_iter_batch_size;
let context_query_seconds = self.mine_context_query_seconds;
@ -192,7 +191,6 @@ impl ZgsConfig {
self.blockchain_rpc_endpoint.clone(),
mine_address,
flow_address,
submission_gas,
cpu_percentage,
iter_batch,
context_query_seconds,
@ -200,6 +198,7 @@ impl ZgsConfig {
self.rate_limit_retries,
self.timeout_retries,
self.initial_backoff,
self.submission_config,
))
}

@ -74,7 +74,6 @@ build_config! {
(mine_contract_address, (String), "".to_string())
(miner_id, (Option<String>), None)
(miner_key, (Option<String>), None)
(miner_submission_gas, (Option<u64>), None)
(miner_cpu_percentage, (u64), 100)
(mine_iter_batch_size, (usize), 100)
(reward_contract_address, (String), "".to_string())
@ -106,6 +105,9 @@ pub struct ZgsConfig {
// rpc config, configured by [rpc] section by `config` crate.
pub rpc: rpc::RPCConfig,

// submission config, configured by [submission_config] section by `config` crate.
pub submission_config: contract_wrapper::SubmitConfig,

// metrics config, configured by [metrics] section by `config` crate.
pub metrics: metrics::MetricsConfiguration,
}

@ -1,7 +1,8 @@
use task_executor::TaskExecutor;
use tracing::Level;
use tracing_log::AsLog;
use tracing_subscriber::EnvFilter;
use tracing_subscriber::layer::SubscriberExt;
use tracing_subscriber::util::SubscriberInitExt;
use tracing_subscriber::{EnvFilter, Layer};

const LOG_RELOAD_PERIOD_SEC: u64 = 30;

@ -15,19 +16,26 @@ pub fn configure(log_level_file: &str, log_directory: &str, executor: TaskExecut
.unwrap_or_default()
.trim_end()
.to_string();
let filter = EnvFilter::try_new(config.clone()).expect("invalid log level");
let (filter, reload_handle) = tracing_subscriber::reload::Layer::new(filter);

let builder = tracing_subscriber::fmt()
.with_max_level(Level::TRACE)
.with_env_filter(EnvFilter::try_new(config.clone()).expect("invalid log level"))
let fmt_layer = tracing_subscriber::fmt::layer()
.with_writer(non_blocking)
.with_ansi(false)
// .with_file(true)
// .with_line_number(true)
// .with_thread_names(true)
.with_filter_reloading();

let handle = builder.reload_handle();
builder.init();
.compact()
.with_filter(filter);
// .with_file(true)
// .with_line_number(true)
// .with_thread_names(true)
let subscriber = tracing_subscriber::registry().with(fmt_layer);
#[cfg(feature = "tokio-console")]
{
subscriber.with(console_subscriber::spawn()).init();
}
#[cfg(not(feature = "tokio-console"))]
{
subscriber.init();
}

// periodically check for config changes
executor.spawn(
@ -57,7 +65,7 @@ pub fn configure(log_level_file: &str, log_directory: &str, executor: TaskExecut

println!("Updating log config to {:?}", new_config);

match handle.reload(&new_config) {
match reload_handle.reload(&new_config) {
Ok(()) => {
rust_log::set_max_level(tracing_core::LevelFilter::current().as_log());
config = new_config

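The logging rewrite above moves from `fmt()`'s builder-level `with_filter_reloading` to a `reload::Layer` wrapped around the `EnvFilter` in a layered registry. A minimal, self-contained sketch of that setup (assuming `tracing-subscriber` 0.3 with the default `env-filter` feature; the filter strings are placeholders):

use tracing_subscriber::layer::SubscriberExt;
use tracing_subscriber::util::SubscriberInitExt;
use tracing_subscriber::{reload, EnvFilter, Layer};

fn main() {
    let filter = EnvFilter::try_new("debug,hyper=info").expect("invalid log level");
    let (filter, reload_handle) = reload::Layer::new(filter);

    // Attach the reloadable filter to a compact fmt layer, as in the diff.
    let fmt_layer = tracing_subscriber::fmt::layer()
        .with_ansi(false)
        .compact()
        .with_filter(filter);

    tracing_subscriber::registry().with(fmt_layer).init();

    // Later, e.g. from a config-watcher task, swap the filter in place:
    reload_handle
        .reload(EnvFilter::new("info"))
        .expect("failed to reload log filter");
}
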
@ -23,6 +23,8 @@ async fn start_node(context: RuntimeContext, config: ZgsConfig) -> Result<Client
ClientBuilder::default()
.with_runtime_context(context)
.with_rocksdb_store(&storage_config)?
.with_shard(shard_config)
.await?
.with_log_sync(log_sync_config)
.await?
.with_file_location_cache(config.file_location_cache)
@ -34,8 +36,6 @@ async fn start_node(context: RuntimeContext, config: ZgsConfig) -> Result<Client
.await?
.with_miner(miner_config)
.await?
.with_shard(shard_config)
.await?
.with_pruner(pruner_config)
.await?
.with_rpc(config.rpc)

@ -59,15 +59,23 @@ impl Store {
delegate!(fn get_proof_at_root(root: Option<DataRoot>, index: u64, length: u64) -> Result<FlowRangeProof>);
delegate!(fn get_context() -> Result<(DataRoot, u64)>);

pub async fn get_tx_seq_by_data_root(&self, data_root: &DataRoot) -> Result<Option<u64>> {
pub async fn get_tx_seq_by_data_root(
&self,
data_root: &DataRoot,
need_available: bool,
) -> Result<Option<u64>> {
let root = *data_root;
self.spawn(move |store| store.get_tx_seq_by_data_root(&root))
self.spawn(move |store| store.get_tx_seq_by_data_root(&root, need_available))
.await
}

pub async fn get_tx_by_data_root(&self, data_root: &DataRoot) -> Result<Option<Transaction>> {
pub async fn get_tx_by_data_root(
&self,
data_root: &DataRoot,
need_available: bool,
) -> Result<Option<Transaction>> {
let root = *data_root;
self.spawn(move |store| store.get_tx_by_data_root(&root))
self.spawn(move |store| store.get_tx_by_data_root(&root, need_available))
.await
}

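`storage_async::Store` forwards each call to the synchronous store through its `spawn` helper, copying owned arguments into the closure. A generic sketch of that delegation pattern using plain `tokio::task::spawn_blocking` (the `blocking_store_lookup` stand-in is hypothetical):

use tokio::task;

// Hypothetical stand-in for the synchronous LogStoreRead lookup.
fn blocking_store_lookup(_root: &[u8; 32], _need_available: bool) -> anyhow::Result<Option<u64>> {
    Ok(None)
}

// Copy owned arguments, then run the blocking lookup off the async runtime's
// core threads; the outer `?` surfaces a panicked or cancelled join as an error.
async fn get_tx_seq_by_data_root(
    root: [u8; 32],
    need_available: bool,
) -> anyhow::Result<Option<u64>> {
    task::spawn_blocking(move || blocking_store_lookup(&root, need_available)).await?
}
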
@ -511,7 +511,7 @@ impl LogStoreChunkRead for LogManager {
index_start: usize,
index_end: usize,
) -> crate::error::Result<Option<ChunkArray>> {
let tx_seq = try_option!(self.get_tx_seq_by_data_root(data_root)?);
let tx_seq = try_option!(self.get_tx_seq_by_data_root(data_root, true)?);
self.get_chunks_by_tx_and_index_range(tx_seq, index_start, index_end)
}

@ -536,13 +536,27 @@ impl LogStoreRead for LogManager {
self.tx_store.get_tx_by_seq_number(seq)
}

fn get_tx_seq_by_data_root(&self, data_root: &DataRoot) -> crate::error::Result<Option<u64>> {
fn get_tx_seq_by_data_root(
&self,
data_root: &DataRoot,
need_available: bool,
) -> crate::error::Result<Option<u64>> {
let seq_list = self.tx_store.get_tx_seq_list_by_data_root(data_root)?;
let mut available_seq = None;
for tx_seq in &seq_list {
if self.tx_store.check_tx_completed(*tx_seq)? {
// Return the first finalized tx if possible.
return Ok(Some(*tx_seq));
}
if need_available
&& available_seq.is_none()
&& !self.tx_store.check_tx_pruned(*tx_seq)?
{
available_seq = Some(*tx_seq);
}
}
if need_available {
return Ok(available_seq);
}
// No tx is finalized, return the first one.
Ok(seq_list.first().cloned())
@ -1157,6 +1171,7 @@ impl LogManager {
.get_tx_by_seq_number(from_tx_seq)?
.ok_or_else(|| anyhow!("from tx missing"))?;
let mut to_tx_offset_list = Vec::with_capacity(to_tx_seq_list.len());

for seq in to_tx_seq_list {
// No need to copy data for completed tx.
if self.check_tx_completed(seq)? {

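The selection policy in the `get_tx_seq_by_data_root` hunk above can be read in isolation: prefer the first finalized tx; with `need_available` set, fall back to the first non-pruned tx; otherwise fall back to the first candidate. A store-free sketch with the tx-store checks abstracted into predicates:

fn pick_tx_seq(
    seq_list: &[u64],
    finalized: impl Fn(u64) -> bool,
    pruned: impl Fn(u64) -> bool,
    need_available: bool,
) -> Option<u64> {
    let mut available_seq = None;
    for &tx_seq in seq_list {
        if finalized(tx_seq) {
            // The first finalized tx always wins.
            return Some(tx_seq);
        }
        if need_available && available_seq.is_none() && !pruned(tx_seq) {
            available_seq = Some(tx_seq);
        }
    }
    if need_available {
        return available_seq;
    }
    // No tx finalized: fall back to the first candidate.
    seq_list.first().copied()
}
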
@ -31,14 +31,22 @@ pub trait LogStoreRead: LogStoreChunkRead {
fn get_tx_by_seq_number(&self, seq: u64) -> Result<Option<Transaction>>;

/// Get a transaction by the data root of its data.
/// If all txs are not finalized, return the first one.
/// If all txs are not finalized, return the first one if need available is false.
/// Otherwise, return the first finalized tx.
fn get_tx_seq_by_data_root(&self, data_root: &DataRoot) -> Result<Option<u64>>;
fn get_tx_seq_by_data_root(
&self,
data_root: &DataRoot,
need_available: bool,
) -> Result<Option<u64>>;

/// If all txs are not finalized, return the first one.
/// If all txs are not finalized, return the first one if need available is false.
/// Otherwise, return the first finalized tx.
fn get_tx_by_data_root(&self, data_root: &DataRoot) -> Result<Option<Transaction>> {
match self.get_tx_seq_by_data_root(data_root)? {
fn get_tx_by_data_root(
&self,
data_root: &DataRoot,
need_available: bool,
) -> Result<Option<Transaction>> {
match self.get_tx_seq_by_data_root(data_root, need_available)? {
Some(seq) => self.get_tx_by_seq_number(seq),
None => Ok(None),
}

@ -1,4 +1,7 @@
use crate::{controllers::SyncState, SyncRequest, SyncResponse, SyncSender};
use crate::{
controllers::{FailureReason, SyncState},
SyncRequest, SyncResponse, SyncSender,
};
use anyhow::{bail, Result};
use serde::{Deserialize, Serialize};
use std::{collections::HashSet, fmt::Debug, sync::Arc, time::Duration};
@ -126,7 +129,10 @@ impl Batcher {
"Failed to sync file and terminate the failed file sync"
);
self.terminate_file_sync(tx_seq, false).await;
Ok(Some(SyncResult::Failed))
match reason {
FailureReason::TimeoutFindFile => Ok(Some(SyncResult::Timeout)),
_ => Ok(Some(SyncResult::Failed)),
}
}

// finding peers timeout

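The batcher change above stops collapsing every terminated sync into `Failed`: a find-file timeout is now reported as `Timeout`, so dashboards can tell unreachable files apart from genuine sync failures. The mapping as a standalone sketch, with local stand-in enums:

#[derive(Debug)]
enum FailureReason {
    TimeoutFindFile,
    Other, // stand-in for the remaining variants
}

#[derive(Debug, Clone, Copy)]
enum SyncResult {
    Completed,
    Failed,
    Timeout,
}

// Distinguish "no peer had the file in time" from a genuine sync failure.
fn classify(reason: FailureReason) -> SyncResult {
    match reason {
        FailureReason::TimeoutFindFile => SyncResult::Timeout,
        _ => SyncResult::Failed,
    }
}
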
@ -1,6 +1,6 @@
use super::{batcher::Batcher, sync_store::SyncStore};
use super::{batcher::Batcher, metrics::RandomBatcherMetrics, sync_store::SyncStore};
use crate::{
auto_sync::{batcher::SyncResult, metrics, sync_store::Queue},
auto_sync::{batcher::SyncResult, sync_store::Queue},
Config, SyncSender,
};
use anyhow::Result;
@ -19,6 +19,7 @@ pub struct RandomBatcherState {
pub tasks: Vec<u64>,
pub pending_txs: usize,
pub ready_txs: usize,
pub cached_ready_txs: usize,
}

#[derive(Clone)]
@ -27,6 +28,7 @@ pub struct RandomBatcher {
config: Config,
batcher: Batcher,
sync_store: Arc<SyncStore>,
metrics: Arc<RandomBatcherMetrics>,
}

impl RandomBatcher {
@ -36,6 +38,7 @@ impl RandomBatcher {
store: Store,
sync_send: SyncSender,
sync_store: Arc<SyncStore>,
metrics: Arc<RandomBatcherMetrics>,
) -> Self {
Self {
name,
@ -47,17 +50,19 @@ impl RandomBatcher {
sync_send,
),
sync_store,
metrics,
}
}

pub async fn get_state(&self) -> Result<RandomBatcherState> {
let (pending_txs, ready_txs) = self.sync_store.stat().await?;
let (pending_txs, ready_txs, cached_ready_txs) = self.sync_store.stat().await?;

Ok(RandomBatcherState {
name: self.name.clone(),
tasks: self.batcher.tasks().await,
pending_txs,
ready_txs,
cached_ready_txs,
})
}

@ -71,11 +76,10 @@ impl RandomBatcher {
}

loop {
// if let Ok(state) = self.get_state().await {
//     metrics::RANDOM_STATE_TXS_SYNCING.update(state.tasks.len() as u64);
//     metrics::RANDOM_STATE_TXS_READY.update(state.ready_txs as u64);
//     metrics::RANDOM_STATE_TXS_PENDING.update(state.pending_txs as u64);
// }
if let Ok(state) = self.get_state().await {
self.metrics
.update_state(state.ready_txs, state.pending_txs);
}

match self.sync_once().await {
Ok(true) => {}
@ -106,11 +110,7 @@ impl RandomBatcher {
};

debug!(%tx_seq, ?sync_result, "Completed to sync file, state = {:?}", self.get_state().await);
match sync_result {
SyncResult::Completed => metrics::RANDOM_SYNC_RESULT_COMPLETED.mark(1),
SyncResult::Failed => metrics::RANDOM_SYNC_RESULT_FAILED.inc(1),
SyncResult::Timeout => metrics::RANDOM_SYNC_RESULT_TIMEOUT.inc(1),
}
self.metrics.update_result(sync_result);

if matches!(sync_result, SyncResult::Completed) {
self.sync_store.remove(tx_seq).await?;

@ -45,7 +45,7 @@ impl HistoricalTxWriter {
}

pub async fn get_state(&self) -> Result<HistoricalTxWriterState> {
let (pending_txs, ready_txs) = self.sync_store.stat().await?;
let (pending_txs, ready_txs, _) = self.sync_store.stat().await?;

Ok(HistoricalTxWriterState {
next_tx_seq: self.next_tx_seq.load(Ordering::Relaxed),

@ -19,6 +19,7 @@ use super::{
batcher_random::RandomBatcher,
batcher_serial::SerialBatcher,
historical_tx_writer::HistoricalTxWriter,
metrics,
sync_store::{Queue, SyncStore},
};

@ -45,11 +46,12 @@ impl AutoSyncManager {
// use v2 db to avoid reading v1 files that announced from the whole network instead of neighbors
Arc::new(SyncStore::new_with_name(
store.clone(),
config.ready_txs_cache_cap,
"pendingv2",
"readyv2",
))
} else {
Arc::new(SyncStore::new(store.clone()))
Arc::new(SyncStore::new(store.clone(), 0))
};
let catched_up = Arc::new(AtomicBool::new(false));

@ -83,6 +85,7 @@ impl AutoSyncManager {
store.clone(),
sync_send.clone(),
sync_store,
metrics::RANDOM_ANNOUNCED.clone(),
);
executor.spawn(random.clone().start(catched_up.clone()), "auto_sync_random");

@ -96,6 +99,7 @@ impl AutoSyncManager {
if config.neighbors_only {
let historical_sync_store = Arc::new(SyncStore::new_with_name(
store.clone(),
0,
"pendingv2_historical",
"readyv2_historical",
));
@ -111,6 +115,7 @@ impl AutoSyncManager {
store,
sync_send,
historical_sync_store,
metrics::RANDOM_HISTORICAL.clone(),
);
executor.spawn(
random_historical.start(catched_up.clone()),

@ -1,6 +1,46 @@
use std::sync::Arc;

use metrics::{register_meter, Counter, CounterUsize, Gauge, GaugeUsize, Histogram, Meter, Sample};
use metrics::{
register_meter, register_meter_with_group, Counter, CounterUsize, Gauge, GaugeUsize, Histogram,
Meter, Sample,
};

use super::batcher::SyncResult;

#[derive(Clone)]
pub struct RandomBatcherMetrics {
pub ready_txs: Arc<dyn Gauge<usize>>,
pub pending_txs: Arc<dyn Gauge<usize>>,

pub completed_qps: Arc<dyn Meter>,
pub failed_qps: Arc<dyn Meter>,
pub timeout_qps: Arc<dyn Meter>,
}

impl RandomBatcherMetrics {
pub fn new(group_name: &str) -> Self {
Self {
ready_txs: GaugeUsize::register_with_group(group_name, "ready_txs"),
pending_txs: GaugeUsize::register_with_group(group_name, "pending_txs"),
completed_qps: register_meter_with_group(group_name, "completed_qps"),
failed_qps: register_meter_with_group(group_name, "failed_qps"),
timeout_qps: register_meter_with_group(group_name, "timeout_qps"),
}
}

pub fn update_state(&self, ready_txs: usize, pending_txs: usize) {
self.ready_txs.update(ready_txs);
self.pending_txs.update(pending_txs);
}

pub fn update_result(&self, result: SyncResult) {
match result {
SyncResult::Completed => self.completed_qps.mark(1),
SyncResult::Failed => self.failed_qps.mark(1),
SyncResult::Timeout => self.timeout_qps.mark(1),
}
}
}

lazy_static::lazy_static! {
// sequential auto sync
@ -14,11 +54,6 @@ lazy_static::lazy_static! {
pub static ref SEQUENTIAL_SYNC_RESULT_TIMEOUT: Arc<dyn Counter<usize>> = CounterUsize::register("sync_auto_sequential_sync_result_timeout");

// random auto sync
// pub static ref RANDOM_STATE_TXS_SYNCING: Arc<dyn Histogram> = Sample::ExpDecay(0.015).register("sync_auto_random_state_txs_syncing", 1024);
// pub static ref RANDOM_STATE_TXS_READY: Arc<dyn Histogram> = Sample::ExpDecay(0.015).register("sync_auto_random_state_txs_ready", 1024);
// pub static ref RANDOM_STATE_TXS_PENDING: Arc<dyn Histogram> = Sample::ExpDecay(0.015).register("sync_auto_random_state_txs_pending", 1024);

pub static ref RANDOM_SYNC_RESULT_COMPLETED: Arc<dyn Meter> = register_meter("sync_auto_random_sync_result_completed");
pub static ref RANDOM_SYNC_RESULT_FAILED: Arc<dyn Counter<usize>> = CounterUsize::register("sync_auto_random_sync_result_failed");
pub static ref RANDOM_SYNC_RESULT_TIMEOUT: Arc<dyn Counter<usize>> = CounterUsize::register("sync_auto_random_sync_result_timeout");
pub static ref RANDOM_ANNOUNCED: Arc<RandomBatcherMetrics> = Arc::new(RandomBatcherMetrics::new("sync_auto_random_announced"));
pub static ref RANDOM_HISTORICAL: Arc<RandomBatcherMetrics> = Arc::new(RandomBatcherMetrics::new("sync_auto_random_historical"));
}

@ -1,4 +1,4 @@
use super::tx_store::TxStore;
use super::tx_store::{CachedTxStore, TxStore};
use anyhow::Result;
use std::sync::Arc;
use storage::log_store::{
@ -33,35 +33,36 @@ pub struct SyncStore {

/// Ready transactions to sync with high priority since announcement
/// already received from other peers.
ready_txs: TxStore,
ready_txs: CachedTxStore,
}

impl SyncStore {
pub fn new(store: Store) -> Self {
Self {
store: Arc::new(RwLock::new(store)),
pending_txs: TxStore::new("pending"),
ready_txs: TxStore::new("ready"),
}
pub fn new(store: Store, ready_txs_cache_cap: usize) -> Self {
Self::new_with_name(store, ready_txs_cache_cap, "pending", "ready")
}

pub fn new_with_name(store: Store, pending: &'static str, ready: &'static str) -> Self {
pub fn new_with_name(
store: Store,
ready_txs_cache_cap: usize,
pending: &'static str,
ready: &'static str,
) -> Self {
Self {
store: Arc::new(RwLock::new(store)),
pending_txs: TxStore::new(pending),
ready_txs: TxStore::new(ready),
ready_txs: CachedTxStore::new(ready, ready_txs_cache_cap),
}
}

/// Returns the number of pending txs and ready txs.
pub async fn stat(&self) -> Result<(usize, usize)> {
pub async fn stat(&self) -> Result<(usize, usize, usize)> {
let async_store = self.store.read().await;
let store = async_store.get_store();

let num_pending_txs = self.pending_txs.count(store)?;
let num_ready_txs = self.ready_txs.count(store)?;
let (num_ready_txs, num_cached_ready_txs) = self.ready_txs.count(store).await?;

Ok((num_pending_txs, num_ready_txs))
Ok((num_pending_txs, num_ready_txs, num_cached_ready_txs))
}

pub async fn get_tx_seq_range(&self) -> Result<(Option<u64>, Option<u64>)> {
@ -112,7 +113,7 @@ impl SyncStore {

match queue {
Queue::Ready => {
if !self.ready_txs.add(store, Some(&mut tx), tx_seq)? {
if !self.ready_txs.add(store, Some(&mut tx), tx_seq).await? {
return Ok(InsertResult::AlreadyExists);
}

@ -130,7 +131,7 @@ impl SyncStore {
return Ok(InsertResult::AlreadyExists);
}

let removed = self.ready_txs.remove(store, Some(&mut tx), tx_seq)?;
let removed = self.ready_txs.remove(store, Some(&mut tx), tx_seq).await?;
store.exec_configs(tx, DATA_DB_KEY)?;

if removed {
@ -152,7 +153,7 @@ impl SyncStore {
return Ok(false);
}

let added = self.ready_txs.add(store, Some(&mut tx), tx_seq)?;
let added = self.ready_txs.add(store, Some(&mut tx), tx_seq).await?;

store.exec_configs(tx, DATA_DB_KEY)?;

@ -164,7 +165,7 @@ impl SyncStore {
let store = async_store.get_store();

// try to find a tx in ready queue with high priority
if let Some(val) = self.ready_txs.random(store)? {
if let Some(val) = self.ready_txs.random(store).await? {
return Ok(Some(val));
}

@ -177,7 +178,7 @@ impl SyncStore {
let store = async_store.get_store();

// removed in ready queue
if self.ready_txs.remove(store, None, tx_seq)? {
if self.ready_txs.remove(store, None, tx_seq).await? {
return Ok(Some(Queue::Ready));
}

@ -199,7 +200,7 @@ mod tests {
#[tokio::test]
async fn test_tx_seq_range() {
let runtime = TestStoreRuntime::default();
let store = SyncStore::new(runtime.store.clone());
let store = SyncStore::new(runtime.store.clone(), 0);

// check values by default
assert_eq!(store.get_tx_seq_range().await.unwrap(), (None, None));
@ -215,7 +216,7 @@ mod tests {
#[tokio::test]
async fn test_insert() {
let runtime = TestStoreRuntime::default();
let store = SyncStore::new(runtime.store.clone());
let store = SyncStore::new(runtime.store.clone(), 0);

assert_eq!(store.contains(1).await.unwrap(), None);
assert_eq!(store.insert(1, Pending).await.unwrap(), NewAdded);
@ -234,7 +235,7 @@ mod tests {
#[tokio::test]
async fn test_upgrade() {
let runtime = TestStoreRuntime::default();
let store = SyncStore::new(runtime.store.clone());
let store = SyncStore::new(runtime.store.clone(), 0);

// cannot upgrade by default
assert!(!store.upgrade(3).await.unwrap());
@ -253,7 +254,7 @@ mod tests {
#[tokio::test]
async fn test_random() {
let runtime = TestStoreRuntime::default();
let store = SyncStore::new(runtime.store.clone());
let store = SyncStore::new(runtime.store.clone(), 0);

// no tx by default
assert_eq!(store.random().await.unwrap(), None);
@ -273,7 +274,7 @@ mod tests {
#[tokio::test]
async fn test_remove() {
let runtime = TestStoreRuntime::default();
let store = SyncStore::new(runtime.store.clone());
let store = SyncStore::new(runtime.store.clone(), 0);

// cannot remove by default
assert_eq!(store.remove(1).await.unwrap(), None);

@ -1,8 +1,12 @@
use std::collections::HashSet;

use anyhow::Result;
use rand::seq::IteratorRandom;
use rand::Rng;
use storage::log_store::config::{ConfigTx, ConfigurableExt};
use storage::log_store::log_manager::DATA_DB_KEY;
use storage::log_store::Store;
use tokio::sync::RwLock;

/// TxStore is used to store pending transactions that are to be synchronized in advance.
///
@ -138,6 +142,99 @@ impl TxStore {
}
}

/// Cache the recent inserted tx in memory for random pick with priority.
pub struct CachedTxStore {
tx_store: TxStore,
cache_cap: usize,
cache: RwLock<HashSet<u64>>,
}

impl CachedTxStore {
pub fn new(name: &'static str, cache_cap: usize) -> Self {
Self {
tx_store: TxStore::new(name),
cache_cap,
cache: Default::default(),
}
}

pub fn has(&self, store: &dyn Store, tx_seq: u64) -> Result<bool> {
self.tx_store.has(store, tx_seq)
}

pub async fn count(&self, store: &dyn Store) -> Result<(usize, usize)> {
if self.cache_cap == 0 {
return Ok((self.tx_store.count(store)?, 0));
}

let cache = self.cache.read().await;

Ok((self.tx_store.count(store)?, cache.len()))
}

pub async fn add(
&self,
store: &dyn Store,
db_tx: Option<&mut ConfigTx>,
tx_seq: u64,
) -> Result<bool> {
if self.cache_cap == 0 {
return self.tx_store.add(store, db_tx, tx_seq);
}

let mut cache = self.cache.write().await;

let added = self.tx_store.add(store, db_tx, tx_seq)?;

if added {
cache.insert(tx_seq);

if cache.len() > self.cache_cap {
if let Some(popped) = cache.iter().choose(&mut rand::thread_rng()).cloned() {
cache.remove(&popped);
}
}
}

Ok(added)
}

pub async fn random(&self, store: &dyn Store) -> Result<Option<u64>> {
if self.cache_cap == 0 {
return self.tx_store.random(store);
}

let cache = self.cache.read().await;

if let Some(v) = cache.iter().choose(&mut rand::thread_rng()).cloned() {
return Ok(Some(v));
}

self.tx_store.random(store)
}

pub async fn remove(
&self,
store: &dyn Store,
db_tx: Option<&mut ConfigTx>,
tx_seq: u64,
) -> Result<bool> {
if self.cache_cap == 0 {
return self.tx_store.remove(store, db_tx, tx_seq);
}

let mut cache: tokio::sync::RwLockWriteGuard<'_, HashSet<u64>> = self.cache.write().await;

let removed = self.tx_store.remove(store, db_tx, tx_seq)?;

if removed {
cache.remove(&tx_seq);
}

Ok(removed)
}
}

#[cfg(test)]
mod tests {
use crate::test_util::tests::TestStoreRuntime;

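The new `CachedTxStore` above keeps a bounded in-memory set alongside the persistent store and evicts a uniformly random element once the cap is exceeded. The cache policy on its own, as a sketch using the same `rand::seq::IteratorRandom` API:

use rand::seq::IteratorRandom;
use std::collections::HashSet;

struct CappedSet {
    cap: usize,
    inner: HashSet<u64>,
}

impl CappedSet {
    fn new(cap: usize) -> Self {
        Self { cap, inner: HashSet::new() }
    }

    // Insert, then evict one uniformly random element if over capacity, as
    // `CachedTxStore::add` does above.
    fn insert(&mut self, value: u64) {
        self.inner.insert(value);
        if self.inner.len() > self.cap {
            if let Some(victim) = self.inner.iter().choose(&mut rand::thread_rng()).copied() {
                self.inner.remove(&victim);
            }
        }
    }

    // Uniform random pick, mirroring `CachedTxStore::random`.
    fn random(&self) -> Option<u64> {
        self.inner.iter().choose(&mut rand::thread_rng()).copied()
    }
}
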
@ -62,6 +62,7 @@ pub struct Config {
pub sequential_find_peer_timeout: Duration,
#[serde(deserialize_with = "deserialize_duration")]
pub random_find_peer_timeout: Duration,
pub ready_txs_cache_cap: usize,
}

impl Default for Config {
@ -69,18 +70,18 @@ impl Default for Config {
Self {
// sync service config
neighbors_only: true,
heartbeat_interval: Duration::from_secs(5),
heartbeat_interval: Duration::from_secs(3),
auto_sync_enabled: false,
max_sync_files: 8,
max_sync_files: 16,
sync_file_by_rpc_enabled: true,
sync_file_on_announcement_enabled: false,

// serial sync config
max_chunks_to_request: 2 * 1024,
max_request_failures: 5,
max_request_failures: 3,
peer_connect_timeout: Duration::from_secs(15),
peer_disconnect_timeout: Duration::from_secs(15),
peer_find_timeout: Duration::from_secs(120),
peer_find_timeout: Duration::from_secs(5),
peer_chunks_download_timeout: Duration::from_secs(15),
peer_wait_outgoing_connection_timeout: Duration::from_secs(10),
peer_next_chunks_request_wait_timeout: Duration::from_secs(3),
@ -91,9 +92,10 @@ impl Default for Config {
auto_sync_idle_interval: Duration::from_secs(3),
auto_sync_error_interval: Duration::from_secs(10),
max_sequential_workers: 0,
max_random_workers: 2,
sequential_find_peer_timeout: Duration::from_secs(60),
random_find_peer_timeout: Duration::from_secs(500),
max_random_workers: 8,
sequential_find_peer_timeout: Duration::from_secs(5),
random_find_peer_timeout: Duration::from_secs(5),
ready_txs_cache_cap: 1_000_000,
}
}
}

@ -233,13 +233,13 @@ batcher_announcement_capacity = 100
auto_sync_enabled = true

# Maximum number of files in sync from other peers simultaneously.
# max_sync_files = 8
# max_sync_files = 16

# Enable to start a file sync via RPC (e.g. `admin_startSyncFile`).
# sync_file_by_rpc_enabled = true

# Maximum number of continous failures to terminate a file sync.
# max_request_failures = 5
# Maximum number of continuous failures to terminate a file sync.
# max_request_failures = 3

# Timeout to dial peers.
# peer_connect_timeout = "15s"
@ -248,7 +248,7 @@ auto_sync_enabled = true
# peer_disconnect_timeout = "15s"

# Timeout to find peers via FIND_FILE P2P pubsub message.
# peer_find_timeout = "120s"
# peer_find_timeout = "5s"

# Timeout to download data from remote peer.
# peer_chunks_download_timeout = "15s"
@ -261,13 +261,13 @@ auto_sync_enabled = true
# max_sequential_workers = 0

# Maximum threads to sync files randomly.
# max_random_workers = 2
# max_random_workers = 8

# Timeout to terminate a file sync in sequence.
# sequential_find_peer_timeout = "60s"
# sequential_find_peer_timeout = "5s"

# Timeout to terminate a file sync randomly.
# random_find_peer_timeout = "500s"
# random_find_peer_timeout = "5s"

#######################################################################
### File Location Cache Options ###

@ -245,13 +245,13 @@ batcher_announcement_capacity = 100
auto_sync_enabled = true

# Maximum number of files in sync from other peers simultaneously.
# max_sync_files = 8
# max_sync_files = 16

# Enable to start a file sync via RPC (e.g. `admin_startSyncFile`).
# sync_file_by_rpc_enabled = true

# Maximum number of continous failures to terminate a file sync.
# max_request_failures = 5
# Maximum number of continuous failures to terminate a file sync.
# max_request_failures = 3

# Timeout to dial peers.
# peer_connect_timeout = "15s"
@ -260,7 +260,7 @@ auto_sync_enabled = true
# peer_disconnect_timeout = "15s"

# Timeout to find peers via FIND_FILE P2P pubsub message.
# peer_find_timeout = "120s"
# peer_find_timeout = "5s"

# Timeout to download data from remote peer.
# peer_chunks_download_timeout = "15s"
@ -273,13 +273,13 @@ auto_sync_enabled = true
# max_sequential_workers = 0

# Maximum threads to sync files randomly.
# max_random_workers = 2
# max_random_workers = 8

# Timeout to terminate a file sync in sequence.
# sequential_find_peer_timeout = "60s"
# sequential_find_peer_timeout = "5s"

# Timeout to terminate a file sync randomly.
# random_find_peer_timeout = "500s"
# random_find_peer_timeout = "5s"

#######################################################################
### File Location Cache Options ###

@ -247,13 +247,13 @@
# auto_sync_enabled = false

# Maximum number of files in sync from other peers simultaneously.
# max_sync_files = 8
# max_sync_files = 16

# Enable to start a file sync via RPC (e.g. `admin_startSyncFile`).
# sync_file_by_rpc_enabled = true

# Maximum number of continous failures to terminate a file sync.
# max_request_failures = 5
# Maximum number of continuous failures to terminate a file sync.
# max_request_failures = 3

# Timeout to dial peers.
# peer_connect_timeout = "15s"
@ -262,7 +262,7 @@
# peer_disconnect_timeout = "15s"

# Timeout to find peers via FIND_FILE P2P pubsub message.
# peer_find_timeout = "120s"
# peer_find_timeout = "5s"

# Timeout to download data from remote peer.
# peer_chunks_download_timeout = "15s"
@ -275,13 +275,13 @@
# max_sequential_workers = 0

# Maximum threads to sync files randomly.
# max_random_workers = 2
# max_random_workers = 8

# Timeout to terminate a file sync in sequence.
# sequential_find_peer_timeout = "60s"
# sequential_find_peer_timeout = "5s"

# Timeout to terminate a file sync randomly.
# random_find_peer_timeout = "500s"
# random_find_peer_timeout = "5s"

#######################################################################
### File Location Cache Options ###

|
||||
debug,hyper=info,h2=info,rpc=info,discv5=info,jsonrpsee_http_server=info
|
||||
debug,hyper=info,h2=info,rpc=info,discv5=info,jsonrpsee_http_server=info,multistream_select=info,libp2p_core=info,libp2p_gossipsub=info
|
@ -1 +1 @@
|
||||
1.75.0
|
||||
1.78.0
|
||||
|
@ -1,12 +1,11 @@
|
||||
|
||||
set -e
|
||||
|
||||
artifacts_path="$1"
|
||||
|
||||
check_abis() {
|
||||
for contract_name in "$@"; do
|
||||
diff $(./scripts/search_abi.sh "$artifacts_path" "$contract_name.json") "storage-contracts-abis/$contract_name.json"
|
||||
diff "$(./scripts/search_abi.sh "$artifacts_path" "$contract_name.json")" "storage-contracts-abis/$contract_name.json"
|
||||
done
|
||||
}
|
||||
check_abis DummyMarket DummyReward Flow PoraMine PoraMineTest FixedPrice ChunkLinearReward FixedPriceFlow
|
||||
|
||||
check_abis DummyMarket DummyReward Flow PoraMine PoraMineTest FixedPrice ChunkLinearReward FixedPriceFlow
|
||||
|
@ -50,7 +50,7 @@ copy_file() {
|
||||
|
||||
copy_abis() {
|
||||
for contract_name in "$@"; do
|
||||
copy_file $(./scripts/search_abi.sh "$path/artifacts" "$contract_name.json") "storage-contracts-abis/$contract_name.json"
|
||||
copy_file "$(./scripts/search_abi.sh "$path/artifacts" "$contract_name.json")" "storage-contracts-abis/$contract_name.json"
|
||||
done
|
||||
}
|
||||
|
||||
|
@ -10,22 +10,22 @@ PUBLIC_IP=$(curl -s https://ipinfo.io/ip)
|
||||
FILE=run/config.toml
|
||||
|
||||
# enable sync
|
||||
sed -in-place='' 's/# \[sync\]/\[sync\]/g' $FILE
|
||||
sed -i 's/# \[sync\]/\[sync\]/g' $FILE
|
||||
# enable auto_sync
|
||||
sed -in-place='' 's/# auto_sync_enabled = false/auto_sync_enabled = true/g' $FILE
|
||||
sed -i 's/# auto_sync_enabled = false/auto_sync_enabled = true/g' $FILE
|
||||
# reduce timeout for finding peers
|
||||
sed -in-place='' 's/# find_peer_timeout = .*/find_peer_timeout = "10s"/g' $FILE
|
||||
sed -i 's/# find_peer_timeout = .*/find_peer_timeout = "10s"/g' $FILE
|
||||
# set public ip
|
||||
sed -in-place='' "s/# network_enr_address = .*/network_enr_address = \"$PUBLIC_IP\"/g" $FILE
|
||||
sed -i "s/# network_enr_address = .*/network_enr_address = \"$PUBLIC_IP\"/g" $FILE
|
||||
# set miner key
|
||||
sed -in-place='' "s/miner_key = \"\"/miner_key = \"$MINER_KEY\"/g" $FILE
|
||||
sed -i "s/miner_key = \"\"/miner_key = \"$MINER_KEY\"/g" $FILE
|
||||
# set miner contract address
|
||||
sed -in-place='' "s/mine_contract_address = .*/mine_contract_address = \"$MINE_CONTRACT\"/g" $FILE
|
||||
sed -i "s/mine_contract_address = .*/mine_contract_address = \"$MINE_CONTRACT\"/g" $FILE
|
||||
# set blockchain rpc endpoint
|
||||
sed -in-place='' "s|blockchain_rpc_endpoint = .*|blockchain_rpc_endpoint = \"$BLOCKCHAIN_RPC\"|g" $FILE
|
||||
sed -i "s|blockchain_rpc_endpoint = .*|blockchain_rpc_endpoint = \"$BLOCKCHAIN_RPC\"|g" $FILE
|
||||
# set flow contract address
|
||||
sed -in-place='' "s/log_contract_address = .*/log_contract_address = \"$FLOW_CONTRACT\"/g" $FILE
|
||||
sed -i "s/log_contract_address = .*/log_contract_address = \"$FLOW_CONTRACT\"/g" $FILE
|
||||
# set contract deployed block number
|
||||
sed -in-place='' "s/log_sync_start_block_number = .*/log_sync_start_block_number = $BLOCK_NUMBER/g" $FILE
|
||||
sed -i "s/log_sync_start_block_number = .*/log_sync_start_block_number = $BLOCK_NUMBER/g" $FILE
|
||||
# update the boot node ids
|
||||
sed -in-place='' 's|network_boot_nodes = .*|network_boot_nodes = ["/ip4/54.219.26.22/udp/1234/p2p/16Uiu2HAmTVDGNhkHD98zDnJxQWu3i1FL1aFYeh9wiQTNu4pDCgps","/ip4/52.52.127.117/udp/1234/p2p/16Uiu2HAkzRjxK2gorngB1Xq84qDrT4hSVznYDHj6BkbaE4SGx9oS","/ip4/18.167.69.68/udp/1234/p2p/16Uiu2HAm2k6ua2mGgvZ8rTMV8GhpW71aVzkQWy7D37TTDuLCpgmX"]|g' $FILE
|
||||
sed -i 's|network_boot_nodes = .*|network_boot_nodes = ["/ip4/54.219.26.22/udp/1234/p2p/16Uiu2HAmTVDGNhkHD98zDnJxQWu3i1FL1aFYeh9wiQTNu4pDCgps","/ip4/52.52.127.117/udp/1234/p2p/16Uiu2HAkzRjxK2gorngB1Xq84qDrT4hSVznYDHj6BkbaE4SGx9oS","/ip4/18.167.69.68/udp/1234/p2p/16Uiu2HAm2k6ua2mGgvZ8rTMV8GhpW71aVzkQWy7D37TTDuLCpgmX"]|g' $FILE
|
||||
|
@ -1 +1 @@
|
||||
bea58429e436e4952ae69235d9079cfc4ac5f3b3
|
||||
1e931c7b168f9bc2b55f7b8fd96946e35b373048
|
||||
|
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
@ -17,7 +17,10 @@ class ExampleTest(TestFramework):
|
||||
self.contract.submit(submissions)
|
||||
wait_until(lambda: self.contract.num_submissions() == 1)
|
||||
wait_until(lambda: client.zgs_get_file_info(data_root) is not None)
|
||||
wait_until(lambda: not client.zgs_get_file_info(data_root)["isCached"] and client.zgs_get_file_info(data_root)["uploadedSegNum"] == 1)
|
||||
wait_until(
|
||||
lambda: not client.zgs_get_file_info(data_root)["isCached"]
|
||||
and client.zgs_get_file_info(data_root)["uploadedSegNum"] == 1
|
||||
)
|
||||
client.zgs_upload_segment(segments[1])
|
||||
wait_until(lambda: client.zgs_get_file_info(data_root)["finalized"])
|
||||
|
||||
|
@ -3,12 +3,11 @@ import os
|
||||
from web3 import Web3
|
||||
|
||||
ZGS_CONFIG = {
|
||||
"network_enr_address": "127.0.0.1",
|
||||
"log_config_file": "log_config",
|
||||
"confirmation_block_count": 1,
|
||||
"discv5_disable_ip_limit": True,
|
||||
"network_peer_manager": {
|
||||
"heartbeat_interval": "1s"
|
||||
},
|
||||
"network_peer_manager": {"heartbeat_interval": "1s"},
|
||||
"router": {
|
||||
"private_ip_enabled": True,
|
||||
},
|
||||
@ -21,7 +20,7 @@ ZGS_CONFIG = {
|
||||
"auto_sync_idle_interval": "1s",
|
||||
"sequential_find_peer_timeout": "10s",
|
||||
"random_find_peer_timeout": "10s",
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
CONFIG_DIR = os.path.dirname(__file__)
|
||||
@ -74,11 +73,12 @@ TX_PARAMS1 = {
|
||||
NO_SEAL_FLAG = 0x1
|
||||
NO_MERKLE_PROOF_FLAG = 0x2
|
||||
|
||||
|
||||
def update_config(default: dict, custom: dict):
|
||||
"""
|
||||
Supports to update configurations with dict value.
|
||||
"""
|
||||
for (key, value) in custom.items():
|
||||
for key, value in custom.items():
|
||||
if default.get(key) is None or type(value) != dict:
|
||||
default[key] = value
|
||||
else:
|
||||
|
@@ -20,17 +20,15 @@ class CrashTest(TestFramework):

segment = submit_data(self.nodes[0], chunk_data)
self.log.info("segment: %s", segment)
wait_until(lambda: self.nodes[0].zgs_get_file_info(data_root)["finalized"] is True)
wait_until(
lambda: self.nodes[0].zgs_get_file_info(data_root)["finalized"] is True
)

for i in range(1, self.num_nodes):
wait_until(
lambda: self.nodes[i].zgs_get_file_info(data_root) is not None
)
wait_until(lambda: self.nodes[i].zgs_get_file_info(data_root) is not None)
self.nodes[i].admin_start_sync_file(0)
self.log.info("wait for node: %s", i)
wait_until(
lambda: self.nodes[i].zgs_get_file_info(data_root)["finalized"]
)
wait_until(lambda: self.nodes[i].zgs_get_file_info(data_root)["finalized"])

# 2: first node runnging, other nodes killed
self.log.info("kill node")
@@ -56,22 +54,16 @@ class CrashTest(TestFramework):
for i in range(2, self.num_nodes):
self.start_storage_node(i)
self.nodes[i].wait_for_rpc_connection()
wait_until(
lambda: self.nodes[i].zgs_get_file_info(data_root) is not None
)
wait_until(lambda: self.nodes[i].zgs_get_file_info(data_root) is not None)
self.nodes[i].admin_start_sync_file(1)

self.nodes[i].stop(kill=True)
self.start_storage_node(i)
self.nodes[i].wait_for_rpc_connection()
wait_until(
lambda: self.nodes[i].zgs_get_file_info(data_root) is not None
)
wait_until(lambda: self.nodes[i].zgs_get_file_info(data_root) is not None)
self.nodes[i].admin_start_sync_file(1)

wait_until(
lambda: self.nodes[i].zgs_get_file_info(data_root)["finalized"]
)

wait_until(lambda: self.nodes[i].zgs_get_file_info(data_root)["finalized"])

# 4: node[1..] synced contract entries and killed
self.log.info("kill node 0")
@@ -96,13 +88,9 @@ class CrashTest(TestFramework):
self.log.info("wait for node: %s", i)
self.start_storage_node(i)
self.nodes[i].wait_for_rpc_connection()
wait_until(
lambda: self.nodes[i].zgs_get_file_info(data_root) is not None
)
wait_until(lambda: self.nodes[i].zgs_get_file_info(data_root) is not None)
self.nodes[i].admin_start_sync_file(2)
wait_until(
lambda: self.nodes[i].zgs_get_file_info(data_root)["finalized"]
)
wait_until(lambda: self.nodes[i].zgs_get_file_info(data_root)["finalized"])

# 5: node[1..] synced contract entries and killed, sync disorder
self.nodes[0].stop(kill=True)
@@ -137,21 +125,13 @@ class CrashTest(TestFramework):
self.log.info("wait for node: %s", i)
self.start_storage_node(i)
self.nodes[i].wait_for_rpc_connection()
wait_until(
lambda: self.nodes[i].zgs_get_file_info(data_root1) is not None
)
wait_until(lambda: self.nodes[i].zgs_get_file_info(data_root1) is not None)
self.nodes[i].admin_start_sync_file(4)
wait_until(
lambda: self.nodes[i].zgs_get_file_info(data_root1)["finalized"]
)
wait_until(lambda: self.nodes[i].zgs_get_file_info(data_root1)["finalized"])

wait_until(
lambda: self.nodes[i].zgs_get_file_info(data_root) is not None
)
wait_until(lambda: self.nodes[i].zgs_get_file_info(data_root) is not None)
self.nodes[i].admin_start_sync_file(3)
wait_until(
lambda: self.nodes[i].zgs_get_file_info(data_root)["finalized"]
)
wait_until(lambda: self.nodes[i].zgs_get_file_info(data_root)["finalized"])


if __name__ == "__main__":
@@ -45,13 +45,9 @@ class FuzzTest(TestFramework):
lock.release()

log.info("submit data via client %s", idx)
wait_until(
lambda: nodes[idx].zgs_get_file_info(data_root) is not None
)
wait_until(lambda: nodes[idx].zgs_get_file_info(data_root) is not None)
segment = submit_data(nodes[idx], chunk_data)
wait_until(
lambda: nodes[idx].zgs_get_file_info(data_root)["finalized"]
)
wait_until(lambda: nodes[idx].zgs_get_file_info(data_root)["finalized"])

lock.acquire()
nodes_index.append(idx)
@@ -65,17 +61,15 @@ class FuzzTest(TestFramework):
lambda: nodes[idx].zgs_get_file_info(data_root) is not None
)

def wait_finalized():
def wait_finalized():
ret = nodes[idx].zgs_get_file_info(data_root)
if ret["finalized"]:
return True
else:
nodes[idx].admin_start_sync_file(ret['tx']['seq'])
nodes[idx].admin_start_sync_file(ret["tx"]["seq"])
return False

wait_until(
lambda: wait_finalized(), timeout = 180
)
wait_until(lambda: wait_finalized(), timeout=180)

def run_small_chunk_size(nodes, contract, log):
sizes = [i for i in range(1, SAMLL_SIZE + 1)]
@@ -84,7 +78,7 @@ class FuzzTest(TestFramework):
run_chunk_size(sizes, nodes, contract, log)

def run_large_chunk_size(nodes, contract, log):
sizes = [i for i in range(256 * 1024 * 256 - LARGE_SIZE, 256 * 1024 * 256 )]
sizes = [i for i in range(256 * 1024 * 256 - LARGE_SIZE, 256 * 1024 * 256)]
random.shuffle(sizes)

run_chunk_size(sizes, nodes, contract, log)
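One detail worth noting in the fuzz test above: wait_finalized doubles as a retry loop. If the file is not yet finalized, it re-issues admin_start_sync_file with the file's transaction sequence and returns False, so the surrounding wait_until keeps polling. The same pattern lifted out of the test (node and data_root stand in for the test's locals):

    def make_wait_finalized(node, data_root):
        def wait_finalized():
            ret = node.zgs_get_file_info(data_root)
            if ret["finalized"]:
                return True
            # Not finalized yet: kick the sync again, then let wait_until poll once more.
            node.admin_start_sync_file(ret["tx"]["seq"])
            return False
        return wait_finalized

    # usage: wait_until(make_wait_finalized(node, data_root), timeout=180)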
@@ -18,7 +18,6 @@ class LongTimeMineTest(TestFramework):
self.mine_period = 15
self.launch_wait_seconds = 15


def submit_data(self, item, size):
submissions_before = self.contract.num_submissions()
client = self.nodes[0]
@@ -44,7 +43,10 @@ class LongTimeMineTest(TestFramework):
self.submit_data(b"\x11", 2000)

self.log.info("Start mine")
wait_until(lambda: int(blockchain.eth_blockNumber(), 16) > self.mine_period, timeout=180)
wait_until(
lambda: int(blockchain.eth_blockNumber(), 16) > self.mine_period,
timeout=180,
)

self.log.info("Wait for the first mine answer")
wait_until(lambda: self.mine_contract.last_mined_epoch() == 1)
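The int(blockchain.eth_blockNumber(), 16) conversions here and throughout follow the Ethereum JSON-RPC convention that quantities are returned as 0x-prefixed hex strings:

    # eth_blockNumber returns e.g. "0x10"; int(..., 16) accepts the 0x prefix.
    assert int("0x10", 16) == 16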
@@ -15,7 +15,11 @@ class MineTest(TestFramework):
}
self.mine_period = int(45 / self.block_time)
self.launch_wait_seconds = 15
self.log.info("Contract Info: Est. block time %.2f, Mine period %d", self.block_time, self.mine_period)
self.log.info(
"Contract Info: Est. block time %.2f, Mine period %d",
self.block_time,
self.mine_period,
)

def submit_data(self, item, size):
submissions_before = self.contract.num_submissions()
@@ -37,7 +41,11 @@ class MineTest(TestFramework):

first_block = self.contract.first_block()
self.log.info("Current block number %d", int(blockchain.eth_blockNumber(), 16))
self.log.info("Flow deployment block number %d, epoch 1 start %d", first_block, first_block + self.mine_period)
self.log.info(
"Flow deployment block number %d, epoch 1 start %d",
first_block,
first_block + self.mine_period,
)
wait_until(lambda: self.contract.epoch() >= 1, timeout=180)

quality = int(2**256 / 100 / estimate_st_performance())
@@ -54,27 +62,39 @@ class MineTest(TestFramework):
self.contract.update_context()

self.log.info("Wait for the first mine answer")
wait_until(lambda: self.mine_contract.last_mined_epoch() == start_epoch + 1 and not self.mine_contract.can_submit(), timeout=180)
wait_until(
lambda: self.mine_contract.last_mined_epoch() == start_epoch + 1
and not self.mine_contract.can_submit(),
timeout=180,
)

self.log.info("Wait for the second mine context release")
wait_until(lambda: self.contract.epoch() >= start_epoch + 2, timeout=180)
self.contract.update_context()

self.log.info("Wait for the second mine answer")
wait_until(lambda: self.mine_contract.last_mined_epoch() == start_epoch + 2 and not self.mine_contract.can_submit(), timeout=180)
wait_until(
lambda: self.mine_contract.last_mined_epoch() == start_epoch + 2
and not self.mine_contract.can_submit(),
timeout=180,
)

self.nodes[0].miner_stop()
self.log.info("Wait for the third mine context release")
wait_until(lambda: self.contract.epoch() >= start_epoch + 3, timeout=180)
self.contract.update_context()


self.log.info("Submit the second data chunk")
self.submit_data(b"\x22", 2000)
# Now the storage node should have the latest flow, but the mining context is using an old one.
self.nodes[0].miner_start()

self.log.info("Wait for the third mine answer")
wait_until(lambda: self.mine_contract.last_mined_epoch() == start_epoch + 3 and not self.mine_contract.can_submit(), timeout=180)
wait_until(
lambda: self.mine_contract.last_mined_epoch() == start_epoch + 3
and not self.mine_contract.can_submit(),
timeout=180,
)

self.log.info("Current block number %d", int(blockchain.eth_blockNumber(), 16))
@@ -2,13 +2,18 @@
from test_framework.test_framework import TestFramework
from config.node_config import MINER_ID, GENESIS_PRIV_KEY
from utility.submission import create_submission, submit_data
from utility.utils import wait_until, assert_equal, assert_greater_than, estimate_st_performance
from utility.utils import (
wait_until,
assert_equal,
assert_greater_than,
estimate_st_performance,
)
from test_framework.blockchain_node import BlockChainNodeType
import time

import math

PRICE_PER_SECTOR = math.ceil(10 * (10 ** 18) / (2 ** 30) * 256 / 12)
PRICE_PER_SECTOR = math.ceil(10 * (10**18) / (2**30) * 256 / 12)
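Both spellings of PRICE_PER_SECTOR compute the same value; black merely normalizes `10 ** 18` to `10**18`. Reading the formula, 10 * 10**18 wei prices one GiB (2**30 bytes), 256 is the sector size in bytes, and the trailing /12 looks like a per-interval scaling factor; that last reading is an inference, not something stated in the diff. A quick check of the arithmetic:

    import math

    price_per_sector = math.ceil(10 * (10**18) / (2**30) * 256 / 12)
    print(price_per_sector)  # 198682149252, roughly 1.99e11 wei per 256-byte sector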
class MineTest(TestFramework):
@@ -23,17 +28,21 @@ class MineTest(TestFramework):
self.enable_market = True
self.mine_period = int(50 / self.block_time)
self.launch_wait_seconds = 15
self.log.info("Contract Info: Est. block time %.2f, Mine period %d", self.block_time, self.mine_period)
self.log.info(
"Contract Info: Est. block time %.2f, Mine period %d",
self.block_time,
self.mine_period,
)

def submit_data(self, item, size, no_submit = False):
def submit_data(self, item, size, no_submit=False):
submissions_before = self.contract.num_submissions()
client = self.nodes[0]
chunk_data = item * 256 * size
submissions, data_root = create_submission(chunk_data)
value = int(size * PRICE_PER_SECTOR * 1.1)
self.contract.submit(submissions, tx_prarams = {"value": value})
self.contract.submit(submissions, tx_prarams={"value": value})
wait_until(lambda: self.contract.num_submissions() == submissions_before + 1)


if not no_submit:
wait_until(lambda: client.zgs_get_file_info(data_root) is not None)
segment = submit_data(client, chunk_data)
@@ -48,11 +57,15 @@ class MineTest(TestFramework):
difficulty = int(2**256 / 5 / estimate_st_performance())
self.mine_contract.set_quality(difficulty)

SECTORS_PER_PRICING = int(8 * ( 2 ** 30 ) / 256)
SECTORS_PER_PRICING = int(8 * (2**30) / 256)

first_block = self.contract.first_block()
self.log.info("Current block number %d", int(blockchain.eth_blockNumber(), 16))
self.log.info("Flow deployment block number %d, epoch 1 start %d, wait for epoch 1 start", first_block, first_block + self.mine_period)
self.log.info(
"Flow deployment block number %d, epoch 1 start %d, wait for epoch 1 start",
first_block,
first_block + self.mine_period,
)
wait_until(lambda: self.contract.epoch() >= 1, timeout=180)

self.log.info("Submit the actual data chunk (256 MB)")
@@ -63,8 +76,12 @@ class MineTest(TestFramework):
# wait_until(lambda: self.contract.epoch() >= 1, timeout=180)

start_epoch = self.contract.epoch()

self.log.info("Submission Done, epoch is %d, current block number %d", start_epoch, int(blockchain.eth_blockNumber(), 16))

self.log.info(
"Submission Done, epoch is %d, current block number %d",
start_epoch,
int(blockchain.eth_blockNumber(), 16),
)

self.log.info("Wait for mine context release")
wait_until(lambda: self.contract.epoch() >= start_epoch + 1, timeout=180)
@@ -72,32 +89,43 @@ class MineTest(TestFramework):
self.contract.update_context()

self.log.info("Wait for mine answer")
wait_until(lambda: self.mine_contract.last_mined_epoch() == start_epoch + 1 and not self.mine_contract.can_submit(), timeout=120)
wait_until(
lambda: self.mine_contract.last_mined_epoch() == start_epoch + 1
and not self.mine_contract.can_submit(),
timeout=120,
)

rewards = self.reward_contract.reward_distributes()
assert_equal(len(rewards), 2)
assert_equal(len(rewards), 4)
firstReward = rewards[0].args.amount
self.log.info("Received reward %d Gwei", firstReward / (10**9))

self.reward_contract.donate(10000 * 10 ** 18)
self.reward_contract.donate(10000 * 10**18)
self.log.info("Donation Done")
self.log.info("Submit the data hash only (8 GB)")
self.submit_data(b"\x11", int(SECTORS_PER_PRICING), no_submit=True)
current_epoch = self.contract.epoch()
assert_equal(current_epoch, start_epoch + 1);
self.log.info("Sumission Done, epoch is %d, current block number %d", self.contract.epoch(), int(blockchain.eth_blockNumber(), 16))
assert_equal(current_epoch, start_epoch + 1)
self.log.info(
"Sumission Done, epoch is %d, current block number %d",
self.contract.epoch(),
int(blockchain.eth_blockNumber(), 16),
)

self.log.info("Wait for mine context release")
wait_until(lambda: self.contract.epoch() >= start_epoch + 2, timeout=180)
self.contract.update_context()

self.log.info("Wait for mine answer")
wait_until(lambda: self.mine_contract.last_mined_epoch() == start_epoch + 2 and not self.mine_contract.can_submit())
wait_until(
lambda: self.mine_contract.last_mined_epoch() == start_epoch + 2
and not self.mine_contract.can_submit()
)
assert_equal(self.contract.epoch(), start_epoch + 2)

rewards = self.reward_contract.reward_distributes()
assert_equal(len(rewards), 4)
secondReward = rewards[2].args.amount
assert_equal(len(rewards), 8)
secondReward = rewards[4].args.amount
self.log.info("Received reward %d Gwei", secondReward / (10**9))

assert_greater_than(secondReward, 100 * firstReward / (start_epoch + 1))
@@ -7,6 +7,7 @@ from config.node_config import ZGS_KEY_FILE, ZGS_NODEID
from test_framework.test_framework import TestFramework
from utility.utils import p2p_port


class NetworkDiscoveryTest(TestFramework):
"""
This is to test whether community nodes could connect to each other via UDP discovery.
@@ -24,7 +25,6 @@ class NetworkDiscoveryTest(TestFramework):
"network_enr_address": "127.0.0.1",
"network_enr_tcp_port": bootnode_port,
"network_enr_udp_port": bootnode_port,

# disable trusted nodes
"network_libp2p_nodes": [],
}
@@ -37,7 +37,6 @@ class NetworkDiscoveryTest(TestFramework):
"network_enr_address": "127.0.0.1",
"network_enr_tcp_port": p2p_port(i),
"network_enr_udp_port": p2p_port(i),

# disable trusted nodes and enable bootnodes
"network_libp2p_nodes": [],
"network_boot_nodes": bootnodes,
@@ -57,7 +56,13 @@ class NetworkDiscoveryTest(TestFramework):
total_connected += info["connectedPeers"]
self.log.info(
"Node[%s] peers: total = %s, banned = %s, disconnected = %s, connected = %s (in = %s, out = %s)",
i, info["totalPeers"], info["bannedPeers"], info["disconnectedPeers"], info["connectedPeers"], info["connectedIncomingPeers"], info["connectedOutgoingPeers"],
i,
info["totalPeers"],
info["bannedPeers"],
info["disconnectedPeers"],
info["connectedPeers"],
info["connectedIncomingPeers"],
info["connectedOutgoingPeers"],
)

if total_connected >= self.num_nodes * (self.num_nodes - 1):
@@ -66,5 +71,6 @@ class NetworkDiscoveryTest(TestFramework):
self.log.info("====================================")
self.log.info("All nodes connected to each other successfully")


if __name__ == "__main__":
NetworkDiscoveryTest().main()
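On the exit condition above: connectedPeers is reported per node, so every TCP link is counted once by each endpoint. A complete graph over n nodes therefore produces n*(n-1) directed entries, which is exactly the threshold the loop checks:

    def fully_connected(num_nodes, total_connected):
        # n*(n-1)/2 links, each reported by both endpoints -> n*(n-1) entries.
        return total_connected >= num_nodes * (num_nodes - 1)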
@@ -7,6 +7,7 @@ from config.node_config import ZGS_KEY_FILE, ZGS_NODEID
from test_framework.test_framework import TestFramework
from utility.utils import p2p_port


class NetworkDiscoveryUpgradeTest(TestFramework):
"""
This is to test that low version community nodes could not connect to bootnodes.
@@ -24,7 +25,6 @@ class NetworkDiscoveryUpgradeTest(TestFramework):
"network_enr_address": "127.0.0.1",
"network_enr_tcp_port": bootnode_port,
"network_enr_udp_port": bootnode_port,

# disable trusted nodes
"network_libp2p_nodes": [],
}
@@ -37,11 +37,9 @@ class NetworkDiscoveryUpgradeTest(TestFramework):
"network_enr_address": "127.0.0.1",
"network_enr_tcp_port": p2p_port(i),
"network_enr_udp_port": p2p_port(i),

# disable trusted nodes and enable bootnodes
"network_libp2p_nodes": [],
"network_boot_nodes": bootnodes,

# disable network identity in ENR
"discv5_disable_enr_network_id": True,
}
@@ -57,7 +55,13 @@ class NetworkDiscoveryUpgradeTest(TestFramework):
total_connected += info["connectedPeers"]
self.log.info(
"Node[%s] peers: total = %s, banned = %s, disconnected = %s, connected = %s (in = %s, out = %s)",
i, info["totalPeers"], info["bannedPeers"], info["disconnectedPeers"], info["connectedPeers"], info["connectedIncomingPeers"], info["connectedOutgoingPeers"],
i,
info["totalPeers"],
info["bannedPeers"],
info["disconnectedPeers"],
info["connectedPeers"],
info["connectedIncomingPeers"],
info["connectedOutgoingPeers"],
)

# ENR incompatible and should not discover each other for TCP connection
@@ -66,5 +70,6 @@ class NetworkDiscoveryUpgradeTest(TestFramework):
self.log.info("====================================")
self.log.info("ENR incompatible nodes do not connect to each other")


if __name__ == "__main__":
NetworkDiscoveryUpgradeTest().main()
@@ -7,6 +7,7 @@ from config.node_config import ZGS_KEY_FILE, ZGS_NODEID
from test_framework.test_framework import TestFramework
from utility.utils import p2p_port


class NetworkTcpShardTest(TestFramework):
"""
This is to test TCP connection for shard config mismatched peers of UDP discovery.
@@ -24,12 +25,10 @@ class NetworkTcpShardTest(TestFramework):
"network_enr_address": "127.0.0.1",
"network_enr_tcp_port": bootnode_port,
"network_enr_udp_port": bootnode_port,

# disable trusted nodes
"network_libp2p_nodes": [],

# custom shard config
"shard_position": "0/4"
"shard_position": "0/4",
}

# setup node 1 & 2 as community nodes
@@ -40,13 +39,11 @@ class NetworkTcpShardTest(TestFramework):
"network_enr_address": "127.0.0.1",
"network_enr_tcp_port": p2p_port(i),
"network_enr_udp_port": p2p_port(i),

# disable trusted nodes and enable bootnodes
"network_libp2p_nodes": [],
"network_boot_nodes": bootnodes,

# custom shard config
"shard_position": f"{i}/4"
"shard_position": f"{i}/4",
}

def run_test(self):
@@ -60,7 +57,13 @@ class NetworkTcpShardTest(TestFramework):
info = self.nodes[i].rpc.admin_getNetworkInfo()
self.log.info(
"Node[%s] peers: total = %s, banned = %s, disconnected = %s, connected = %s (in = %s, out = %s)",
i, info["totalPeers"], info["bannedPeers"], info["disconnectedPeers"], info["connectedPeers"], info["connectedIncomingPeers"], info["connectedOutgoingPeers"],
i,
info["totalPeers"],
info["bannedPeers"],
info["disconnectedPeers"],
info["connectedPeers"],
info["connectedIncomingPeers"],
info["connectedOutgoingPeers"],
)

if i == timeout_secs - 1:
@@ -72,5 +75,6 @@ class NetworkTcpShardTest(TestFramework):
self.log.info("====================================")
self.log.info("All nodes discovered but not connected for each other")


if __name__ == "__main__":
NetworkTcpShardTest().main()
@@ -23,7 +23,11 @@ class PrunerTest(TestFramework):
self.mine_period = int(45 / self.block_time)
self.lifetime_seconds = 240
self.launch_wait_seconds = 15
self.log.info("Contract Info: Est. block time %.2f, Mine period %d", self.block_time, self.mine_period)
self.log.info(
"Contract Info: Est. block time %.2f, Mine period %d",
self.block_time,
self.mine_period,
)

def run_test(self):
client = self.nodes[0]
@@ -31,7 +35,10 @@ class PrunerTest(TestFramework):
chunk_data = b"\x02" * 16 * 256 * 1024
# chunk_data = b"\x02" * 5 * 1024 * 1024 * 1024
submissions, data_root = create_submission(chunk_data)
self.contract.submit(submissions, tx_prarams = {"value": int(len(chunk_data) / 256 * PRICE_PER_SECTOR * 1.1)})
self.contract.submit(
submissions,
tx_prarams={"value": int(len(chunk_data) / 256 * PRICE_PER_SECTOR * 1.1)},
)
wait_until(lambda: self.contract.num_submissions() == 1)
wait_until(lambda: client.zgs_get_file_info(data_root) is not None)
@@ -7,6 +7,7 @@ from utility.submission import create_submission
from utility.submission import submit_data
from utility.utils import wait_until


class RandomTest(TestFramework):
def setup_params(self):
self.num_blockchain_nodes = 1
@@ -32,14 +33,18 @@ class RandomTest(TestFramework):
else:
size = random.randint(0, max_size)
no_data = random.random() <= no_data_ratio
self.log.info(f"choose {chosen_node}, seq={i}, size={size}, no_data={no_data}")
self.log.info(
f"choose {chosen_node}, seq={i}, size={size}, no_data={no_data}"
)

client = self.nodes[chosen_node]
chunk_data = random.randbytes(size)
submissions, data_root = create_submission(chunk_data)
self.contract.submit(submissions)
wait_until(lambda: self.contract.num_submissions() == i + 1)
wait_until(lambda: client.zgs_get_file_info(data_root) is not None, timeout=120)
wait_until(
lambda: client.zgs_get_file_info(data_root) is not None, timeout=120
)
if not no_data:
submit_data(client, chunk_data)
wait_until(lambda: client.zgs_get_file_info(data_root)["finalized"])
@@ -47,8 +52,17 @@ class RandomTest(TestFramework):
for node_index in range(len(self.nodes)):
if node_index != chosen_node:
self.log.debug(f"check {node_index}")
wait_until(lambda: self.nodes[node_index].zgs_get_file_info(data_root) is not None, timeout=300)
wait_until(lambda: self.nodes[node_index].zgs_get_file_info(data_root)["finalized"], timeout=300)
wait_until(
lambda: self.nodes[node_index].zgs_get_file_info(data_root)
is not None,
timeout=300,
)
wait_until(
lambda: self.nodes[node_index].zgs_get_file_info(data_root)[
"finalized"
],
timeout=300,
)
# TODO(zz): This is a temp solution to trigger auto sync after all nodes started.
if i >= tx_count - 2:
continue
@@ -72,7 +86,10 @@ class RandomTest(TestFramework):
if not no_data:
for node in self.nodes:
self.log.debug(f"check {data_root}, {node.index}")
wait_until(lambda: node.zgs_get_file_info(data_root)["finalized"], timeout=300)
wait_until(
lambda: node.zgs_get_file_info(data_root)["finalized"],
timeout=300,
)


if __name__ == "__main__":
@@ -32,8 +32,6 @@ class RootConsistencyTest(TestFramework):
assert_equal(contract_length, expected_length)
assert_equal(contract_root, node_root[2:])


def run_test(self):
self.assert_flow_status(1)

@@ -48,7 +46,6 @@ class RootConsistencyTest(TestFramework):

self.submit_data(b"\x13", 512 + 256)
self.assert_flow_status(1024 + 512 + 256)


if __name__ == "__main__":
@@ -29,6 +29,7 @@ class RpcTest(TestFramework):

wait_until(lambda: client1.zgs_get_file_info(data_root) is not None)
assert_equal(client1.zgs_get_file_info(data_root)["finalized"], False)
assert_equal(client1.zgs_get_file_info(data_root)["pruned"], False)

wait_until(lambda: client2.zgs_get_file_info(data_root) is not None)
assert_equal(client2.zgs_get_file_info(data_root)["finalized"], False)
@@ -37,17 +38,13 @@ class RpcTest(TestFramework):
self.log.info("segment: %s", segment)

wait_until(lambda: client1.zgs_get_file_info(data_root)["finalized"])
assert_equal(
client1.zgs_download_segment(data_root, 0, 1), segment[0]["data"]
)
assert_equal(client1.zgs_download_segment(data_root, 0, 1), segment[0]["data"])

client2.admin_start_sync_file(0)
wait_until(lambda: client2.sync_status_is_completed_or_unknown(0))

wait_until(lambda: client2.zgs_get_file_info(data_root)["finalized"])
assert_equal(
client2.zgs_download_segment(data_root, 0, 1), segment[0]["data"]
)
assert_equal(client2.zgs_download_segment(data_root, 0, 1), segment[0]["data"])

self.__test_upload_file_with_cli(client1)

@@ -89,9 +86,7 @@ class RpcTest(TestFramework):
)
)

wait_until(
lambda: self.nodes[i].zgs_get_file_info(root)["finalized"]
)
wait_until(lambda: self.nodes[i].zgs_get_file_info(root)["finalized"])

assert_equal(
client1.zgs_download_segment(root, 0, 2),
@@ -41,7 +41,11 @@ class SubmissionTest(TestFramework):
for tx_offset in range(same_root_tx_count + 1):
tx_seq = next_tx_seq - 1 - tx_offset
# old txs are finalized after finalizing the new tx, so we may need to wait here.
wait_until(lambda: self.nodes[0].zgs_get_file_info_by_tx_seq(tx_seq)["finalized"])
wait_until(
lambda: self.nodes[0].zgs_get_file_info_by_tx_seq(tx_seq)[
"finalized"
]
)

# Send tx after uploading data
for _ in range(same_root_tx_count):
@@ -57,7 +61,10 @@ class SubmissionTest(TestFramework):

client = self.nodes[node_idx]
wait_until(lambda: client.zgs_get_file_info_by_tx_seq(tx_seq) is not None)
wait_until(lambda: client.zgs_get_file_info_by_tx_seq(tx_seq)["finalized"] == data_finalized)
wait_until(
lambda: client.zgs_get_file_info_by_tx_seq(tx_seq)["finalized"]
== data_finalized
)

def submit_data(self, chunk_data, node_idx=0):
_, data_root = create_submission(chunk_data)
@@ -18,30 +18,30 @@ class ShardSubmitTest(TestFramework):
self.num_blockchain_nodes = 1
self.num_nodes = 4
self.zgs_node_configs[0] = {
"db_max_num_sectors": 2 ** 30,
"shard_position": "0/4"
"db_max_num_sectors": 2**30,
"shard_position": "0/4",
}
self.zgs_node_configs[1] = {
"db_max_num_sectors": 2 ** 30,
"shard_position": "1/4"
"db_max_num_sectors": 2**30,
"shard_position": "1/4",
}
self.zgs_node_configs[2] = {
"db_max_num_sectors": 2 ** 30,
"shard_position": "2/4"
"db_max_num_sectors": 2**30,
"shard_position": "2/4",
}
self.zgs_node_configs[3] = {
"db_max_num_sectors": 2 ** 30,
"shard_position": "3/4"
"db_max_num_sectors": 2**30,
"shard_position": "3/4",
}


def run_test(self):
data_size = [
256*960,
256*1024,
256 * 960,
256 * 1024,
2,
255,
256*960,
256*120,
256 * 960,
256 * 120,
256,
257,
1023,
@@ -77,5 +77,6 @@ class ShardSubmitTest(TestFramework):
submit_data(client, chunk_data)
wait_until(lambda: client.zgs_get_file_info(data_root)["finalized"])


if __name__ == "__main__":
ShardSubmitTest().main()
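The "k/n" strings assigned to shard_position above read as shard k of n. Under a modular sharding scheme that would mean the node keeps the sectors whose index is congruent to k modulo n; this semantics is an assumption from the test setup, not something the diff confirms. A hypothetical parser and membership check under that assumption:

    def parse_shard_position(position):
        shard_id, num_shard = map(int, position.split("/"))
        return shard_id, num_shard

    def in_shard(sector_index, position):
        # Assumed semantics: "k/n" keeps sectors with index % n == k.
        shard_id, num_shard = parse_shard_position(position)
        return sector_index % num_shard == shard_id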
@@ -13,16 +13,16 @@ class PrunerTest(TestFramework):
self.num_blockchain_nodes = 1
self.num_nodes = 4
self.zgs_node_configs[0] = {
"db_max_num_sectors": 2 ** 30,
"shard_position": "0/2"
"db_max_num_sectors": 2**30,
"shard_position": "0/2",
}
self.zgs_node_configs[1] = {
"db_max_num_sectors": 2 ** 30,
"shard_position": "1/2"
"db_max_num_sectors": 2**30,
"shard_position": "1/2",
}
self.zgs_node_configs[3] = {
"db_max_num_sectors": 2 ** 30,
"shard_position": "1/4"
"db_max_num_sectors": 2**30,
"shard_position": "1/4",
}
self.enable_market = True

@@ -31,7 +31,10 @@ class PrunerTest(TestFramework):

chunk_data = b"\x02" * 8 * 256 * 1024
submissions, data_root = create_submission(chunk_data)
self.contract.submit(submissions, tx_prarams = {"value": int(len(chunk_data) / 256 * PRICE_PER_SECTOR * 1.1)})
self.contract.submit(
submissions,
tx_prarams={"value": int(len(chunk_data) / 256 * PRICE_PER_SECTOR * 1.1)},
)
wait_until(lambda: self.contract.num_submissions() == 1)
wait_until(lambda: client.zgs_get_file_info(data_root) is not None)

@@ -57,10 +60,18 @@ class PrunerTest(TestFramework):
for i in range(len(segments)):
index_store = i % 2
index_empty = 1 - i % 2
seg0 = self.nodes[index_store].zgs_download_segment(data_root, i * 1024, (i + 1) * 1024)
seg1 = self.nodes[index_empty].zgs_download_segment(data_root, i * 1024, (i + 1) * 1024)
seg2 = self.nodes[2].zgs_download_segment(data_root, i * 1024, (i + 1) * 1024)
seg3 = self.nodes[3].zgs_download_segment(data_root, i * 1024, (i + 1) * 1024)
seg0 = self.nodes[index_store].zgs_download_segment(
data_root, i * 1024, (i + 1) * 1024
)
seg1 = self.nodes[index_empty].zgs_download_segment(
data_root, i * 1024, (i + 1) * 1024
)
seg2 = self.nodes[2].zgs_download_segment(
data_root, i * 1024, (i + 1) * 1024
)
seg3 = self.nodes[3].zgs_download_segment(
data_root, i * 1024, (i + 1) * 1024
)
# base64 encoding size
assert_equal(len(seg0), 349528)
assert_equal(seg1, None)
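The expected 349528 above is the base64 length of one full segment: 1024 chunks of 256 bytes is 262144 raw bytes, and base64 emits 4 output characters per 3-byte group, padded. A standalone check:

    import base64

    raw = b"\x02" * (1024 * 256)  # one segment: 1024 chunks x 256 bytes
    assert len(base64.b64encode(raw)) == 349528  # 4 * ceil(262144 / 3)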
@@ -5,6 +5,7 @@ import shutil
from test_framework.test_framework import TestFramework
from utility.utils import wait_until


class SnapshotTask(TestFramework):
def setup_params(self):
self.num_nodes = 2
@@ -28,11 +29,11 @@ class SnapshotTask(TestFramework):

# Start the last node to verify historical file sync
self.nodes[1].shutdown()
shutil.rmtree(os.path.join(self.nodes[1].data_dir, 'db/data_db'))

shutil.rmtree(os.path.join(self.nodes[1].data_dir, "db/data_db"))

self.start_storage_node(1)
self.nodes[1].wait_for_rpc_connection()

wait_until(lambda: self.nodes[1].zgs_get_file_info(data_root_1) is not None)
wait_until(lambda: self.nodes[1].zgs_get_file_info(data_root_1)["finalized"])
@@ -77,9 +77,7 @@ class SubmissionTest(TestFramework):
continue

# Wait for log entry before file sync, otherwise, admin_startSyncFile will be failed.
wait_until(
lambda: self.nodes[i].zgs_get_file_info(data_root) is not None
)
wait_until(lambda: self.nodes[i].zgs_get_file_info(data_root) is not None)

self.nodes[i].admin_start_sync_file(submission_index - 1)

@@ -89,15 +87,11 @@ class SubmissionTest(TestFramework):
)
)

wait_until(
lambda: self.nodes[i].zgs_get_file_info(data_root)["finalized"]
)
wait_until(lambda: self.nodes[i].zgs_get_file_info(data_root)["finalized"])

assert_equal(
base64.b64decode(
self.nodes[i]
.zgs_download_segment(data_root, 0, 1)
.encode("utf-8")
self.nodes[i].zgs_download_segment(data_root, 0, 1).encode("utf-8")
),
first_entry,
)
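As the b64decode call above shows, zgs_download_segment returns the segment as base64 text, so byte-level comparisons must decode it first. A small helper capturing that (the node parameter stands in for a test framework client):

    import base64

    def downloaded_bytes(node, data_root, start_index, end_index):
        # zgs_download_segment returns base64 text; decode it to raw bytes.
        encoded = node.zgs_download_segment(data_root, start_index, end_index)
        return base64.b64decode(encoded.encode("utf-8"))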
@@ -3,19 +3,14 @@
from test_framework.test_framework import TestFramework
from utility.utils import wait_until

class AutoRandomSyncV2Test(TestFramework):

class AutoSyncHistoricalTest(TestFramework):
def setup_params(self):
self.num_nodes = 4

# Enable random auto sync v2
# Enable auto sync
for i in range(self.num_nodes):
self.zgs_node_configs[i] = {
"sync": {
"auto_sync_enabled": True,
"max_sequential_workers": 0,
"max_random_workers": 3,
}
}
self.zgs_node_configs[i] = {"sync": {"auto_sync_enabled": True}}

def run_test(self):
# Stop the last node to verify historical file sync
@@ -28,17 +23,36 @@ class AutoRandomSyncV2Test(TestFramework):
# Files should be available on other nodes via auto sync
for i in range(1, self.num_nodes - 1):
wait_until(lambda: self.nodes[i].zgs_get_file_info(data_root_1) is not None)
wait_until(lambda: self.nodes[i].zgs_get_file_info(data_root_1)["finalized"])
wait_until(
lambda: self.nodes[i].zgs_get_file_info(data_root_1)["finalized"]
)
wait_until(lambda: self.nodes[i].zgs_get_file_info(data_root_2) is not None)
wait_until(lambda: self.nodes[i].zgs_get_file_info(data_root_2)["finalized"])
wait_until(
lambda: self.nodes[i].zgs_get_file_info(data_root_2)["finalized"]
)

# Start the last node to verify historical file sync
self.start_storage_node(self.num_nodes - 1)
self.nodes[self.num_nodes - 1].wait_for_rpc_connection()
wait_until(lambda: self.nodes[self.num_nodes - 1].zgs_get_file_info(data_root_1) is not None)
wait_until(lambda: self.nodes[self.num_nodes - 1].zgs_get_file_info(data_root_1)["finalized"])
wait_until(lambda: self.nodes[self.num_nodes - 1].zgs_get_file_info(data_root_2) is not None)
wait_until(lambda: self.nodes[self.num_nodes - 1].zgs_get_file_info(data_root_2)["finalized"])
wait_until(
lambda: self.nodes[self.num_nodes - 1].zgs_get_file_info(data_root_1)
is not None
)
wait_until(
lambda: self.nodes[self.num_nodes - 1].zgs_get_file_info(data_root_1)[
"finalized"
]
)
wait_until(
lambda: self.nodes[self.num_nodes - 1].zgs_get_file_info(data_root_2)
is not None
)
wait_until(
lambda: self.nodes[self.num_nodes - 1].zgs_get_file_info(data_root_2)[
"finalized"
]
)


if __name__ == "__main__":
AutoRandomSyncV2Test().main()
AutoSyncHistoricalTest().main()
@@ -1,32 +0,0 @@
#!/usr/bin/env python3

from test_framework.test_framework import TestFramework
from utility.utils import wait_until

class AutoRandomSyncTest(TestFramework):
def setup_params(self):
self.num_nodes = 2

# Enable random auto sync only
for i in range(self.num_nodes):
self.zgs_node_configs[i] = {
"sync": {
"auto_sync_enabled": True,
"max_sequential_workers": 0,
"max_random_workers": 3,
}
}

def run_test(self):
# Submit and upload files on node 0
data_root_1 = self.__upload_file__(0, 256 * 1024)
data_root_2 = self.__upload_file__(0, 256 * 1024)

# Files should be available on node 1 via auto sync
wait_until(lambda: self.nodes[1].zgs_get_file_info(data_root_1) is not None)
wait_until(lambda: self.nodes[1].zgs_get_file_info(data_root_1)["finalized"])
wait_until(lambda: self.nodes[1].zgs_get_file_info(data_root_2) is not None)
wait_until(lambda: self.nodes[1].zgs_get_file_info(data_root_2)["finalized"])

if __name__ == "__main__":
AutoRandomSyncTest().main()
@@ -1,32 +0,0 @@
#!/usr/bin/env python3

from test_framework.test_framework import TestFramework
from utility.utils import wait_until

class AutoSequentialSyncTest(TestFramework):
def setup_params(self):
self.num_nodes = 2

# Enable sequential auto sync only
for i in range(self.num_nodes):
self.zgs_node_configs[i] = {
"sync": {
"auto_sync_enabled": True,
"max_sequential_workers": 3,
"max_random_workers": 0,
}
}

def run_test(self):
# Submit and upload files on node 0
data_root_1 = self.__upload_file__(0, 256 * 1024)
data_root_2 = self.__upload_file__(0, 256 * 1024)

# Files should be available on node 1 via auto sync
wait_until(lambda: self.nodes[1].zgs_get_file_info(data_root_1) is not None)
wait_until(lambda: self.nodes[1].zgs_get_file_info(data_root_1)["finalized"])
wait_until(lambda: self.nodes[1].zgs_get_file_info(data_root_2) is not None)
wait_until(lambda: self.nodes[1].zgs_get_file_info(data_root_2)["finalized"])

if __name__ == "__main__":
AutoSequentialSyncTest().main()
@@ -3,19 +3,14 @@
from test_framework.test_framework import TestFramework
from utility.utils import wait_until


class AutoSyncTest(TestFramework):
def setup_params(self):
self.num_nodes = 2

# Enable auto sync
for i in range(self.num_nodes):
self.zgs_node_configs[i] = {
"sync": {
"auto_sync_enabled": True,
"max_sequential_workers": 3,
"max_random_workers": 3,
}
}
self.zgs_node_configs[i] = {"sync": {"auto_sync_enabled": True}}

def run_test(self):
# Submit and upload files on node 0
@@ -28,5 +23,6 @@ class AutoSyncTest(TestFramework):
wait_until(lambda: self.nodes[1].zgs_get_file_info(data_root_2) is not None)
wait_until(lambda: self.nodes[1].zgs_get_file_info(data_root_2)["finalized"])


if __name__ == "__main__":
AutoSyncTest().main()
78 tests/sync_chunks_test.py Normal file
@@ -0,0 +1,78 @@
#!/usr/bin/env python3

import random

from test_framework.test_framework import TestFramework
from utility.submission import data_to_segments
from utility.utils import assert_equal, wait_until


class SyncChunksTest(TestFramework):
"""
By default, auto_sync_enabled and sync_file_on_announcement_enabled are both false,
and chunks sync should be triggered by rpc.
"""

def setup_params(self):
self.num_nodes = 2

# enable find chunks topic
for i in range(self.num_nodes):
self.zgs_node_configs[i] = {"network_find_chunks_enabled": True}

def run_test(self):
client1 = self.nodes[0]
client2 = self.nodes[1]

# Prepare 3 segments to upload
chunk_data = random.randbytes(256 * 1024 * 3)
data_root = self.__submit_file__(chunk_data)

# Ensure log entry sync from blockchain node
wait_until(lambda: client1.zgs_get_file_info(data_root) is not None)
assert_equal(client1.zgs_get_file_info(data_root)["finalized"], False)

# Upload only 2nd segment to storage node
segments = data_to_segments(chunk_data)
self.log.info(
"segments: %s", [(s["root"], s["index"], s["proof"]) for s in segments]
)
assert client1.zgs_upload_segment(segments[1]) is None

# segment 0 is not able to download
assert client1.zgs_download_segment_decoded(data_root, 0, 1024) is None
# segment 1 is available to download
assert_equal(
client1.zgs_download_segment_decoded(data_root, 1024, 2048),
chunk_data[1024 * 256 : 2048 * 256],
)
# segment 2 is not able to download
assert client1.zgs_download_segment_decoded(data_root, 2048, 3072) is None

# Segment 1 should not be able to download on node 2
wait_until(lambda: client2.zgs_get_file_info(data_root) is not None)
assert_equal(client2.zgs_get_file_info(data_root)["finalized"], False)
assert client2.zgs_download_segment_decoded(data_root, 1024, 2048) is None

# Restart node 1 to check if the proof nodes are persisted.
self.stop_storage_node(0)
self.start_storage_node(0)
self.nodes[0].wait_for_rpc_connection()

# Trigger chunks sync by rpc
assert client2.admin_start_sync_chunks(0, 1024, 2048) is None
wait_until(lambda: client2.sync_status_is_completed_or_unknown(0))
wait_until(
lambda: client2.zgs_download_segment_decoded(data_root, 1024, 2048)
is not None
)

# Validate data
assert_equal(
client2.zgs_download_segment_decoded(data_root, 1024, 2048),
chunk_data[1024 * 256 : 2048 * 256],
)


if __name__ == "__main__":
SyncChunksTest().main()
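The 1024/2048 offsets used throughout sync_chunks_test.py are chunk indexes, not byte offsets: with 256-byte chunks and 1024 chunks per segment, segment i covers chunks [i*1024, (i+1)*1024), which matches the chunk_data[1024 * 256 : 2048 * 256] slice the test compares against. In code (constants inferred from the test's arithmetic):

    CHUNK_SIZE = 256       # bytes per chunk, per the test's slicing
    SEGMENT_CHUNKS = 1024  # chunks per segment

    def segment_byte_range(segment_index):
        start = segment_index * SEGMENT_CHUNKS * CHUNK_SIZE
        end = (segment_index + 1) * SEGMENT_CHUNKS * CHUNK_SIZE
        return start, end

    assert segment_byte_range(1) == (1024 * 256, 2048 * 256)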
53 tests/sync_file_test.py Normal file
@@ -0,0 +1,53 @@
#!/usr/bin/env python3

import time

from test_framework.test_framework import TestFramework
from utility.utils import assert_equal, wait_until


class SyncFileTest(TestFramework):
"""
By default, auto_sync_enabled and sync_file_on_announcement_enabled are both false,
and file sync should be triggered by rpc.
"""

def setup_params(self):
self.num_nodes = 2

def run_test(self):
client1 = self.nodes[0]
client2 = self.nodes[1]

# stop client2, preventing it from receiving AnnounceFile
client2.shutdown()

data_root = self.__upload_file__(0, 256 * 1024)

# restart client2
client2.start()
client2.wait_for_rpc_connection()

# File should not be auto sync on node 2 and there is no cached file locations
wait_until(lambda: client2.zgs_get_file_info(data_root) is not None)
time.sleep(3)
assert_equal(client2.zgs_get_file_info(data_root)["finalized"], False)
# file sync use ASK_FILE & ANSWER FILE protocol, and do not cache file announcement anymore.
# assert(client2.admin_get_file_location(0) is None)

# Trigger file sync by rpc
assert client2.admin_start_sync_file(0) is None
wait_until(lambda: client2.sync_status_is_completed_or_unknown(0))
wait_until(lambda: client2.zgs_get_file_info(data_root)["finalized"])
# file sync use ASK_FILE & ANSWER FILE protocol, and do not cache file announcement anymore.
# assert(client2.admin_get_file_location(0) is not None)

# Validate data
assert_equal(
client2.zgs_download_segment(data_root, 0, 1024),
client1.zgs_download_segment(data_root, 0, 1024),
)


if __name__ == "__main__":
SyncFileTest().main()
@@ -1,109 +0,0 @@
#!/usr/bin/env python3

import random
import time

from test_framework.test_framework import TestFramework
from utility.submission import data_to_segments
from utility.utils import (
assert_equal,
wait_until,
)

class SyncTest(TestFramework):
def setup_params(self):
self.num_nodes = 2

# enable find chunks topic
for i in range(self.num_nodes):
self.zgs_node_configs[i] = {
"network_find_chunks_enabled": True
}

def run_test(self):
# By default, auto_sync_enabled and sync_file_on_announcement_enabled are both false,
# and file or chunks sync should be triggered by rpc.
self.__test_sync_file_by_rpc()
self.__test_sync_chunks_by_rpc()

def __test_sync_file_by_rpc(self):
self.log.info("Begin to test file sync by rpc")

client1 = self.nodes[0]
client2 = self.nodes[1]

# stop client2, preventing it from receiving AnnounceFile
client2.shutdown()

data_root = self.__upload_file__(0, 256 * 1024)

# restart client2
client2.start()
client2.wait_for_rpc_connection()

# File should not be auto sync on node 2 and there is no cached file locations
wait_until(lambda: client2.zgs_get_file_info(data_root) is not None)
time.sleep(3)
assert_equal(client2.zgs_get_file_info(data_root)["finalized"], False)
# file sync use ASK_FILE & ANSWER FILE protocol, and do not cache file announcement anymore.
# assert(client2.admin_get_file_location(0) is None)

# Trigger file sync by rpc
assert(client2.admin_start_sync_file(0) is None)
wait_until(lambda: client2.sync_status_is_completed_or_unknown(0))
wait_until(lambda: client2.zgs_get_file_info(data_root)["finalized"])
# file sync use ASK_FILE & ANSWER FILE protocol, and do not cache file announcement anymore.
# assert(client2.admin_get_file_location(0) is not None)

# Validate data
assert_equal(
client2.zgs_download_segment(data_root, 0, 1024),
client1.zgs_download_segment(data_root, 0, 1024),
)

def __test_sync_chunks_by_rpc(self):
self.log.info("Begin to test chunks sync by rpc")

client1 = self.nodes[0]
client2 = self.nodes[1]

# Prepare 3 segments to upload
chunk_data = random.randbytes(256 * 1024 * 3)
data_root = self.__submit_file__(chunk_data)

# Ensure log entry sync from blockchain node
wait_until(lambda: client1.zgs_get_file_info(data_root) is not None)
assert_equal(client1.zgs_get_file_info(data_root)["finalized"], False)

# Upload only 2nd segment to storage node
segments = data_to_segments(chunk_data)
self.log.info("segments: %s", [(s["root"], s["index"], s["proof"]) for s in segments])
assert(client1.zgs_upload_segment(segments[1]) is None)

# segment 0 is not able to download
assert(client1.zgs_download_segment_decoded(data_root, 0, 1024) is None)
# segment 1 is available to download
assert_equal(client1.zgs_download_segment_decoded(data_root, 1024, 2048), chunk_data[1024*256:2048*256])
# segment 2 is not able to download
assert(client1.zgs_download_segment_decoded(data_root, 2048, 3072) is None)

# Segment 1 should not be able to download on node 2
wait_until(lambda: client2.zgs_get_file_info(data_root) is not None)
assert_equal(client2.zgs_get_file_info(data_root)["finalized"], False)
assert(client2.zgs_download_segment_decoded(data_root, 1024, 2048) is None)

# Restart node 1 to check if the proof nodes are persisted.
self.stop_storage_node(0)
self.start_storage_node(0)
self.nodes[0].wait_for_rpc_connection()

# Trigger chunks sync by rpc
assert(client2.admin_start_sync_chunks(1, 1024, 2048) is None)
wait_until(lambda: client2.sync_status_is_completed_or_unknown(1))
wait_until(lambda: client2.zgs_download_segment_decoded(data_root, 1024, 2048) is not None)

# Validate data
assert_equal(client2.zgs_download_segment_decoded(data_root, 1024, 2048), chunk_data[1024*256:2048*256])

if __name__ == "__main__":
SyncTest().main()
Some files were not shown because too many files have changed in this diff.