From 24d41f32b2a57323b36092fb407d1f164806d646 Mon Sep 17 00:00:00 2001
From: boqiu <82121246@qq.com>
Date: Tue, 20 Aug 2024 16:12:34 +0800
Subject: [PATCH 1/7] add metrics in log sync package

---
 Cargo.lock                                      | 2 ++
 node/log_entry_sync/Cargo.toml                  | 4 +++-
 node/log_entry_sync/src/sync_manager/metrics.rs | 7 +++++++
 node/log_entry_sync/src/sync_manager/mod.rs     | 9 +++++++--
 4 files changed, 19 insertions(+), 3 deletions(-)
 create mode 100644 node/log_entry_sync/src/sync_manager/metrics.rs

diff --git a/Cargo.lock b/Cargo.lock
index 714294b..3a85a3f 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -4538,6 +4538,8 @@ dependencies = [
  "futures-core",
  "futures-util",
  "jsonrpsee",
+ "lazy_static",
+ "metrics",
  "serde_json",
  "shared_types",
  "storage",
diff --git a/node/log_entry_sync/Cargo.toml b/node/log_entry_sync/Cargo.toml
index c5b90e2..72651f8 100644
--- a/node/log_entry_sync/Cargo.toml
+++ b/node/log_entry_sync/Cargo.toml
@@ -21,4 +21,6 @@ storage = { path = "../storage" }
 contract-interface = { path = "../../common/contract-interface" }
 futures-core = "0.3.28"
 futures-util = "0.3.28"
-thiserror = "1.0.44"
\ No newline at end of file
+thiserror = "1.0.44"
+lazy_static = "1.4.0"
+metrics = { workspace = true }
diff --git a/node/log_entry_sync/src/sync_manager/metrics.rs b/node/log_entry_sync/src/sync_manager/metrics.rs
new file mode 100644
index 0000000..37bee24
--- /dev/null
+++ b/node/log_entry_sync/src/sync_manager/metrics.rs
@@ -0,0 +1,7 @@
+use std::sync::Arc;
+
+use metrics::{register_timer, Timer};
+
+lazy_static::lazy_static! {
+    pub static ref STORE_PUT_TX: Arc<dyn Timer> = register_timer("log_entry_sync_store_put_tx");
+}
diff --git a/node/log_entry_sync/src/sync_manager/mod.rs b/node/log_entry_sync/src/sync_manager/mod.rs
index 0a07f40..c561676 100644
--- a/node/log_entry_sync/src/sync_manager/mod.rs
+++ b/node/log_entry_sync/src/sync_manager/mod.rs
@@ -11,7 +11,7 @@ use std::collections::BTreeMap;
 use std::fmt::Debug;
 use std::future::Future;
 use std::sync::Arc;
-use std::time::Duration;
+use std::time::{Duration, Instant};
 use storage::log_store::{tx_store::BlockHashAndSubmissionIndex, Store};
 use task_executor::{ShutdownReason, TaskExecutor};
 use tokio::sync::broadcast;
@@ -358,7 +358,11 @@ impl LogSyncManager {
     }
 
     async fn put_tx_inner(&mut self, tx: Transaction) -> bool {
-        if let Err(e) = self.store.put_tx(tx.clone()) {
+        let start_time = Instant::now();
+        let result = self.store.put_tx(tx.clone());
+        metrics::STORE_PUT_TX.update_since(start_time);
+
+        if let Err(e) = result {
             error!("put_tx error: e={:?}", e);
             false
         } else {
@@ -458,3 +462,4 @@ pub(crate) mod config;
 mod data_cache;
 mod log_entry_fetcher;
 mod log_query;
+mod metrics;
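Note: patch 1/7 introduces the timing idiom reused throughout this series: capture an Instant before the measured call, then report the elapsed time with update_since. A minimal sketch of the same pattern, assuming only the register_timer/update_since API visible in the diff (DEMO_TIMER and timed are illustrative names, not part of the patch):

    use std::sync::Arc;
    use std::time::Instant;

    use metrics::{register_timer, Timer};

    lazy_static::lazy_static! {
        // Registered once on first access, like STORE_PUT_TX above.
        static ref DEMO_TIMER: Arc<dyn Timer> = register_timer("demo_store_put_tx");
    }

    // Run an operation and report its wall-clock duration. Keeping the
    // result and timing before the error check mirrors put_tx_inner, so
    // failed calls are measured too.
    fn timed<T>(op: impl FnOnce() -> T) -> T {
        let start_time = Instant::now();
        let result = op();
        DEMO_TIMER.update_since(start_time);
        result
    }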
From 47a0e776bd301d36d3886ca988789ef830b28a2c Mon Sep 17 00:00:00 2001
From: boqiu <82121246@qq.com>
Date: Tue, 20 Aug 2024 16:20:28 +0800
Subject: [PATCH 2/7] update auto sync metrics

---
 node/sync/src/auto_sync/batcher_random.rs | 2 +-
 node/sync/src/auto_sync/batcher_serial.rs | 2 +-
 node/sync/src/auto_sync/metrics.rs        | 8 +++-----
 3 files changed, 5 insertions(+), 7 deletions(-)

diff --git a/node/sync/src/auto_sync/batcher_random.rs b/node/sync/src/auto_sync/batcher_random.rs
index 2578e2b..496b0ab 100644
--- a/node/sync/src/auto_sync/batcher_random.rs
+++ b/node/sync/src/auto_sync/batcher_random.rs
@@ -96,7 +96,7 @@ impl RandomBatcher {
         debug!(%tx_seq, ?sync_result, "Completed to sync file, state = {:?}", self.get_state().await);
 
         match sync_result {
-            SyncResult::Completed => metrics::RANDOM_SYNC_RESULT_COMPLETED.inc(1),
+            SyncResult::Completed => metrics::RANDOM_SYNC_RESULT_COMPLETED.mark(1),
             SyncResult::Failed => metrics::RANDOM_SYNC_RESULT_FAILED.inc(1),
             SyncResult::Timeout => metrics::RANDOM_SYNC_RESULT_TIMEOUT.inc(1),
         }
diff --git a/node/sync/src/auto_sync/batcher_serial.rs b/node/sync/src/auto_sync/batcher_serial.rs
index ff347ad..9322c72 100644
--- a/node/sync/src/auto_sync/batcher_serial.rs
+++ b/node/sync/src/auto_sync/batcher_serial.rs
@@ -257,7 +257,7 @@ impl SerialBatcher {
         info!(%tx_seq, ?sync_result, "Completed to sync file, state = {:?}", self.get_state().await);
 
         match sync_result {
-            SyncResult::Completed => metrics::SEQUENTIAL_SYNC_RESULT_COMPLETED.inc(1),
+            SyncResult::Completed => metrics::SEQUENTIAL_SYNC_RESULT_COMPLETED.mark(1),
             SyncResult::Failed => metrics::SEQUENTIAL_SYNC_RESULT_FAILED.inc(1),
             SyncResult::Timeout => metrics::SEQUENTIAL_SYNC_RESULT_TIMEOUT.inc(1),
         }
diff --git a/node/sync/src/auto_sync/metrics.rs b/node/sync/src/auto_sync/metrics.rs
index 39d9394..61d9c0f 100644
--- a/node/sync/src/auto_sync/metrics.rs
+++ b/node/sync/src/auto_sync/metrics.rs
@@ -1,6 +1,6 @@
 use std::sync::Arc;
 
-use metrics::{Counter, CounterUsize, Gauge, GaugeUsize, Histogram, Sample};
+use metrics::{register_meter, Counter, CounterUsize, Gauge, GaugeUsize, Histogram, Meter, Sample};
 
 lazy_static::lazy_static! {
     // sequential auto sync
@@ -9,8 +9,7 @@ lazy_static::lazy_static! {
     pub static ref SEQUENTIAL_STATE_TXS_PENDING: Arc<dyn Histogram> = Sample::ExpDecay(0.015).register("sync_auto_sequential_state_txs_pending", 1024);
     pub static ref SEQUENTIAL_STATE_GAP_NEXT_DB: Arc<dyn Gauge<usize>> = GaugeUsize::register("sync_auto_sequential_state_gap_next_db");
 
-    pub static ref SEQUENTIAL_SYNC_RESULT_TOTAL: Arc<dyn Counter<usize>> = CounterUsize::register("sync_auto_sequential_sync_result_total");
-    pub static ref SEQUENTIAL_SYNC_RESULT_COMPLETED: Arc<dyn Counter<usize>> = CounterUsize::register("sync_auto_sequential_sync_result_completed");
+    pub static ref SEQUENTIAL_SYNC_RESULT_COMPLETED: Arc<dyn Meter> = register_meter("sync_auto_sequential_sync_result_completed");
     pub static ref SEQUENTIAL_SYNC_RESULT_FAILED: Arc<dyn Counter<usize>> = CounterUsize::register("sync_auto_sequential_sync_result_failed");
     pub static ref SEQUENTIAL_SYNC_RESULT_TIMEOUT: Arc<dyn Counter<usize>> = CounterUsize::register("sync_auto_sequential_sync_result_timeout");
 
@@ -19,8 +18,7 @@ lazy_static::lazy_static! {
     pub static ref RANDOM_STATE_TXS_READY: Arc<dyn Histogram> = Sample::ExpDecay(0.015).register("sync_auto_random_state_txs_ready", 1024);
     pub static ref RANDOM_STATE_TXS_PENDING: Arc<dyn Histogram> = Sample::ExpDecay(0.015).register("sync_auto_random_state_txs_pending", 1024);
 
-    pub static ref RANDOM_SYNC_RESULT_TOTAL: Arc<dyn Counter<usize>> = CounterUsize::register("sync_auto_random_sync_result_total");
-    pub static ref RANDOM_SYNC_RESULT_COMPLETED: Arc<dyn Counter<usize>> = CounterUsize::register("sync_auto_random_sync_result_completed");
+    pub static ref RANDOM_SYNC_RESULT_COMPLETED: Arc<dyn Meter> = register_meter("sync_auto_random_sync_result_completed");
     pub static ref RANDOM_SYNC_RESULT_FAILED: Arc<dyn Counter<usize>> = CounterUsize::register("sync_auto_random_sync_result_failed");
     pub static ref RANDOM_SYNC_RESULT_TIMEOUT: Arc<dyn Counter<usize>> = CounterUsize::register("sync_auto_random_sync_result_timeout");
 }
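Note: patch 2/7 swaps the two *_SYNC_RESULT_COMPLETED counters for meters, which track both an event count and its rate; that also makes the separate *_SYNC_RESULT_TOTAL counters redundant, so they are dropped. A sketch under the same API (the DEMO_* names are illustrative):

    use std::sync::Arc;

    use metrics::{register_meter, Counter, CounterUsize, Meter};

    lazy_static::lazy_static! {
        // Meter: counts completions and exposes their throughput rate.
        static ref DEMO_COMPLETED: Arc<dyn Meter> = register_meter("demo_sync_result_completed");
        // Counter: a bare monotonic count is enough for failures.
        static ref DEMO_FAILED: Arc<dyn Counter<usize>> = CounterUsize::register("demo_sync_result_failed");
    }

    fn record(completed: bool) {
        if completed {
            DEMO_COMPLETED.mark(1); // meters use mark(), counters use inc()
        } else {
            DEMO_FAILED.inc(1);
        }
    }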
From 914fc785debf080093822de768b423a1f86ce42c Mon Sep 17 00:00:00 2001
From: boqiu <82121246@qq.com>
Date: Tue, 20 Aug 2024 17:32:03 +0800
Subject: [PATCH 3/7] Add metrics for completed file sync

---
 node/sync/src/controllers/metrics.rs |  8 ++++++++
 node/sync/src/controllers/mod.rs     |  1 +
 node/sync/src/controllers/serial.rs  | 11 ++++++-----
 node/sync/src/lib.rs                 |  8 ++++----
 4 files changed, 19 insertions(+), 9 deletions(-)
 create mode 100644 node/sync/src/controllers/metrics.rs

diff --git a/node/sync/src/controllers/metrics.rs b/node/sync/src/controllers/metrics.rs
new file mode 100644
index 0000000..85e231f
--- /dev/null
+++ b/node/sync/src/controllers/metrics.rs
@@ -0,0 +1,8 @@
+use std::sync::Arc;
+
+use metrics::{register_timer, Timer};
+
+lazy_static::lazy_static! {
+    pub static ref SERIAL_SYNC_FILE_COMPLETED: Arc<dyn Timer> = register_timer("sync_controllers_serial_sync_file_completed");
+    pub static ref SERIAL_SYNC_CHUNKS_COMPLETED: Arc<dyn Timer> = register_timer("sync_controllers_serial_sync_chunks_completed");
+}
diff --git a/node/sync/src/controllers/mod.rs b/node/sync/src/controllers/mod.rs
index ad2374f..0ade410 100644
--- a/node/sync/src/controllers/mod.rs
+++ b/node/sync/src/controllers/mod.rs
@@ -1,3 +1,4 @@
+mod metrics;
 mod peers;
 mod serial;
 
diff --git a/node/sync/src/controllers/serial.rs b/node/sync/src/controllers/serial.rs
index 91937ed..cb4f46f 100644
--- a/node/sync/src/controllers/serial.rs
+++ b/node/sync/src/controllers/serial.rs
@@ -1,6 +1,6 @@
 use crate::context::SyncNetworkContext;
 use crate::controllers::peers::{PeerState, SyncPeers};
-use crate::controllers::{FileSyncGoal, FileSyncInfo};
+use crate::controllers::{metrics, FileSyncGoal, FileSyncInfo};
 use crate::{Config, InstantWrapper};
 use file_location_cache::FileLocationCache;
 use libp2p::swarm::DialError;
@@ -311,15 +311,15 @@ impl SerialSyncController {
                 .peers
                 .add_new_peer_with_config(peer_id, addr.clone(), shard_config)
             {
-                info!(%self.tx_seq, %peer_id, %addr, "Found new peer");
+                debug!(%self.tx_seq, %peer_id, %addr, "Found new peer");
                 true
             } else {
                 // e.g. multiple `AnnounceFile` messages propagated
-                debug!(%self.tx_seq, %peer_id, %addr, "Found an existing peer");
+                trace!(%self.tx_seq, %peer_id, %addr, "Found an existing peer");
                 false
             }
         } else {
-            debug!(%self.tx_seq, %peer_id, %addr, "Found peer without shard config");
+            info!(%self.tx_seq, %peer_id, %addr, "Found peer without shard config");
             false
         }
     }
@@ -406,7 +406,6 @@ impl SerialSyncController {
     }
 
     pub async fn on_response(&mut self, from_peer_id: PeerId, response: ChunkArrayWithProof) {
-        debug!(%self.tx_seq, %from_peer_id, "Received RPC response");
         if self.handle_on_response_mismatch(from_peer_id) {
             return;
         }
@@ -511,6 +510,7 @@ impl SerialSyncController {
         // completed to download chunks
         if !self.goal.is_all_chunks() {
             self.state = SyncState::Completed;
+            metrics::SERIAL_SYNC_CHUNKS_COMPLETED.update_since(self.since.0);
             return;
         }
 
@@ -523,6 +523,7 @@ impl SerialSyncController {
             Ok(true) => {
                 info!(%self.tx_seq, "Succeeded to finalize file");
                 self.state = SyncState::Completed;
+                metrics::SERIAL_SYNC_FILE_COMPLETED.update_since(self.since.0);
             }
             Ok(false) => {
                 warn!(?self.tx_id, %self.tx_seq, "Transaction reverted during finalize_tx");
diff --git a/node/sync/src/lib.rs b/node/sync/src/lib.rs
index 3d53a1d..f2250ca 100644
--- a/node/sync/src/lib.rs
+++ b/node/sync/src/lib.rs
@@ -68,10 +68,10 @@ impl Default for Config {
             // serial sync config
             max_chunks_to_request: 2 * 1024,
             max_request_failures: 5,
-            peer_connect_timeout: Duration::from_secs(5),
-            peer_disconnect_timeout: Duration::from_secs(5),
-            peer_find_timeout: Duration::from_secs(5),
-            peer_chunks_download_timeout: Duration::from_secs(5),
+            peer_connect_timeout: Duration::from_secs(15),
+            peer_disconnect_timeout: Duration::from_secs(15),
+            peer_find_timeout: Duration::from_secs(30),
+            peer_chunks_download_timeout: Duration::from_secs(15),
             peer_wait_outgoing_connection_timeout: Duration::from_secs(10),
             peer_next_chunks_request_wait_timeout: Duration::from_secs(3),
 
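Note: both timers added in patch 3/7 are fed from self.since.0, the instant captured when the sync controller was created, so they record the end-to-end duration of a file (or chunk-range) sync rather than a single call. A sketch of that pattern with a plain Instant (DemoController is hypothetical; the real controller stores an InstantWrapper in its since field):

    use std::sync::Arc;
    use std::time::Instant;

    use metrics::{register_timer, Timer};

    lazy_static::lazy_static! {
        static ref DEMO_FILE_COMPLETED: Arc<dyn Timer> = register_timer("demo_file_completed");
    }

    struct DemoController {
        // Set once when the sync starts.
        since: Instant,
    }

    impl DemoController {
        fn on_completed(&self) {
            // Records the whole sync duration, creation to completion.
            DEMO_FILE_COMPLETED.update_since(self.since);
        }
    }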
From 4aae302df58418b773bfae6fe34becda38e92934 Mon Sep 17 00:00:00 2001
From: boqiu <82121246@qq.com>
Date: Tue, 20 Aug 2024 18:50:17 +0800
Subject: [PATCH 4/7] add more metrics for serial file sync

---
 node/sync/src/controllers/metrics.rs | 4 +++-
 node/sync/src/controllers/serial.rs  | 8 ++++++++
 2 files changed, 11 insertions(+), 1 deletion(-)

diff --git a/node/sync/src/controllers/metrics.rs b/node/sync/src/controllers/metrics.rs
index 85e231f..4e6df22 100644
--- a/node/sync/src/controllers/metrics.rs
+++ b/node/sync/src/controllers/metrics.rs
@@ -1,8 +1,10 @@
 use std::sync::Arc;
 
-use metrics::{register_timer, Timer};
+use metrics::{register_timer, Counter, CounterUsize, Histogram, Sample, Timer};
 
 lazy_static::lazy_static! {
     pub static ref SERIAL_SYNC_FILE_COMPLETED: Arc<dyn Timer> = register_timer("sync_controllers_serial_sync_file_completed");
     pub static ref SERIAL_SYNC_CHUNKS_COMPLETED: Arc<dyn Timer> = register_timer("sync_controllers_serial_sync_chunks_completed");
+    pub static ref SERIAL_SYNC_SEGMENT_LATENCY: Arc<dyn Histogram> = Sample::ExpDecay(0.015).register("sync_controllers_serial_sync_segment_latency", 1024);
+    pub static ref SERIAL_SYNC_UNEXPECTED_ERRORS: Arc<dyn Counter<usize>> = CounterUsize::register("sync_controllers_serial_sync_unexpected_errors");
 }
diff --git a/node/sync/src/controllers/serial.rs b/node/sync/src/controllers/serial.rs
index cb4f46f..dd23e70 100644
--- a/node/sync/src/controllers/serial.rs
+++ b/node/sync/src/controllers/serial.rs
@@ -428,6 +428,7 @@ impl SerialSyncController {
         let data_len = response.chunks.data.len();
         if data_len == 0 || data_len % CHUNK_SIZE > 0 {
             warn!(%from_peer_id, %self.tx_seq, %data_len, "Invalid chunk response data length");
+            metrics::SERIAL_SYNC_UNEXPECTED_ERRORS.inc(1);
             self.ban_peer(from_peer_id, "Invalid chunk response data length");
             self.state = SyncState::Idle;
             return;
@@ -465,6 +466,7 @@ impl SerialSyncController {
             }
             Err(err) => {
                 warn!(%err, %self.tx_seq, "Failed to validate chunks response");
+                metrics::SERIAL_SYNC_UNEXPECTED_ERRORS.inc(1);
                 self.ban_peer(from_peer_id, "Chunk array validation failed");
                 self.state = SyncState::Idle;
                 return;
@@ -473,6 +475,8 @@ impl SerialSyncController {
 
         self.failures = 0;
 
+        metrics::SERIAL_SYNC_SEGMENT_LATENCY.update_since(since.0);
+
         let shard_config = self.store.get_store().flow().get_shard_config();
         let next_chunk = shard_config.next_segment_index(
             (from_chunk / PORA_CHUNK_SIZE as u64) as usize,
@@ -487,6 +491,7 @@ impl SerialSyncController {
             Ok(true) => self.next_chunk = next_chunk as u64,
             Ok(false) => {
                 warn!(%self.tx_seq, ?self.tx_id, "Transaction reverted while storing chunks");
+                metrics::SERIAL_SYNC_UNEXPECTED_ERRORS.inc(1);
                 self.state = SyncState::Failed {
                     reason: FailureReason::TxReverted(self.tx_id),
                 };
@@ -494,6 +499,7 @@ impl SerialSyncController {
             }
             Err(err) => {
                 error!(%err, %self.tx_seq, "Unexpected DB error while storing chunks");
+                metrics::SERIAL_SYNC_UNEXPECTED_ERRORS.inc(1);
                 self.state = SyncState::Failed {
                     reason: FailureReason::DBError(err.to_string()),
                 };
@@ -527,12 +533,14 @@ impl SerialSyncController {
             }
             Ok(false) => {
                 warn!(?self.tx_id, %self.tx_seq, "Transaction reverted during finalize_tx");
+                metrics::SERIAL_SYNC_UNEXPECTED_ERRORS.inc(1);
                 self.state = SyncState::Failed {
                     reason: FailureReason::TxReverted(self.tx_id),
                 };
             }
             Err(err) => {
                 error!(%err, %self.tx_seq, "Unexpected error during finalize_tx");
+                metrics::SERIAL_SYNC_UNEXPECTED_ERRORS.inc(1);
                 self.state = SyncState::Failed {
                     reason: FailureReason::DBError(err.to_string()),
                 };
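Note: SERIAL_SYNC_SEGMENT_LATENCY in patch 4/7 uses an exponentially decaying sample, so the histogram is biased toward recent segment downloads instead of the all-time distribution. A sketch with the same registration call (the demo names are illustrative):

    use std::sync::Arc;
    use std::time::Instant;

    use metrics::{Histogram, Sample};

    lazy_static::lazy_static! {
        // Exponentially decaying reservoir: alpha 0.015, 1024 samples,
        // matching SERIAL_SYNC_SEGMENT_LATENCY above.
        static ref DEMO_LATENCY: Arc<dyn Histogram> = Sample::ExpDecay(0.015).register("demo_segment_latency", 1024);
    }

    fn on_segment_received(request_sent_at: Instant) {
        // One latency sample per received segment.
        DEMO_LATENCY.update_since(request_sent_at);
    }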
From eb521fc24b3ef6e1fdfef1dfcef970eddf76aa05 Mon Sep 17 00:00:00 2001
From: boqiu <82121246@qq.com>
Date: Wed, 21 Aug 2024 11:29:28 +0800
Subject: [PATCH 5/7] adjust default timeout value for auto sync

---
 node/sync/src/auto_sync/batcher.rs        | 19 ++++++++++++-------
 node/sync/src/auto_sync/batcher_random.rs | 15 +++++++++++----
 node/sync/src/auto_sync/batcher_serial.rs | 17 +++++++++++------
 node/sync/src/lib.rs                      | 13 ++++++++-----
 run/config-testnet-standard.toml          | 12 ++++--------
 run/config-testnet-turbo.toml             | 12 ++++--------
 run/config.toml                           | 12 ++++--------
 7 files changed, 54 insertions(+), 46 deletions(-)

diff --git a/node/sync/src/auto_sync/batcher.rs b/node/sync/src/auto_sync/batcher.rs
index 0107413..ab1f9fd 100644
--- a/node/sync/src/auto_sync/batcher.rs
+++ b/node/sync/src/auto_sync/batcher.rs
@@ -1,7 +1,7 @@
-use crate::{controllers::SyncState, Config, SyncRequest, SyncResponse, SyncSender};
+use crate::{controllers::SyncState, SyncRequest, SyncResponse, SyncSender};
 use anyhow::{bail, Result};
 use serde::{Deserialize, Serialize};
-use std::{collections::HashSet, fmt::Debug, sync::Arc};
+use std::{collections::HashSet, fmt::Debug, sync::Arc, time::Duration};
 use storage_async::Store;
 use tokio::sync::RwLock;
 
@@ -15,18 +15,23 @@ pub enum SyncResult {
 /// Supports to sync files concurrently.
 #[derive(Clone)]
 pub struct Batcher {
-    pub(crate) config: Config,
     capacity: usize,
+    find_peer_timeout: Duration,
    tasks: Arc<RwLock<HashSet<u64>>>, // files to sync
     store: Store,
     sync_send: SyncSender,
 }
 
 impl Batcher {
-    pub fn new(config: Config, capacity: usize, store: Store, sync_send: SyncSender) -> Self {
+    pub fn new(
+        capacity: usize,
+        find_peer_timeout: Duration,
+        store: Store,
+        sync_send: SyncSender,
+    ) -> Self {
         Self {
-            config,
             capacity,
+            find_peer_timeout,
             tasks: Default::default(),
             store,
             sync_send,
@@ -128,7 +133,7 @@ impl Batcher {
 
             // finding peers timeout
             Some(SyncState::FindingPeers { origin, .. })
-                if origin.elapsed() > self.config.find_peer_timeout =>
+                if origin.elapsed() > self.find_peer_timeout =>
             {
                 debug!(%tx_seq, "Terminate file sync due to finding peers timeout");
                 self.terminate_file_sync(tx_seq, false).await;
@@ -137,7 +142,7 @@ impl Batcher {
 
             // connecting peers timeout
             Some(SyncState::ConnectingPeers { origin, .. })
-                if origin.elapsed() > self.config.find_peer_timeout =>
+                if origin.elapsed() > self.find_peer_timeout =>
             {
                 debug!(%tx_seq, "Terminate file sync due to connecting peers timeout");
                 self.terminate_file_sync(tx_seq, false).await;
diff --git a/node/sync/src/auto_sync/batcher_random.rs b/node/sync/src/auto_sync/batcher_random.rs
index 496b0ab..a070eb1 100644
--- a/node/sync/src/auto_sync/batcher_random.rs
+++ b/node/sync/src/auto_sync/batcher_random.rs
@@ -22,6 +22,7 @@ pub struct RandomBatcherState {
 
 #[derive(Clone)]
 pub struct RandomBatcher {
+    config: Config,
     batcher: Batcher,
     sync_store: Arc<SyncStore>,
 }
@@ -34,7 +35,13 @@ impl RandomBatcher {
         sync_store: Arc<SyncStore>,
     ) -> Self {
         Self {
-            batcher: Batcher::new(config, config.max_random_workers, store, sync_send),
+            config,
+            batcher: Batcher::new(
+                config.max_random_workers,
+                config.random_find_peer_timeout,
+                store,
+                sync_send,
+            ),
             sync_store,
         }
     }
@@ -56,7 +63,7 @@ impl RandomBatcher {
             // disable file sync until catched up
             if !catched_up.load(Ordering::Relaxed) {
                 trace!("Cannot sync file in catch-up phase");
-                sleep(self.batcher.config.auto_sync_idle_interval).await;
+                sleep(self.config.auto_sync_idle_interval).await;
                 continue;
             }
 
@@ -73,11 +80,11 @@ impl RandomBatcher {
                     "File sync still in progress or idle, state = {:?}",
                     self.get_state().await
                 );
-                sleep(self.batcher.config.auto_sync_idle_interval).await;
+                sleep(self.config.auto_sync_idle_interval).await;
             }
             Err(err) => {
                 warn!(%err, "Failed to sync file once, state = {:?}", self.get_state().await);
-                sleep(self.batcher.config.auto_sync_error_interval).await;
+                sleep(self.config.auto_sync_error_interval).await;
             }
         }
     }
diff --git a/node/sync/src/auto_sync/batcher_serial.rs b/node/sync/src/auto_sync/batcher_serial.rs
index 9322c72..e4a3986 100644
--- a/node/sync/src/auto_sync/batcher_serial.rs
+++ b/node/sync/src/auto_sync/batcher_serial.rs
@@ -23,6 +23,7 @@ use tokio::{
 /// Supports to sync files in sequence concurrently.
 #[derive(Clone)]
 pub struct SerialBatcher {
+    config: Config,
     batcher: Batcher,
 
     /// Next tx seq to sync.
@@ -80,13 +81,17 @@ impl SerialBatcher {
         sync_send: SyncSender,
         sync_store: Arc<SyncStore>,
     ) -> Result<Self> {
-        let capacity = config.max_sequential_workers;
-
         // continue file sync from break point in db
         let (next_tx_seq, max_tx_seq) = sync_store.get_tx_seq_range().await?;
 
         Ok(Self {
-            batcher: Batcher::new(config, capacity, store, sync_send),
+            config,
+            batcher: Batcher::new(
+                config.max_sequential_workers,
+                config.sequential_find_peer_timeout,
+                store,
+                sync_send,
+            ),
             next_tx_seq: Arc::new(AtomicU64::new(next_tx_seq.unwrap_or(0))),
             max_tx_seq: Arc::new(AtomicU64::new(max_tx_seq.unwrap_or(u64::MAX))),
             pending_completed_txs: Default::default(),
@@ -136,7 +141,7 @@ impl SerialBatcher {
             // disable file sync until catched up
             if !catched_up.load(Ordering::Relaxed) {
                 trace!("Cannot sync file in catch-up phase");
-                sleep(self.batcher.config.auto_sync_idle_interval).await;
+                sleep(self.config.auto_sync_idle_interval).await;
                 continue;
             }
 
@@ -157,11 +162,11 @@ impl SerialBatcher {
                     "File sync still in progress or idle, state = {:?}",
                     self.get_state().await
                 );
-                sleep(self.batcher.config.auto_sync_idle_interval).await;
+                sleep(self.config.auto_sync_idle_interval).await;
             }
             Err(err) => {
                 warn!(%err, "Failed to sync file once, state = {:?}", self.get_state().await);
-                sleep(self.batcher.config.auto_sync_error_interval).await;
+                sleep(self.config.auto_sync_error_interval).await;
             }
         }
     }
diff --git a/node/sync/src/lib.rs b/node/sync/src/lib.rs
index f2250ca..ae25d49 100644
--- a/node/sync/src/lib.rs
+++ b/node/sync/src/lib.rs
@@ -52,7 +52,9 @@ pub struct Config {
     pub max_sequential_workers: usize,
     pub max_random_workers: usize,
     #[serde(deserialize_with = "deserialize_duration")]
-    pub find_peer_timeout: Duration,
+    pub sequential_find_peer_timeout: Duration,
+    #[serde(deserialize_with = "deserialize_duration")]
+    pub random_find_peer_timeout: Duration,
 }
 
 impl Default for Config {
@@ -61,7 +63,7 @@ impl Default for Config {
             // sync service config
             heartbeat_interval: Duration::from_secs(5),
             auto_sync_enabled: false,
-            max_sync_files: 16,
+            max_sync_files: 32,
             sync_file_by_rpc_enabled: true,
             sync_file_on_announcement_enabled: false,
 
@@ -78,9 +80,10 @@ impl Default for Config {
             // auto sync config
             auto_sync_idle_interval: Duration::from_secs(3),
             auto_sync_error_interval: Duration::from_secs(10),
-            max_sequential_workers: 8,
-            max_random_workers: 4,
-            find_peer_timeout: Duration::from_secs(10),
+            max_sequential_workers: 24,
+            max_random_workers: 8,
+            sequential_find_peer_timeout: Duration::from_secs(60),
+            random_find_peer_timeout: Duration::from_secs(500),
         }
     }
 }
diff --git a/run/config-testnet-standard.toml b/run/config-testnet-standard.toml
index 57fb869..d201c3a 100644
--- a/run/config-testnet-standard.toml
+++ b/run/config-testnet-standard.toml
@@ -228,11 +228,7 @@ reward_contract_address = "0x0496D0817BD8519e0de4894Dc379D35c35275609"
 auto_sync_enabled = true
 
 # Maximum number of files in sync from other peers simultaneously.
-max_sync_files = 32
-
-# Timeout to terminate a file sync when automatically sync from other peers.
-# If timeout, terminated file sync will be triggered later.
-# find_peer_timeout = "10s"
+# max_sync_files = 32
 
 # Enable to start a file sync via RPC (e.g. `admin_startSyncFile`).
 # sync_file_by_rpc_enabled = true
@@ -241,10 +237,10 @@
 # sync_file_on_announcement_enabled = false
 
 # Maximum threads to sync files in sequence.
-max_sequential_workers = 24
+# max_sequential_workers = 24
 
 # Maximum threads to sync files randomly.
-# max_random_workers = 4
+# max_random_workers = 8
 
 #######################################################################
 ###                    File Location Cache Options                  ###
@@ -265,4 +261,4 @@
 
 # Validity period of location information.
 # If the timestamp in the storage location information exceeds this duration from the current time, it will be removed from the cache.
-# entry_expiration_time_secs = 3600
\ No newline at end of file
+# entry_expiration_time_secs = 3600
diff --git a/run/config-testnet-turbo.toml b/run/config-testnet-turbo.toml
index c3756a1..31243cf 100644
--- a/run/config-testnet-turbo.toml
+++ b/run/config-testnet-turbo.toml
@@ -228,11 +228,7 @@ reward_contract_address = "0x51998C4d486F406a788B766d93510980ae1f9360"
 auto_sync_enabled = true
 
 # Maximum number of files in sync from other peers simultaneously.
-max_sync_files = 32
-
-# Timeout to terminate a file sync when automatically sync from other peers.
-# If timeout, terminated file sync will be triggered later.
-# find_peer_timeout = "10s"
+# max_sync_files = 32
 
 # Enable to start a file sync via RPC (e.g. `admin_startSyncFile`).
 # sync_file_by_rpc_enabled = true
@@ -241,10 +237,10 @@
 # sync_file_on_announcement_enabled = false
 
 # Maximum threads to sync files in sequence.
-max_sequential_workers = 24
+# max_sequential_workers = 24
 
 # Maximum threads to sync files randomly.
-# max_random_workers = 4
+# max_random_workers = 8
 
 #######################################################################
 ###                    File Location Cache Options                  ###
@@ -265,4 +261,4 @@
 
 # Validity period of location information.
 # If the timestamp in the storage location information exceeds this duration from the current time, it will be removed from the cache.
-# entry_expiration_time_secs = 3600
\ No newline at end of file
+# entry_expiration_time_secs = 3600
diff --git a/run/config.toml b/run/config.toml
index e58f7e9..023f3c3 100644
--- a/run/config.toml
+++ b/run/config.toml
@@ -227,11 +227,7 @@
 # auto_sync_enabled = false
 
 # Maximum number of files in sync from other peers simultaneously.
-# max_sync_files = 16
-
-# Timeout to terminate a file sync when automatically sync from other peers.
-# If timeout, terminated file sync will be triggered later.
-# find_peer_timeout = "10s"
+# max_sync_files = 32
 
 # Enable to start a file sync via RPC (e.g. `admin_startSyncFile`).
 # sync_file_by_rpc_enabled = true
@@ -240,10 +236,10 @@
 # sync_file_on_announcement_enabled = false
 
 # Maximum threads to sync files in sequence.
-# max_sequential_workers = 8
+# max_sequential_workers = 24
 
 # Maximum threads to sync files randomly.
-# max_random_workers = 4
+# max_random_workers = 8
 
 #######################################################################
 ###                    File Location Cache Options                  ###
@@ -264,4 +260,4 @@
 
 # Validity period of location information.
 # If the timestamp in the storage location information exceeds this duration from the current time, it will be removed from the cache.
-# entry_expiration_time_secs = 3600
\ No newline at end of file
+# entry_expiration_time_secs = 3600
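Note: after patch 5/7, Batcher no longer owns the whole Config; each caller passes its own capacity and peer-discovery timeout, which is what lets sequential sync (60s by default) and random sync (500s) time out differently. A reduced sketch of the new construction shape (all Demo* types are stand-ins, not the real ones):

    use std::time::Duration;

    struct DemoConfig {
        max_sequential_workers: usize,
        max_random_workers: usize,
        sequential_find_peer_timeout: Duration,
        random_find_peer_timeout: Duration,
    }

    struct DemoBatcher {
        capacity: usize,
        find_peer_timeout: Duration,
    }

    impl DemoBatcher {
        fn new(capacity: usize, find_peer_timeout: Duration) -> Self {
            Self { capacity, find_peer_timeout }
        }
    }

    // Sequential sync gives up on peer discovery quickly and retries the
    // same file later; random sync tolerates a much longer search.
    fn build(config: &DemoConfig) -> (DemoBatcher, DemoBatcher) {
        (
            DemoBatcher::new(config.max_sequential_workers, config.sequential_find_peer_timeout),
            DemoBatcher::new(config.max_random_workers, config.random_find_peer_timeout),
        )
    }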
From 061fc3ef58d2bd0f3cc3164da6887463bf69b6b6 Mon Sep 17 00:00:00 2001
From: boqiu <82121246@qq.com>
Date: Wed, 21 Aug 2024 15:17:13 +0800
Subject: [PATCH 6/7] fix metrics rpc for Timer type

---
 node/rpc/src/admin/impl.rs | 17 ++++++++++++++++-
 1 file changed, 16 insertions(+), 1 deletion(-)

diff --git a/node/rpc/src/admin/impl.rs b/node/rpc/src/admin/impl.rs
index ccad517..6bbd3cf 100644
--- a/node/rpc/src/admin/impl.rs
+++ b/node/rpc/src/admin/impl.rs
@@ -4,7 +4,7 @@ use crate::{error, Context};
 use futures::prelude::*;
 use jsonrpsee::core::async_trait;
 use jsonrpsee::core::RpcResult;
-use metrics::DEFAULT_REGISTRY;
+use metrics::{DEFAULT_GROUPING_REGISTRY, DEFAULT_REGISTRY};
 use network::{multiaddr::Protocol, Multiaddr};
 use std::collections::{BTreeMap, HashMap};
 use std::net::IpAddr;
@@ -266,6 +266,21 @@ impl RpcServer for RpcServerImpl {
             }
         }
 
+        for (group_name, metrics) in DEFAULT_GROUPING_REGISTRY.read().get_all() {
+            for (metric_name, metric) in metrics.iter() {
+                let name = format!("{}.{}", group_name, metric_name);
+                match &maybe_prefix {
+                    Some(prefix) if !name.starts_with(prefix) => {}
+                    _ => {
+                        result.insert(
+                            name,
+                            format!("{} {}", metric.get_type(), metric.get_value()),
+                        );
+                    }
+                }
+            }
+        }
+
         Ok(result)
     }
 }
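Note: patch 6/7 exists because the admin RPC previously read only DEFAULT_REGISTRY, so metrics kept in the grouping registry (which, per the commit subject, is where Timer values end up) never appeared in the output. A sketch of the added traversal as a standalone helper, using only the calls visible in the diff (snapshot is an illustrative name):

    use std::collections::BTreeMap;

    use metrics::DEFAULT_GROUPING_REGISTRY;

    // Collect grouped metrics as "group.metric" -> "Type value" entries,
    // optionally filtered by name prefix, the same shape the RPC returns.
    fn snapshot(maybe_prefix: Option<&str>) -> BTreeMap<String, String> {
        let mut result = BTreeMap::new();
        for (group_name, metrics) in DEFAULT_GROUPING_REGISTRY.read().get_all() {
            for (metric_name, metric) in metrics.iter() {
                let name = format!("{}.{}", group_name, metric_name);
                if maybe_prefix.map_or(true, |p| name.starts_with(p)) {
                    result.insert(name, format!("{} {}", metric.get_type(), metric.get_value()));
                }
            }
        }
        result
    }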
From 6fb21d14246582237946c94fa72a3276607b68cb Mon Sep 17 00:00:00 2001
From: boqiu <82121246@qq.com>
Date: Wed, 21 Aug 2024 17:51:58 +0800
Subject: [PATCH 7/7] add metrics for channel

---
 Cargo.lock                              |   1 +
 common/channel/Cargo.toml               |   1 +
 common/channel/src/channel.rs           | 117 ++++++++++++++++++++----
 node/router/src/libp2p_event_handler.rs |   2 +-
 node/sync/src/service.rs                |   6 +++---
 5 files changed, 107 insertions(+), 20 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index 3a85a3f..08cd52a 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -844,6 +844,7 @@ dependencies = [
 name = "channel"
 version = "0.1.0"
 dependencies = [
+ "metrics",
  "tokio",
 ]
 
diff --git a/common/channel/Cargo.toml b/common/channel/Cargo.toml
index cbdc369..d49d24f 100644
--- a/common/channel/Cargo.toml
+++ b/common/channel/Cargo.toml
@@ -5,3 +5,4 @@
 
 [dependencies]
 tokio = { version = "1.19.2", features = ["sync", "time"] }
+metrics = { workspace = true }
diff --git a/common/channel/src/channel.rs b/common/channel/src/channel.rs
index 2f5206a..d70aa4d 100644
--- a/common/channel/src/channel.rs
+++ b/common/channel/src/channel.rs
@@ -1,6 +1,8 @@
 use crate::error::Error;
-use std::time::Duration;
-use tokio::sync::mpsc::error::TryRecvError;
+use metrics::{register_meter_with_group, Counter, CounterUsize, Histogram, Meter, Sample};
+use std::sync::Arc;
+use std::time::{Duration, Instant};
+use tokio::sync::mpsc::error::{SendError, TryRecvError};
 use tokio::sync::{mpsc, oneshot};
 use tokio::time::timeout;
@@ -19,56 +21,139 @@ pub struct Channel<N, Req, Res> {
 }
 
 impl<N, Req, Res> Channel<N, Req, Res> {
-    pub fn unbounded() -> (Sender<N, Req, Res>, Receiver<N, Req, Res>) {
+    pub fn unbounded(name: &str) -> (Sender<N, Req, Res>, Receiver<N, Req, Res>) {
         let (sender, receiver) = mpsc::unbounded_channel();
-        (Sender { chan: sender }, Receiver { chan: receiver })
+
+        let metrics_group = format!("common_channel_{}", name);
+        let metrics_queued = CounterUsize::register_with_group(metrics_group.as_str(), "size");
+
+        (
+            Sender {
+                chan: sender,
+                metrics_send_qps: register_meter_with_group(metrics_group.as_str(), "send"),
+                metrics_queued: metrics_queued.clone(),
+                metrics_timeout: CounterUsize::register_with_group(
+                    metrics_group.as_str(),
+                    "timeout",
+                ),
+            },
+            Receiver {
+                chan: receiver,
+                metrics_recv_qps: register_meter_with_group(metrics_group.as_str(), "recv"),
+                metrics_queued,
+                metrics_queue_latency: Sample::ExpDecay(0.015).register_with_group(
+                    metrics_group.as_str(),
+                    "latency",
+                    1024,
+                ),
+            },
+        )
+    }
+}
+
+enum TimedMessage<N, Req, Res> {
+    Notification(Instant, N),
+    Request(Instant, Req, ResponseSender<Res>),
+}
+
+impl<N, Req, Res> From<Message<N, Req, Res>> for TimedMessage<N, Req, Res> {
+    fn from(value: Message<N, Req, Res>) -> Self {
+        match value {
+            Message::Notification(n) => TimedMessage::Notification(Instant::now(), n),
+            Message::Request(req, res) => TimedMessage::Request(Instant::now(), req, res),
+        }
+    }
+}
+
+impl<N, Req, Res> TimedMessage<N, Req, Res> {
+    fn into_message(self) -> (Instant, Message<N, Req, Res>) {
+        match self {
+            TimedMessage::Notification(since, n) => (since, Message::Notification(n)),
+            TimedMessage::Request(since, req, res) => (since, Message::Request(req, res)),
+        }
     }
 }
 
 pub struct Sender<N, Req, Res> {
-    chan: mpsc::UnboundedSender<Message<N, Req, Res>>,
+    chan: mpsc::UnboundedSender<TimedMessage<N, Req, Res>>,
+
+    metrics_send_qps: Arc<dyn Meter>,
+    metrics_queued: Arc<dyn Counter<usize>>,
+    metrics_timeout: Arc<dyn Counter<usize>>,
 }
 
 impl<N, Req, Res> Clone for Sender<N, Req, Res> {
     fn clone(&self) -> Self {
         Sender {
             chan: self.chan.clone(),
+            metrics_send_qps: self.metrics_send_qps.clone(),
+            metrics_queued: self.metrics_queued.clone(),
+            metrics_timeout: self.metrics_timeout.clone(),
         }
     }
 }
 
 impl<N, Req, Res> Sender<N, Req, Res> {
     pub fn notify(&self, msg: N) -> Result<(), Error<N, Req, Res>> {
-        self.chan
-            .send(Message::Notification(msg))
-            .map_err(|e| Error::SendError(e))
+        self.send(Message::Notification(msg))
     }
 
     pub async fn request(&self, request: Req) -> Result<Res, Error<N, Req, Res>> {
         let (sender, receiver) = oneshot::channel();
 
-        self.chan
-            .send(Message::Request(request, sender))
-            .map_err(|e| Error::SendError(e))?;
+        self.send(Message::Request(request, sender))?;
 
         timeout(DEFAULT_REQUEST_TIMEOUT, receiver)
             .await
-            .map_err(|_| Error::TimeoutError)?
+            .map_err(|_| {
+                self.metrics_timeout.inc(1);
+                Error::TimeoutError
+            })?
             .map_err(|e| Error::RecvError(e))
     }
+
+    fn send(&self, message: Message<N, Req, Res>) -> Result<(), Error<N, Req, Res>> {
+        match self.chan.send(message.into()) {
+            Ok(()) => {
+                self.metrics_send_qps.mark(1);
+                self.metrics_queued.inc(1);
+                Ok(())
+            }
+            Err(e) => {
+                let (_, msg) = e.0.into_message();
+                Err(Error::SendError(SendError(msg)))
+            }
+        }
+    }
 }
 
 pub struct Receiver<N, Req, Res> {
-    chan: mpsc::UnboundedReceiver<Message<N, Req, Res>>,
+    chan: mpsc::UnboundedReceiver<TimedMessage<N, Req, Res>>,
+
+    metrics_recv_qps: Arc<dyn Meter>,
+    metrics_queued: Arc<dyn Counter<usize>>,
+    metrics_queue_latency: Arc<dyn Histogram>,
 }
 
 impl<N, Req, Res> Receiver<N, Req, Res> {
     pub async fn recv(&mut self) -> Option<Message<N, Req, Res>> {
-        self.chan.recv().await
+        let data = self.chan.recv().await?;
+        Some(self.on_recv_data(data))
     }
 
     pub fn try_recv(&mut self) -> Result<Message<N, Req, Res>, TryRecvError> {
-        self.chan.try_recv()
+        let data = self.chan.try_recv()?;
+        Ok(self.on_recv_data(data))
+    }
+
+    fn on_recv_data(&self, data: TimedMessage<N, Req, Res>) -> Message<N, Req, Res> {
+        self.metrics_recv_qps.mark(1);
+        self.metrics_queued.dec(1);
+
+        let (since, msg) = data.into_message();
+        self.metrics_queue_latency.update_since(since);
+
+        msg
     }
 }
 
@@ -91,7 +176,7 @@ mod tests {
 
     #[tokio::test]
     async fn request_response() {
-        let (tx, mut rx) = Channel::<Notification, Request, Response>::unbounded();
+        let (tx, mut rx) = Channel::<Notification, Request, Response>::unbounded("test");
 
         let task1 = async move {
             match rx.recv().await.expect("not dropped") {
diff --git a/node/router/src/libp2p_event_handler.rs b/node/router/src/libp2p_event_handler.rs
index 19ca59a..b3bb839 100644
--- a/node/router/src/libp2p_event_handler.rs
+++ b/node/router/src/libp2p_event_handler.rs
@@ -815,7 +815,7 @@ mod tests {
         let runtime = TestRuntime::default();
         let (network_globals, keypair) = Context::new_network_globals();
         let (network_send, network_recv) = mpsc::unbounded_channel();
-        let (sync_send, sync_recv) = channel::Channel::unbounded();
+        let (sync_send, sync_recv) = channel::Channel::unbounded("test");
         let (chunk_pool_send, _chunk_pool_recv) = mpsc::unbounded_channel();
         let store = LogManager::memorydb(LogConfig::default()).unwrap();
         Self {
diff --git a/node/sync/src/service.rs b/node/sync/src/service.rs
index a01d2d8..9282d99 100644
--- a/node/sync/src/service.rs
+++ b/node/sync/src/service.rs
@@ -157,7 +157,7 @@ impl SyncService {
         event_recv: broadcast::Receiver<LogSyncEvent>,
         catch_up_end_recv: oneshot::Receiver<()>,
     ) -> Result<SyncSender> {
-        let (sync_send, sync_recv) = channel::Channel::unbounded();
+        let (sync_send, sync_recv) = channel::Channel::unbounded("sync");
         let store = Store::new(store, executor.clone());
 
         // init auto sync
@@ -912,7 +912,7 @@ mod tests {
             create_file_location_cache(init_peer_id, vec![txs[0].id()]);
 
         let (network_send, mut network_recv) = mpsc::unbounded_channel::<NetworkMessage>();
-        let (_, sync_recv) = channel::Channel::unbounded();
+        let (_, sync_recv) = channel::Channel::unbounded("test");
 
         let mut sync = SyncService {
             config: Config::default(),
@@ -941,7 +941,7 @@ mod tests {
             create_file_location_cache(init_peer_id, vec![txs[0].id()]);
 
         let (network_send, mut network_recv) = mpsc::unbounded_channel::<NetworkMessage>();
-        let (_, sync_recv) = channel::Channel::unbounded();
+        let (_, sync_recv) = channel::Channel::unbounded("test");
 
         let mut sync = SyncService {
             config: Config::default(),
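Note: with patch 7/7, every channel is constructed with a name that seeds its metrics group ("common_channel_<name>"), and send/recv QPS, queue size, queue latency, and request timeouts are reported automatically. A sketch of using the instrumented channel, assuming the constructor shown in the diff (the String/u64 type parameters and the "demo" name are arbitrary):

    use channel::Channel;

    #[tokio::main]
    async fn main() {
        // Metrics for this channel appear under "common_channel_demo".
        let (tx, mut rx) = Channel::<String, u64, u64>::unbounded("demo");

        // Sending marks the send meter and increments the queued counter.
        let _ = tx.notify("hello".to_string());

        // Receiving marks the recv meter, decrements the queued counter,
        // and records how long the message waited in the queue.
        let _msg = rx.recv().await.expect("channel not closed");
    }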