From e1df2f0868e267f2a62c9b7619ef4be62e8ca4c8 Mon Sep 17 00:00:00 2001
From: Bo QIU <35757521+boqiu@users.noreply.github.com>
Date: Thu, 8 Aug 2024 16:21:50 +0800
Subject: [PATCH] Change hardcoded const variables to configs (#153)

---
 node/sync/src/auto_sync/batcher.rs        |  2 +-
 node/sync/src/auto_sync/batcher_random.rs | 11 +++----
 node/sync/src/auto_sync/batcher_serial.rs | 11 +++----
 node/sync/src/auto_sync/mod.rs            |  5 ---
 node/sync/src/controllers/mod.rs          |  2 +-
 node/sync/src/controllers/peers.rs        | 18 +++++-----
 node/sync/src/controllers/serial.rs       | 38 ++++++++++-----------
 node/sync/src/lib.rs                      | 40 ++++++++++++++++++++++-
 node/sync/src/service.rs                  | 27 ++++-----------
 9 files changed, 82 insertions(+), 72 deletions(-)

diff --git a/node/sync/src/auto_sync/batcher.rs b/node/sync/src/auto_sync/batcher.rs
index e8c482f..7b301de 100644
--- a/node/sync/src/auto_sync/batcher.rs
+++ b/node/sync/src/auto_sync/batcher.rs
@@ -15,7 +15,7 @@ pub enum SyncResult {
 /// Supports to sync files concurrently.
 #[derive(Clone)]
 pub struct Batcher {
-    config: Config,
+    pub(crate) config: Config,
     capacity: usize,
     tasks: Arc>>, // files to sync
     store: Store,
diff --git a/node/sync/src/auto_sync/batcher_random.rs b/node/sync/src/auto_sync/batcher_random.rs
index e1a8f92..363b826 100644
--- a/node/sync/src/auto_sync/batcher_random.rs
+++ b/node/sync/src/auto_sync/batcher_random.rs
@@ -1,8 +1,5 @@
 use super::{batcher::Batcher, sync_store::SyncStore};
-use crate::{
-    auto_sync::{batcher::SyncResult, INTERVAL_ERROR, INTERVAL_IDLE},
-    Config, SyncSender,
-};
+use crate::{auto_sync::batcher::SyncResult, Config, SyncSender};
 use anyhow::Result;
 use serde::{Deserialize, Serialize};
 use std::sync::{
@@ -57,7 +54,7 @@ impl RandomBatcher {
             // disable file sync until catched up
             if !catched_up.load(Ordering::Relaxed) {
                 trace!("Cannot sync file in catch-up phase");
-                sleep(INTERVAL_IDLE).await;
+                sleep(self.batcher.config.auto_sync_idle_interval).await;
                 continue;
             }

@@ -68,11 +65,11 @@ impl RandomBatcher {
                         "File sync still in progress or idle, state = {:?}",
                         self.get_state().await
                     );
-                    sleep(INTERVAL_IDLE).await;
+                    sleep(self.batcher.config.auto_sync_idle_interval).await;
                 }
                 Err(err) => {
                     warn!(%err, "Failed to sync file once, state = {:?}", self.get_state().await);
-                    sleep(INTERVAL_ERROR).await;
+                    sleep(self.batcher.config.auto_sync_error_interval).await;
                 }
             }
         }
diff --git a/node/sync/src/auto_sync/batcher_serial.rs b/node/sync/src/auto_sync/batcher_serial.rs
index 1ba406c..4062a6d 100644
--- a/node/sync/src/auto_sync/batcher_serial.rs
+++ b/node/sync/src/auto_sync/batcher_serial.rs
@@ -2,10 +2,7 @@ use super::{
     batcher::{Batcher, SyncResult},
     sync_store::SyncStore,
 };
-use crate::{
-    auto_sync::{INTERVAL_ERROR, INTERVAL_IDLE},
-    Config, SyncSender,
-};
+use crate::{Config, SyncSender};
 use anyhow::Result;
 use log_entry_sync::LogSyncEvent;
 use serde::{Deserialize, Serialize};
@@ -139,7 +136,7 @@ impl SerialBatcher {
             // disable file sync until catched up
             if !catched_up.load(Ordering::Relaxed) {
                 trace!("Cannot sync file in catch-up phase");
-                sleep(INTERVAL_IDLE).await;
+                sleep(self.batcher.config.auto_sync_idle_interval).await;
                 continue;
             }

@@ -151,11 +148,11 @@ impl SerialBatcher {
                         "File sync still in progress or idle, state = {:?}",
                         self.get_state().await
                     );
-                    sleep(INTERVAL_IDLE).await;
+                    sleep(self.batcher.config.auto_sync_idle_interval).await;
                 }
                 Err(err) => {
                     warn!(%err, "Failed to sync file once, state = {:?}", self.get_state().await);
-                    sleep(INTERVAL_ERROR).await;
+                    sleep(self.batcher.config.auto_sync_error_interval).await;
                }
            }
        }
diff --git a/node/sync/src/auto_sync/mod.rs b/node/sync/src/auto_sync/mod.rs
index 12b3b62..47a09e2 100644
--- a/node/sync/src/auto_sync/mod.rs
+++ b/node/sync/src/auto_sync/mod.rs
@@ -4,8 +4,3 @@ pub mod batcher_serial;
 pub mod manager;
 pub mod sync_store;
 mod tx_store;
-
-use std::time::Duration;
-
-const INTERVAL_IDLE: Duration = Duration::from_secs(3);
-const INTERVAL_ERROR: Duration = Duration::from_secs(10);
diff --git a/node/sync/src/controllers/mod.rs b/node/sync/src/controllers/mod.rs
index 4e816f3..ad2374f 100644
--- a/node/sync/src/controllers/mod.rs
+++ b/node/sync/src/controllers/mod.rs
@@ -6,7 +6,7 @@ use std::collections::HashMap;
 use peers::PeerState;
 use serde::{Deserialize, Serialize};

-pub use serial::{FailureReason, SerialSyncController, SyncState, MAX_CHUNKS_TO_REQUEST};
+pub use serial::{FailureReason, SerialSyncController, SyncState};

 #[derive(Debug, Clone, Copy, Serialize, Deserialize)]
 #[serde(rename_all = "camelCase")]
diff --git a/node/sync/src/controllers/peers.rs b/node/sync/src/controllers/peers.rs
index 81a21c5..989582f 100644
--- a/node/sync/src/controllers/peers.rs
+++ b/node/sync/src/controllers/peers.rs
@@ -7,15 +7,12 @@ use shared_types::TxID;
 use std::collections::HashMap;
 use std::fmt::Debug;
 use std::sync::Arc;
-use std::time::{Duration, Instant};
+use std::time::Instant;
 use std::vec;
 use storage::config::{all_shards_available, ShardConfig};

 use crate::context::SyncNetworkContext;
-use crate::InstantWrapper;
-
-const PEER_CONNECT_TIMEOUT: Duration = Duration::from_secs(5);
-const PEER_DISCONNECT_TIMEOUT: Duration = Duration::from_secs(5);
+use crate::{Config, InstantWrapper};

 #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)]
 pub enum PeerState {
@@ -49,6 +46,7 @@ impl PeerInfo {

 #[derive(Default)]
 pub struct SyncPeers {
+    config: Config,
     peers: HashMap,
     ctx: Option>,
     file_location_cache: Option<(TxID, Arc)>,
@@ -56,11 +54,13 @@ pub struct SyncPeers {

 impl SyncPeers {
     pub fn new(
+        config: Config,
         ctx: Arc,
         tx_id: TxID,
         file_location_cache: Arc,
     ) -> Self {
         Self {
+            config,
             peers: Default::default(),
             ctx: Some(ctx),
             file_location_cache: Some((tx_id, file_location_cache)),
@@ -185,7 +185,7 @@ impl SyncPeers {
             PeerState::Found | PeerState::Connected => {}

             PeerState::Connecting => {
-                if info.since.elapsed() >= PEER_CONNECT_TIMEOUT {
+                if info.since.elapsed() >= self.config.peer_connect_timeout {
                     info!(%peer_id, %info.addr, "Peer connection timeout");
                     bad_peers.push(*peer_id);
@@ -206,7 +206,7 @@ impl SyncPeers {
             }

             PeerState::Disconnecting => {
-                if info.since.elapsed() >= PEER_DISCONNECT_TIMEOUT {
+                if info.since.elapsed() >= self.config.peer_disconnect_timeout {
                     info!(%peer_id, %info.addr, "Peer disconnect timeout");
                     bad_peers.push(*peer_id);
                 }
@@ -352,7 +352,7 @@ mod tests {
         sync_peers.add_new_peer(peer_id_connecting, addr.clone());
         sync_peers.update_state_force(&peer_id_connecting, PeerState::Connecting);
         sync_peers.peers.get_mut(&peer_id_connecting).unwrap().since =
-            (Instant::now() - PEER_CONNECT_TIMEOUT).into();
+            (Instant::now() - sync_peers.config.peer_connect_timeout).into();

         let peer_id_disconnecting = identity::Keypair::generate_ed25519().public().to_peer_id();
         sync_peers.add_new_peer(peer_id_disconnecting, addr.clone());
@@ -361,7 +361,7 @@ mod tests {
         sync_peers.update_state_force(&peer_id_disconnecting, PeerState::Disconnecting);
         sync_peers
             .peers
             .get_mut(&peer_id_disconnecting)
             .unwrap()
-            .since = (Instant::now() - PEER_DISCONNECT_TIMEOUT).into();
+            .since = (Instant::now() - sync_peers.config.peer_disconnect_timeout).into();

         let peer_id_disconnected =
            identity::Keypair::generate_ed25519().public().to_peer_id();
        sync_peers.add_new_peer(peer_id_disconnected, addr);
diff --git a/node/sync/src/controllers/serial.rs b/node/sync/src/controllers/serial.rs
index a69aee4..91937ed 100644
--- a/node/sync/src/controllers/serial.rs
+++ b/node/sync/src/controllers/serial.rs
@@ -1,7 +1,7 @@
 use crate::context::SyncNetworkContext;
 use crate::controllers::peers::{PeerState, SyncPeers};
 use crate::controllers::{FileSyncGoal, FileSyncInfo};
-use crate::InstantWrapper;
+use crate::{Config, InstantWrapper};
 use file_location_cache::FileLocationCache;
 use libp2p::swarm::DialError;
 use network::types::FindChunks;
@@ -11,20 +11,10 @@ use network::{
 };
 use rand::Rng;
 use shared_types::{timestamp_now, ChunkArrayWithProof, TxID, CHUNK_SIZE};
-use std::{
-    sync::Arc,
-    time::{Duration, Instant},
-};
+use std::{sync::Arc, time::Instant};
 use storage::log_store::log_manager::PORA_CHUNK_SIZE;
 use storage_async::Store;

-pub const MAX_CHUNKS_TO_REQUEST: u64 = 2 * 1024;
-const MAX_REQUEST_FAILURES: usize = 5;
-const PEER_REQUEST_TIMEOUT: Duration = Duration::from_secs(5);
-const DOWNLOAD_TIMEOUT: Duration = Duration::from_secs(5);
-const WAIT_OUTGOING_CONNECTION_TIMEOUT: Duration = Duration::from_secs(10);
-const NEXT_REQUEST_WAIT_TIME: Duration = Duration::from_secs(3);
-
 #[derive(Clone, Debug, PartialEq, Eq)]
 pub enum FailureReason {
     DBError(String),
@@ -62,6 +52,8 @@
 }

 pub struct SerialSyncController {
+    config: Config,
+
     // only used for log purpose
     tx_seq: u64,
@@ -99,6 +91,7 @@ pub struct SerialSyncController {

 impl SerialSyncController {
     pub fn new(
+        config: Config,
         tx_id: TxID,
         tx_start_chunk_in_flow: u64,
         goal: FileSyncGoal,
@@ -107,6 +100,7 @@ impl SerialSyncController {
         file_location_cache: Arc,
     ) -> Self {
         SerialSyncController {
+            config,
             tx_seq: tx_id.seq,
             tx_id,
             tx_start_chunk_in_flow,
@@ -115,7 +109,7 @@ impl SerialSyncController {
             next_chunk: goal.index_start,
             failures: 0,
             state: SyncState::Idle,
-            peers: SyncPeers::new(ctx.clone(), tx_id, file_location_cache.clone()),
+            peers: SyncPeers::new(config, ctx.clone(), tx_id, file_location_cache.clone()),
             ctx,
             store,
             file_location_cache,
@@ -465,7 +459,8 @@ impl SerialSyncController {
                // occurs when remote peer has higher block height
                info!(%self.tx_seq, "Failed to validate chunks response due to no root found");
                self.state = SyncState::AwaitingDownload {
-                    since: (Instant::now() + NEXT_REQUEST_WAIT_TIME).into(),
+                    since: (Instant::now() + self.config.peer_next_chunks_request_wait_timeout)
+                        .into(),
                };
                return;
            }
@@ -557,10 +552,10 @@ impl SerialSyncController {

        self.failures += 1;

-        if self.failures <= MAX_REQUEST_FAILURES {
+        if self.failures <= self.config.max_request_failures {
            // try again
            self.state = SyncState::AwaitingDownload {
-                since: (Instant::now() + NEXT_REQUEST_WAIT_TIME).into(),
+                since: (Instant::now() + self.config.peer_next_chunks_request_wait_timeout).into(),
            };
        } else {
            // ban and find new peer to download
@@ -622,7 +617,7 @@ impl SerialSyncController {
                // storage node may not have the specific file when `FindFile`
                // gossip message received. In this case, just broadcast the
                // `FindFile` message again.
-                if since.elapsed() >= PEER_REQUEST_TIMEOUT {
+                if since.elapsed() >= self.config.peer_find_timeout {
                    debug!(%self.tx_seq, "Finding peer timeout and try to find peers again");
                    self.try_find_peers();
                }
@@ -657,7 +652,7 @@ impl SerialSyncController {
            }

            SyncState::AwaitingOutgoingConnection { since } => {
-                if since.elapsed() < WAIT_OUTGOING_CONNECTION_TIMEOUT {
+                if since.elapsed() < self.config.peer_wait_outgoing_connection_timeout {
                    completed = true;
                } else {
                    debug!(%self.tx_seq, "Waiting for outgoing connection timeout and try to find other peers to dial");
@@ -679,7 +674,7 @@ impl SerialSyncController {
                    // e.g. peer disconnected by remote node
                    debug!(%self.tx_seq, "No peer to continue downloading and try to find other peers to download");
                    self.state = SyncState::Idle;
-                } else if since.elapsed() >= DOWNLOAD_TIMEOUT {
+                } else if since.elapsed() >= self.config.peer_chunks_download_timeout {
                    self.handle_response_failure(peer_id, "RPC timeout");
                } else {
                    completed = true;
@@ -1516,7 +1511,7 @@ mod tests {
            chunk_count,
        );

-        for i in 0..(MAX_REQUEST_FAILURES + 1) {
+        for i in 0..(controller.config.max_request_failures + 1) {
            controller.handle_response_failure(init_peer_id, "unit test");
            if let Some(msg) = network_recv.recv().await {
                match msg {
@@ -1550,7 +1545,7 @@ mod tests {
            }

            assert_eq!(controller.failures, i + 1);

-            if i == MAX_REQUEST_FAILURES {
+            if i == controller.config.max_request_failures {
                assert_eq!(*controller.get_status(), SyncState::Idle);

                if let Some(msg) = network_recv.recv().await {
@@ -1626,6 +1621,7 @@ mod tests {
        let file_location_cache = create_file_location_cache(peer_id, vec![tx_id]);

        let controller = SerialSyncController::new(
+            Config::default(),
            tx_id,
            0,
            FileSyncGoal::new_file(num_chunks as u64),
diff --git a/node/sync/src/lib.rs b/node/sync/src/lib.rs
index 7ffad90..911b930 100644
--- a/node/sync/src/lib.rs
+++ b/node/sync/src/lib.rs
@@ -20,12 +20,35 @@ use std::{
 #[derive(Clone, Copy, Debug, Deserialize)]
 #[serde(default)]
 pub struct Config {
+    // sync service config
+    #[serde(deserialize_with = "deserialize_duration")]
+    pub heartbeat_interval: Duration,
     pub auto_sync_enabled: bool,
     pub max_sync_files: usize,
     pub sync_file_by_rpc_enabled: bool,
     pub sync_file_on_announcement_enabled: bool,

-    // auto_sync config
+    // serial sync config
+    pub max_chunks_to_request: u64,
+    pub max_request_failures: usize,
+    #[serde(deserialize_with = "deserialize_duration")]
+    pub peer_connect_timeout: Duration,
+    #[serde(deserialize_with = "deserialize_duration")]
+    pub peer_disconnect_timeout: Duration,
+    #[serde(deserialize_with = "deserialize_duration")]
+    pub peer_find_timeout: Duration,
+    #[serde(deserialize_with = "deserialize_duration")]
+    pub peer_chunks_download_timeout: Duration,
+    #[serde(deserialize_with = "deserialize_duration")]
+    pub peer_wait_outgoing_connection_timeout: Duration,
+    #[serde(deserialize_with = "deserialize_duration")]
+    pub peer_next_chunks_request_wait_timeout: Duration,
+
+    // auto sync config
+    #[serde(deserialize_with = "deserialize_duration")]
+    pub auto_sync_idle_interval: Duration,
+    #[serde(deserialize_with = "deserialize_duration")]
+    pub auto_sync_error_interval: Duration,
     pub max_sequential_workers: usize,
     #[serde(deserialize_with = "deserialize_duration")]
     pub find_peer_timeout: Duration,
@@ -34,11 +57,26 @@ impl Default for Config {
     fn default() -> Self {
         Self {
+            // sync service config
+            heartbeat_interval: Duration::from_secs(5),
             auto_sync_enabled: false,
             max_sync_files: 16,
             sync_file_by_rpc_enabled: true,
             sync_file_on_announcement_enabled: false,

+            // serial sync config
+            max_chunks_to_request: 2 * 1024,
+            max_request_failures: 5,
+            peer_connect_timeout: Duration::from_secs(5),
+            peer_disconnect_timeout: Duration::from_secs(5),
+            peer_find_timeout: Duration::from_secs(5),
+            peer_chunks_download_timeout: Duration::from_secs(5),
+            peer_wait_outgoing_connection_timeout: Duration::from_secs(10),
+            peer_next_chunks_request_wait_timeout: Duration::from_secs(3),
+
+            // auto sync config
+            auto_sync_idle_interval: Duration::from_secs(3),
+            auto_sync_error_interval: Duration::from_secs(10),
             max_sequential_workers: 8,
             find_peer_timeout: Duration::from_secs(10),
         }
diff --git a/node/sync/src/service.rs b/node/sync/src/service.rs
index 2daeab0..06dbc9d 100644
--- a/node/sync/src/service.rs
+++ b/node/sync/src/service.rs
@@ -2,7 +2,6 @@ use crate::auto_sync::manager::AutoSyncManager;
 use crate::context::SyncNetworkContext;
 use crate::controllers::{
     FailureReason, FileSyncGoal, FileSyncInfo, SerialSyncController, SyncState,
-    MAX_CHUNKS_TO_REQUEST,
 };
 use crate::{Config, SyncServiceState};
 use anyhow::{bail, Result};
@@ -26,8 +25,6 @@ use storage::log_store::Store as LogStore;
 use storage_async::Store;
 use tokio::sync::{broadcast, mpsc, oneshot};

-const HEARTBEAT_INTERVAL_SEC: u64 = 5;
-
 pub type SyncSender = channel::Sender;
 pub type SyncReceiver = channel::Receiver;

@@ -126,9 +123,6 @@ pub struct SyncService {
     /// A collection of file sync controllers.
     controllers: HashMap,

-    /// Heartbeat interval for executing periodic tasks.
-    heartbeat: tokio::time::Interval,
-
     auto_sync_manager: Option,
 }

@@ -163,10 +157,6 @@ impl SyncService {
         catch_up_end_recv: oneshot::Receiver<()>,
     ) -> Result {
         let (sync_send, sync_recv) = channel::Channel::unbounded();
-
-        let heartbeat =
-            tokio::time::interval(tokio::time::Duration::from_secs(HEARTBEAT_INTERVAL_SEC));
-
         let store = Store::new(store, executor.clone());

         // init auto sync
@@ -193,7 +183,6 @@ impl SyncService {
             store,
             file_location_cache,
             controllers: Default::default(),
-            heartbeat,
             auto_sync_manager,
         };

@@ -204,6 +193,8 @@ impl SyncService {
     }

     async fn main(&mut self) {
+        let mut heartbeat = tokio::time::interval(self.config.heartbeat_interval);
+
         loop {
             tokio::select! {
                // received sync message
                Some(msg) = self.msg_recv.recv() => {
@@ -215,7 +206,7 @@ impl SyncService {
                }

                // heartbeat
-                _ = self.heartbeat.tick() => self.on_heartbeat(),
+                _ = heartbeat.tick() => self.on_heartbeat(),
            }
        }
    }
@@ -417,7 +408,7 @@ impl SyncService {
        }

        // ban peer if requested too many chunks
-        if request.index_end - request.index_start > MAX_CHUNKS_TO_REQUEST {
+        if request.index_end - request.index_start > self.config.max_chunks_to_request {
            self.ctx.ban_peer(peer_id, "Too many chunks requested");
            return Ok(());
        }
@@ -649,6 +640,7 @@ impl SyncService {
        }

        entry.insert(SerialSyncController::new(
+            self.config,
            tx.id(),
            tx.start_entry_index(),
            FileSyncGoal::new(num_chunks, index_start, index_end),
@@ -916,8 +908,6 @@ mod tests {
        let (network_send, mut network_recv) = mpsc::unbounded_channel::();
        let (_, sync_recv) = channel::Channel::unbounded();

-        let heartbeat = tokio::time::interval(Duration::from_secs(HEARTBEAT_INTERVAL_SEC));
-
        let mut sync = SyncService {
            config: Config::default(),
            msg_recv: sync_recv,
@@ -925,7 +915,6 @@ mod tests {
            store,
            file_location_cache,
            controllers: Default::default(),
-            heartbeat,
            auto_sync_manager: None,
        };

@@ -948,8 +937,6 @@ mod tests {
        let (network_send, mut network_recv) = mpsc::unbounded_channel::();
        let (_, sync_recv) = channel::Channel::unbounded();

-        let heartbeat = tokio::time::interval(Duration::from_secs(HEARTBEAT_INTERVAL_SEC));
-
        let mut sync = SyncService {
            config: Config::default(),
            msg_recv: sync_recv,
@@ -957,7 +944,6 @@ mod tests {
            store,
            file_location_cache,
            controllers: Default::default(),
-            heartbeat,
            auto_sync_manager: None,
        };

@@ -1357,7 +1343,8 @@ mod tests {
        wait_for_tx_finalized(runtime.store, tx_seq).await;

        // test heartbeat
-        let deadline = Instant::now() + Duration::from_secs(HEARTBEAT_INTERVAL_SEC + 1);
+        let deadline =
+            Instant::now() + Config::default().heartbeat_interval + Duration::from_secs(1);
        while !matches!(sync_send
            .request(SyncRequest::SyncStatus { tx_seq })
            .await
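
---

Every value that used to be a hardcoded const keeps its old default through `Default for Config`, so a node that omits the new options behaves exactly as before. A minimal sketch of overriding a few of the new knobs when building the config in code follows; the `sync::` path, the helper name, and the chosen values are illustrative and not part of this patch:

    use std::time::Duration;

    // Hypothetical helper, not part of the patch: overrides a few of the new
    // knobs and keeps every other field at the defaults above, which match
    // the removed constants. `sync::Config` is the struct in
    // node/sync/src/lib.rs; the exact crate path depends on the consumer.
    fn tuned_sync_config() -> sync::Config {
        sync::Config {
            heartbeat_interval: Duration::from_secs(10),       // default 5s (was HEARTBEAT_INTERVAL_SEC)
            max_chunks_to_request: 4 * 1024,                   // default 2 * 1024 (was MAX_CHUNKS_TO_REQUEST)
            auto_sync_idle_interval: Duration::from_secs(5),   // default 3s (was INTERVAL_IDLE)
            auto_sync_error_interval: Duration::from_secs(15), // default 10s (was INTERVAL_ERROR)
            ..Default::default()
        }
    }

Because `Config` derives `Deserialize` with `#[serde(default)]`, the same fields can also be set through the node's TOML configuration under their field names; how duration values are written there depends on the crate's `deserialize_duration` helper.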