Mirror of https://github.com/0glabs/0g-storage-node.git, synced 2024-11-10 10:05:17 +00:00.

Compare commits: 4 commits (30f40d1bde...9cbc39b74f)

- 9cbc39b74f
- 9189cabbb2
- e1df2f0868
- f456773b72
@@ -1,7 +1,7 @@
 use crate::{controllers::SyncState, Config, SyncRequest, SyncResponse, SyncSender};
 use anyhow::{bail, Result};
 use serde::{Deserialize, Serialize};
-use std::{fmt::Debug, sync::Arc};
+use std::{collections::HashSet, fmt::Debug, sync::Arc};
 use storage_async::Store;
 use tokio::sync::RwLock;
 
@@ -15,9 +15,9 @@ pub enum SyncResult {
 /// Supports to sync files concurrently.
 #[derive(Clone)]
 pub struct Batcher {
-    config: Config,
+    pub(crate) config: Config,
     capacity: usize,
-    tasks: Arc<RwLock<Vec<u64>>>, // files to sync
+    tasks: Arc<RwLock<HashSet<u64>>>, // files to sync
     store: Store,
     sync_send: SyncSender,
 }
@@ -33,12 +33,10 @@ impl Batcher {
         }
     }
 
-    pub async fn len(&self) -> usize {
-        self.tasks.read().await.len()
-    }
-
     pub async fn tasks(&self) -> Vec<u64> {
-        self.tasks.read().await.clone()
+        let mut result: Vec<u64> = self.tasks.read().await.iter().copied().collect();
+        result.sort();
+        result
     }
 
     pub async fn add(&self, tx_seq: u64) -> Result<bool> {
@@ -54,9 +52,7 @@ impl Batcher {
             return Ok(false);
         }
 
-        tasks.push(tx_seq);
-
-        Ok(true)
+        Ok(tasks.insert(tx_seq))
     }
 
     pub async fn reorg(&self, reverted_tx_seq: u64) {
@@ -67,19 +63,16 @@ impl Batcher {
     pub async fn poll(&self) -> Result<Option<(u64, SyncResult)>> {
         let mut result = None;
         let tasks = self.tasks.read().await.clone();
-        let mut index = tasks.len();
 
-        for (i, tx_seq) in tasks.iter().enumerate() {
+        for tx_seq in tasks.iter() {
             if let Some(ret) = self.poll_tx(*tx_seq).await? {
                 result = Some((*tx_seq, ret));
-                index = i;
                 break;
             }
         }
 
-        let mut tasks = self.tasks.write().await;
-        if index < tasks.len() {
-            tasks.swap_remove(index);
+        if let Some((tx_seq, _)) = &result {
+            self.tasks.write().await.remove(tx_seq);
         }
 
         Ok(result)
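Taken together, the Batcher hunks above swap the task queue from Vec<u64> to HashSet<u64>: `add` now reports duplicates through the bool returned by `HashSet::insert`, `tasks` hands out a sorted snapshot (set iteration order is unspecified), and `poll` removes a finished task by value instead of tracking its index for `swap_remove`. A minimal self-contained sketch of the resulting behaviour — `TaskSet` is a hypothetical stand-in for `Batcher`, with only tokio as a dependency:

use std::collections::HashSet;
use std::sync::Arc;
use tokio::sync::RwLock;

struct TaskSet {
    tasks: Arc<RwLock<HashSet<u64>>>, // files to sync, deduplicated
}

impl TaskSet {
    // Mirrors the new `add`: false means the task was already queued.
    async fn add(&self, tx_seq: u64) -> bool {
        self.tasks.write().await.insert(tx_seq)
    }

    // Mirrors the new `tasks`: sort the snapshot for a stable order.
    async fn tasks(&self) -> Vec<u64> {
        let mut result: Vec<u64> = self.tasks.read().await.iter().copied().collect();
        result.sort();
        result
    }

    // Mirrors the removal in `poll`: remove by value, not by index.
    async fn complete(&self, tx_seq: u64) -> bool {
        self.tasks.write().await.remove(&tx_seq)
    }
}

#[tokio::main]
async fn main() {
    let set = TaskSet { tasks: Default::default() };
    assert!(set.add(7).await);
    assert!(!set.add(7).await); // duplicate rejected for free
    set.add(3).await;
    assert_eq!(set.tasks().await, vec![3, 7]);
    set.complete(7).await;
    assert_eq!(set.tasks().await, vec![3]);
}

The `len` helper disappears as well; the `schedule` hunk further below drops its only call site.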
@@ -1,8 +1,5 @@
 use super::{batcher::Batcher, sync_store::SyncStore};
-use crate::{
-    auto_sync::{batcher::SyncResult, INTERVAL_ERROR, INTERVAL_IDLE},
-    Config, SyncSender,
-};
+use crate::{auto_sync::batcher::SyncResult, Config, SyncSender};
 use anyhow::Result;
 use serde::{Deserialize, Serialize};
 use std::sync::{
@@ -34,8 +31,7 @@ impl RandomBatcher {
         sync_store: Arc<SyncStore>,
     ) -> Self {
         Self {
-            // now, only 1 thread to sync file randomly
-            batcher: Batcher::new(config, 1, store, sync_send),
+            batcher: Batcher::new(config, config.max_random_workers, store, sync_send),
            sync_store,
         }
     }
@@ -57,7 +53,7 @@ impl RandomBatcher {
         // disable file sync until catched up
         if !catched_up.load(Ordering::Relaxed) {
             trace!("Cannot sync file in catch-up phase");
-            sleep(INTERVAL_IDLE).await;
+            sleep(self.batcher.config.auto_sync_idle_interval).await;
             continue;
         }
 
@@ -68,11 +64,11 @@ impl RandomBatcher {
                     "File sync still in progress or idle, state = {:?}",
                     self.get_state().await
                 );
-                sleep(INTERVAL_IDLE).await;
+                sleep(self.batcher.config.auto_sync_idle_interval).await;
             }
             Err(err) => {
                 warn!(%err, "Failed to sync file once, state = {:?}", self.get_state().await);
-                sleep(INTERVAL_ERROR).await;
+                sleep(self.batcher.config.auto_sync_error_interval).await;
             }
         }
     }
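Both batcher run loops (random above, serial below) now pull their back-off intervals from config instead of the module-level INTERVAL_IDLE / INTERVAL_ERROR constants that this PR deletes. A runnable sketch of the loop shape, using the PR's default values (3s idle, 10s error) as stand-in constants and a toy poll function invented for illustration:

use std::time::Duration;
use tokio::time::sleep;

// Stand-ins for config.auto_sync_idle_interval / auto_sync_error_interval
// (these values are the defaults introduced in this PR).
const IDLE_INTERVAL: Duration = Duration::from_secs(3);
const ERROR_INTERVAL: Duration = Duration::from_secs(10);

// Toy poll: completes a file on every third call, fails on every fifth.
async fn sync_once(i: u64) -> Result<Option<u64>, &'static str> {
    if i % 5 == 0 {
        Err("transient error")
    } else if i % 3 == 0 {
        Ok(Some(i))
    } else {
        Ok(None)
    }
}

#[tokio::main]
async fn main() {
    for i in 1..=10 {
        match sync_once(i).await {
            Ok(Some(tx_seq)) => println!("file sync completed, tx_seq = {tx_seq}"),
            // Nothing finished yet: wait the short idle interval.
            Ok(None) => sleep(IDLE_INTERVAL).await,
            // Failure: back off longer so a broken peer or store is not hammered.
            Err(err) => {
                eprintln!("sync failed: {err}");
                sleep(ERROR_INTERVAL).await;
            }
        }
    }
}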
@@ -100,10 +96,6 @@ impl RandomBatcher {
     }
 
     async fn schedule(&mut self) -> Result<bool> {
-        if self.batcher.len().await > 0 {
-            return Ok(false);
-        }
-
         let tx_seq = match self.sync_store.random_tx().await? {
             Some(v) => v,
             None => return Ok(false),
@@ -2,10 +2,7 @@ use super::{
     batcher::{Batcher, SyncResult},
     sync_store::SyncStore,
 };
-use crate::{
-    auto_sync::{INTERVAL_ERROR, INTERVAL_IDLE},
-    Config, SyncSender,
-};
+use crate::{Config, SyncSender};
 use anyhow::Result;
 use log_entry_sync::LogSyncEvent;
 use serde::{Deserialize, Serialize};
@@ -139,7 +136,7 @@ impl SerialBatcher {
         // disable file sync until catched up
         if !catched_up.load(Ordering::Relaxed) {
             trace!("Cannot sync file in catch-up phase");
-            sleep(INTERVAL_IDLE).await;
+            sleep(self.batcher.config.auto_sync_idle_interval).await;
             continue;
         }
 
@@ -151,11 +148,11 @@ impl SerialBatcher {
                     "File sync still in progress or idle, state = {:?}",
                     self.get_state().await
                 );
-                sleep(INTERVAL_IDLE).await;
+                sleep(self.batcher.config.auto_sync_idle_interval).await;
             }
             Err(err) => {
                 warn!(%err, "Failed to sync file once, state = {:?}", self.get_state().await);
-                sleep(INTERVAL_ERROR).await;
+                sleep(self.batcher.config.auto_sync_error_interval).await;
             }
         }
     }
@@ -4,8 +4,3 @@ pub mod batcher_serial;
 pub mod manager;
 pub mod sync_store;
 mod tx_store;
-
-use std::time::Duration;
-
-const INTERVAL_IDLE: Duration = Duration::from_secs(3);
-const INTERVAL_ERROR: Duration = Duration::from_secs(10);
@@ -6,7 +6,7 @@ use std::collections::HashMap;
 use peers::PeerState;
 use serde::{Deserialize, Serialize};
 
-pub use serial::{FailureReason, SerialSyncController, SyncState, MAX_CHUNKS_TO_REQUEST};
+pub use serial::{FailureReason, SerialSyncController, SyncState};
 
 #[derive(Debug, Clone, Copy, Serialize, Deserialize)]
 #[serde(rename_all = "camelCase")]
@@ -7,15 +7,12 @@ use shared_types::TxID;
 use std::collections::HashMap;
 use std::fmt::Debug;
 use std::sync::Arc;
-use std::time::{Duration, Instant};
+use std::time::Instant;
 use std::vec;
 use storage::config::{all_shards_available, ShardConfig};
 
 use crate::context::SyncNetworkContext;
-use crate::InstantWrapper;
+use crate::{Config, InstantWrapper};
 
-const PEER_CONNECT_TIMEOUT: Duration = Duration::from_secs(5);
-const PEER_DISCONNECT_TIMEOUT: Duration = Duration::from_secs(5);
-
 #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)]
 pub enum PeerState {
@@ -49,6 +46,7 @@ impl PeerInfo {
 
 #[derive(Default)]
 pub struct SyncPeers {
+    config: Config,
     peers: HashMap<PeerId, PeerInfo>,
     ctx: Option<Arc<SyncNetworkContext>>,
     file_location_cache: Option<(TxID, Arc<FileLocationCache>)>,
@@ -56,11 +54,13 @@ pub struct SyncPeers {
 
 impl SyncPeers {
     pub fn new(
+        config: Config,
         ctx: Arc<SyncNetworkContext>,
         tx_id: TxID,
         file_location_cache: Arc<FileLocationCache>,
     ) -> Self {
         Self {
+            config,
             peers: Default::default(),
             ctx: Some(ctx),
             file_location_cache: Some((tx_id, file_location_cache)),
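SyncPeers now stores the `Config` it is given instead of reading module-level timeout constants. Since `Config` derives `Clone, Copy` (visible in the `Config` hunks further below), it can be passed by value through every constructor without `clone()` calls or lifetimes. A simplified sketch of that threading pattern — the types here are trimmed stand-ins, not the real definitions:

use std::time::Duration;

#[derive(Clone, Copy, Debug)]
struct Config {
    peer_connect_timeout: Duration,
    peer_disconnect_timeout: Duration,
}

struct SyncPeers {
    config: Config,
}

impl SyncPeers {
    fn new(config: Config) -> Self {
        Self { config }
    }
}

struct SerialSyncController {
    config: Config,
    peers: SyncPeers,
}

impl SerialSyncController {
    fn new(config: Config) -> Self {
        // Because Config is Copy, it can be handed to the nested
        // constructor and still be stored on self afterwards.
        SerialSyncController { config, peers: SyncPeers::new(config) }
    }
}

fn main() {
    let cfg = Config {
        peer_connect_timeout: Duration::from_secs(5),
        peer_disconnect_timeout: Duration::from_secs(5),
    };
    let controller = SerialSyncController::new(cfg);
    assert_eq!(controller.peers.config.peer_connect_timeout, cfg.peer_connect_timeout);
}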
@@ -185,7 +185,7 @@ impl SyncPeers {
             PeerState::Found | PeerState::Connected => {}
 
             PeerState::Connecting => {
-                if info.since.elapsed() >= PEER_CONNECT_TIMEOUT {
+                if info.since.elapsed() >= self.config.peer_connect_timeout {
                     info!(%peer_id, %info.addr, "Peer connection timeout");
                     bad_peers.push(*peer_id);
 
@@ -206,7 +206,7 @@ impl SyncPeers {
             }
 
             PeerState::Disconnecting => {
-                if info.since.elapsed() >= PEER_DISCONNECT_TIMEOUT {
+                if info.since.elapsed() >= self.config.peer_disconnect_timeout {
                     info!(%peer_id, %info.addr, "Peer disconnect timeout");
                     bad_peers.push(*peer_id);
                 }
@@ -352,7 +352,7 @@ mod tests {
         sync_peers.add_new_peer(peer_id_connecting, addr.clone());
         sync_peers.update_state_force(&peer_id_connecting, PeerState::Connecting);
         sync_peers.peers.get_mut(&peer_id_connecting).unwrap().since =
-            (Instant::now() - PEER_CONNECT_TIMEOUT).into();
+            (Instant::now() - sync_peers.config.peer_connect_timeout).into();
 
         let peer_id_disconnecting = identity::Keypair::generate_ed25519().public().to_peer_id();
         sync_peers.add_new_peer(peer_id_disconnecting, addr.clone());
@@ -361,7 +361,7 @@ mod tests {
             .peers
             .get_mut(&peer_id_disconnecting)
             .unwrap()
-            .since = (Instant::now() - PEER_DISCONNECT_TIMEOUT).into();
+            .since = (Instant::now() - sync_peers.config.peer_disconnect_timeout).into();
 
         let peer_id_disconnected = identity::Keypair::generate_ed25519().public().to_peer_id();
         sync_peers.add_new_peer(peer_id_disconnected, addr);
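The updated tests keep their no-sleep timeout trick: rather than waiting out the timeout, they back-date a peer's `since` timestamp by the configured duration so that `elapsed() >= timeout` holds immediately. The idiom in isolation:

use std::time::{Duration, Instant};

fn main() {
    let peer_connect_timeout = Duration::from_secs(5); // the PR's default value
    // Shift the start time into the past by exactly the timeout...
    let since = Instant::now() - peer_connect_timeout;
    // ...so the timeout condition is already satisfied, with no sleeping.
    assert!(since.elapsed() >= peer_connect_timeout);
    println!("timeout condition holds immediately");
}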
@@ -1,7 +1,7 @@
 use crate::context::SyncNetworkContext;
 use crate::controllers::peers::{PeerState, SyncPeers};
 use crate::controllers::{FileSyncGoal, FileSyncInfo};
-use crate::InstantWrapper;
+use crate::{Config, InstantWrapper};
 use file_location_cache::FileLocationCache;
 use libp2p::swarm::DialError;
 use network::types::FindChunks;
@@ -11,20 +11,10 @@ use network::{
 };
 use rand::Rng;
 use shared_types::{timestamp_now, ChunkArrayWithProof, TxID, CHUNK_SIZE};
-use std::{
-    sync::Arc,
-    time::{Duration, Instant},
-};
+use std::{sync::Arc, time::Instant};
 use storage::log_store::log_manager::PORA_CHUNK_SIZE;
 use storage_async::Store;
 
-pub const MAX_CHUNKS_TO_REQUEST: u64 = 2 * 1024;
-const MAX_REQUEST_FAILURES: usize = 5;
-const PEER_REQUEST_TIMEOUT: Duration = Duration::from_secs(5);
-const DOWNLOAD_TIMEOUT: Duration = Duration::from_secs(5);
-const WAIT_OUTGOING_CONNECTION_TIMEOUT: Duration = Duration::from_secs(10);
-const NEXT_REQUEST_WAIT_TIME: Duration = Duration::from_secs(3);
-
 #[derive(Clone, Debug, PartialEq, Eq)]
 pub enum FailureReason {
     DBError(String),
@@ -62,6 +52,8 @@ pub enum SyncState {
 }
 
 pub struct SerialSyncController {
+    config: Config,
+
     // only used for log purpose
     tx_seq: u64,
 
@@ -99,6 +91,7 @@ pub struct SerialSyncController {
 
 impl SerialSyncController {
     pub fn new(
+        config: Config,
         tx_id: TxID,
         tx_start_chunk_in_flow: u64,
         goal: FileSyncGoal,
@@ -107,6 +100,7 @@ impl SerialSyncController {
         file_location_cache: Arc<FileLocationCache>,
     ) -> Self {
         SerialSyncController {
+            config,
             tx_seq: tx_id.seq,
             tx_id,
             tx_start_chunk_in_flow,
@@ -115,7 +109,7 @@ impl SerialSyncController {
             next_chunk: goal.index_start,
             failures: 0,
             state: SyncState::Idle,
-            peers: SyncPeers::new(ctx.clone(), tx_id, file_location_cache.clone()),
+            peers: SyncPeers::new(config, ctx.clone(), tx_id, file_location_cache.clone()),
             ctx,
             store,
             file_location_cache,
@@ -465,7 +459,8 @@ impl SerialSyncController {
                 // occurs when remote peer has higher block height
                 info!(%self.tx_seq, "Failed to validate chunks response due to no root found");
                 self.state = SyncState::AwaitingDownload {
-                    since: (Instant::now() + NEXT_REQUEST_WAIT_TIME).into(),
+                    since: (Instant::now() + self.config.peer_next_chunks_request_wait_timeout)
+                        .into(),
                 };
                 return;
             }
@@ -557,10 +552,10 @@ impl SerialSyncController {
 
         self.failures += 1;
 
-        if self.failures <= MAX_REQUEST_FAILURES {
+        if self.failures <= self.config.max_request_failures {
             // try again
             self.state = SyncState::AwaitingDownload {
-                since: (Instant::now() + NEXT_REQUEST_WAIT_TIME).into(),
+                since: (Instant::now() + self.config.peer_next_chunks_request_wait_timeout).into(),
             };
         } else {
             // ban and find new peer to download
@@ -622,7 +617,7 @@ impl SerialSyncController {
                 // storage node may not have the specific file when `FindFile`
                 // gossip message received. In this case, just broadcast the
                 // `FindFile` message again.
-                if since.elapsed() >= PEER_REQUEST_TIMEOUT {
+                if since.elapsed() >= self.config.peer_find_timeout {
                     debug!(%self.tx_seq, "Finding peer timeout and try to find peers again");
                     self.try_find_peers();
                 }
@@ -657,7 +652,7 @@ impl SerialSyncController {
             }
 
             SyncState::AwaitingOutgoingConnection { since } => {
-                if since.elapsed() < WAIT_OUTGOING_CONNECTION_TIMEOUT {
+                if since.elapsed() < self.config.peer_wait_outgoing_connection_timeout {
                     completed = true;
                 } else {
                     debug!(%self.tx_seq, "Waiting for outgoing connection timeout and try to find other peers to dial");
@@ -679,7 +674,7 @@ impl SerialSyncController {
                     // e.g. peer disconnected by remote node
                     debug!(%self.tx_seq, "No peer to continue downloading and try to find other peers to download");
                     self.state = SyncState::Idle;
-                } else if since.elapsed() >= DOWNLOAD_TIMEOUT {
+                } else if since.elapsed() >= self.config.peer_chunks_download_timeout {
                     self.handle_response_failure(peer_id, "RPC timeout");
                 } else {
                     completed = true;
@@ -1516,7 +1511,7 @@ mod tests {
             chunk_count,
         );
 
-        for i in 0..(MAX_REQUEST_FAILURES + 1) {
+        for i in 0..(controller.config.max_request_failures + 1) {
             controller.handle_response_failure(init_peer_id, "unit test");
             if let Some(msg) = network_recv.recv().await {
                 match msg {
@@ -1550,7 +1545,7 @@ mod tests {
             }
 
             assert_eq!(controller.failures, i + 1);
-            if i == MAX_REQUEST_FAILURES {
+            if i == controller.config.max_request_failures {
                 assert_eq!(*controller.get_status(), SyncState::Idle);
 
                 if let Some(msg) = network_recv.recv().await {
@@ -1626,6 +1621,7 @@ mod tests {
         let file_location_cache = create_file_location_cache(peer_id, vec![tx_id]);
 
         let controller = SerialSyncController::new(
+            Config::default(),
             tx_id,
             0,
             FileSyncGoal::new_file(num_chunks as u64),
@@ -20,13 +20,37 @@ use std::{
 #[derive(Clone, Copy, Debug, Deserialize)]
 #[serde(default)]
 pub struct Config {
+    // sync service config
+    #[serde(deserialize_with = "deserialize_duration")]
+    pub heartbeat_interval: Duration,
     pub auto_sync_enabled: bool,
     pub max_sync_files: usize,
     pub sync_file_by_rpc_enabled: bool,
     pub sync_file_on_announcement_enabled: bool,
 
-    // auto_sync config
+    // serial sync config
+    pub max_chunks_to_request: u64,
+    pub max_request_failures: usize,
+    #[serde(deserialize_with = "deserialize_duration")]
+    pub peer_connect_timeout: Duration,
+    #[serde(deserialize_with = "deserialize_duration")]
+    pub peer_disconnect_timeout: Duration,
+    #[serde(deserialize_with = "deserialize_duration")]
+    pub peer_find_timeout: Duration,
+    #[serde(deserialize_with = "deserialize_duration")]
+    pub peer_chunks_download_timeout: Duration,
+    #[serde(deserialize_with = "deserialize_duration")]
+    pub peer_wait_outgoing_connection_timeout: Duration,
+    #[serde(deserialize_with = "deserialize_duration")]
+    pub peer_next_chunks_request_wait_timeout: Duration,
+
+    // auto sync config
+    #[serde(deserialize_with = "deserialize_duration")]
+    pub auto_sync_idle_interval: Duration,
+    #[serde(deserialize_with = "deserialize_duration")]
+    pub auto_sync_error_interval: Duration,
     pub max_sequential_workers: usize,
+    pub max_random_workers: usize,
     #[serde(deserialize_with = "deserialize_duration")]
     pub find_peer_timeout: Duration,
 }
@@ -34,12 +58,28 @@ pub struct Config {
 impl Default for Config {
     fn default() -> Self {
         Self {
+            // sync service config
+            heartbeat_interval: Duration::from_secs(5),
             auto_sync_enabled: false,
             max_sync_files: 16,
             sync_file_by_rpc_enabled: true,
             sync_file_on_announcement_enabled: false,
 
+            // serial sync config
+            max_chunks_to_request: 2 * 1024,
+            max_request_failures: 5,
+            peer_connect_timeout: Duration::from_secs(5),
+            peer_disconnect_timeout: Duration::from_secs(5),
+            peer_find_timeout: Duration::from_secs(5),
+            peer_chunks_download_timeout: Duration::from_secs(5),
+            peer_wait_outgoing_connection_timeout: Duration::from_secs(10),
+            peer_next_chunks_request_wait_timeout: Duration::from_secs(3),
+
+            // auto sync config
+            auto_sync_idle_interval: Duration::from_secs(3),
+            auto_sync_error_interval: Duration::from_secs(10),
             max_sequential_workers: 8,
+            max_random_workers: 4,
             find_peer_timeout: Duration::from_secs(10),
         }
     }
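All of the former constants land in `Config` as `#[serde(default)]` fields, with durations deserialized via `deserialize_with = "deserialize_duration"` so they can be written as strings like "10s" in TOML. A compilable sketch of the pattern, assuming the duration-str crate provides the `deserialize_duration` helper (the actual import in the repo may differ) and trimming the field set down to two:

use duration_str::deserialize_duration;
use serde::Deserialize;
use std::time::Duration;

#[derive(Clone, Copy, Debug, Deserialize)]
#[serde(default)]
struct Config {
    #[serde(deserialize_with = "deserialize_duration")]
    heartbeat_interval: Duration,
    max_random_workers: usize,
}

impl Default for Config {
    fn default() -> Self {
        Self {
            heartbeat_interval: Duration::from_secs(5),
            max_random_workers: 4,
        }
    }
}

fn main() {
    // Durations parse from human-friendly strings; missing fields fall
    // back to the Default impl thanks to #[serde(default)].
    let cfg: Config = toml::from_str(r#"heartbeat_interval = "10s""#).unwrap();
    assert_eq!(cfg.heartbeat_interval, Duration::from_secs(10));
    assert_eq!(cfg.max_random_workers, 4);
}

The struct-level `#[serde(default)]` is what lets every new key stay commented out in the shipped config files (see the config hunks near the end of this diff).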
@@ -2,7 +2,6 @@ use crate::auto_sync::manager::AutoSyncManager;
 use crate::context::SyncNetworkContext;
 use crate::controllers::{
     FailureReason, FileSyncGoal, FileSyncInfo, SerialSyncController, SyncState,
-    MAX_CHUNKS_TO_REQUEST,
 };
 use crate::{Config, SyncServiceState};
 use anyhow::{bail, Result};
@@ -26,8 +25,6 @@ use storage::log_store::Store as LogStore;
 use storage_async::Store;
 use tokio::sync::{broadcast, mpsc, oneshot};
 
-const HEARTBEAT_INTERVAL_SEC: u64 = 5;
-
 pub type SyncSender = channel::Sender<SyncMessage, SyncRequest, SyncResponse>;
 pub type SyncReceiver = channel::Receiver<SyncMessage, SyncRequest, SyncResponse>;
 
@@ -126,9 +123,6 @@ pub struct SyncService {
     /// A collection of file sync controllers.
     controllers: HashMap<u64, SerialSyncController>,
 
-    /// Heartbeat interval for executing periodic tasks.
-    heartbeat: tokio::time::Interval,
-
     auto_sync_manager: Option<AutoSyncManager>,
 }
 
@@ -163,10 +157,6 @@ impl SyncService {
         catch_up_end_recv: oneshot::Receiver<()>,
     ) -> Result<SyncSender> {
         let (sync_send, sync_recv) = channel::Channel::unbounded();
 
-        let heartbeat =
-            tokio::time::interval(tokio::time::Duration::from_secs(HEARTBEAT_INTERVAL_SEC));
-
         let store = Store::new(store, executor.clone());
 
         // init auto sync
@@ -193,7 +183,6 @@ impl SyncService {
             store,
             file_location_cache,
             controllers: Default::default(),
-            heartbeat,
             auto_sync_manager,
         };
 
@@ -204,6 +193,8 @@ impl SyncService {
     }
 
     async fn main(&mut self) {
+        let mut heartbeat = tokio::time::interval(self.config.heartbeat_interval);
+
         loop {
             tokio::select! {
                 // received sync message
@@ -215,7 +206,7 @@ impl SyncService {
                 }
 
                 // heartbeat
-                _ = self.heartbeat.tick() => self.on_heartbeat(),
+                _ = heartbeat.tick() => self.on_heartbeat(),
             }
         }
     }
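The service no longer stores a `tokio::time::Interval` field; `main` builds the ticker locally from `config.heartbeat_interval` when the event loop starts, which is why the stored `heartbeat` field and its test fixtures are deleted in the surrounding hunks. A runnable miniature of the same `select!` shape — the message type and config value here are placeholders:

use std::time::Duration;
use tokio::sync::mpsc;

#[tokio::main]
async fn main() {
    let (tx, mut msg_recv) = mpsc::unbounded_channel::<String>();
    let heartbeat_interval = Duration::from_secs(5); // stands in for config.heartbeat_interval
    // Built inside the loop's scope instead of on the service struct.
    let mut heartbeat = tokio::time::interval(heartbeat_interval);

    tx.send("hello".into()).unwrap();
    drop(tx); // close the channel so this demo terminates

    loop {
        tokio::select! {
            // received sync message
            maybe_msg = msg_recv.recv() => match maybe_msg {
                Some(msg) => println!("message: {msg}"),
                None => break, // channel closed
            },
            // heartbeat for periodic tasks
            _ = heartbeat.tick() => println!("heartbeat"),
        }
    }
}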
@@ -417,7 +408,7 @@ impl SyncService {
         }
 
         // ban peer if requested too many chunks
-        if request.index_end - request.index_start > MAX_CHUNKS_TO_REQUEST {
+        if request.index_end - request.index_start > self.config.max_chunks_to_request {
             self.ctx.ban_peer(peer_id, "Too many chunks requested");
             return Ok(());
         }
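The request-size guard now compares against `config.max_chunks_to_request` (default 2 * 1024, matching the old `MAX_CHUNKS_TO_REQUEST` constant). The check in isolation, with the request type reduced to its two index fields:

struct ChunkRequest {
    index_start: u64,
    index_end: u64,
}

// ban peer if requested too many chunks
fn within_limit(req: &ChunkRequest, max_chunks_to_request: u64) -> bool {
    req.index_end - req.index_start <= max_chunks_to_request
}

fn main() {
    let max = 2 * 1024; // Config::default().max_chunks_to_request
    assert!(within_limit(&ChunkRequest { index_start: 0, index_end: 2048 }, max));
    assert!(!within_limit(&ChunkRequest { index_start: 0, index_end: 2049 }, max));
    println!("oversized requests would trigger ban_peer");
}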
@@ -649,6 +640,7 @@ impl SyncService {
         }
 
         entry.insert(SerialSyncController::new(
+            self.config,
             tx.id(),
             tx.start_entry_index(),
             FileSyncGoal::new(num_chunks, index_start, index_end),
@@ -916,8 +908,6 @@ mod tests {
         let (network_send, mut network_recv) = mpsc::unbounded_channel::<NetworkMessage>();
         let (_, sync_recv) = channel::Channel::unbounded();
 
-        let heartbeat = tokio::time::interval(Duration::from_secs(HEARTBEAT_INTERVAL_SEC));
-
         let mut sync = SyncService {
             config: Config::default(),
             msg_recv: sync_recv,
@@ -925,7 +915,6 @@ mod tests {
             store,
             file_location_cache,
             controllers: Default::default(),
-            heartbeat,
             auto_sync_manager: None,
         };
 
@@ -948,8 +937,6 @@ mod tests {
         let (network_send, mut network_recv) = mpsc::unbounded_channel::<NetworkMessage>();
         let (_, sync_recv) = channel::Channel::unbounded();
 
-        let heartbeat = tokio::time::interval(Duration::from_secs(HEARTBEAT_INTERVAL_SEC));
-
         let mut sync = SyncService {
             config: Config::default(),
             msg_recv: sync_recv,
@@ -957,7 +944,6 @@ mod tests {
             store,
             file_location_cache,
             controllers: Default::default(),
-            heartbeat,
             auto_sync_manager: None,
         };
 
@@ -1357,7 +1343,8 @@ mod tests {
         wait_for_tx_finalized(runtime.store, tx_seq).await;
 
         // test heartbeat
-        let deadline = Instant::now() + Duration::from_secs(HEARTBEAT_INTERVAL_SEC + 1);
+        let deadline =
+            Instant::now() + Config::default().heartbeat_interval + Duration::from_secs(1);
         while !matches!(sync_send
             .request(SyncRequest::SyncStatus { tx_seq })
             .await
@@ -241,3 +241,6 @@ auto_sync_enabled = true
 
 # Maximum threads to sync files in sequence.
 # max_sequential_workers = 8
+
+# Maximum threads to sync files randomly.
+# max_random_workers = 4
@@ -242,6 +242,9 @@
 # Maximum threads to sync files in sequence.
 # max_sequential_workers = 8
+
+# Maximum threads to sync files randomly.
+# max_random_workers = 4
 
 #######################################################################
 ### File Location Cache Options ###
 #######################################################################
@@ -1,31 +0,0 @@
-#!/bin/bash
-
-MINER_KEY=""
-MINE_CONTRACT=""
-BLOCKCHAIN_RPC=""
-FLOW_CONTRACT=""
-BLOCK_NUMBER=0
-PUBLIC_IP=$(curl -s https://ipinfo.io/ip)
-
-FILE=run/config.toml
-
-# enable sync
-sed -in-place='' 's/# \[sync\]/\[sync\]/g' $FILE
-# enable auto_sync
-sed -in-place='' 's/# auto_sync_enabled = false/auto_sync_enabled = true/g' $FILE
-# reduce timeout for finding peers
-sed -in-place='' 's/# find_peer_timeout = .*/find_peer_timeout = "10s"/g' $FILE
-# set public ip
-sed -in-place='' "s/# network_enr_address = .*/network_enr_address = \"$PUBLIC_IP\"/g" $FILE
-# set miner key
-sed -in-place='' "s/miner_key = \"\"/miner_key = \"$MINER_KEY\"/g" $FILE
-# set miner contract address
-sed -in-place='' "s/mine_contract_address = .*/mine_contract_address = \"$MINE_CONTRACT\"/g" $FILE
-# set blockchain rpc endpoint
-sed -in-place='' "s|blockchain_rpc_endpoint = .*|blockchain_rpc_endpoint = \"$BLOCKCHAIN_RPC\"|g" $FILE
-# set flow contract address
-sed -in-place='' "s/log_contract_address = .*/log_contract_address = \"$FLOW_CONTRACT\"/g" $FILE
-# set contract deployed block number
-sed -in-place='' "s/log_sync_start_block_number = .*/log_sync_start_block_number = $BLOCK_NUMBER/g" $FILE
-# update the boot node ids
-sed -in-place='' 's|network_boot_nodes = .*|network_boot_nodes = ["/ip4/54.219.26.22/udp/1234/p2p/16Uiu2HAmTVDGNhkHD98zDnJxQWu3i1FL1aFYeh9wiQTNu4pDCgps","/ip4/52.52.127.117/udp/1234/p2p/16Uiu2HAkzRjxK2gorngB1Xq84qDrT4hSVznYDHj6BkbaE4SGx9oS","/ip4/18.167.69.68/udp/1234/p2p/16Uiu2HAm2k6ua2mGgvZ8rTMV8GhpW71aVzkQWy7D37TTDuLCpgmX"]|g' $FILE