Compare commits

1 Commit

Author:  peilun-conflux
SHA1:    0812d2e748
Message: Merge 3957f8b28d into 59d24b073d
Date:    2024-09-13 07:41:36 +08:00

6 changed files with 1 addition and 36 deletions

View File

@@ -1,14 +1,10 @@
 use std::sync::Arc;

-use metrics::{
-    register_meter, register_timer, Counter, CounterUsize, Histogram, Meter, Sample, Timer,
-};
+use metrics::{register_timer, Counter, CounterUsize, Histogram, Sample, Timer};

 lazy_static::lazy_static! {
     pub static ref SERIAL_SYNC_FILE_COMPLETED: Arc<dyn Timer> = register_timer("sync_controllers_serial_sync_file_completed");
     pub static ref SERIAL_SYNC_CHUNKS_COMPLETED: Arc<dyn Timer> = register_timer("sync_controllers_serial_sync_chunks_completed");
-    pub static ref SERIAL_SYNC_SEGMENT_BANDWIDTH: Arc<dyn Meter> = register_meter("sync_controllers_serial_sync_segment_bandwidth");
     pub static ref SERIAL_SYNC_SEGMENT_LATENCY: Arc<dyn Histogram> = Sample::ExpDecay(0.015).register("sync_controllers_serial_sync_segment_latency", 1024);
     pub static ref SERIAL_SYNC_SEGMENT_TIMEOUT: Arc<dyn Counter<usize>> = CounterUsize::register("sync_controllers_serial_sync_segment_timeout");
     pub static ref SERIAL_SYNC_UNEXPECTED_ERRORS: Arc<dyn Counter<usize>> = CounterUsize::register("sync_controllers_serial_sync_unexpected_errors");

View File

@@ -11,7 +11,6 @@ use network::{
 };
 use rand::Rng;
 use shared_types::{timestamp_now, ChunkArrayWithProof, TxID, CHUNK_SIZE};
-use ssz::Encode;
 use std::{sync::Arc, time::Instant};
 use storage::log_store::log_manager::{sector_to_segment, segment_to_sector, PORA_CHUNK_SIZE};
 use storage_async::Store;
@@ -256,17 +255,6 @@ impl SerialSyncController {
     /// Randomly select a peer to sync the next segment.
     fn try_request_next(&mut self) {
-        // limits network bandwidth if configured
-        if self.config.max_bandwidth_bytes > 0 {
-            let m1 = metrics::SERIAL_SYNC_SEGMENT_BANDWIDTH.rate1() as u64;
-            if m1 > self.config.max_bandwidth_bytes {
-                self.state = SyncState::AwaitingDownload {
-                    since: (Instant::now() + self.config.bandwidth_wait_timeout).into(),
-                };
-                return;
-            }
-        }
-
         // request next chunk array
         let from_chunk = self.next_chunk;
         let to_chunk = std::cmp::min(from_chunk + PORA_CHUNK_SIZE as u64, self.goal.index_end);
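Note: the block removed above was the bandwidth throttle: before requesting the next segment, the controller compared the one-minute rate reported by the SERIAL_SYNC_SEGMENT_BANDWIDTH meter against config.max_bandwidth_bytes and, if the budget was exceeded, parked itself in SyncState::AwaitingDownload for bandwidth_wait_timeout before retrying. The following is a minimal standalone sketch of that decision only; ThrottleConfig and should_pause_until are hypothetical names, not the controller's real types:

    use std::time::{Duration, Instant};

    /// Hypothetical stand-in for the two removed Config fields.
    struct ThrottleConfig {
        /// 0 means "no limit", matching the removed default.
        max_bandwidth_bytes: u64,
        bandwidth_wait_timeout: Duration,
    }

    /// Returns the instant until which the next request should be deferred,
    /// or None if the controller may request immediately.
    fn should_pause_until(cfg: &ThrottleConfig, rate1_bytes_per_sec: u64) -> Option<Instant> {
        if cfg.max_bandwidth_bytes > 0 && rate1_bytes_per_sec > cfg.max_bandwidth_bytes {
            // Over budget: back off for the configured wait timeout.
            Some(Instant::now() + cfg.bandwidth_wait_timeout)
        } else {
            None
        }
    }

    fn main() {
        let cfg = ThrottleConfig {
            max_bandwidth_bytes: 1024 * 1024, // example budget of ~1 MiB/s
            bandwidth_wait_timeout: Duration::from_secs(5),
        };
        // A reported rate of 2 MiB/s exceeds the budget, so the request is deferred.
        assert!(should_pause_until(&cfg, 2 * 1024 * 1024).is_some());
        // With the removed default of 0, throttling is disabled entirely.
        let unlimited = ThrottleConfig { max_bandwidth_bytes: 0, ..cfg };
        assert!(should_pause_until(&unlimited, u64::MAX).is_none());
    }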
@@ -419,8 +407,6 @@ impl SerialSyncController {
     }

     pub async fn on_response(&mut self, from_peer_id: PeerId, response: ChunkArrayWithProof) {
-        metrics::SERIAL_SYNC_SEGMENT_BANDWIDTH.mark(response.ssz_bytes_len());
-
         if self.handle_on_response_mismatch(from_peer_id) {
             return;
         }
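Note: the line removed above fed that meter: every received segment marked response.ssz_bytes_len() on SERIAL_SYNC_SEGMENT_BANDWIDTH, so rate1() tracked recent download throughput in bytes per second. As an illustration of the mark/rate idea only (the metrics crate's Meter uses exponentially weighted moving averages, not this), here is a toy sliding-window byte-rate tracker with hypothetical names:

    use std::collections::VecDeque;
    use std::time::{Duration, Instant};

    /// Toy sliding-window tracker; WindowRate and its methods are hypothetical.
    struct WindowRate {
        window: Duration,
        samples: VecDeque<(Instant, u64)>,
    }

    impl WindowRate {
        fn new(window: Duration) -> Self {
            Self { window, samples: VecDeque::new() }
        }

        /// Record `bytes` downloaded now (counterpart of marking the meter).
        fn mark(&mut self, bytes: u64) {
            self.samples.push_back((Instant::now(), bytes));
        }

        /// Average bytes per second over the window (counterpart of rate1()).
        fn rate(&mut self) -> u64 {
            if let Some(cutoff) = Instant::now().checked_sub(self.window) {
                while matches!(self.samples.front(), Some(&(t, _)) if t < cutoff) {
                    self.samples.pop_front();
                }
            }
            let total: u64 = self.samples.iter().map(|&(_, b)| b).sum();
            total / self.window.as_secs().max(1)
        }
    }

    fn main() {
        let mut rate = WindowRate::new(Duration::from_secs(60));
        rate.mark(256 * 1024); // one 256 KiB segment
        rate.mark(256 * 1024); // another
        println!("~{} B/s over the last minute", rate.rate());
    }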

View File

@@ -43,9 +43,6 @@ pub struct Config {
     pub peer_wait_outgoing_connection_timeout: Duration,
     #[serde(deserialize_with = "deserialize_duration")]
     pub peer_next_chunks_request_wait_timeout: Duration,
-    pub max_bandwidth_bytes: u64,
-    #[serde(deserialize_with = "deserialize_duration")]
-    pub bandwidth_wait_timeout: Duration,

     // auto sync config
     #[serde(deserialize_with = "deserialize_duration")]
@@ -79,8 +76,6 @@ impl Default for Config {
             peer_chunks_download_timeout: Duration::from_secs(15),
             peer_wait_outgoing_connection_timeout: Duration::from_secs(10),
             peer_next_chunks_request_wait_timeout: Duration::from_secs(3),
-            max_bandwidth_bytes: 0,
-            bandwidth_wait_timeout: Duration::from_secs(5),

             // auto sync config
             auto_sync_idle_interval: Duration::from_secs(3),
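Note: the removed defaults were max_bandwidth_bytes = 0 (no limit) and bandwidth_wait_timeout = 5s. A minimal sketch of those defaults on a hypothetical standalone BandwidthConfig type (not the crate's real Config):

    use std::time::Duration;

    /// Hypothetical container for the two removed knobs.
    #[derive(Debug)]
    struct BandwidthConfig {
        max_bandwidth_bytes: u64, // 0 == unlimited
        bandwidth_wait_timeout: Duration,
    }

    impl Default for BandwidthConfig {
        fn default() -> Self {
            Self {
                max_bandwidth_bytes: 0,
                bandwidth_wait_timeout: Duration::from_secs(5),
            }
        }
    }

    fn main() {
        let cfg = BandwidthConfig::default();
        // The default of 0 disables the throttle check sketched earlier.
        assert_eq!(cfg.max_bandwidth_bytes, 0);
        println!("{:?}", cfg);
    }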

View File

@@ -263,10 +263,6 @@ auto_sync_enabled = true
 # Timeout to download data from remote peer.
 # peer_chunks_download_timeout = "15s"

-# Maximum network bandwidth (B/s) to sync files. Default value is 0,
-# which indicates no limitation.
-# max_bandwidth_bytes = 0
-
 # Maximum threads to sync files in sequence.
 # max_sequential_workers = 0

View File

@@ -275,10 +275,6 @@ auto_sync_enabled = true
 # Timeout to download data from remote peer.
 # peer_chunks_download_timeout = "15s"

-# Maximum network bandwidth (B/s) to sync files. Default value is 0,
-# which indicates no limitation.
-# max_bandwidth_bytes = 0
-
 # Maximum threads to sync files in sequence.
 # max_sequential_workers = 0

View File

@@ -277,10 +277,6 @@
 # Timeout to download data from remote peer.
 # peer_chunks_download_timeout = "15s"

-# Maximum network bandwidth (B/s) to sync files. Default value is 0,
-# which indicates no limitation.
-# max_bandwidth_bytes = 0
-
 # Maximum threads to sync files in sequence.
 # max_sequential_workers = 0