Compare commits

...

3 Commits

Author          SHA1        Message                                                          Date
peilun-conflux  d2964f8252  Merge 3957f8b28d into a9f5169c15                                 2024-09-13 16:58:50 +08:00
Bo QIU          a9f5169c15  Allow user to configure network bandwidth for file sync (#198)  2024-09-13 16:58:27 +08:00
Peilun Li       3957f8b28d  Add TopicScoreParams for gossipsubs.                             2024-09-09 17:58:41 +08:00
7 changed files with 61 additions and 2 deletions

View File

@@ -13,6 +13,7 @@ use crate::types::{GossipEncoding, GossipKind, GossipTopic, SnappyTransform};
 use crate::{error, metrics, Enr, NetworkGlobals, PubsubMessage, TopicHash};
 use futures::stream::StreamExt;
 use libp2p::gossipsub::error::PublishError;
+use libp2p::gossipsub::TopicScoreParams;
 use libp2p::{
     core::{
         connection::ConnectionId, identity::Keypair, multiaddr::Protocol as MProtocol, Multiaddr,
@@ -226,7 +227,30 @@ impl<AppReqId: ReqId> Behaviour<AppReqId> {
         // trace!(behaviour_log, "Using peer score params"; "params" => ?params);
-        let params = libp2p::gossipsub::PeerScoreParams::default();
+        let mut params = libp2p::gossipsub::PeerScoreParams::default();
+        let get_hash = |kind: GossipKind| -> TopicHash {
+            let topic: Topic = GossipTopic::new(kind, GossipEncoding::default()).into();
+            topic.hash()
+        };
+        params
+            .topics
+            .insert(get_hash(GossipKind::FindFile), TopicScoreParams::default());
+        params.topics.insert(
+            get_hash(GossipKind::FindChunks),
+            TopicScoreParams::default(),
+        );
+        params.topics.insert(
+            get_hash(GossipKind::AnnounceFile),
+            TopicScoreParams::default(),
+        );
+        params.topics.insert(
+            get_hash(GossipKind::AnnounceShardConfig),
+            TopicScoreParams::default(),
+        );
+        params.topics.insert(
+            get_hash(GossipKind::AnnounceChunks),
+            TopicScoreParams::default(),
+        );

         // Set up a scoring update interval
         let update_gossipsub_scores = tokio::time::interval(params.decay_interval);
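The hunk above registers identical default scoring parameters for each of the five gossip topics. As a reading aid only (not part of the change), the same registration could be written as a loop over the topic kinds; this sketch assumes the same scope as the diff, where `params`, `Topic`, and the crate's `GossipKind`/`GossipTopic`/`GossipEncoding` types are available:

    // Illustrative, more compact equivalent of the five inserts above:
    // build each topic hash once and attach default scoring parameters.
    for kind in [
        GossipKind::FindFile,
        GossipKind::FindChunks,
        GossipKind::AnnounceFile,
        GossipKind::AnnounceShardConfig,
        GossipKind::AnnounceChunks,
    ] {
        let topic: Topic = GossipTopic::new(kind, GossipEncoding::default()).into();
        params.topics.insert(topic.hash(), TopicScoreParams::default());
    }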

View File

@@ -1,10 +1,14 @@
 use std::sync::Arc;

-use metrics::{register_timer, Counter, CounterUsize, Histogram, Sample, Timer};
+use metrics::{
+    register_meter, register_timer, Counter, CounterUsize, Histogram, Meter, Sample, Timer,
+};

 lazy_static::lazy_static! {
     pub static ref SERIAL_SYNC_FILE_COMPLETED: Arc<dyn Timer> = register_timer("sync_controllers_serial_sync_file_completed");
     pub static ref SERIAL_SYNC_CHUNKS_COMPLETED: Arc<dyn Timer> = register_timer("sync_controllers_serial_sync_chunks_completed");
+    pub static ref SERIAL_SYNC_SEGMENT_BANDWIDTH: Arc<dyn Meter> = register_meter("sync_controllers_serial_sync_segment_bandwidth");
     pub static ref SERIAL_SYNC_SEGMENT_LATENCY: Arc<dyn Histogram> = Sample::ExpDecay(0.015).register("sync_controllers_serial_sync_segment_latency", 1024);
     pub static ref SERIAL_SYNC_SEGMENT_TIMEOUT: Arc<dyn Counter<usize>> = CounterUsize::register("sync_controllers_serial_sync_segment_timeout");
     pub static ref SERIAL_SYNC_UNEXPECTED_ERRORS: Arc<dyn Counter<usize>> = CounterUsize::register("sync_controllers_serial_sync_unexpected_errors");
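The new SERIAL_SYNC_SEGMENT_BANDWIDTH meter is the sensor for the bandwidth limit: the sync controller marks every downloaded segment on it and later reads its recent rate. A minimal usage sketch, assuming only the API visible in this pull request (`register_meter`, `mark`, `rate1`); the meter name below is made up:

    // Hypothetical meter, for illustration only.
    let bandwidth = register_meter("example_segment_bandwidth");
    bandwidth.mark(256 * 1024);                   // record a 256 KiB segment download
    let bytes_per_sec = bandwidth.rate1() as u64; // recent throughput, compared against the cap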

View File

@@ -11,6 +11,7 @@ use network::{
 };
 use rand::Rng;
 use shared_types::{timestamp_now, ChunkArrayWithProof, TxID, CHUNK_SIZE};
+use ssz::Encode;
 use std::{sync::Arc, time::Instant};
 use storage::log_store::log_manager::{sector_to_segment, segment_to_sector, PORA_CHUNK_SIZE};
 use storage_async::Store;
@@ -255,6 +256,17 @@ impl SerialSyncController {
     /// Randomly select a peer to sync the next segment.
     fn try_request_next(&mut self) {
+        // limits network bandwidth if configured
+        if self.config.max_bandwidth_bytes > 0 {
+            let m1 = metrics::SERIAL_SYNC_SEGMENT_BANDWIDTH.rate1() as u64;
+            if m1 > self.config.max_bandwidth_bytes {
+                self.state = SyncState::AwaitingDownload {
+                    since: (Instant::now() + self.config.bandwidth_wait_timeout).into(),
+                };
+                return;
+            }
+        }
+
         // request next chunk array
         let from_chunk = self.next_chunk;
         let to_chunk = std::cmp::min(from_chunk + PORA_CHUNK_SIZE as u64, self.goal.index_end);
@@ -407,6 +419,8 @@
     }

     pub async fn on_response(&mut self, from_peer_id: PeerId, response: ChunkArrayWithProof) {
+        metrics::SERIAL_SYNC_SEGMENT_BANDWIDTH.mark(response.ssz_bytes_len());
+
         if self.handle_on_response_mismatch(from_peer_id) {
             return;
         }

View File

@@ -43,6 +43,9 @@ pub struct Config {
     pub peer_wait_outgoing_connection_timeout: Duration,
     #[serde(deserialize_with = "deserialize_duration")]
     pub peer_next_chunks_request_wait_timeout: Duration,
+    pub max_bandwidth_bytes: u64,
+    #[serde(deserialize_with = "deserialize_duration")]
+    pub bandwidth_wait_timeout: Duration,

     // auto sync config
     #[serde(deserialize_with = "deserialize_duration")]
@@ -76,6 +79,8 @@ impl Default for Config {
             peer_chunks_download_timeout: Duration::from_secs(15),
             peer_wait_outgoing_connection_timeout: Duration::from_secs(10),
             peer_next_chunks_request_wait_timeout: Duration::from_secs(3),
+            max_bandwidth_bytes: 0,
+            bandwidth_wait_timeout: Duration::from_secs(5),

             // auto sync config
             auto_sync_idle_interval: Duration::from_secs(3),
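The defaults above (0 bytes and 5 s) leave throttling disabled. A hedged sketch of enabling it programmatically; only the two field names come from the diff above, the rest is illustrative:

    use std::time::Duration;

    // Illustrative override of the sync Config shown above:
    // cap file sync at 4 MiB/s and back off for 10 s whenever the cap is hit.
    let mut config = Config::default();
    config.max_bandwidth_bytes = 4 * 1024 * 1024;
    config.bandwidth_wait_timeout = Duration::from_secs(10);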

View File

@@ -263,6 +263,10 @@ auto_sync_enabled = true
 # Timeout to download data from remote peer.
 # peer_chunks_download_timeout = "15s"

+# Maximum network bandwidth (B/s) to sync files. Default value is 0,
+# which indicates no limitation.
+# max_bandwidth_bytes = 0
+
 # Maximum threads to sync files in sequence.
 # max_sequential_workers = 0

View File

@@ -275,6 +275,10 @@ auto_sync_enabled = true
 # Timeout to download data from remote peer.
 # peer_chunks_download_timeout = "15s"

+# Maximum network bandwidth (B/s) to sync files. Default value is 0,
+# which indicates no limitation.
+# max_bandwidth_bytes = 0
+
 # Maximum threads to sync files in sequence.
 # max_sequential_workers = 0

View File

@@ -277,6 +277,10 @@
 # Timeout to download data from remote peer.
 # peer_chunks_download_timeout = "15s"

+# Maximum network bandwidth (B/s) to sync files. Default value is 0,
+# which indicates no limitation.
+# max_bandwidth_bytes = 0
+
 # Maximum threads to sync files in sequence.
 # max_sequential_workers = 0