Compare commits


No commits in common. "6fb21d14246582237946c94fa72a3276607b68cb" and "4aae302df58418b773bfae6fe34becda38e92934" have entirely different histories.

13 changed files with 67 additions and 177 deletions

Cargo.lock (generated)
View File

@@ -844,7 +844,6 @@ dependencies = [
 name = "channel"
 version = "0.1.0"
 dependencies = [
- "metrics",
  "tokio",
 ]

View File

@@ -5,4 +5,3 @@ edition = "2021"
 [dependencies]
 tokio = { version = "1.19.2", features = ["sync", "time"] }
-metrics = { workspace = true }

View File

@@ -1,8 +1,6 @@
 use crate::error::Error;
-use metrics::{register_meter_with_group, Counter, CounterUsize, Histogram, Meter, Sample};
-use std::sync::Arc;
-use std::time::{Duration, Instant};
-use tokio::sync::mpsc::error::{SendError, TryRecvError};
+use std::time::Duration;
+use tokio::sync::mpsc::error::TryRecvError;
 use tokio::sync::{mpsc, oneshot};
 use tokio::time::timeout;
@@ -21,139 +19,56 @@ pub struct Channel<N, Req, Res> {
 }

 impl<N, Req, Res> Channel<N, Req, Res> {
-    pub fn unbounded(name: &str) -> (Sender<N, Req, Res>, Receiver<N, Req, Res>) {
+    pub fn unbounded() -> (Sender<N, Req, Res>, Receiver<N, Req, Res>) {
         let (sender, receiver) = mpsc::unbounded_channel();
-
-        let metrics_group = format!("common_channel_{}", name);
-        let metrics_queued = CounterUsize::register_with_group(metrics_group.as_str(), "size");
-
-        (
-            Sender {
-                chan: sender,
-                metrics_send_qps: register_meter_with_group(metrics_group.as_str(), "send"),
-                metrics_queued: metrics_queued.clone(),
-                metrics_timeout: CounterUsize::register_with_group(
-                    metrics_group.as_str(),
-                    "timeout",
-                ),
-            },
-            Receiver {
-                chan: receiver,
-                metrics_recv_qps: register_meter_with_group(metrics_group.as_str(), "recv"),
-                metrics_queued,
-                metrics_queue_latency: Sample::ExpDecay(0.015).register_with_group(
-                    metrics_group.as_str(),
-                    "latency",
-                    1024,
-                ),
-            },
-        )
-    }
-}
-
-enum TimedMessage<N, Req, Res> {
-    Notification(Instant, N),
-    Request(Instant, Req, ResponseSender<Res>),
-}
-
-impl<N, Req, Res> From<Message<N, Req, Res>> for TimedMessage<N, Req, Res> {
-    fn from(value: Message<N, Req, Res>) -> Self {
-        match value {
-            Message::Notification(n) => TimedMessage::Notification(Instant::now(), n),
-            Message::Request(req, res) => TimedMessage::Request(Instant::now(), req, res),
-        }
-    }
-}
-
-impl<N, Req, Res> TimedMessage<N, Req, Res> {
-    fn into_message(self) -> (Instant, Message<N, Req, Res>) {
-        match self {
-            TimedMessage::Notification(since, n) => (since, Message::Notification(n)),
-            TimedMessage::Request(since, req, res) => (since, Message::Request(req, res)),
-        }
+        (Sender { chan: sender }, Receiver { chan: receiver })
     }
 }

 pub struct Sender<N, Req, Res> {
-    chan: mpsc::UnboundedSender<TimedMessage<N, Req, Res>>,
-    metrics_send_qps: Arc<dyn Meter>,
-    metrics_queued: Arc<dyn Counter<usize>>,
-    metrics_timeout: Arc<dyn Counter<usize>>,
+    chan: mpsc::UnboundedSender<Message<N, Req, Res>>,
 }

 impl<N, Req, Res> Clone for Sender<N, Req, Res> {
     fn clone(&self) -> Self {
         Sender {
             chan: self.chan.clone(),
-            metrics_send_qps: self.metrics_send_qps.clone(),
-            metrics_queued: self.metrics_queued.clone(),
-            metrics_timeout: self.metrics_timeout.clone(),
         }
     }
 }

 impl<N, Req, Res> Sender<N, Req, Res> {
     pub fn notify(&self, msg: N) -> Result<(), Error<N, Req, Res>> {
-        self.send(Message::Notification(msg))
+        self.chan
+            .send(Message::Notification(msg))
+            .map_err(|e| Error::SendError(e))
     }

     pub async fn request(&self, request: Req) -> Result<Res, Error<N, Req, Res>> {
         let (sender, receiver) = oneshot::channel();
-        self.send(Message::Request(request, sender))?;
+        self.chan
+            .send(Message::Request(request, sender))
+            .map_err(|e| Error::SendError(e))?;

         timeout(DEFAULT_REQUEST_TIMEOUT, receiver)
             .await
-            .map_err(|_| {
-                self.metrics_timeout.inc(1);
-                Error::TimeoutError
-            })?
+            .map_err(|_| Error::TimeoutError)?
             .map_err(|e| Error::RecvError(e))
     }
-
-    fn send(&self, message: Message<N, Req, Res>) -> Result<(), Error<N, Req, Res>> {
-        match self.chan.send(message.into()) {
-            Ok(()) => {
-                self.metrics_send_qps.mark(1);
-                self.metrics_queued.inc(1);
-                Ok(())
-            }
-            Err(e) => {
-                let (_, msg) = e.0.into_message();
-                Err(Error::SendError(SendError(msg)))
-            }
-        }
-    }
 }

 pub struct Receiver<N, Req, Res> {
-    chan: mpsc::UnboundedReceiver<TimedMessage<N, Req, Res>>,
-    metrics_recv_qps: Arc<dyn Meter>,
-    metrics_queued: Arc<dyn Counter<usize>>,
-    metrics_queue_latency: Arc<dyn Histogram>,
+    chan: mpsc::UnboundedReceiver<Message<N, Req, Res>>,
 }

 impl<N, Req, Res> Receiver<N, Req, Res> {
     pub async fn recv(&mut self) -> Option<Message<N, Req, Res>> {
-        let data = self.chan.recv().await?;
-        Some(self.on_recv_data(data))
+        self.chan.recv().await
     }

     pub fn try_recv(&mut self) -> Result<Message<N, Req, Res>, TryRecvError> {
-        let data = self.chan.try_recv()?;
-        Ok(self.on_recv_data(data))
-    }
-
-    fn on_recv_data(&self, data: TimedMessage<N, Req, Res>) -> Message<N, Req, Res> {
-        self.metrics_recv_qps.mark(1);
-        self.metrics_queued.dec(1);
-
-        let (since, msg) = data.into_message();
-        self.metrics_queue_latency.update_since(since);
-
-        msg
+        self.chan.try_recv()
     }
 }
@@ -176,7 +91,7 @@ mod tests {
     #[tokio::test]
     async fn request_response() {
-        let (tx, mut rx) = Channel::<Notification, Request, Response>::unbounded("test");
+        let (tx, mut rx) = Channel::<Notification, Request, Response>::unbounded();

         let task1 = async move {
             match rx.recv().await.expect("not dropped") {

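With the metrics plumbing gone, Channel is a thin pairing of tokio's unbounded mpsc with a oneshot reply channel. A minimal usage sketch of the new API, reusing the Notification/Request/Response type names from the test above; the enum bodies and the crate-root re-export of Message are assumptions for illustration:

use channel::{Channel, Message};

enum Notification { Tick }
enum Request { GetStatus }
enum Response { Status(bool) }

#[tokio::main]
async fn main() {
    // No metrics group name is required anymore.
    let (tx, mut rx) = Channel::<Notification, Request, Response>::unbounded();

    // Responder task: notifications carry no reply channel, requests do.
    tokio::spawn(async move {
        while let Some(msg) = rx.recv().await {
            match msg {
                Message::Notification(Notification::Tick) => {}
                Message::Request(Request::GetStatus, res) => {
                    // `res` is the oneshot sender paired by request().
                    let _ = res.send(Response::Status(true));
                }
            }
        }
    });

    let _ = tx.notify(Notification::Tick);
    // request() still guards the reply with DEFAULT_REQUEST_TIMEOUT.
    if let Ok(Response::Status(ok)) = tx.request(Request::GetStatus).await {
        println!("status: {}", ok);
    }
}
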
View File

@@ -815,7 +815,7 @@ mod tests {
         let runtime = TestRuntime::default();
         let (network_globals, keypair) = Context::new_network_globals();
         let (network_send, network_recv) = mpsc::unbounded_channel();
-        let (sync_send, sync_recv) = channel::Channel::unbounded("test");
+        let (sync_send, sync_recv) = channel::Channel::unbounded();
         let (chunk_pool_send, _chunk_pool_recv) = mpsc::unbounded_channel();
         let store = LogManager::memorydb(LogConfig::default()).unwrap();

         Self {

View File

@@ -4,7 +4,7 @@ use crate::{error, Context};
 use futures::prelude::*;
 use jsonrpsee::core::async_trait;
 use jsonrpsee::core::RpcResult;
-use metrics::{DEFAULT_GROUPING_REGISTRY, DEFAULT_REGISTRY};
+use metrics::DEFAULT_REGISTRY;
 use network::{multiaddr::Protocol, Multiaddr};
 use std::collections::{BTreeMap, HashMap};
 use std::net::IpAddr;
@@ -266,21 +266,6 @@ impl RpcServer for RpcServerImpl {
             }
         }

-        for (group_name, metrics) in DEFAULT_GROUPING_REGISTRY.read().get_all() {
-            for (metric_name, metric) in metrics.iter() {
-                let name = format!("{}.{}", group_name, metric_name);
-                match &maybe_prefix {
-                    Some(prefix) if !name.starts_with(prefix) => {}
-                    _ => {
-                        result.insert(
-                            name,
-                            format!("{} {}", metric.get_type(), metric.get_value()),
-                        );
-                    }
-                }
-            }
-        }
-
         Ok(result)
     }
 }

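Only the flat DEFAULT_REGISTRY listing survives in this RPC after the hunk above. For reference, a sketch of what that remaining loop plausibly looks like, assuming the flat registry exposes the same get_all()/get_type()/get_value() surface as the grouped registry in the removed block (inferred, not shown in this diff):

// Hypothetical sketch; registry API inferred from the removed block above.
let mut result = BTreeMap::new();
for (name, metric) in DEFAULT_REGISTRY.read().get_all() {
    match &maybe_prefix {
        Some(prefix) if !name.starts_with(prefix) => {}
        _ => {
            result.insert(
                name.clone(),
                format!("{} {}", metric.get_type(), metric.get_value()),
            );
        }
    }
}
Ok(result)
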
View File

@@ -1,7 +1,7 @@
-use crate::{controllers::SyncState, SyncRequest, SyncResponse, SyncSender};
+use crate::{controllers::SyncState, Config, SyncRequest, SyncResponse, SyncSender};
 use anyhow::{bail, Result};
 use serde::{Deserialize, Serialize};
-use std::{collections::HashSet, fmt::Debug, sync::Arc, time::Duration};
+use std::{collections::HashSet, fmt::Debug, sync::Arc};
 use storage_async::Store;
 use tokio::sync::RwLock;
@@ -15,23 +15,18 @@ pub enum SyncResult {
 /// Supports to sync files concurrently.
 #[derive(Clone)]
 pub struct Batcher {
+    pub(crate) config: Config,
     capacity: usize,
-    find_peer_timeout: Duration,
     tasks: Arc<RwLock<HashSet<u64>>>, // files to sync
     store: Store,
     sync_send: SyncSender,
 }

 impl Batcher {
-    pub fn new(
-        capacity: usize,
-        find_peer_timeout: Duration,
-        store: Store,
-        sync_send: SyncSender,
-    ) -> Self {
+    pub fn new(config: Config, capacity: usize, store: Store, sync_send: SyncSender) -> Self {
         Self {
+            config,
             capacity,
-            find_peer_timeout,
             tasks: Default::default(),
             store,
             sync_send,
@@ -133,7 +128,7 @@ impl Batcher {
             // finding peers timeout
             Some(SyncState::FindingPeers { origin, .. })
-                if origin.elapsed() > self.find_peer_timeout =>
+                if origin.elapsed() > self.config.find_peer_timeout =>
             {
                 debug!(%tx_seq, "Terminate file sync due to finding peers timeout");
                 self.terminate_file_sync(tx_seq, false).await;
@@ -142,7 +137,7 @@ impl Batcher {
             // connecting peers timeout
             Some(SyncState::ConnectingPeers { origin, .. })
-                if origin.elapsed() > self.find_peer_timeout =>
+                if origin.elapsed() > self.config.find_peer_timeout =>
             {
                 debug!(%tx_seq, "Terminate file sync due to connecting peers timeout");
                 self.terminate_file_sync(tx_seq, false).await;

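Batcher now owns the entire sync Config rather than cherry-picked fields, so both the FindingPeers and ConnectingPeers checks compare against the single config.find_peer_timeout. A construction sketch, assuming `store` and `sync_send` are already built by the surrounding sync service:

// Sketch only: `store` and `sync_send` come from the caller's service setup.
let config = Config::default();
let capacity = config.max_random_workers; // or max_sequential_workers
let batcher = Batcher::new(config, capacity, store, sync_send);
// Within the crate, timeouts are then read as:
//     origin.elapsed() > self.config.find_peer_timeout
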
View File

@@ -22,7 +22,6 @@ pub struct RandomBatcherState {
 #[derive(Clone)]
 pub struct RandomBatcher {
-    config: Config,
     batcher: Batcher,
     sync_store: Arc<SyncStore>,
 }
@@ -35,13 +34,7 @@ impl RandomBatcher {
         sync_store: Arc<SyncStore>,
     ) -> Self {
         Self {
-            config,
-            batcher: Batcher::new(
-                config.max_random_workers,
-                config.random_find_peer_timeout,
-                store,
-                sync_send,
-            ),
+            batcher: Batcher::new(config, config.max_random_workers, store, sync_send),
             sync_store,
         }
     }
@@ -63,7 +56,7 @@ impl RandomBatcher {
             // disable file sync until catched up
             if !catched_up.load(Ordering::Relaxed) {
                 trace!("Cannot sync file in catch-up phase");
-                sleep(self.config.auto_sync_idle_interval).await;
+                sleep(self.batcher.config.auto_sync_idle_interval).await;
                 continue;
             }
@@ -80,11 +73,11 @@ impl RandomBatcher {
                     "File sync still in progress or idle, state = {:?}",
                     self.get_state().await
                 );
-                sleep(self.config.auto_sync_idle_interval).await;
+                sleep(self.batcher.config.auto_sync_idle_interval).await;
             }
             Err(err) => {
                 warn!(%err, "Failed to sync file once, state = {:?}", self.get_state().await);
-                sleep(self.config.auto_sync_error_interval).await;
+                sleep(self.batcher.config.auto_sync_error_interval).await;
             }
         }
     }

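Both RandomBatcher and SerialBatcher (next file) drop their duplicated config field and reach the intervals through the embedded batcher. The shared run-loop backoff pattern, sketched with sync_once() as a stand-in name for the real per-iteration call:

// Sketch of the backoff pattern visible in both batchers; `sync_once()`
// is a stand-in for the actual per-iteration sync call.
loop {
    match self.sync_once().await {
        Ok(true) => {} // made progress; poll again immediately
        Ok(false) => {
            // nothing to sync right now
            sleep(self.batcher.config.auto_sync_idle_interval).await;
        }
        Err(err) => {
            warn!(%err, "Failed to sync file once");
            sleep(self.batcher.config.auto_sync_error_interval).await;
        }
    }
}
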
View File

@@ -23,7 +23,6 @@ use tokio::{
 /// Supports to sync files in sequence concurrently.
 #[derive(Clone)]
 pub struct SerialBatcher {
-    config: Config,
     batcher: Batcher,

     /// Next tx seq to sync.
@@ -81,17 +80,13 @@ impl SerialBatcher {
         sync_send: SyncSender,
         sync_store: Arc<SyncStore>,
     ) -> Result<Self> {
+        let capacity = config.max_sequential_workers;
+
         // continue file sync from break point in db
         let (next_tx_seq, max_tx_seq) = sync_store.get_tx_seq_range().await?;

         Ok(Self {
-            config,
-            batcher: Batcher::new(
-                config.max_sequential_workers,
-                config.sequential_find_peer_timeout,
-                store,
-                sync_send,
-            ),
+            batcher: Batcher::new(config, capacity, store, sync_send),
             next_tx_seq: Arc::new(AtomicU64::new(next_tx_seq.unwrap_or(0))),
             max_tx_seq: Arc::new(AtomicU64::new(max_tx_seq.unwrap_or(u64::MAX))),
             pending_completed_txs: Default::default(),
@@ -141,7 +136,7 @@ impl SerialBatcher {
             // disable file sync until catched up
             if !catched_up.load(Ordering::Relaxed) {
                 trace!("Cannot sync file in catch-up phase");
-                sleep(self.config.auto_sync_idle_interval).await;
+                sleep(self.batcher.config.auto_sync_idle_interval).await;
                 continue;
             }
@@ -162,11 +157,11 @@ impl SerialBatcher {
                     "File sync still in progress or idle, state = {:?}",
                     self.get_state().await
                 );
-                sleep(self.config.auto_sync_idle_interval).await;
+                sleep(self.batcher.config.auto_sync_idle_interval).await;
             }
             Err(err) => {
                 warn!(%err, "Failed to sync file once, state = {:?}", self.get_state().await);
-                sleep(self.config.auto_sync_error_interval).await;
+                sleep(self.batcher.config.auto_sync_error_interval).await;
             }
         }
     }

View File

@@ -52,9 +52,7 @@ pub struct Config {
     pub max_sequential_workers: usize,
     pub max_random_workers: usize,
     #[serde(deserialize_with = "deserialize_duration")]
-    pub sequential_find_peer_timeout: Duration,
-    #[serde(deserialize_with = "deserialize_duration")]
-    pub random_find_peer_timeout: Duration,
+    pub find_peer_timeout: Duration,
 }

 impl Default for Config {
@@ -63,7 +61,7 @@ impl Default for Config {
             // sync service config
             heartbeat_interval: Duration::from_secs(5),
             auto_sync_enabled: false,
-            max_sync_files: 32,
+            max_sync_files: 16,
             sync_file_by_rpc_enabled: true,
             sync_file_on_announcement_enabled: false,
@@ -80,10 +78,9 @@ impl Default for Config {
             // auto sync config
             auto_sync_idle_interval: Duration::from_secs(3),
             auto_sync_error_interval: Duration::from_secs(10),
-            max_sequential_workers: 24,
-            max_random_workers: 8,
-            sequential_find_peer_timeout: Duration::from_secs(60),
-            random_find_peer_timeout: Duration::from_secs(500),
+            max_sequential_workers: 8,
+            max_random_workers: 4,
+            find_peer_timeout: Duration::from_secs(10),
         }
     }
 }

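A single find_peer_timeout of 10 seconds replaces the former 60s sequential and 500s random timeouts, and the default file/worker limits are halved. Read directly off the Default impl above:

// New defaults, per the hunk above.
let cfg = Config::default();
assert_eq!(cfg.max_sync_files, 16);
assert_eq!(cfg.max_sequential_workers, 8);
assert_eq!(cfg.max_random_workers, 4);
assert_eq!(cfg.find_peer_timeout, Duration::from_secs(10));
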
View File

@@ -157,7 +157,7 @@ impl SyncService {
         event_recv: broadcast::Receiver<LogSyncEvent>,
         catch_up_end_recv: oneshot::Receiver<()>,
     ) -> Result<SyncSender> {
-        let (sync_send, sync_recv) = channel::Channel::unbounded("sync");
+        let (sync_send, sync_recv) = channel::Channel::unbounded();
         let store = Store::new(store, executor.clone());

         // init auto sync
@@ -912,7 +912,7 @@ mod tests {
             create_file_location_cache(init_peer_id, vec![txs[0].id()]);

         let (network_send, mut network_recv) = mpsc::unbounded_channel::<NetworkMessage>();
-        let (_, sync_recv) = channel::Channel::unbounded("test");
+        let (_, sync_recv) = channel::Channel::unbounded();

         let mut sync = SyncService {
             config: Config::default(),
@@ -941,7 +941,7 @@ mod tests {
             create_file_location_cache(init_peer_id, vec![txs[0].id()]);

         let (network_send, mut network_recv) = mpsc::unbounded_channel::<NetworkMessage>();
-        let (_, sync_recv) = channel::Channel::unbounded("test");
+        let (_, sync_recv) = channel::Channel::unbounded();

         let mut sync = SyncService {
             config: Config::default(),

View File

@@ -228,7 +228,11 @@ reward_contract_address = "0x0496D0817BD8519e0de4894Dc379D35c35275609"
 auto_sync_enabled = true

 # Maximum number of files in sync from other peers simultaneously.
-# max_sync_files = 32
+max_sync_files = 32
+
+# Timeout to terminate a file sync when automatically sync from other peers.
+# If timeout, terminated file sync will be triggered later.
+# find_peer_timeout = "10s"

 # Enable to start a file sync via RPC (e.g. `admin_startSyncFile`).
 # sync_file_by_rpc_enabled = true
@@ -237,10 +241,10 @@ auto_sync_enabled = true
 # sync_file_on_announcement_enabled = false

 # Maximum threads to sync files in sequence.
-# max_sequential_workers = 24
+max_sequential_workers = 24

 # Maximum threads to sync files randomly.
-# max_random_workers = 8
+# max_random_workers = 4

 #######################################################################
 ### File Location Cache Options ###
@@ -261,4 +265,4 @@ auto_sync_enabled = true
 # Validity period of location information.
 # If the timestamp in the storage location information exceeds this duration from the current time, it will be removed from the cache.
-# entry_expiration_time_secs = 3600
+# entry_expiration_time_secs = 3600

View File

@@ -228,7 +228,11 @@ reward_contract_address = "0x51998C4d486F406a788B766d93510980ae1f9360"
 auto_sync_enabled = true

 # Maximum number of files in sync from other peers simultaneously.
-# max_sync_files = 32
+max_sync_files = 32
+
+# Timeout to terminate a file sync when automatically sync from other peers.
+# If timeout, terminated file sync will be triggered later.
+# find_peer_timeout = "10s"

 # Enable to start a file sync via RPC (e.g. `admin_startSyncFile`).
 # sync_file_by_rpc_enabled = true
@@ -237,10 +241,10 @@ auto_sync_enabled = true
 # sync_file_on_announcement_enabled = false

 # Maximum threads to sync files in sequence.
-# max_sequential_workers = 24
+max_sequential_workers = 24

 # Maximum threads to sync files randomly.
-# max_random_workers = 8
+# max_random_workers = 4

 #######################################################################
 ### File Location Cache Options ###
@@ -261,4 +265,4 @@ auto_sync_enabled = true
 # Validity period of location information.
 # If the timestamp in the storage location information exceeds this duration from the current time, it will be removed from the cache.
-# entry_expiration_time_secs = 3600
+# entry_expiration_time_secs = 3600

View File

@@ -227,7 +227,11 @@
 # auto_sync_enabled = false

 # Maximum number of files in sync from other peers simultaneously.
-# max_sync_files = 32
+# max_sync_files = 16
+
+# Timeout to terminate a file sync when automatically sync from other peers.
+# If timeout, terminated file sync will be triggered later.
+# find_peer_timeout = "10s"

 # Enable to start a file sync via RPC (e.g. `admin_startSyncFile`).
 # sync_file_by_rpc_enabled = true
@@ -236,10 +240,10 @@
 # sync_file_on_announcement_enabled = false

 # Maximum threads to sync files in sequence.
-# max_sequential_workers = 24
+# max_sequential_workers = 8

 # Maximum threads to sync files randomly.
-# max_random_workers = 8
+# max_random_workers = 4

 #######################################################################
 ### File Location Cache Options ###
@@ -260,4 +264,4 @@
 # Validity period of location information.
 # If the timestamp in the storage location information exceeds this duration from the current time, it will be removed from the cache.
-# entry_expiration_time_secs = 3600
+# entry_expiration_time_secs = 3600