mirror of https://github.com/0glabs/0g-storage-node.git (synced 2024-11-15 04:25:19 +00:00)

Fix test.

commit 09ea9d7d7d
parent f43f76dd42

@@ -8,6 +8,7 @@ use shared_types::{timestamp_now, TxID};
 use std::cmp::Reverse;
 use std::collections::HashMap;
 use storage::config::ShardConfig;
+use tracing::debug;
 
 /// Caches limited announcements of specified file from different peers.
 struct AnnouncementCache {

@@ -17,10 +17,12 @@ pub struct FileSyncGoal {
     pub index_start: u64,
     /// Chunk index to sync to (exclusive).
     pub index_end: u64,
+    /// `true` if we are syncing all the needed data of this file.
+    pub all_chunks: bool,
 }
 
 impl FileSyncGoal {
-    pub fn new(num_chunks: u64, index_start: u64, index_end: u64) -> Self {
+    pub fn new(num_chunks: u64, index_start: u64, index_end: u64, all_chunks: bool) -> Self {
         assert!(
             index_start < index_end && index_end <= num_chunks,
             "invalid index_end"

@@ -29,15 +31,16 @@ impl FileSyncGoal {
             num_chunks,
             index_start,
             index_end,
+            all_chunks,
         }
     }
 
     pub fn new_file(num_chunks: u64) -> Self {
-        Self::new(num_chunks, 0, num_chunks)
+        Self::new(num_chunks, 0, num_chunks, true)
     }
 
     pub fn is_all_chunks(&self) -> bool {
-        self.index_start == 0 && self.index_end == self.num_chunks
+        self.all_chunks
     }
 }
 

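The heart of the change: is_all_chunks() now reports stored intent instead of being derived from the index range. A minimal, self-contained sketch of the new shape (illustrative values only, not the crate's full type):

#[allow(dead_code)]
struct FileSyncGoal {
    num_chunks: u64,
    index_start: u64,
    index_end: u64,
    all_chunks: bool,
}

impl FileSyncGoal {
    fn new(num_chunks: u64, index_start: u64, index_end: u64, all_chunks: bool) -> Self {
        assert!(
            index_start < index_end && index_end <= num_chunks,
            "invalid index_end"
        );
        Self { num_chunks, index_start, index_end, all_chunks }
    }

    fn is_all_chunks(&self) -> bool {
        self.all_chunks
    }
}

fn main() {
    // Same range [40, 100) of a 100-chunk file, different intent:
    // a full-file sync that starts past already-stored chunks vs. an explicit range request.
    let full_sync_from_offset = FileSyncGoal::new(100, 40, 100, true);
    let explicit_range = FileSyncGoal::new(100, 40, 100, false);
    assert!(full_sync_from_offset.is_all_chunks());
    assert!(!explicit_range.is_all_chunks());
}

Under the old index-based check, both goals above would have been treated as partial syncs; the stored flag keeps a full-file sync recognizable even when its start index is advanced past chunks that are already stored.
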
@@ -139,7 +139,7 @@ impl SerialSyncController {
         if let Some((start, end)) = maybe_range {
             // Sync new chunks regardless of previously downloaded file or chunks.
             // It's up to client to avoid duplicated chunks sync.
-            self.goal = FileSyncGoal::new(self.goal.num_chunks, start, end);
+            self.goal = FileSyncGoal::new(self.goal.num_chunks, start, end, false);
             self.next_chunk = start;
         } else if self.goal.is_all_chunks() {
             // retry the failed file sync at break point

@@ -635,8 +635,8 @@ impl SyncService {
             bail!("File already exists");
         }
 
-        let (index_start, index_end) = match maybe_range {
-            Some((start, end)) => (start, end),
+        let (index_start, index_end, all_chunks) = match maybe_range {
+            Some((start, end)) => (start, end, false),
             None => {
                 let start = match Self::tx_sync_start_index(&self.store, &tx).await? {
                     Some(s) => s,

@@ -646,7 +646,7 @@ impl SyncService {
                         return Ok(());
                     }
                 };
-                (start, num_chunks)
+                (start, num_chunks, true)
             }
         };
 

@@ -658,7 +658,7 @@ impl SyncService {
             self.config,
             tx.id(),
             tx.start_entry_index(),
-            FileSyncGoal::new(num_chunks, index_start, index_end),
+            FileSyncGoal::new(num_chunks, index_start, index_end, all_chunks),
             self.ctx.clone(),
             self.store.clone(),
             self.file_location_cache.clone(),

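Read together, the three SyncService hunks decide the goal like this: an explicit client range becomes a partial sync (all_chunks = false), while the absence of a range means "sync everything still needed", from a computed start index up to num_chunks (all_chunks = true). A condensed sketch of that decision; the standalone helper select_goal is hypothetical, as the crate keeps this logic inline:

// Hypothetical helper, simplified from the diff: derive (index_start, index_end, all_chunks).
fn select_goal(num_chunks: u64, maybe_range: Option<(u64, u64)>, tx_sync_start: u64) -> (u64, u64, bool) {
    match maybe_range {
        // Explicit client range: partial sync.
        Some((start, end)) => (start, end, false),
        // No range: sync from the first still-needed chunk to the end, flagged as all chunks.
        None => (tx_sync_start, num_chunks, true),
    }
}

fn main() {
    assert_eq!(select_goal(100, Some((10, 20)), 0), (10, 20, false));
    assert_eq!(select_goal(100, None, 40), (40, 100, true));
}
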
@@ -68,7 +68,7 @@ class ZgsNode(TestNode):
         os.mkdir(self.data_dir)
         log_config_path = os.path.join(self.data_dir, self.config["log_config_file"])
         with open(log_config_path, "w") as f:
-            f.write("debug,hyper=info,h2=info")
+            f.write("trace,hyper=info,h2=info")
 
         initialize_toml_config(self.config_file, self.config)
 

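The string the test writes is in the usual tracing filter-directive syntax (a default level followed by per-target overrides), so this change raises the node's default log level from debug to trace while keeping the noisy hyper and h2 targets at info. A sketch of how such a directive behaves, assuming the node hands the file's contents to tracing-subscriber's EnvFilter (that parsing code is outside this diff):

// Assumes tracing-subscriber with the `env-filter` feature; illustrative only.
use tracing_subscriber::EnvFilter;

fn main() {
    // Default level `trace`; `hyper` and `h2` targets are capped at `info`.
    let filter = EnvFilter::new("trace,hyper=info,h2=info");
    tracing_subscriber::fmt().with_env_filter(filter).init();

    tracing::trace!("emitted: the default level is now trace");
    tracing::debug!(target: "h2", "dropped: h2 stays at info");
}
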