Fix test.

Peilun Li 2024-08-22 14:25:06 +08:00
parent f43f76dd42
commit 09ea9d7d7d
5 changed files with 13 additions and 9 deletions

View File

@@ -8,6 +8,7 @@ use shared_types::{timestamp_now, TxID};
 use std::cmp::Reverse;
 use std::collections::HashMap;
 use storage::config::ShardConfig;
+use tracing::debug;
 
 /// Caches limited announcements of specified file from different peers.
 struct AnnouncementCache {

View File

@@ -17,10 +17,12 @@ pub struct FileSyncGoal {
     pub index_start: u64,
     /// Chunk index to sync to (exclusive).
     pub index_end: u64,
+    /// `true` if we are syncing all the needed data of this file.
+    pub all_chunks: bool,
 }
 
 impl FileSyncGoal {
-    pub fn new(num_chunks: u64, index_start: u64, index_end: u64) -> Self {
+    pub fn new(num_chunks: u64, index_start: u64, index_end: u64, all_chunks: bool) -> Self {
         assert!(
             index_start < index_end && index_end <= num_chunks,
             "invalid index_end"
@@ -29,15 +31,16 @@ impl FileSyncGoal {
             num_chunks,
             index_start,
             index_end,
+            all_chunks,
         }
     }
 
     pub fn new_file(num_chunks: u64) -> Self {
-        Self::new(num_chunks, 0, num_chunks)
+        Self::new(num_chunks, 0, num_chunks, true)
     }
 
     pub fn is_all_chunks(&self) -> bool {
-        self.index_start == 0 && self.index_end == self.num_chunks
+        self.all_chunks
     }
 }
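The behavioral change here is subtle: `is_all_chunks` used to be derived from the range, so an explicitly requested range that happened to span the whole file was indistinguishable from a whole-file sync. With the explicit flag, only goals constructed as whole-file syncs report true. A minimal, self-contained sketch of the new semantics (a standalone reimplementation for illustration, not the crate's actual module):

// Self-contained sketch of FileSyncGoal's new `all_chunks` semantics.
// Field and method names follow the diff; everything else is illustrative.
struct FileSyncGoal {
    num_chunks: u64,
    index_start: u64,
    index_end: u64,
    all_chunks: bool,
}

impl FileSyncGoal {
    fn new(num_chunks: u64, index_start: u64, index_end: u64, all_chunks: bool) -> Self {
        assert!(
            index_start < index_end && index_end <= num_chunks,
            "invalid index_end"
        );
        Self { num_chunks, index_start, index_end, all_chunks }
    }

    fn new_file(num_chunks: u64) -> Self {
        Self::new(num_chunks, 0, num_chunks, true)
    }

    fn is_all_chunks(&self) -> bool {
        // Old behavior: self.index_start == 0 && self.index_end == self.num_chunks
        self.all_chunks
    }
}

fn main() {
    // A caller-requested range that happens to cover every chunk is still a
    // range sync under the new flag...
    let range_goal = FileSyncGoal::new(8, 0, 8, false);
    assert!(!range_goal.is_all_chunks());

    // ...whereas the old range-based derivation would have reported true.
    let file_goal = FileSyncGoal::new_file(8);
    assert!(file_goal.is_all_chunks());
}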

View File

@@ -139,7 +139,7 @@ impl SerialSyncController {
         if let Some((start, end)) = maybe_range {
             // Sync new chunks regardless of previously downloaded file or chunks.
             // It's up to client to avoid duplicated chunks sync.
-            self.goal = FileSyncGoal::new(self.goal.num_chunks, start, end);
+            self.goal = FileSyncGoal::new(self.goal.num_chunks, start, end, false);
             self.next_chunk = start;
         } else if self.goal.is_all_chunks() {
             // retry the failed file sync at break point

View File

@@ -635,8 +635,8 @@ impl SyncService {
             bail!("File already exists");
         }
 
-        let (index_start, index_end) = match maybe_range {
-            Some((start, end)) => (start, end),
+        let (index_start, index_end, all_chunks) = match maybe_range {
+            Some((start, end)) => (start, end, false),
             None => {
                 let start = match Self::tx_sync_start_index(&self.store, &tx).await? {
                     Some(s) => s,
@@ -646,7 +646,7 @@ impl SyncService {
                         return Ok(());
                     }
                 };
-                (start, num_chunks)
+                (start, num_chunks, true)
             }
         };
@@ -658,7 +658,7 @@ impl SyncService {
             self.config,
             tx.id(),
             tx.start_entry_index(),
-            FileSyncGoal::new(num_chunks, index_start, index_end),
+            FileSyncGoal::new(num_chunks, index_start, index_end, all_chunks),
             self.ctx.clone(),
             self.store.clone(),
             self.file_location_cache.clone(),
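Read together with the FileSyncGoal change, the service now records where the request came from: an explicit `maybe_range` produces `all_chunks = false`, while the whole-file path (resuming from `tx_sync_start_index`) produces `true`. A condensed sketch of that mapping, reusing the FileSyncGoal type from the sketch above (`resume_start` and the helper signature are assumptions for illustration):

// Condensed sketch of how the sync goal is derived in the service;
// `resume_start` stands in for the value returned by tx_sync_start_index.
fn build_goal(
    num_chunks: u64,
    maybe_range: Option<(u64, u64)>,
    resume_start: u64,
) -> FileSyncGoal {
    let (index_start, index_end, all_chunks) = match maybe_range {
        // Explicit range requested by the caller: never a whole-file sync,
        // even if (start, end) covers every chunk.
        Some((start, end)) => (start, end, false),
        // No range: resume the whole-file sync from the break point.
        None => (resume_start, num_chunks, true),
    };
    FileSyncGoal::new(num_chunks, index_start, index_end, all_chunks)
}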

View File

@@ -68,7 +68,7 @@ class ZgsNode(TestNode):
             os.mkdir(self.data_dir)
         log_config_path = os.path.join(self.data_dir, self.config["log_config_file"])
         with open(log_config_path, "w") as f:
-            f.write("debug,hyper=info,h2=info")
+            f.write("trace,hyper=info,h2=info")
 
         initialize_toml_config(self.config_file, self.config)