From f43f76dd42e091ea825888261c1a535416acc4a5 Mon Sep 17 00:00:00 2001
From: Peilun Li
Date: Wed, 21 Aug 2024 19:31:46 +0800
Subject: [PATCH 1/3] Set sync start index based on data in db.

---
 node/storage/src/log_store/log_manager.rs | 31 +++++++++------
 node/sync/src/controllers/serial.rs       | 15 ++++----
 node/sync/src/service.rs                  | 47 +++++++++++++++++++++--
 3 files changed, 70 insertions(+), 23 deletions(-)

diff --git a/node/storage/src/log_store/log_manager.rs b/node/storage/src/log_store/log_manager.rs
index 64ea1cf..c0e4874 100644
--- a/node/storage/src/log_store/log_manager.rs
+++ b/node/storage/src/log_store/log_manager.rs
@@ -609,9 +609,9 @@ impl LogManager {
                     .get_tx_by_seq_number(last_tx_seq)?
                     .expect("tx missing");
                 let mut current_len = initial_data.leaves();
-                let expected_len = (last_tx.start_entry_index + last_tx.num_entries() as u64)
-                    / PORA_CHUNK_SIZE as u64;
-                match expected_len.cmp(&(current_len as u64)) {
+                let expected_len =
+                    sector_to_segment(last_tx.start_entry_index + last_tx.num_entries() as u64);
+                match expected_len.cmp(&(current_len)) {
                     Ordering::Less => {
                         bail!(
                             "Unexpected DB: merkle tree larger than the known data size,\
@@ -634,10 +634,9 @@ impl LogManager {
                             let previous_tx = tx_store
                                 .get_tx_by_seq_number(last_tx_seq - 1)?
                                 .expect("tx missing");
-                            let expected_len = ((previous_tx.start_entry_index
-                                + previous_tx.num_entries() as u64)
-                                / PORA_CHUNK_SIZE as u64)
-                                as usize;
+                            let expected_len = sector_to_segment(
+                                previous_tx.start_entry_index + previous_tx.num_entries() as u64,
+                            );
                             if current_len > expected_len {
                                 while let Some((subtree_depth, _)) = initial_data.subtree_list.pop()
                                 {
@@ -737,13 +736,13 @@ impl LogManager {
         maybe_tx_seq: Option<u64>,
     ) -> Result<FlowProof> {
         let merkle = self.merkle.read_recursive();
-        let chunk_index = flow_index / PORA_CHUNK_SIZE as u64;
+        let seg_index = sector_to_segment(flow_index);
         let top_proof = match maybe_tx_seq {
-            None => merkle.pora_chunks_merkle.gen_proof(chunk_index as usize)?,
+            None => merkle.pora_chunks_merkle.gen_proof(seg_index)?,
             Some(tx_seq) => merkle
                 .pora_chunks_merkle
                 .at_version(tx_seq)?
-                .gen_proof(chunk_index as usize)?,
+                .gen_proof(seg_index)?,
         };
 
         // TODO(zz): Maybe we can decide that all proofs are at the PoRA chunk level, so
@@ -753,11 +752,11 @@ impl LogManager {
         // and `flow_index` must be within a complete PoRA chunk. For possible future usages,
         // we'll need to find the flow length at the given root and load a partial chunk
         // if `flow_index` is in the last chunk.
-        let sub_proof = if chunk_index as usize != merkle.pora_chunks_merkle.leaves() - 1
+        let sub_proof = if seg_index != merkle.pora_chunks_merkle.leaves() - 1
             || merkle.last_chunk_merkle.leaves() == 0
         {
             self.flow_store
-                .gen_proof_in_batch(chunk_index as usize, flow_index as usize % PORA_CHUNK_SIZE)?
+                .gen_proof_in_batch(seg_index, flow_index as usize % PORA_CHUNK_SIZE)?
         } else {
             match maybe_tx_seq {
                 None => merkle
@@ -1236,3 +1235,11 @@ pub fn tx_subtree_root_list_padded(data: &[u8]) -> Vec<(usize, DataRoot)> {
 
     root_list
 }
+
+pub fn sector_to_segment(sector_index: u64) -> usize {
+    (sector_index / PORA_CHUNK_SIZE as u64) as usize
+}
+
+pub fn segment_to_sector(segment_index: usize) -> usize {
+    segment_index * PORA_CHUNK_SIZE
+}
diff --git a/node/sync/src/controllers/serial.rs b/node/sync/src/controllers/serial.rs
index 91937ed..2fe0452 100644
--- a/node/sync/src/controllers/serial.rs
+++ b/node/sync/src/controllers/serial.rs
@@ -12,7 +12,7 @@ use network::{
 use rand::Rng;
 use shared_types::{timestamp_now, ChunkArrayWithProof, TxID, CHUNK_SIZE};
 use std::{sync::Arc, time::Instant};
-use storage::log_store::log_manager::PORA_CHUNK_SIZE;
+use storage::log_store::log_manager::{sector_to_segment, segment_to_sector, PORA_CHUNK_SIZE};
 use storage_async::Store;
 
 #[derive(Clone, Debug, PartialEq, Eq)]
@@ -475,10 +475,10 @@ impl SerialSyncController {
         self.failures = 0;
 
         let shard_config = self.store.get_store().flow().get_shard_config();
-        let next_chunk = shard_config.next_segment_index(
-            (from_chunk / PORA_CHUNK_SIZE as u64) as usize,
-            (self.tx_start_chunk_in_flow / PORA_CHUNK_SIZE as u64) as usize,
-        ) * PORA_CHUNK_SIZE;
+        let next_chunk = segment_to_sector(shard_config.next_segment_index(
+            sector_to_segment(from_chunk),
+            sector_to_segment(self.tx_start_chunk_in_flow),
+        ));
         // store in db
         match self
             .store
@@ -566,12 +566,11 @@ impl SerialSyncController {
 
     /// Randomly select a `Connected` peer to sync chunks.
     fn select_peer_for_request(&self, request: &GetChunksRequest) -> Option<PeerId> {
-        let segment_index =
-            (request.index_start + self.tx_start_chunk_in_flow) / PORA_CHUNK_SIZE as u64;
+        let segment_index = sector_to_segment(request.index_start + self.tx_start_chunk_in_flow);
 
         let mut peers = self.peers.filter_peers(vec![PeerState::Connected]);
         peers.retain(|peer_id| match self.peers.shard_config(peer_id) {
-            Some(v) => v.in_range(segment_index),
+            Some(v) => v.in_range(segment_index as u64),
             None => false,
         });
 
diff --git a/node/sync/src/service.rs b/node/sync/src/service.rs
index a01d2d8..c72b52a 100644
--- a/node/sync/src/service.rs
+++ b/node/sync/src/service.rs
@@ -4,7 +4,7 @@ use crate::controllers::{
     FailureReason, FileSyncGoal, FileSyncInfo, SerialSyncController, SyncState,
 };
 use crate::{Config, SyncServiceState};
-use anyhow::{bail, Result};
+use anyhow::{anyhow, bail, Result};
 use file_location_cache::FileLocationCache;
 use libp2p::swarm::DialError;
 use log_entry_sync::LogSyncEvent;
@@ -14,14 +14,16 @@ use network::{
     rpc::GetChunksRequest, rpc::RPCResponseErrorCode, Multiaddr, NetworkMessage, PeerId,
     PeerRequestId, SyncId as RequestId,
 };
-use shared_types::{bytes_to_chunks, timestamp_now, ChunkArrayWithProof, TxID};
+use shared_types::{bytes_to_chunks, timestamp_now, ChunkArrayWithProof, Transaction, TxID};
 use std::sync::atomic::Ordering;
 use std::{
+    cmp,
     collections::{hash_map::Entry, HashMap},
     sync::Arc,
 };
 use storage::config::ShardConfig;
 use storage::error::Result as StorageResult;
+use storage::log_store::log_manager::{sector_to_segment, segment_to_sector, PORA_CHUNK_SIZE};
 use storage::log_store::Store as LogStore;
 use storage_async::Store;
 use tokio::sync::{broadcast, mpsc, oneshot};
@@ -635,7 +637,17 @@ impl SyncService {
 
         let (index_start, index_end) = match maybe_range {
             Some((start, end)) => (start, end),
-            None => (0, num_chunks),
+            None => {
+                let start = match Self::tx_sync_start_index(&self.store, &tx).await? {
+                    Some(s) => s,
+                    None => {
+                        debug!(%tx.seq, "No more data needed");
+                        self.store.finalize_tx_with_hash(tx.seq, tx.hash()).await?;
+                        return Ok(());
+                    }
+                };
+                (start, num_chunks)
+            }
         };
 
         if index_start >= index_end || index_end > num_chunks {
@@ -793,6 +805,35 @@ impl SyncService {
             self.controllers.remove(&tx_seq);
         }
     }
+
+    async fn tx_sync_start_index(store: &Store, tx: &Transaction) -> Result<Option<u64>> {
+        let shard_config = store.get_store().flow().get_shard_config();
+        let start_segment = sector_to_segment(tx.start_entry_index());
+        let end =
+            bytes_to_chunks(usize::try_from(tx.size).map_err(|e| anyhow!("tx size e={}", e))?);
+        let mut start = if shard_config.in_range(start_segment as u64) {
+            0
+        } else {
+            segment_to_sector(shard_config.next_segment_index(0, start_segment))
+        };
+        while start < end {
+            if store
+                .get_chunks_by_tx_and_index_range(
+                    tx.seq,
+                    start,
+                    cmp::min(start + PORA_CHUNK_SIZE, end),
+                )
+                .await?
+                .is_none()
+            {
+                return Ok(Some(start as u64));
+            }
+            start = segment_to_sector(
+                shard_config.next_segment_index(sector_to_segment(start as u64), start_segment),
+            );
+        }
+        Ok(None)
+    }
 }
 
 #[cfg(test)]

From 09ea9d7d7d13dafbad3b11a96ba8c56bb1c02642 Mon Sep 17 00:00:00 2001
From: Peilun Li
Date: Thu, 22 Aug 2024 14:25:06 +0800
Subject: [PATCH 2/3] Fix test.

---
 node/file_location_cache/src/file_location_cache.rs | 1 +
 node/sync/src/controllers/mod.rs                     | 9 ++++++---
 node/sync/src/controllers/serial.rs                  | 2 +-
 node/sync/src/service.rs                             | 8 ++++----
 tests/test_framework/zgs_node.py                     | 2 +-
 5 files changed, 13 insertions(+), 9 deletions(-)

diff --git a/node/file_location_cache/src/file_location_cache.rs b/node/file_location_cache/src/file_location_cache.rs
index 6e278d6..73dd941 100644
--- a/node/file_location_cache/src/file_location_cache.rs
+++ b/node/file_location_cache/src/file_location_cache.rs
@@ -8,6 +8,7 @@ use shared_types::{timestamp_now, TxID};
 use std::cmp::Reverse;
 use std::collections::HashMap;
 use storage::config::ShardConfig;
+use tracing::debug;
 
 /// Caches limited announcements of specified file from different peers.
 struct AnnouncementCache {
diff --git a/node/sync/src/controllers/mod.rs b/node/sync/src/controllers/mod.rs
index ad2374f..1b838df 100644
--- a/node/sync/src/controllers/mod.rs
+++ b/node/sync/src/controllers/mod.rs
@@ -17,10 +17,12 @@ pub struct FileSyncGoal {
     pub index_start: u64,
     /// Chunk index to sync to (exclusive).
     pub index_end: u64,
+    /// `true` if we are syncing all the needed data of this file.
+    pub all_chunks: bool,
 }
 
 impl FileSyncGoal {
-    pub fn new(num_chunks: u64, index_start: u64, index_end: u64) -> Self {
+    pub fn new(num_chunks: u64, index_start: u64, index_end: u64, all_chunks: bool) -> Self {
         assert!(
             index_start < index_end && index_end <= num_chunks,
             "invalid index_end"
@@ -29,15 +31,16 @@ impl FileSyncGoal {
             num_chunks,
             index_start,
             index_end,
+            all_chunks,
         }
     }
 
     pub fn new_file(num_chunks: u64) -> Self {
-        Self::new(num_chunks, 0, num_chunks)
+        Self::new(num_chunks, 0, num_chunks, true)
     }
 
     pub fn is_all_chunks(&self) -> bool {
-        self.index_start == 0 && self.index_end == self.num_chunks
+        self.all_chunks
     }
 }
 
diff --git a/node/sync/src/controllers/serial.rs b/node/sync/src/controllers/serial.rs
index 2fe0452..e3869f3 100644
--- a/node/sync/src/controllers/serial.rs
+++ b/node/sync/src/controllers/serial.rs
@@ -139,7 +139,7 @@ impl SerialSyncController {
         if let Some((start, end)) = maybe_range {
             // Sync new chunks regardless of previously downloaded file or chunks.
             // It's up to client to avoid duplicated chunks sync.
-            self.goal = FileSyncGoal::new(self.goal.num_chunks, start, end);
+            self.goal = FileSyncGoal::new(self.goal.num_chunks, start, end, false);
             self.next_chunk = start;
         } else if self.goal.is_all_chunks() {
             // retry the failed file sync at break point
diff --git a/node/sync/src/service.rs b/node/sync/src/service.rs
index c72b52a..5c41f2a 100644
--- a/node/sync/src/service.rs
+++ b/node/sync/src/service.rs
@@ -635,8 +635,8 @@ impl SyncService {
             bail!("File already exists");
         }
 
-        let (index_start, index_end) = match maybe_range {
-            Some((start, end)) => (start, end),
+        let (index_start, index_end, all_chunks) = match maybe_range {
+            Some((start, end)) => (start, end, false),
             None => {
                 let start = match Self::tx_sync_start_index(&self.store, &tx).await? {
                     Some(s) => s,
@@ -646,7 +646,7 @@ impl SyncService {
                         return Ok(());
                     }
                 };
-                (start, num_chunks)
+                (start, num_chunks, true)
             }
         };
         if index_start >= index_end || index_end > num_chunks {
@@ -658,7 +658,7 @@ impl SyncService {
             self.config,
             tx.id(),
             tx.start_entry_index(),
-            FileSyncGoal::new(num_chunks, index_start, index_end),
+            FileSyncGoal::new(num_chunks, index_start, index_end, all_chunks),
             self.ctx.clone(),
             self.store.clone(),
             self.file_location_cache.clone(),
diff --git a/tests/test_framework/zgs_node.py b/tests/test_framework/zgs_node.py
index 06e3c0c..8ea9122 100644
--- a/tests/test_framework/zgs_node.py
+++ b/tests/test_framework/zgs_node.py
@@ -68,7 +68,7 @@ class ZgsNode(TestNode):
         os.mkdir(self.data_dir)
         log_config_path = os.path.join(self.data_dir, self.config["log_config_file"])
         with open(log_config_path, "w") as f:
-            f.write("debug,hyper=info,h2=info")
+            f.write("trace,hyper=info,h2=info")
 
         initialize_toml_config(self.config_file, self.config)
 

From 8b870a069c24702ad05a0d6bcbe17e0b58a54b0c Mon Sep 17 00:00:00 2001
From: Peilun Li
Date: Thu, 22 Aug 2024 15:05:03 +0800
Subject: [PATCH 3/3] nit.

---
 node/file_location_cache/src/file_location_cache.rs | 1 -
 1 file changed, 1 deletion(-)

diff --git a/node/file_location_cache/src/file_location_cache.rs b/node/file_location_cache/src/file_location_cache.rs
index 73dd941..6e278d6 100644
--- a/node/file_location_cache/src/file_location_cache.rs
+++ b/node/file_location_cache/src/file_location_cache.rs
@@ -8,7 +8,6 @@ use shared_types::{timestamp_now, TxID};
 use std::cmp::Reverse;
 use std::collections::HashMap;
 use storage::config::ShardConfig;
-use tracing::debug;
 
 /// Caches limited announcements of specified file from different peers.
 struct AnnouncementCache {
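Illustration (appended, not part of the patch series): patch 1 adds the sector_to_segment/segment_to_sector helpers and a tx_sync_start_index that chooses where a file sync should start from the data already in the DB, skipping segments outside this node's shard, and patch 2 then threads an explicit all_chunks flag through FileSyncGoal so a sync whose start index was advanced this way still counts as a whole-file sync. The self-contained Rust sketch below mirrors the shape of that start-index scan under stated assumptions: ShardConfigSketch and the has_chunks closure are hypothetical stand-ins for storage::config::ShardConfig and the store lookup get_chunks_by_tx_and_index_range, and PORA_CHUNK_SIZE is assumed to be 1024 sectors per segment here.

// Sketch only: mirrors the shape of `tx_sync_start_index`, not the real APIs.
const PORA_CHUNK_SIZE: usize = 1024; // assumed sectors per segment

fn sector_to_segment(sector_index: u64) -> usize {
    (sector_index / PORA_CHUNK_SIZE as u64) as usize
}

fn segment_to_sector(segment_index: usize) -> usize {
    segment_index * PORA_CHUNK_SIZE
}

/// Hypothetical shard config: this node stores every segment whose global
/// index is congruent to `shard_id` modulo `num_shard`.
struct ShardConfigSketch {
    shard_id: usize,
    num_shard: usize,
}

impl ShardConfigSketch {
    fn in_range(&self, segment_index: u64) -> bool {
        segment_index as usize % self.num_shard == self.shard_id
    }

    /// First file-relative segment index `>= from` whose global index
    /// (`start_segment + index`) belongs to this shard.
    fn next_segment_index(&self, from: usize, start_segment: usize) -> usize {
        let mut index = from;
        while !self.in_range((start_segment + index) as u64) {
            index += 1;
        }
        index
    }
}

/// First sector offset of the file whose data is still missing locally,
/// visiting only segments in this node's shard; `None` means nothing is
/// missing, so the file can be finalized right away, as the patched
/// `on_start_sync_file` does.
fn sync_start_sector(
    num_sectors: usize,
    start_segment: usize,
    shard: &ShardConfigSketch,
    has_chunks: impl Fn(usize, usize) -> bool,
) -> Option<u64> {
    let mut start = segment_to_sector(shard.next_segment_index(0, start_segment));
    while start < num_sectors {
        let end = std::cmp::min(start + PORA_CHUNK_SIZE, num_sectors);
        if !has_chunks(start, end) {
            return Some(start as u64);
        }
        // Advance to the next in-shard segment after the current one.
        start = segment_to_sector(
            shard.next_segment_index(sector_to_segment(start as u64) + 1, start_segment),
        );
    }
    None
}

fn main() {
    let shard = ShardConfigSketch { shard_id: 1, num_shard: 4 };
    // A 4096-sector file whose flow data starts at sector 2048 (segment 2),
    // with nothing stored locally yet.
    let start_segment = sector_to_segment(2048);
    let resume_at = sync_start_sector(4096, start_segment, &shard, |_start, _end| false);
    println!("start segment {start_segment}, resume sync at sector {resume_at:?}");
}

Running the sketch prints Some(3072): segment 2 and the next two segments fall outside this shard, so the scan begins at the first in-shard segment (segment 3 of the flow, sector offset 3072 within the file) and reports it as the first missing range, which is the same value the real tx_sync_start_index would hand to FileSyncGoal as index_start.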