diff --git a/node/storage/src/log_store/log_manager.rs b/node/storage/src/log_store/log_manager.rs
index 64ea1cf..c0e4874 100644
--- a/node/storage/src/log_store/log_manager.rs
+++ b/node/storage/src/log_store/log_manager.rs
@@ -609,9 +609,9 @@ impl LogManager {
             .get_tx_by_seq_number(last_tx_seq)?
             .expect("tx missing");
         let mut current_len = initial_data.leaves();
-        let expected_len = (last_tx.start_entry_index + last_tx.num_entries() as u64)
-            / PORA_CHUNK_SIZE as u64;
-        match expected_len.cmp(&(current_len as u64)) {
+        let expected_len =
+            sector_to_segment(last_tx.start_entry_index + last_tx.num_entries() as u64);
+        match expected_len.cmp(&(current_len)) {
             Ordering::Less => {
                 bail!(
                     "Unexpected DB: merkle tree larger than the known data size,\
@@ -634,10 +634,9 @@ impl LogManager {
                 let previous_tx = tx_store
                     .get_tx_by_seq_number(last_tx_seq - 1)?
                     .expect("tx missing");
-                let expected_len = ((previous_tx.start_entry_index
-                    + previous_tx.num_entries() as u64)
-                    / PORA_CHUNK_SIZE as u64)
-                    as usize;
+                let expected_len = sector_to_segment(
+                    previous_tx.start_entry_index + previous_tx.num_entries() as u64,
+                );
                 if current_len > expected_len {
                     while let Some((subtree_depth, _)) = initial_data.subtree_list.pop() {
@@ -737,13 +736,13 @@ impl LogManager {
         maybe_tx_seq: Option<u64>,
     ) -> Result<FlowProof> {
         let merkle = self.merkle.read_recursive();
-        let chunk_index = flow_index / PORA_CHUNK_SIZE as u64;
+        let seg_index = sector_to_segment(flow_index);
         let top_proof = match maybe_tx_seq {
-            None => merkle.pora_chunks_merkle.gen_proof(chunk_index as usize)?,
+            None => merkle.pora_chunks_merkle.gen_proof(seg_index)?,
             Some(tx_seq) => merkle
                 .pora_chunks_merkle
                 .at_version(tx_seq)?
-                .gen_proof(chunk_index as usize)?,
+                .gen_proof(seg_index)?,
         };
 
         // TODO(zz): Maybe we can decide that all proofs are at the PoRA chunk level, so
@@ -753,11 +752,11 @@
         // and `flow_index` must be within a complete PoRA chunk. For possible future usages,
         // we'll need to find the flow length at the given root and load a partial chunk
         // if `flow_index` is in the last chunk.
-        let sub_proof = if chunk_index as usize != merkle.pora_chunks_merkle.leaves() - 1
+        let sub_proof = if seg_index != merkle.pora_chunks_merkle.leaves() - 1
             || merkle.last_chunk_merkle.leaves() == 0
         {
             self.flow_store
-                .gen_proof_in_batch(chunk_index as usize, flow_index as usize % PORA_CHUNK_SIZE)?
+                .gen_proof_in_batch(seg_index, flow_index as usize % PORA_CHUNK_SIZE)?
         } else {
             match maybe_tx_seq {
                 None => merkle
@@ -1236,3 +1235,11 @@ pub fn tx_subtree_root_list_padded(data: &[u8]) -> Vec<(usize, DataRoot)> {
     root_list
 }
+
+pub fn sector_to_segment(sector_index: u64) -> usize {
+    (sector_index / PORA_CHUNK_SIZE as u64) as usize
+}
+
+pub fn segment_to_sector(segment_index: usize) -> usize {
+    segment_index * PORA_CHUNK_SIZE
+}
diff --git a/node/sync/src/controllers/serial.rs b/node/sync/src/controllers/serial.rs
index 91937ed..2fe0452 100644
--- a/node/sync/src/controllers/serial.rs
+++ b/node/sync/src/controllers/serial.rs
@@ -12,7 +12,7 @@ use network::{
 use rand::Rng;
 use shared_types::{timestamp_now, ChunkArrayWithProof, TxID, CHUNK_SIZE};
 use std::{sync::Arc, time::Instant};
-use storage::log_store::log_manager::PORA_CHUNK_SIZE;
+use storage::log_store::log_manager::{sector_to_segment, segment_to_sector, PORA_CHUNK_SIZE};
 use storage_async::Store;
 
 #[derive(Clone, Debug, PartialEq, Eq)]
@@ -475,10 +475,10 @@ impl SerialSyncController {
         self.failures = 0;
 
         let shard_config = self.store.get_store().flow().get_shard_config();
-        let next_chunk = shard_config.next_segment_index(
-            (from_chunk / PORA_CHUNK_SIZE as u64) as usize,
-            (self.tx_start_chunk_in_flow / PORA_CHUNK_SIZE as u64) as usize,
-        ) * PORA_CHUNK_SIZE;
+        let next_chunk = segment_to_sector(shard_config.next_segment_index(
+            sector_to_segment(from_chunk),
+            sector_to_segment(self.tx_start_chunk_in_flow),
+        ));
         // store in db
         match self
             .store
@@ -566,12 +566,11 @@ impl SerialSyncController {
 
     /// Randomly select a `Connected` peer to sync chunks.
     fn select_peer_for_request(&self, request: &GetChunksRequest) -> Option<PeerId> {
-        let segment_index =
-            (request.index_start + self.tx_start_chunk_in_flow) / PORA_CHUNK_SIZE as u64;
+        let segment_index = sector_to_segment(request.index_start + self.tx_start_chunk_in_flow);
         let mut peers = self.peers.filter_peers(vec![PeerState::Connected]);
 
         peers.retain(|peer_id| match self.peers.shard_config(peer_id) {
-            Some(v) => v.in_range(segment_index),
+            Some(v) => v.in_range(segment_index as u64),
             None => false,
         });
 
diff --git a/node/sync/src/service.rs b/node/sync/src/service.rs
index a01d2d8..c72b52a 100644
--- a/node/sync/src/service.rs
+++ b/node/sync/src/service.rs
@@ -4,7 +4,7 @@ use crate::controllers::{
     FailureReason, FileSyncGoal, FileSyncInfo, SerialSyncController, SyncState,
 };
 use crate::{Config, SyncServiceState};
-use anyhow::{bail, Result};
+use anyhow::{anyhow, bail, Result};
 use file_location_cache::FileLocationCache;
 use libp2p::swarm::DialError;
 use log_entry_sync::LogSyncEvent;
@@ -14,14 +14,16 @@ use network::{
     rpc::GetChunksRequest, rpc::RPCResponseErrorCode, Multiaddr, NetworkMessage, PeerId,
     PeerRequestId, SyncId as RequestId,
 };
-use shared_types::{bytes_to_chunks, timestamp_now, ChunkArrayWithProof, TxID};
+use shared_types::{bytes_to_chunks, timestamp_now, ChunkArrayWithProof, Transaction, TxID};
 use std::sync::atomic::Ordering;
 use std::{
+    cmp,
     collections::{hash_map::Entry, HashMap},
     sync::Arc,
 };
 use storage::config::ShardConfig;
 use storage::error::Result as StorageResult;
+use storage::log_store::log_manager::{sector_to_segment, segment_to_sector, PORA_CHUNK_SIZE};
 use storage::log_store::Store as LogStore;
 use storage_async::Store;
 use tokio::sync::{broadcast, mpsc, oneshot};
@@ -635,7 +637,17 @@ impl SyncService {
         let (index_start, index_end) = match maybe_range {
             Some((start, end)) => (start, end),
-            None => (0, num_chunks),
+            None => {
+                let start = match Self::tx_sync_start_index(&self.store, &tx).await? {
+                    Some(s) => s,
+                    None => {
+                        debug!(%tx.seq, "No more data needed");
+                        self.store.finalize_tx_with_hash(tx.seq, tx.hash()).await?;
+                        return Ok(());
+                    }
+                };
+                (start, num_chunks)
+            }
         };
 
         if index_start >= index_end || index_end > num_chunks {
@@ -793,6 +805,35 @@ impl SyncService {
             self.controllers.remove(&tx_seq);
         }
     }
+
+    async fn tx_sync_start_index(store: &Store, tx: &Transaction) -> Result<Option<u64>> {
+        let shard_config = store.get_store().flow().get_shard_config();
+        let start_segment = sector_to_segment(tx.start_entry_index());
+        let end =
+            bytes_to_chunks(usize::try_from(tx.size).map_err(|e| anyhow!("tx size e={}", e))?);
+        let mut start = if shard_config.in_range(start_segment as u64) {
+            0
+        } else {
+            segment_to_sector(shard_config.next_segment_index(0, start_segment))
+        };
+        while start < end {
+            if store
+                .get_chunks_by_tx_and_index_range(
+                    tx.seq,
+                    start,
+                    cmp::min(start + PORA_CHUNK_SIZE, end),
+                )
+                .await?
+                .is_none()
+            {
+                return Ok(Some(start as u64));
+            }
+            start = segment_to_sector(
+                shard_config.next_segment_index(sector_to_segment(start as u64), start_segment),
+            );
+        }
+        Ok(None)
+    }
 }
 
 #[cfg(test)]
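Note on the units involved (illustration, not part of the patch): a "sector" is one flow entry of CHUNK_SIZE bytes, and a "segment" is PORA_CHUNK_SIZE consecutive sectors, which is the granularity of both PoRA proofs and shard membership. The new sector_to_segment/segment_to_sector helpers replace the ad-hoc `/ PORA_CHUNK_SIZE as u64` and `* PORA_CHUNK_SIZE` arithmetic so every call site states which unit it is working in. The sketch below shows that conversion pattern in isolation, under stated assumptions: PORA_CHUNK_SIZE = 1024 is taken from the current log_manager.rs constant, and the modulo-based Shard type is a hypothetical stand-in for storage::config::ShardConfig, not its real in_range/next_segment_index logic.

// Sketch only; assumes PORA_CHUNK_SIZE = 1024 and a simplified shard rule.
const PORA_CHUNK_SIZE: usize = 1024;

// Same conversions the patch adds to log_manager.rs: flow offsets are counted in
// sectors, while shard membership and PoRA proofs operate on whole segments.
fn sector_to_segment(sector_index: u64) -> usize {
    (sector_index / PORA_CHUNK_SIZE as u64) as usize
}

fn segment_to_sector(segment_index: usize) -> usize {
    segment_index * PORA_CHUNK_SIZE
}

// Hypothetical shard rule: this node stores segment i iff i % num_shard == shard_id.
struct Shard {
    shard_id: usize,
    num_shard: usize,
}

impl Shard {
    fn in_range(&self, segment_index: u64) -> bool {
        segment_index as usize % self.num_shard == self.shard_id
    }
}

fn main() {
    let shard = Shard { shard_id: 1, num_shard: 4 };

    // A tx whose data starts at flow sector 5 * PORA_CHUNK_SIZE and spans 4 segments.
    let tx_start_sector: u64 = 5 * PORA_CHUNK_SIZE as u64;
    let tx_num_sectors: u64 = 4 * PORA_CHUNK_SIZE as u64;

    // Walk the tx segment by segment and report, in tx-relative sector units, the
    // ranges this shard would request -- the same conversion pattern the patch
    // applies in serial.rs and in SyncService::tx_sync_start_index.
    let first_segment = sector_to_segment(tx_start_sector);
    let last_segment = sector_to_segment(tx_start_sector + tx_num_sectors - 1);
    for segment in first_segment..=last_segment {
        if shard.in_range(segment as u64) {
            let tx_relative_start = segment_to_sector(segment) as u64 - tx_start_sector;
            println!(
                "request tx-relative sectors [{}, {})",
                tx_relative_start,
                tx_relative_start + PORA_CHUNK_SIZE as u64
            );
        }
    }
}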