From 2f9960e8e7d4b2b901d5bf3bbf11c420dcc35005 Mon Sep 17 00:00:00 2001
From: peilun-conflux <48905552+peilun-conflux@users.noreply.github.com>
Date: Sun, 27 Oct 2024 20:58:03 +0800
Subject: [PATCH] Hardcode pad data segment root. (#250)

* Hardcode pad data segment root.

* fix deref

---------

Co-authored-by: Peter Zhang
---
 Cargo.lock                                |  1 +
 node/storage/Cargo.toml                   |  1 +
 node/storage/src/log_store/log_manager.rs | 25 +++++++++++++++--------
 3 files changed, 18 insertions(+), 9 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index ed68a1f..d6fa5cc 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -7302,6 +7302,7 @@ dependencies = [
  "kvdb-rocksdb",
  "merkle_light",
  "merkle_tree",
+ "once_cell",
  "parking_lot 0.12.3",
  "rand 0.8.5",
  "rayon",
diff --git a/node/storage/Cargo.toml b/node/storage/Cargo.toml
index 225b8c3..aed9c68 100644
--- a/node/storage/Cargo.toml
+++ b/node/storage/Cargo.toml
@@ -31,6 +31,7 @@ parking_lot = "0.12.3"
 serde_json = "1.0.127"
 tokio = { version = "1.38.0", features = ["full"] }
 task_executor = { path = "../../common/task_executor" }
+once_cell = { version = "1.19.0", features = [] }
 
 [dev-dependencies]
 rand = "0.8.5"
diff --git a/node/storage/src/log_store/log_manager.rs b/node/storage/src/log_store/log_manager.rs
index 8cdf7a9..118bedb 100644
--- a/node/storage/src/log_store/log_manager.rs
+++ b/node/storage/src/log_store/log_manager.rs
@@ -1,3 +1,5 @@
+use super::tx_store::BlockHashAndSubmissionIndex;
+use super::{FlowSeal, MineLoadChunk, SealAnswer, SealTask};
 use crate::config::ShardConfig;
 use crate::log_store::flow_store::{batch_iter_sharded, FlowConfig, FlowDBStore, FlowStore};
 use crate::log_store::tx_store::TransactionStore;
@@ -11,6 +13,7 @@ use ethereum_types::H256;
 use kvdb_rocksdb::{Database, DatabaseConfig};
 use merkle_light::merkle::{log2_pow2, MerkleTree};
 use merkle_tree::RawLeafSha3Algorithm;
+use once_cell::sync::Lazy;
 use parking_lot::RwLock;
 use rayon::iter::ParallelIterator;
 use rayon::prelude::ParallelSlice;
@@ -25,9 +28,6 @@ use std::sync::mpsc;
 use std::sync::Arc;
 use tracing::{debug, error, info, instrument, trace, warn};
 
-use super::tx_store::BlockHashAndSubmissionIndex;
-use super::{FlowSeal, MineLoadChunk, SealAnswer, SealTask};
-
 /// 256 Bytes
 pub const ENTRY_SIZE: usize = 256;
 /// 1024 Entries.
@@ -47,6 +47,14 @@ pub const COL_NUM: u32 = 9;
 // Process at most 1M entries (256MB) pad data at a time.
 const PAD_MAX_SIZE: usize = 1 << 20;
 
+static PAD_SEGMENT_ROOT: Lazy<DataRoot> = Lazy::new(|| {
+    Merkle::new(
+        data_to_merkle_leaves(&[0; ENTRY_SIZE * PORA_CHUNK_SIZE]).unwrap(),
+        0,
+        None,
+    )
+    .root()
+});
 pub struct UpdateFlowMessage {
     pub root_map: BTreeMap<usize, (DataRoot, usize)>,
     pub pad_data: usize,
@@ -967,12 +975,11 @@ impl LogManager {
         // Pad with more complete chunks.
         let mut start_index = last_chunk_pad / ENTRY_SIZE;
         while pad_data.len() >= (start_index + PORA_CHUNK_SIZE) * ENTRY_SIZE {
-            let data = pad_data[start_index * ENTRY_SIZE
-                ..(start_index + PORA_CHUNK_SIZE) * ENTRY_SIZE]
-                .to_vec();
-            let root = Merkle::new(data_to_merkle_leaves(&data)?, 0, None).root();
-            merkle.pora_chunks_merkle.append(root);
-            root_map.insert(merkle.pora_chunks_merkle.leaves() - 1, (root, 1));
+            merkle.pora_chunks_merkle.append(*PAD_SEGMENT_ROOT);
+            root_map.insert(
+                merkle.pora_chunks_merkle.leaves() - 1,
+                (*PAD_SEGMENT_ROOT, 1),
+            );
             start_index += PORA_CHUNK_SIZE;
         }
         assert_eq!(pad_data.len(), start_index * ENTRY_SIZE);
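
Note: the hunks above replace a per-iteration Merkle computation over an all-zero segment with a root computed once and cached in a once_cell::sync::Lazy static. The sketch below is not the project's code; it only illustrates that caching pattern under stated assumptions. The segment_root helper and the u64 root type are stand-ins invented so the example compiles on its own (the real PAD_SEGMENT_ROOT is built from Merkle::new(data_to_merkle_leaves(...), 0, None).root() as shown in the diff), while ENTRY_SIZE and PORA_CHUNK_SIZE mirror the constants in log_manager.rs.

    // Minimal, self-contained sketch of caching an expensive constant with
    // once_cell::sync::Lazy. The hash below is a stand-in, not the project's
    // Merkle root computation.
    use once_cell::sync::Lazy;
    use std::collections::hash_map::DefaultHasher;
    use std::hash::{Hash, Hasher};

    const ENTRY_SIZE: usize = 256; // mirrors log_manager.rs
    const PORA_CHUNK_SIZE: usize = 1024; // mirrors log_manager.rs

    // Stand-in for building a Merkle tree over one segment and taking its root.
    fn segment_root(data: &[u8]) -> u64 {
        let mut hasher = DefaultHasher::new();
        data.hash(&mut hasher);
        hasher.finish()
    }

    // Computed on first dereference only, then shared by every caller.
    static PAD_SEGMENT_ROOT: Lazy<u64> =
        Lazy::new(|| segment_root(&[0u8; ENTRY_SIZE * PORA_CHUNK_SIZE]));

    fn main() {
        // A padding loop can append the cached root instead of rebuilding a
        // tree over the same zero-filled segment on every iteration.
        for _ in 0..3 {
            println!("pad segment root: {:#x}", *PAD_SEGMENT_ROOT);
        }
    }

With once_cell declared as a dependency (the patch pins 1.19.0), the initialization closure runs only on the first *PAD_SEGMENT_ROOT; later padding iterations reuse the cached value, which is what the change to receipt of pad data in log_manager.rs relies on.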