Hardcode pad data segment root. (#250)

* Hardcode pad data segment root.

* fix deref

---------

Co-authored-by: Peter Zhang <peter@0g.ai>
peilun-conflux authored 2024-10-27 20:58:03 +08:00, committed by GitHub
parent 506d234562
commit 2f9960e8e7
3 changed files with 18 additions and 9 deletions
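
For context: the commit replaces a per-iteration Merkle-root computation over an all-zero pad segment with a `once_cell::sync::Lazy` static (`PAD_SEGMENT_ROOT`), so the root is computed once on first use and reused for every fully padded chunk. Below is a minimal, self-contained sketch of that caching pattern, not the repository's code: the constants mirror the diff, but the repository's `Merkle` and `data_to_merkle_leaves` are replaced by a stand-in hash so the snippet runs with only the `once_cell` crate.

use once_cell::sync::Lazy;
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

// Stand-ins for the repository's constants (256-byte entries, 1024 entries per segment).
const ENTRY_SIZE: usize = 256;
const PORA_CHUNK_SIZE: usize = 1024;

// Computed once, on first access; later dereferences return the cached value.
static PAD_SEGMENT_ROOT: Lazy<u64> = Lazy::new(|| {
    let zeros = vec![0u8; ENTRY_SIZE * PORA_CHUNK_SIZE];
    let mut hasher = DefaultHasher::new();
    zeros.hash(&mut hasher);
    hasher.finish()
});

fn main() {
    // In the real code this is an `H256` Merkle root; here a u64 hash stands in.
    println!("pad segment root (stand-in): {:x}", *PAD_SEGMENT_ROOT);
}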

Cargo.lock (generated)

@@ -7302,6 +7302,7 @@ dependencies = [
"kvdb-rocksdb",
"merkle_light",
"merkle_tree",
"once_cell",
"parking_lot 0.12.3",
"rand 0.8.5",
"rayon",

Cargo.toml

@@ -31,6 +31,7 @@ parking_lot = "0.12.3"
serde_json = "1.0.127"
tokio = { version = "1.38.0", features = ["full"] }
task_executor = { path = "../../common/task_executor" }
once_cell = { version = "1.19.0", features = [] }
[dev-dependencies]
rand = "0.8.5"

log_manager.rs

@@ -1,3 +1,5 @@
use super::tx_store::BlockHashAndSubmissionIndex;
use super::{FlowSeal, MineLoadChunk, SealAnswer, SealTask};
use crate::config::ShardConfig;
use crate::log_store::flow_store::{batch_iter_sharded, FlowConfig, FlowDBStore, FlowStore};
use crate::log_store::tx_store::TransactionStore;
@@ -11,6 +13,7 @@ use ethereum_types::H256;
use kvdb_rocksdb::{Database, DatabaseConfig};
use merkle_light::merkle::{log2_pow2, MerkleTree};
use merkle_tree::RawLeafSha3Algorithm;
use once_cell::sync::Lazy;
use parking_lot::RwLock;
use rayon::iter::ParallelIterator;
use rayon::prelude::ParallelSlice;
@@ -25,9 +28,6 @@ use std::sync::mpsc;
use std::sync::Arc;
use tracing::{debug, error, info, instrument, trace, warn};
use super::tx_store::BlockHashAndSubmissionIndex;
use super::{FlowSeal, MineLoadChunk, SealAnswer, SealTask};
/// 256 Bytes
pub const ENTRY_SIZE: usize = 256;
/// 1024 Entries.
@@ -47,6 +47,14 @@ pub const COL_NUM: u32 = 9;
// Process at most 1M entries (256MB) pad data at a time.
const PAD_MAX_SIZE: usize = 1 << 20;
static PAD_SEGMENT_ROOT: Lazy<H256> = Lazy::new(|| {
Merkle::new(
data_to_merkle_leaves(&[0; ENTRY_SIZE * PORA_CHUNK_SIZE]).unwrap(),
0,
None,
)
.root()
});
pub struct UpdateFlowMessage {
pub root_map: BTreeMap<usize, (H256, usize)>,
pub pad_data: usize,
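
The `PAD_SEGMENT_ROOT` static introduced in the hunk above is evaluated only on its first dereference; every subsequent padded segment reuses the same `H256`. A hypothetical unit test (not part of this commit) could check that the cached value matches the root the old loop body computed per iteration, reusing the helpers that appear in the diff:

#[test]
fn pad_segment_root_matches_fresh_computation() {
    // One full segment of zero bytes, exactly what the old loop hashed each time.
    let data = vec![0u8; ENTRY_SIZE * PORA_CHUNK_SIZE];
    let fresh = Merkle::new(data_to_merkle_leaves(&data).unwrap(), 0, None).root();
    assert_eq!(fresh, *PAD_SEGMENT_ROOT);
}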
@@ -967,12 +975,11 @@ impl LogManager {
// Pad with more complete chunks.
let mut start_index = last_chunk_pad / ENTRY_SIZE;
while pad_data.len() >= (start_index + PORA_CHUNK_SIZE) * ENTRY_SIZE {
let data = pad_data[start_index * ENTRY_SIZE
..(start_index + PORA_CHUNK_SIZE) * ENTRY_SIZE]
.to_vec();
let root = Merkle::new(data_to_merkle_leaves(&data)?, 0, None).root();
merkle.pora_chunks_merkle.append(root);
root_map.insert(merkle.pora_chunks_merkle.leaves() - 1, (root, 1));
merkle.pora_chunks_merkle.append(*PAD_SEGMENT_ROOT);
root_map.insert(
merkle.pora_chunks_merkle.leaves() - 1,
(*PAD_SEGMENT_ROOT, 1),
);
start_index += PORA_CHUNK_SIZE;
}
assert_eq!(pad_data.len(), start_index * ENTRY_SIZE);
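
The second bullet in the commit message, "fix deref", lines up with the `*PAD_SEGMENT_ROOT` dereferences in the loop above: `Lazy<H256>` only dereferences to a reference, so the call sites copy the `H256` out by value before handing it to `append` and `insert`. A minimal sketch of that behaviour, assuming only the `once_cell` crate (the fixed-size array stands in for `H256`, which is likewise `Copy`):

use once_cell::sync::Lazy;

// A 32-byte value standing in for H256.
static VALUE: Lazy<[u8; 32]> = Lazy::new(|| [0u8; 32]);

fn takes_owned(v: [u8; 32]) -> usize {
    v.len()
}

fn main() {
    // `Lazy<T>` derefs to `&T`; `*VALUE` copies the array out because it is `Copy`,
    // just as `*PAD_SEGMENT_ROOT` yields an owned `H256` in the diff above.
    let n = takes_owned(*VALUE);
    assert_eq!(n, 32);
}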