From 10bd71046b97c97d8468b22d2edecbd1de2c0e18 Mon Sep 17 00:00:00 2001
From: peilun-conflux <48905552+peilun-conflux@users.noreply.github.com>
Date: Sat, 14 Sep 2024 17:39:07 +0800
Subject: [PATCH] Add some input validation for `append_merkle`. (#202)

* Add some input validation for `append_merkle`.

* Fix clippy.
---
 common/append_merkle/src/lib.rs           | 28 +++++++++++++++++++----
 common/append_merkle/src/merkle_tree.rs   |  7 ++----
 common/append_merkle/src/proof.rs         | 18 ++++++++-------
 node/miner/src/pora.rs                    |  4 ++--
 node/shared_types/src/lib.rs              |  2 +-
 node/storage/src/log_store/flow_store.rs  |  2 +-
 node/storage/src/log_store/log_manager.rs |  6 ++---
 node/storage/src/log_store/mod.rs         |  4 ++--
 8 files changed, 45 insertions(+), 26 deletions(-)

diff --git a/common/append_merkle/src/lib.rs b/common/append_merkle/src/lib.rs
index f621d86..55fe806 100644
--- a/common/append_merkle/src/lib.rs
+++ b/common/append_merkle/src/lib.rs
@@ -137,13 +137,20 @@ impl<E: HashElement, A: Algorithm<E>> AppendMerkleTree<E, A> {
         }
     }
 
-    /// Return the new merkle root.
     pub fn append(&mut self, new_leaf: E) {
+        if new_leaf == E::null() {
+            // appending null is not allowed.
+            return;
+        }
         self.layers[0].push(new_leaf);
         self.recompute_after_append_leaves(self.leaves() - 1);
     }
 
     pub fn append_list(&mut self, mut leaf_list: Vec<E>) {
+        if leaf_list.contains(&E::null()) {
+            // appending null is not allowed.
+            return;
+        }
         let start_index = self.leaves();
         self.layers[0].append(&mut leaf_list);
         self.recompute_after_append_leaves(start_index);
@@ -155,6 +162,10 @@ impl<E: HashElement, A: Algorithm<E>> AppendMerkleTree<E, A> {
     /// Other nodes in the subtree will be set to `null` nodes.
     /// TODO: Optimize to avoid storing the `null` nodes?
     pub fn append_subtree(&mut self, subtree_depth: usize, subtree_root: E) -> Result<()> {
+        if subtree_root == E::null() {
+            // appending null is not allowed.
+            bail!("subtree_root is null");
+        }
         let start_index = self.leaves();
         self.append_subtree_inner(subtree_depth, subtree_root)?;
         self.recompute_after_append_subtree(start_index, subtree_depth - 1);
@@ -162,6 +173,10 @@
     }
 
     pub fn append_subtree_list(&mut self, subtree_list: Vec<(usize, E)>) -> Result<()> {
+        if subtree_list.iter().any(|(_, root)| root == &E::null()) {
+            // appending null is not allowed.
+            bail!("subtree_list contains null");
+        }
         for (subtree_depth, subtree_root) in subtree_list {
             let start_index = self.leaves();
             self.append_subtree_inner(subtree_depth, subtree_root)?;
@@ -173,6 +188,10 @@
     /// Change the value of the last leaf and return the new merkle root.
     /// This is needed if our merkle-tree in memory only keeps intermediate nodes instead of real leaves.
     pub fn update_last(&mut self, updated_leaf: E) {
+        if updated_leaf == E::null() {
+            // updating to null is not allowed.
+            return;
+        }
         if self.layers[0].is_empty() {
             // Special case for the first data.
             self.layers[0].push(updated_leaf);
@@ -183,10 +202,12 @@
     }
 
     /// Fill an unknown `null` leaf with its real value.
-    /// Panics if the leaf changes the merkle root or the index is out of range.
+    /// Panics if the leaf is already set and different or the index is out of range.
     /// TODO: Batch computing intermediate nodes.
     pub fn fill_leaf(&mut self, index: usize, leaf: E) {
-        if self.layers[0][index] == E::null() {
+        if leaf == E::null() {
+            // fill leaf with null is not allowed.
+        } else if self.layers[0][index] == E::null() {
            self.layers[0][index] = leaf;
            self.recompute_after_fill_leaves(index, index + 1);
        } else if self.layers[0][index] != leaf {
@@ -349,7 +370,6 @@ impl<E: HashElement, A: Algorithm<E>> AppendMerkleTree<E, A> {
             right_most_nodes.push((layer.len() - 1, layer.last().unwrap().clone()));
         }
         let root = self.root().clone();
-        assert_eq!(root, right_most_nodes.last().unwrap().1);
         self.delta_nodes_map
             .insert(tx_seq, DeltaNodes::new(right_most_nodes));
         self.root_to_tx_seq_map.insert(root, tx_seq);
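Taken together, the new guards make the infallible entry points (`append`, `append_list`, `update_last`, `fill_leaf`) silently ignore null input, while the `Result`-returning subtree variants report it as an error. A minimal sketch of the caller-visible behavior (not part of the patch; it assumes a tree of the crate's `AppendMerkleTree<H256, Sha3Algorithm>` type built elsewhere, and the import paths are assumptions):

```rust
use append_merkle::{AppendMerkleTree, HashElement, MerkleTreeRead, Sha3Algorithm};
use ethereum_types::H256;

fn null_input_is_rejected(tree: &mut AppendMerkleTree<H256, Sha3Algorithm>) {
    let root_before = tree.root().clone();

    // Ignored: `append` returns early on the null sentinel, so the root is unchanged.
    tree.append(H256::null());
    assert_eq!(tree.root(), &root_before);

    // Rejected: the subtree variants surface the bad input as an `Err`.
    assert!(tree.append_subtree(1, H256::null()).is_err());
    assert!(tree.append_subtree_list(vec![(1, H256::null())]).is_err());
}
```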
diff --git a/common/append_merkle/src/merkle_tree.rs b/common/append_merkle/src/merkle_tree.rs
index d8a96df..975cbb3 100644
--- a/common/append_merkle/src/merkle_tree.rs
+++ b/common/append_merkle/src/merkle_tree.rs
@@ -74,10 +74,7 @@ pub trait MerkleTreeRead {
             bail!("Not ready to generate proof for leaf_index={}", leaf_index);
         }
         if self.height() == 1 {
-            return Ok(Proof::new(
-                vec![self.root().clone(), self.root().clone()],
-                vec![],
-            ));
+            return Proof::new(vec![self.root().clone(), self.root().clone()], vec![]);
         }
         let mut lemma: Vec<Self::E> = Vec::with_capacity(self.height()); // path + root
         let mut path: Vec<bool> = Vec::with_capacity(self.height() - 2); // path - 1
@@ -112,7 +109,7 @@ pub trait MerkleTreeRead {
                 path
             );
         }
-        Ok(Proof::new(lemma, path))
+        Proof::new(lemma, path)
     }
 
     fn gen_range_proof(&self, start_index: usize, end_index: usize) -> Result<RangeProof<Self::E>> {
diff --git a/common/append_merkle/src/proof.rs b/common/append_merkle/src/proof.rs
index 77d983b..f7386c6 100644
--- a/common/append_merkle/src/proof.rs
+++ b/common/append_merkle/src/proof.rs
@@ -11,9 +11,11 @@ pub struct Proof<T: HashElement> {
 
 impl<T: HashElement> Proof<T> {
     /// Creates new MT inclusion proof
-    pub fn new(hash: Vec<T>, path: Vec<bool>) -> Proof<T> {
-        assert_eq!(hash.len() - 2, path.len());
-        Proof { lemma: hash, path }
+    pub fn new(hash: Vec<T>, path: Vec<bool>) -> Result<Proof<T>> {
+        if hash.len() != path.len() + 2 {
+            bail!("hash and path length mismatch");
+        }
+        Ok(Proof { lemma: hash, path })
     }
 
     pub fn new_empty() -> Proof<T> {
@@ -58,10 +60,10 @@
             bail!("Invalid proof");
         }
         if *item != self.item() {
-            bail!("Proof item unmatch");
+            bail!("Proof item mismatch");
         }
         if position != self.position() {
-            bail!("Proof position unmatch");
+            bail!("Proof position mismatch");
         }
         Ok(())
     }
@@ -88,7 +90,7 @@
 
     /// Return `Vec<(index_in_layer, data)>`.
     pub fn proof_nodes_in_tree(&self) -> Vec<(usize, T)> {
-        let mut r = Vec::with_capacity(self.lemma.len());
+        let mut r = Vec::with_capacity(self.lemma.len() - 1);
         let mut pos = 0;
         r.push((0, self.root()));
         for (i, is_left) in self.path.iter().rev().enumerate() {
@@ -108,7 +110,7 @@
         tx_merkle_nodes: Vec<(usize, T)>,
         tx_merkle_nodes_size: usize,
     ) -> Vec<(usize, T)> {
-        let mut r = Vec::with_capacity(self.lemma.len());
+        let mut r = Vec::with_capacity(self.path.len());
         let mut subtree_pos = 0;
         let mut root_pos = 0;
         let mut in_subtree = tx_merkle_nodes_size == 1;
@@ -222,7 +224,7 @@ impl<E: HashElement> RangeProof<E> {
             }
             children_layer = parent_layer;
         }
-        assert_eq!(children_layer.len(), 1);
+        ensure_eq!(children_layer.len(), 1);
         let computed_root = children_layer.pop().unwrap();
         ensure_eq!(computed_root, self.root());
 
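Making `Proof::new` fallible also closes a panic path: the old `assert_eq!(hash.len() - 2, path.len())` underflows before it ever compares whenever the lemma holds fewer than two elements. Downstream constructors now just forward the check, roughly like this hypothetical caller (names assumed for illustration):

```rust
use anyhow::Result;
use append_merkle::{HashElement, Proof};

// A valid lemma holds the leaf, one sibling per path bit, and the root,
// so `lemma.len() == path.len() + 2` must hold.
fn rebuild_proof<T: HashElement>(lemma: Vec<T>, path: Vec<bool>) -> Result<Proof<T>> {
    Proof::new(lemma, path) // `Err` on a length mismatch instead of a panic
}
```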
diff --git a/node/miner/src/pora.rs b/node/miner/src/pora.rs
index a74d2b5..deef5c3 100644
--- a/node/miner/src/pora.rs
+++ b/node/miner/src/pora.rs
@@ -79,7 +79,7 @@ impl<'a> Miner<'a> {
         inc_counter(&LOADING_COUNT);
         let MineLoadChunk {
             loaded_chunk,
-            avalibilities,
+            availabilities,
         } = self
             .loader
             .load_sealed_data(recall_position / SECTORS_PER_LOAD as u64)
@@ -92,7 +92,7 @@ impl<'a> Miner<'a> {
             .into_iter()
             .enumerate()
             .zip(scratch_pad.iter().cycle())
-            .zip(avalibilities.into_iter())
+            .zip(availabilities.into_iter())
             .filter_map(|(data, avaliable)| avaliable.then_some(data))
         {
             inc_counter(&PAD_MIX_COUNT);
diff --git a/node/shared_types/src/lib.rs b/node/shared_types/src/lib.rs
index e2f507c..aac0805 100644
--- a/node/shared_types/src/lib.rs
+++ b/node/shared_types/src/lib.rs
@@ -364,7 +364,7 @@ impl TryFrom<FileProof> for FlowProof {
         if lemma.len() != value.path.len() + 2 {
             Err(anyhow!("invalid file proof"))
         } else {
-            Ok(Self::new(lemma, value.path))
+            Self::new(lemma, value.path)
         }
     }
 }
diff --git a/node/storage/src/log_store/flow_store.rs b/node/storage/src/log_store/flow_store.rs
index cdbc672..a641dff 100644
--- a/node/storage/src/log_store/flow_store.rs
+++ b/node/storage/src/log_store/flow_store.rs
@@ -203,7 +203,7 @@ impl FlowRead for FlowStore {
         for (seal_index, (sealed, validity)) in mine_chunk
             .loaded_chunk
             .iter_mut()
-            .zip(mine_chunk.avalibilities.iter_mut())
+            .zip(mine_chunk.availabilities.iter_mut())
             .enumerate()
         {
             if let Some(data) = batch.get_sealed_data(seal_index as u16) {
diff --git a/node/storage/src/log_store/log_manager.rs b/node/storage/src/log_store/log_manager.rs
index 1c43a7e..026ca8b 100644
--- a/node/storage/src/log_store/log_manager.rs
+++ b/node/storage/src/log_store/log_manager.rs
@@ -252,7 +252,7 @@ impl LogStoreWrite for LogManager {
             debug!("recovery with tx_seq={}", tx.seq);
         } else {
             // This is not supposed to happen since we have checked the tx seq in log entry sync.
-            error!("tx unmatch, expected={} get={:?}", expected_seq, tx);
+            error!("tx mismatch, expected={} get={:?}", expected_seq, tx);
             bail!("unexpected tx!");
         }
     }
@@ -1173,7 +1173,7 @@ pub fn sub_merkle_tree(leaf_data: &[u8]) -> Result<FileMerkleTree> {
 
 pub fn data_to_merkle_leaves(leaf_data: &[u8]) -> Result<Vec<H256>> {
     if leaf_data.len() % ENTRY_SIZE != 0 {
-        bail!("merkle_tree: unmatch data size");
+        bail!("merkle_tree: mismatched data size");
     }
     // If the data size is small, using `rayon` would introduce more overhead.
     let r = if leaf_data.len() >= ENTRY_SIZE * 8 {
@@ -1211,7 +1211,7 @@ fn entry_proof(top_proof: &FlowProof, sub_proof: &FlowProof) -> Result<FlowProof> {
-    Ok(FlowProof::new(lemma, path))
+    FlowProof::new(lemma, path)
 }
 
 pub fn tx_subtree_root_list_padded(data: &[u8]) -> Vec<(usize, DataRoot)> {
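The `data_to_merkle_leaves` check guards the same chunking that decides between sequential and `rayon`-parallel hashing. A self-contained sketch of that shape, with an assumed `Keccak256` leaf hash; `ENTRY_SIZE` of 256 bytes matches this codebase, but the helper itself is illustrative rather than the repo's exact code:

```rust
use rayon::prelude::*;
use sha3::{Digest, Keccak256};

const ENTRY_SIZE: usize = 256;

fn data_to_leaves_sketch(leaf_data: &[u8]) -> Result<Vec<[u8; 32]>, String> {
    if leaf_data.len() % ENTRY_SIZE != 0 {
        return Err("merkle_tree: mismatched data size".into());
    }
    let hash_entry = |entry: &[u8]| -> [u8; 32] { Keccak256::digest(entry).into() };
    // `rayon` only pays off once there are several entries to hash;
    // small inputs take the sequential path to avoid the scheduling overhead.
    Ok(if leaf_data.len() >= ENTRY_SIZE * 8 {
        leaf_data.par_chunks(ENTRY_SIZE).map(hash_entry).collect()
    } else {
        leaf_data.chunks(ENTRY_SIZE).map(hash_entry).collect()
    })
}
```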
diff --git a/node/storage/src/log_store/mod.rs b/node/storage/src/log_store/mod.rs
index a2a6442..ae3b07e 100644
--- a/node/storage/src/log_store/mod.rs
+++ b/node/storage/src/log_store/mod.rs
@@ -185,14 +185,14 @@ pub trait LogStoreInner {
 pub struct MineLoadChunk {
     // Use `Vec` instead of array to avoid thread stack overflow.
     pub loaded_chunk: Vec<[u8; BYTES_PER_SEAL]>,
-    pub avalibilities: [bool; SEALS_PER_LOAD],
+    pub availabilities: [bool; SEALS_PER_LOAD],
 }
 
 impl Default for MineLoadChunk {
     fn default() -> Self {
         Self {
             loaded_chunk: vec![[0u8; BYTES_PER_SEAL]; SEALS_PER_LOAD],
-            avalibilities: [false; SEALS_PER_LOAD],
+            availabilities: [false; SEALS_PER_LOAD],
         }
     }
 }
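After the rename, the consuming loop in the miner reads as intended; a trimmed sketch of that use (constants such as `BYTES_PER_SEAL` and `SEALS_PER_LOAD` come from the codebase's config, and the mining work itself is elided):

```rust
let chunk = MineLoadChunk::default();
for (sealed, _) in chunk
    .loaded_chunk
    .into_iter()
    .zip(chunk.availabilities.into_iter())
    .filter(|(_, available)| *available)
{
    // Only seals whose availability flag is set are mixed into the PoRA search;
    // `sealed` is one `[u8; BYTES_PER_SEAL]` unit loaded from the flow store.
    let _ = sealed;
}
```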