Add some input validation for append_merkle.

Peilun Li 2024-09-14 06:10:10 +08:00
parent a9f5169c15
commit 40a289f0d5
8 changed files with 46 additions and 26 deletions

View File

@@ -137,13 +137,20 @@ impl<E: HashElement, A: Algorithm<E>> AppendMerkleTree<E, A> {
         }
     }
 
-    /// Return the new merkle root.
     pub fn append(&mut self, new_leaf: E) {
+        if new_leaf == E::null() {
+            // appending null is not allowed.
+            return;
+        }
         self.layers[0].push(new_leaf);
         self.recompute_after_append_leaves(self.leaves() - 1);
     }
 
     pub fn append_list(&mut self, mut leaf_list: Vec<E>) {
+        if leaf_list.contains(&E::null()) {
+            // appending null is not allowed.
+            return;
+        }
         let start_index = self.leaves();
         self.layers[0].append(&mut leaf_list);
         self.recompute_after_append_leaves(start_index);
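The guard treats `E::null()` as a reserved sentinel: the tree uses null to mark leaves whose values are not yet known (see `fill_leaf` below), so letting a caller append one would make missing data indistinguishable from real data. A minimal sketch of the pattern, with `u64` and `0` standing in for the real `HashElement` type and `E::null()` (toy types, not this crate's API):

    // Toy append-only leaf layer; 0 stands in for E::null().
    struct Tree {
        leaves: Vec<u64>,
    }

    impl Tree {
        const NULL: u64 = 0;

        fn append(&mut self, new_leaf: u64) {
            if new_leaf == Self::NULL {
                // Appending null is not allowed: null marks "unknown" leaves.
                return;
            }
            self.leaves.push(new_leaf);
        }

        fn append_list(&mut self, mut leaf_list: Vec<u64>) {
            // Reject the whole batch before mutating anything.
            if leaf_list.contains(&Self::NULL) {
                return;
            }
            self.leaves.append(&mut leaf_list);
        }
    }

    fn main() {
        let mut t = Tree { leaves: vec![] };
        t.append(0); // silently ignored
        t.append_list(vec![1, 0, 2]); // whole batch ignored
        t.append(7);
        assert_eq!(t.leaves, vec![7]);
    }

Silently returning keeps the infallible `append` signature unchanged; the `Result`-returning entry points below can `bail!` instead.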
@@ -155,6 +162,10 @@ impl<E: HashElement, A: Algorithm<E>> AppendMerkleTree<E, A> {
     /// Other nodes in the subtree will be set to `null` nodes.
     /// TODO: Optimize to avoid storing the `null` nodes?
     pub fn append_subtree(&mut self, subtree_depth: usize, subtree_root: E) -> Result<()> {
+        if subtree_root == E::null() {
+            // appending null is not allowed.
+            bail!("subtree_root is null");
+        }
         let start_index = self.leaves();
         self.append_subtree_inner(subtree_depth, subtree_root)?;
         self.recompute_after_append_subtree(start_index, subtree_depth - 1);
@@ -162,6 +173,10 @@ impl<E: HashElement, A: Algorithm<E>> AppendMerkleTree<E, A> {
     }
 
     pub fn append_subtree_list(&mut self, subtree_list: Vec<(usize, E)>) -> Result<()> {
+        if subtree_list.iter().any(|(_, root)| root == &E::null()) {
+            // appending null is not allowed.
+            bail!("subtree_list contains null");
+        }
         for (subtree_depth, subtree_root) in subtree_list {
             let start_index = self.leaves();
             self.append_subtree_inner(subtree_depth, subtree_root)?;
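Since `append_subtree` and `append_subtree_list` already return `Result`, the same validation can surface an error instead of dropping the input, and checking the whole list before the loop means a bad batch leaves no partially-appended state behind. A sketch of that validate-before-mutate shape, assuming the `bail!`/`Result` here come from the `anyhow` crate (an assumption based on the surrounding code):

    use anyhow::{bail, Result};

    type Hash = [u8; 32];
    const NULL: Hash = [0u8; 32]; // stand-in for E::null()

    // Mirror of append_subtree_list's guard: if any root is null,
    // fail before any subtree is appended.
    fn append_subtree_list(subtree_list: Vec<(usize, Hash)>) -> Result<()> {
        if subtree_list.iter().any(|(_, root)| *root == NULL) {
            bail!("subtree_list contains null");
        }
        for (_subtree_depth, _subtree_root) in subtree_list {
            // append_subtree_inner(...)? and the recompute would go here.
        }
        Ok(())
    }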
@@ -173,6 +188,10 @@ impl<E: HashElement, A: Algorithm<E>> AppendMerkleTree<E, A> {
     /// Change the value of the last leaf and return the new merkle root.
     /// This is needed if our merkle-tree in memory only keeps intermediate nodes instead of real leaves.
    pub fn update_last(&mut self, updated_leaf: E) {
+        if updated_leaf == E::null() {
+            // updating to null is not allowed.
+            return;
+        }
         if self.layers[0].is_empty() {
             // Special case for the first data.
             self.layers[0].push(updated_leaf);
@@ -183,10 +202,13 @@ impl<E: HashElement, A: Algorithm<E>> AppendMerkleTree<E, A> {
     }
 
     /// Fill an unknown `null` leaf with its real value.
-    /// Panics if the leaf changes the merkle root or the index is out of range.
+    /// Panics if the leaf is already set and different or the index is out of range.
     /// TODO: Batch computing intermediate nodes.
     pub fn fill_leaf(&mut self, index: usize, leaf: E) {
-        if self.layers[0][index] == E::null() {
+        if leaf == E::null() {
+            // fill leaf with null is not allowed.
+            return;
+        } else if self.layers[0][index] == E::null() {
             self.layers[0][index] = leaf;
             self.recompute_after_fill_leaves(index, index + 1);
         } else if self.layers[0][index] != leaf {
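`fill_leaf` now has three-way semantics: a null argument is ignored, a null slot is filled, and a conflicting refill is treated as corruption (the doc fix above matches the code, which panics on a mismatch rather than on a root change). A toy model of the control flow, with `u64`/`0` and a `String` error in place of the real types:

    // 0 stands in for E::null().
    fn fill_leaf(slots: &mut [u64], index: usize, leaf: u64) -> Result<(), String> {
        const NULL: u64 = 0;
        if leaf == NULL {
            Ok(()) // filling with null is not allowed; silently ignore
        } else if slots[index] == NULL {
            slots[index] = leaf; // fill the unknown leaf
            Ok(())
        } else if slots[index] != leaf {
            // The real code treats a conflicting value as unrecoverable.
            Err(format!("conflicting leaf at index {}", index))
        } else {
            Ok(()) // refilling with the same value is a no-op
        }
    }

    fn main() {
        let mut slots = vec![0u64, 5, 0];
        assert!(fill_leaf(&mut slots, 0, 9).is_ok()); // fills the null slot
        assert!(fill_leaf(&mut slots, 1, 5).is_ok()); // same value: no-op
        assert!(fill_leaf(&mut slots, 1, 6).is_err()); // conflict
    }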
@@ -349,7 +371,6 @@ impl<E: HashElement, A: Algorithm<E>> AppendMerkleTree<E, A> {
             right_most_nodes.push((layer.len() - 1, layer.last().unwrap().clone()));
         }
         let root = self.root().clone();
-        assert_eq!(root, right_most_nodes.last().unwrap().1);
         self.delta_nodes_map
             .insert(tx_seq, DeltaNodes::new(right_most_nodes));
         self.root_to_tx_seq_map.insert(root, tx_seq);

View File

@@ -74,10 +74,7 @@ pub trait MerkleTreeRead {
             bail!("Not ready to generate proof for leaf_index={}", leaf_index);
         }
         if self.height() == 1 {
-            return Ok(Proof::new(
-                vec![self.root().clone(), self.root().clone()],
-                vec![],
-            ));
+            return Proof::new(vec![self.root().clone(), self.root().clone()], vec![]);
         }
         let mut lemma: Vec<Self::E> = Vec::with_capacity(self.height()); // path + root
         let mut path: Vec<bool> = Vec::with_capacity(self.height() - 2); // path - 1
@@ -112,7 +109,7 @@ pub trait MerkleTreeRead {
                 path
             );
         }
-        Ok(Proof::new(lemma, path))
+        Proof::new(lemma, path)
     }
 
     fn gen_range_proof(&self, start_index: usize, end_index: usize) -> Result<RangeProof<Self::E>> {
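With `Proof::new` now fallible, `gen_proof` forwards its `Result` directly instead of wrapping in `Ok(...)`. The height-1 special case still satisfies the constructor's shape check, `lemma.len() == path.len() + 2`, by duplicating the single root into the lemma alongside an empty path. A small check of that arithmetic (toy helper, not the crate's API):

    // A proof's lemma holds the item, one sibling per path bit, and the root.
    fn shape_ok(lemma_len: usize, path_len: usize) -> bool {
        lemma_len == path_len + 2
    }

    fn main() {
        assert!(shape_ok(2, 0)); // height-1 tree: lemma = [root, root], path = []
        assert!(shape_ok(4, 2)); // item + two siblings + root
    }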

View File

@@ -11,9 +11,11 @@ pub struct Proof<T: HashElement> {
 
 impl<T: HashElement> Proof<T> {
     /// Creates new MT inclusion proof
-    pub fn new(hash: Vec<T>, path: Vec<bool>) -> Proof<T> {
-        assert_eq!(hash.len() - 2, path.len());
-        Proof { lemma: hash, path }
+    pub fn new(hash: Vec<T>, path: Vec<bool>) -> Result<Proof<T>> {
+        if hash.len() != path.len() + 2 {
+            bail!("hash and path length mismatch");
+        }
+        Ok(Proof { lemma: hash, path })
     }
 
     pub fn new_empty() -> Proof<T> {
@@ -58,10 +60,10 @@ impl<T: HashElement> Proof<T> {
             bail!("Invalid proof");
         }
         if *item != self.item() {
-            bail!("Proof item unmatch");
+            bail!("Proof item mismatch");
         }
         if position != self.position() {
-            bail!("Proof position unmatch");
+            bail!("Proof position mismatch");
         }
         Ok(())
     }
@@ -88,7 +90,7 @@ impl<T: HashElement> Proof<T> {
 
     /// Return `Vec<(index_in_layer, data)>`.
     pub fn proof_nodes_in_tree(&self) -> Vec<(usize, T)> {
-        let mut r = Vec::with_capacity(self.lemma.len());
+        let mut r = Vec::with_capacity(self.lemma.len() - 1);
         let mut pos = 0;
         r.push((0, self.root()));
         for (i, is_left) in self.path.iter().rev().enumerate() {
@@ -108,7 +110,7 @@ impl<T: HashElement> Proof<T> {
         tx_merkle_nodes: Vec<(usize, T)>,
         tx_merkle_nodes_size: usize,
     ) -> Vec<(usize, T)> {
-        let mut r = Vec::with_capacity(self.lemma.len());
+        let mut r = Vec::with_capacity(self.path.len());
         let mut subtree_pos = 0;
         let mut root_pos = 0;
         let mut in_subtree = tx_merkle_nodes_size == 1;
@@ -222,7 +224,7 @@ impl<E: HashElement> RangeProof<E> {
             }
             children_layer = parent_layer;
         }
-        assert_eq!(children_layer.len(), 1);
+        ensure_eq!(children_layer.len(), 1);
         let computed_root = children_layer.pop().unwrap();
         ensure_eq!(computed_root, self.root());
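Replacing `assert_eq!` with `bail!`/`ensure_eq!` turns a malformed proof, e.g. one deserialized from an untrusted peer, into a recoverable error rather than a panic; the old `assert_eq!(hash.len() - 2, path.len())` could additionally underflow when `hash.len() < 2`. A self-contained sketch of the validating constructor and a caller that propagates it (simplified types, assuming `anyhow`):

    use anyhow::{bail, Result};

    #[allow(dead_code)]
    struct Proof<T> {
        lemma: Vec<T>,
        path: Vec<bool>,
    }

    impl<T> Proof<T> {
        // Validate instead of asserting: bad input becomes Err, not a panic.
        fn new(hash: Vec<T>, path: Vec<bool>) -> Result<Proof<T>> {
            if hash.len() != path.len() + 2 {
                bail!("hash and path length mismatch");
            }
            Ok(Proof { lemma: hash, path })
        }
    }

    fn main() -> Result<()> {
        let _ok = Proof::new(vec![1u8, 2, 3], vec![true])?; // 3 == 1 + 2
        assert!(Proof::new(vec![1u8], vec![true]).is_err()); // would have panicked before
        Ok(())
    }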

View File

@@ -79,7 +79,7 @@ impl<'a> Miner<'a> {
         inc_counter(&LOADING_COUNT);
         let MineLoadChunk {
             loaded_chunk,
-            avalibilities,
+            availabilities,
         } = self
             .loader
             .load_sealed_data(recall_position / SECTORS_PER_LOAD as u64)
@@ -92,7 +92,7 @@ impl<'a> Miner<'a> {
             .into_iter()
             .enumerate()
             .zip(scratch_pad.iter().cycle())
-            .zip(avalibilities.into_iter())
+            .zip(availabilities.into_iter())
             .filter_map(|(data, avaliable)| avaliable.then_some(data))
         {
             inc_counter(&PAD_MIX_COUNT);

View File

@@ -364,7 +364,7 @@ impl TryFrom<FileProof> for FlowProof {
         if lemma.len() != value.path.len() + 2 {
             Err(anyhow!("invalid file proof"))
         } else {
-            Ok(Self::new(lemma, value.path))
+            Self::new(lemma, value.path)
         }
     }
 }

View File

@@ -203,7 +203,7 @@ impl FlowRead for FlowStore {
         for (seal_index, (sealed, validity)) in mine_chunk
             .loaded_chunk
             .iter_mut()
-            .zip(mine_chunk.avalibilities.iter_mut())
+            .zip(mine_chunk.availabilities.iter_mut())
             .enumerate()
         {
             if let Some(data) = batch.get_sealed_data(seal_index as u16) {

View File

@@ -252,7 +252,7 @@ impl LogStoreWrite for LogManager {
             debug!("recovery with tx_seq={}", tx.seq);
         } else {
             // This is not supposed to happen since we have checked the tx seq in log entry sync.
-            error!("tx unmatch, expected={} get={:?}", expected_seq, tx);
+            error!("tx mismatch, expected={} get={:?}", expected_seq, tx);
             bail!("unexpected tx!");
         }
     }
@@ -1173,7 +1173,7 @@ pub fn sub_merkle_tree(leaf_data: &[u8]) -> Result<FileMerkleTree> {
 
 pub fn data_to_merkle_leaves(leaf_data: &[u8]) -> Result<Vec<H256>> {
     if leaf_data.len() % ENTRY_SIZE != 0 {
-        bail!("merkle_tree: unmatch data size");
+        bail!("merkle_tree: mismatched data size");
     }
     // If the data size is small, using `rayon` would introduce more overhead.
     let r = if leaf_data.len() >= ENTRY_SIZE * 8 {
@@ -1211,7 +1211,7 @@ fn entry_proof(top_proof: &FlowProof, sub_proof: &FlowProof) -> Result<FlowProof> {
     assert!(lemma.pop().is_some());
     lemma.extend_from_slice(&top_proof.lemma()[1..]);
     path.extend_from_slice(top_proof.path());
-    Ok(FlowProof::new(lemma, path))
+    FlowProof::new(lemma, path)
 }
 
 pub fn split_nodes(data_size: usize) -> Vec<usize> {

View File

@@ -185,14 +185,14 @@ pub trait LogStoreInner {
 pub struct MineLoadChunk {
     // Use `Vec` instead of array to avoid thread stack overflow.
     pub loaded_chunk: Vec<[u8; BYTES_PER_SEAL]>,
-    pub avalibilities: [bool; SEALS_PER_LOAD],
+    pub availabilities: [bool; SEALS_PER_LOAD],
 }
 
 impl Default for MineLoadChunk {
     fn default() -> Self {
         Self {
             loaded_chunk: vec![[0u8; BYTES_PER_SEAL]; SEALS_PER_LOAD],
-            avalibilities: [false; SEALS_PER_LOAD],
+            availabilities: [false; SEALS_PER_LOAD],
         }
     }
 }
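Besides the spelling fix (`avalibilities` → `availabilities`), note the field's role: each flag records whether the corresponding entry of `loaded_chunk` holds usable sealed data, and the miner zips chunks with flags and keeps only the available ones. A minimal sketch of that filtering shape (sizes shrunk; not the real `BYTES_PER_SEAL`/`SEALS_PER_LOAD` constants):

    // Pair each loaded seal-chunk with its availability flag and keep
    // only the available ones, preserving each chunk's index.
    fn available_chunks(
        loaded_chunk: Vec<[u8; 8]>,
        availabilities: [bool; 4],
    ) -> Vec<(usize, [u8; 8])> {
        loaded_chunk
            .into_iter()
            .enumerate()
            .zip(availabilities.into_iter())
            .filter_map(|(data, available)| available.then_some(data))
            .collect()
    }

    fn main() {
        let chunks = vec![[1u8; 8], [2u8; 8], [3u8; 8], [4u8; 8]];
        let avail = [true, false, true, false];
        let kept = available_chunks(chunks, avail);
        assert_eq!(kept.len(), 2);
        assert_eq!((kept[0].0, kept[1].0), (0, 2));
    }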