fix error

Peter Zhang 2025-09-08 18:02:14 +08:00
parent f29f4f8b61
commit b900b9c48d
5 changed files with 146 additions and 72 deletions


@@ -259,8 +259,11 @@ impl<E: HashElement, A: Algorithm<E>> AppendMerkleTree<E, A> {
         }
         self.node_manager.start_transaction();
         let start_index = self.leaves();
+        println!("append_subtree_inner");
         self.append_subtree_inner(subtree_depth, subtree_root)?;
+        println!("recompute");
         self.recompute_after_append_subtree(start_index, subtree_depth - 1);
+        println!("commit");
         self.node_manager.commit();
         metrics::APPEND_SUBTREE.update_since(start_time);


@@ -40,6 +40,45 @@ impl OptionalHash {
     pub fn as_ref(&self) -> Option<&H256> {
         self.0.as_ref()
     }
+
+    /// Create OptionalHash from a byte slice
+    pub fn from_slice(bytes: &[u8]) -> Result<Self, &'static str> {
+        if bytes.len() != 32 {
+            return Err("Invalid byte length for H256");
+        }
+        let mut hash_bytes = [0u8; 32];
+        hash_bytes.copy_from_slice(bytes);
+        Ok(OptionalHash::some(H256(hash_bytes)))
+    }
+
+    /// Convert to bytes for storage (33 bytes: 1 flag + 32 hash)
+    pub fn as_bytes(&self) -> [u8; 33] {
+        let mut bytes = [0u8; 33];
+        match &self.0 {
+            Some(hash) => {
+                bytes[0] = 1; // Some flag
+                bytes[1..].copy_from_slice(hash.as_ref());
+            }
+            None => {
+                bytes[0] = 0; // None flag
+                // bytes[1..] remain zeros
+            }
+        }
+        bytes
+    }
+
+    /// Create OptionalHash from storage bytes (33 bytes)
+    pub fn from_bytes(bytes: &[u8; 33]) -> Result<Self, &'static str> {
+        match bytes[0] {
+            0 => Ok(OptionalHash::none()),
+            1 => {
+                let mut hash_bytes = [0u8; 32];
+                hash_bytes.copy_from_slice(&bytes[1..]);
+                Ok(OptionalHash::some(H256(hash_bytes)))
+            }
+            _ => Err("Invalid flag byte for OptionalHash"),
+        }
+    }
 }

 // Add From conversions for easier usage
@@ -82,18 +121,25 @@ impl Encode for OptionalHash {
     }

     fn ssz_fixed_len() -> usize {
-        32 // Same as H256 - just 32 bytes, no flag byte
+        33 // 1 byte for Some/None flag + 32 bytes for hash
     }

     fn ssz_bytes_len(&self) -> usize {
-        32
+        33
     }

     fn ssz_append(&self, buf: &mut Vec<u8>) {
-        // Use H256::zero() for None, actual hash for Some
-        let hash = self.0.unwrap();
+        match &self.0 {
+            Some(hash) => {
+                buf.push(1); // Some flag
+                hash.ssz_append(buf);
+            }
+            None => {
+                buf.push(0); // None flag
+                buf.extend_from_slice(&[0u8; 32]); // Padding zeros
+            }
+        }
     }
 }

 impl Decode for OptionalHash {
@@ -102,20 +148,28 @@ impl Decode for OptionalHash {
     }

     fn ssz_fixed_len() -> usize {
-        32 // Same as H256
+        33 // 1 byte for Some/None flag + 32 bytes for hash
     }

     fn from_ssz_bytes(bytes: &[u8]) -> Result<Self, ssz::DecodeError> {
-        if bytes.len() != 32 {
+        if bytes.len() != 33 {
             return Err(ssz::DecodeError::InvalidByteLength {
                 len: bytes.len(),
-                expected: 32,
+                expected: 33,
             });
         }

-        let hash = H256::from_ssz_bytes(bytes)?;
+        match bytes[0] {
+            0 => Ok(OptionalHash::none()),
+            1 => {
+                let hash = H256::from_ssz_bytes(&bytes[1..])?;
+                Ok(OptionalHash::some(hash))
+            }
+            _ => Err(ssz::DecodeError::BytesInvalid(
+                "Invalid flag byte for OptionalHash".to_string(),
+            )),
+        }
     }
 }

 unsafe impl Send for OptionalHash {}
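
Note: a minimal round-trip sketch of the new 33-byte layout (not part of the commit; it assumes the `some`/`none` constructors above, that `OptionalHash` derives `PartialEq`/`Debug`, and the `ssz` crate's `as_ssz_bytes`/`from_ssz_bytes` helpers):

    // Hypothetical test, not in this commit.
    #[test]
    fn optional_hash_round_trip() {
        // Some(hash): flag byte 1 followed by the 32 hash bytes.
        let some = OptionalHash::some(H256::repeat_byte(0xab));
        let bytes = some.as_bytes();
        assert_eq!(bytes[0], 1);
        assert_eq!(OptionalHash::from_bytes(&bytes).unwrap(), some);

        // None: flag byte 0, remaining 32 bytes stay zero.
        let none = OptionalHash::none();
        assert_eq!(none.as_bytes()[0], 0);
        assert_eq!(OptionalHash::from_bytes(&none.as_bytes()).unwrap(), none);

        // The SSZ path uses the same layout, so it round-trips too.
        let ssz = some.as_ssz_bytes();
        assert_eq!(ssz.len(), 33);
        assert_eq!(OptionalHash::from_ssz_bytes(&ssz).unwrap(), some);
    }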


@@ -580,12 +580,12 @@ fn layer_size_key(layer: usize) -> Vec<u8> {
 pub struct NodeDBTransaction(DBTransaction);

-impl NodeDatabase<DataRoot> for FlowDBStore {
-    fn get_node(&self, layer: usize, pos: usize) -> Result<Option<DataRoot>> {
+impl NodeDatabase<OptionalHash> for FlowDBStore {
+    fn get_node(&self, layer: usize, pos: usize) -> Result<Option<OptionalHash>> {
         Ok(self
             .kvdb
             .get(COL_FLOW_MPT_NODES, &encode_mpt_node_key(layer, pos))?
-            .map(|v| DataRoot::from_slice(&v)))
+            .map(|v| OptionalHash::from_bytes(v.as_slice().try_into().unwrap()).unwrap()))
     }

     fn get_layer_size(&self, layer: usize) -> Result<Option<usize>> {
@@ -595,11 +595,11 @@ impl NodeDatabase<DataRoot> for FlowDBStore {
         }
     }

-    fn start_transaction(&self) -> Box<dyn NodeTransaction<DataRoot>> {
+    fn start_transaction(&self) -> Box<dyn NodeTransaction<OptionalHash>> {
         Box::new(NodeDBTransaction(self.kvdb.transaction()))
     }

-    fn commit(&self, tx: Box<dyn NodeTransaction<DataRoot>>) -> Result<()> {
+    fn commit(&self, tx: Box<dyn NodeTransaction<OptionalHash>>) -> Result<()> {
         let db_tx: Box<NodeDBTransaction> = tx
             .into_any()
             .downcast()
@@ -608,21 +608,21 @@ impl NodeDatabase<DataRoot> for FlowDBStore {
     }
 }

-impl NodeTransaction<DataRoot> for NodeDBTransaction {
-    fn save_node(&mut self, layer: usize, pos: usize, node: &DataRoot) {
+impl NodeTransaction<OptionalHash> for NodeDBTransaction {
+    fn save_node(&mut self, layer: usize, pos: usize, node: &OptionalHash) {
         self.0.put(
             COL_FLOW_MPT_NODES,
             &encode_mpt_node_key(layer, pos),
-            node.as_bytes(),
+            &node.as_bytes(),
         );
     }

-    fn save_node_list(&mut self, nodes: &[(usize, usize, &DataRoot)]) {
+    fn save_node_list(&mut self, nodes: &[(usize, usize, &OptionalHash)]) {
         for (layer_index, position, data) in nodes {
             self.0.put(
                 COL_FLOW_MPT_NODES,
                 &encode_mpt_node_key(*layer_index, *position),
-                data.as_bytes(),
+                &data.as_bytes(),
             );
         }
     }
@@ -653,63 +653,63 @@ impl NodeTransaction<DataRoot> for NodeDBTransaction {
     }
 }

-// Adapter implementation for OptionalHash that delegates to the H256 implementation
-impl NodeDatabase<OptionalHash> for FlowDBStore {
-    fn get_node(&self, layer: usize, pos: usize) -> Result<Option<OptionalHash>> {
-        Ok(self.get_node(layer, pos)?.map(OptionalHash::some))
-    }
+// // Adapter implementation for OptionalHash that delegates to the H256 implementation
+// impl NodeDatabase<OptionalHash> for FlowDBStore {
+//     fn get_node(&self, layer: usize, pos: usize) -> Result<Option<OptionalHash>> {
+//         Ok(self.get_node(layer, pos)?.map(OptionalHash::some))
+//     }

-    fn get_layer_size(&self, layer: usize) -> Result<Option<usize>> {
-        // Layer size is the same regardless of hash type
-        <Self as NodeDatabase<DataRoot>>::get_layer_size(self, layer)
-    }
+//     fn get_layer_size(&self, layer: usize) -> Result<Option<usize>> {
+//         // Layer size is the same regardless of hash type
+//         <Self as NodeDatabase<DataRoot>>::get_layer_size(self, layer)
+//     }

-    fn start_transaction(&self) -> Box<dyn NodeTransaction<OptionalHash>> {
-        Box::new(OptionalHashNodeDBTransaction(self.start_transaction()))
-    }
+//     fn start_transaction(&self) -> Box<dyn NodeTransaction<OptionalHash>> {
+//         Box::new(OptionalHashNodeDBTransaction(self.start_transaction()))
+//     }

-    fn commit(&self, tx: Box<dyn NodeTransaction<OptionalHash>>) -> Result<()> {
-        let h256_tx = tx
-            .into_any()
-            .downcast::<OptionalHashNodeDBTransaction>()
-            .map_err(|_| anyhow::anyhow!("Failed to downcast OptionalHashNodeDBTransaction"))?;
-        self.commit(h256_tx.0)
-    }
-}
+//     fn commit(&self, tx: Box<dyn NodeTransaction<OptionalHash>>) -> Result<()> {
+//         let h256_tx = tx
+//             .into_any()
+//             .downcast::<OptionalHashNodeDBTransaction>()
+//             .map_err(|_| anyhow::anyhow!("Failed to downcast OptionalHashNodeDBTransaction"))?;
+//         self.commit(h256_tx.0)
+//     }
+// }

-// Wrapper for NodeTransaction<OptionalHash> that delegates to NodeTransaction<H256>
-pub struct OptionalHashNodeDBTransaction(Box<dyn NodeTransaction<DataRoot>>);
+// // Wrapper for NodeTransaction<OptionalHash> that delegates to NodeTransaction<H256>
+// pub struct OptionalHashNodeDBTransaction(Box<dyn NodeTransaction<DataRoot>>);

-impl NodeTransaction<OptionalHash> for OptionalHashNodeDBTransaction {
-    fn save_node(&mut self, layer: usize, pos: usize, node: &OptionalHash) {
-        self.0.save_node(layer, pos, &node.unwrap());
-    }
+// impl NodeTransaction<OptionalHash> for OptionalHashNodeDBTransaction {
+//     fn save_node(&mut self, layer: usize, pos: usize, node: &OptionalHash) {
+//         self.0.save_node(layer, pos, &node.unwrap());
+//     }

-    fn save_node_list(&mut self, nodes: &[(usize, usize, &OptionalHash)]) {
-        let h256_nodes: Vec<(usize, usize, DataRoot)> = nodes
-            .iter()
-            .map(|(layer, pos, oh)| (*layer, *pos, oh.unwrap()))
-            .collect();
-        let h256_node_refs: Vec<(usize, usize, &DataRoot)> = h256_nodes
-            .iter()
-            .map(|(layer, pos, h)| (*layer, *pos, h))
-            .collect();
-        self.0.save_node_list(&h256_node_refs);
-    }
+//     fn save_node_list(&mut self, nodes: &[(usize, usize, &OptionalHash)]) {
+//         let h256_nodes: Vec<(usize, usize, DataRoot)> = nodes
+//             .iter()
+//             .map(|(layer, pos, oh)| (*layer, *pos, oh.unwrap()))
+//             .collect();
+//         let h256_node_refs: Vec<(usize, usize, &DataRoot)> = h256_nodes
+//             .iter()
+//             .map(|(layer, pos, h)| (*layer, *pos, h))
+//             .collect();
+//         self.0.save_node_list(&h256_node_refs);
+//     }

-    fn remove_node_list(&mut self, nodes: &[(usize, usize)]) {
-        self.0.remove_node_list(nodes);
-    }
+//     fn remove_node_list(&mut self, nodes: &[(usize, usize)]) {
+//         self.0.remove_node_list(nodes);
+//     }

-    fn save_layer_size(&mut self, layer: usize, size: usize) {
-        self.0.save_layer_size(layer, size);
-    }
+//     fn save_layer_size(&mut self, layer: usize, size: usize) {
+//         self.0.save_layer_size(layer, size);
+//     }

-    fn remove_layer_size(&mut self, layer: usize) {
-        self.0.remove_layer_size(layer);
-    }
+//     fn remove_layer_size(&mut self, layer: usize) {
+//         self.0.remove_layer_size(layer);
+//     }

-    fn into_any(self: Box<Self>) -> Box<dyn Any> {
-        Box::new(self)
-    }
-}
+//     fn into_any(self: Box<Self>) -> Box<dyn Any> {
+//         Box::new(self)
+//     }
+// }
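
Note: the change above replaces the unwrap-based adapter with a direct `NodeDatabase<OptionalHash>` implementation. The adapter called `node.unwrap()` before delegating to the `DataRoot` store, so a `None` node could never be persisted without panicking; the 33-byte encoding stores the flag itself. A sketch of the intended behavior (hypothetical, not in the commit; the layer/pos values are arbitrary and `PartialEq`/`Debug` on `OptionalHash` are assumed):

    fn none_survives_storage(db: &FlowDBStore) -> anyhow::Result<()> {
        let mut tx = db.start_transaction();
        // Old adapter path: `node.unwrap()` would panic here on a None node.
        // New direct path: the leading flag byte records the None.
        tx.save_node(3, 7, &OptionalHash::none());
        db.commit(tx)?;
        // Reading back yields None rather than a zero hash.
        assert_eq!(db.get_node(3, 7)?, Some(OptionalHash::none()));
        Ok(())
    }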


@@ -946,17 +946,24 @@ impl LogManager {
             return Ok(());
         }
         let start_time = Instant::now();
+        println!(
+            "append_subtree_list: tx_seq={} tx_start_index={} merkle_list={:?}",
+            tx_seq, tx_start_index, merkle_list
+        );
         self.pad_tx(tx_seq, tx_start_index, &mut *merkle)?;

         for (subtree_depth, subtree_root) in merkle_list {
             let subtree_size = 1 << (subtree_depth - 1);
+            println!("append new subtree to pora_chunks_merkle");
             if merkle.last_chunk_merkle.leaves() + subtree_size <= PORA_CHUNK_SIZE {
+                println!("append to last_chunk_");
                 merkle
                     .last_chunk_merkle
                     .append_subtree(subtree_depth, OptionalHash::some(subtree_root))?;
                 if merkle.last_chunk_merkle.leaves() == subtree_size {
                     // `last_chunk_merkle` was empty, so this is a new leaf in the top_tree.
                     merkle
                         .pora_chunks_merkle
                         .append_subtree(1, merkle.last_chunk_merkle.root())?;
@@ -972,17 +979,23 @@ impl LogManager {
                 )?;
             }
         } else {
+            println!("append to pora_chunks_");
             // `last_chunk_merkle` has been padded here, so a subtree should not be across
             // the chunks boundary.
             assert_eq!(merkle.last_chunk_merkle.leaves(), 0);
             assert!(subtree_size >= PORA_CHUNK_SIZE);
             merkle.pora_chunks_merkle.append_subtree(
                 subtree_depth - log2_pow2(PORA_CHUNK_SIZE),
-                subtree_root.into(),
+                OptionalHash::some(subtree_root),
             )?;
         }
     }

+    println!(
+        "after append_subtree_list: tx_seq={} tx_start_index={} last_chunk={}",
+        tx_seq,
+        tx_start_index,
+        merkle.last_chunk_merkle.leaves()
+    );
     metrics::APPEND_SUBTREE_LIST.update_since(start_time);
     Ok(())
 }
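
Note: for the branch traced above, a subtree of depth `d` carries `1 << (d - 1)` leaves, so the choice between `last_chunk_merkle` and `pora_chunks_merkle` is pure arithmetic. A worked sketch (the value of `PORA_CHUNK_SIZE` is an assumption, and `log2_pow2` is replaced by `trailing_zeros` to stay self-contained):

    const PORA_CHUNK_SIZE: usize = 1024; // assumed, illustrative only

    fn main() {
        // depth 11 => 1 << 10 = 1024 leaves: a full chunk, appended to
        // pora_chunks_merkle at depth 11 - log2(1024) = 1.
        let subtree_depth = 11usize;
        let subtree_size = 1usize << (subtree_depth - 1);
        assert!(subtree_size >= PORA_CHUNK_SIZE);
        assert_eq!(subtree_depth - PORA_CHUNK_SIZE.trailing_zeros() as usize, 1);

        // depth 5 => 16 leaves: small enough to land in last_chunk_merkle
        // whenever its current leaf count plus 16 stays within the chunk.
        let small = 1usize << (5 - 1);
        assert!(small <= PORA_CHUNK_SIZE);
    }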


@@ -168,6 +168,7 @@ fn test_revert() {
 #[test]
 fn test_put_tx() {
     for i in 0..12 {
+        println!("{}", i);
         let chunk_count = 0xF << i;
         let mut store = create_store();
         put_tx(&mut store, chunk_count, 0);
@@ -200,7 +201,9 @@ fn put_tx(store: &mut LogManager, chunk_count: usize, seq: u64) {
         // TODO: This can come from `tx_merkle`.
         merkle_nodes,
     };
+    println!("put tx");
     store.put_tx(tx.clone()).unwrap();
+    println!("put tx done");
     for start_index in (0..chunk_count).step_by(PORA_CHUNK_SIZE) {
         let end = cmp::min((start_index + PORA_CHUNK_SIZE) * CHUNK_SIZE, data.len());
         let chunk_array = ChunkArray {
@@ -209,5 +212,6 @@ fn put_tx(store: &mut LogManager, chunk_count: usize, seq: u64) {
         };
         store.put_chunks(tx.seq, chunk_array.clone()).unwrap();
     }
+    println!("put chunks done");
     store.finalize_tx(tx.seq).unwrap();
 }