Mirror of https://github.com/0glabs/0g-storage-node.git, synced 2024-11-10 10:05:17 +00:00
Wait SyncedBlock to be processed for revert block (#45)
* wait SyncedBlock to be processed
* remove retry counter
* use default parent block hash for missing case
parent bd4ebee2da
commit 6c1b0b35ec
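The diff threads one idea through both files: the value type of `block_hash_cache` changes from `BlockHashAndSubmissionIndex` to `Option<BlockHashAndSubmissionIndex>`. A `None` entry marks a block for which the fetcher has already sent a `SyncedBlock` progress message that the sync manager has not finished processing yet, and `revert_one_block` now waits for the entry to turn into `Some` instead of failing immediately. Below is a minimal sketch of that handshake; the names `BlockInfo`, `Cache`, and `wait_for_processed` are illustrative stand-ins, not the crate's real types, and the poll interval is arbitrary (the real code sleeps `RETRY_WAIT_MS`).

    // Sketch of the producer/consumer handshake introduced by this commit.
    // Fetcher side: insert `None` right after emitting SyncedBlock progress.
    // Sync manager side: overwrite with `Some(info)` once the block is processed.
    // Reader side (revert path): poll until the entry becomes `Some`.
    use std::{collections::BTreeMap, sync::Arc, time::Duration};

    use anyhow::bail;
    use tokio::sync::RwLock;

    #[derive(Clone, Debug)]
    struct BlockInfo {
        block_hash: u64, // stand-in for H256
    }

    type Cache = Arc<RwLock<BTreeMap<u64, Option<BlockInfo>>>>;

    async fn wait_for_processed(cache: &Cache, block_number: u64) -> anyhow::Result<BlockInfo> {
        loop {
            // The read lock is held only for one lookup, not across the sleep.
            match cache.read().await.get(&block_number) {
                Some(Some(info)) => return Ok(info.clone()), // fully processed
                Some(None) => {}                             // placeholder: retry below
                None => bail!("None for block {}", block_number), // never seen at all
            }
            tokio::time::sleep(Duration::from_millis(100)).await;
        }
    }

    #[tokio::main]
    async fn main() -> anyhow::Result<()> {
        let cache: Cache = Arc::new(RwLock::new(BTreeMap::new()));
        // Fetcher: SyncedBlock for block 42 was sent, processing not done yet.
        cache.write().await.insert(42, None);
        let waiter = tokio::spawn({
            let cache = cache.clone();
            async move { wait_for_processed(&cache, 42).await }
        });
        // Sync manager: block 42 finishes processing a little later.
        tokio::time::sleep(Duration::from_millis(250)).await;
        cache.write().await.insert(42, Some(BlockInfo { block_hash: 0xabc }));
        println!("got {:?}", waiter.await??);
        Ok(())
    }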
@@ -61,7 +61,7 @@ impl LogEntryFetcher {
         block_number: u64,
         block_hash: H256,
         executor: &TaskExecutor,
-        block_hash_cache: Arc<RwLock<BTreeMap<u64, BlockHashAndSubmissionIndex>>>,
+        block_hash_cache: Arc<RwLock<BTreeMap<u64, Option<BlockHashAndSubmissionIndex>>>>,
     ) -> UnboundedReceiver<LogFetchProgress> {
         let (reorg_tx, reorg_rx) = tokio::sync::mpsc::unbounded_channel();
         let provider = self.provider.clone();
@@ -93,6 +93,7 @@ impl LogEntryFetcher {
                         block_number,
                         &reorg_tx,
                         &block_hash_cache,
+                        provider.as_ref(),
                     )
                     .await
                     {
@@ -122,7 +123,7 @@ impl LogEntryFetcher {
         &self,
         executor: &TaskExecutor,
         store: Arc<RwLock<dyn Store>>,
-        block_hash_cache: Arc<RwLock<BTreeMap<u64, BlockHashAndSubmissionIndex>>>,
+        block_hash_cache: Arc<RwLock<BTreeMap<u64, Option<BlockHashAndSubmissionIndex>>>>,
         default_finalized_block_count: u64,
         remove_finalized_block_interval_minutes: u64,
     ) {
@@ -167,7 +168,7 @@ impl LogEntryFetcher {
                         if processed_block_number >= finalized_block_number {
                             let mut pending_keys = vec![];
                             for (key, _) in block_hash_cache.read().await.iter() {
-                                if *key <= finalized_block_number {
+                                if *key < finalized_block_number {
                                     pending_keys.push(*key);
                                 } else {
                                     break;
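One subtlety in the hunk above: the condition for collecting finalized entries to prune tightens from `<=` to `<`, so the cache entry for the finalized block itself now survives pruning (presumably because the revert path may still need it as a parent). A tiny self-contained check of the new boundary, with plain integers standing in for the cached blocks:

    // Demonstrates the boundary change from `*key <= finalized_block_number`
    // to `*key < finalized_block_number`: keys arrive in ascending order
    // (BTreeMap iteration), and the finalized block's own entry is no longer
    // scheduled for removal.
    fn main() {
        let finalized_block_number = 100u64;
        let cached_keys = [98u64, 99, 100, 101];
        let pending_keys: Vec<u64> = cached_keys
            .iter()
            .copied()
            .take_while(|key| *key < finalized_block_number) // was `<=`
            .collect();
        assert_eq!(pending_keys, vec![98, 99]); // block 100 itself stays cached
        println!("pending_keys = {:?}", pending_keys);
    }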
@@ -284,7 +285,7 @@ impl LogEntryFetcher {
         start_block_number: u64,
         parent_block_hash: H256,
         executor: &TaskExecutor,
-        block_hash_cache: Arc<RwLock<BTreeMap<u64, BlockHashAndSubmissionIndex>>>,
+        block_hash_cache: Arc<RwLock<BTreeMap<u64, Option<BlockHashAndSubmissionIndex>>>>,
         watch_loop_wait_time_ms: u64,
     ) -> UnboundedReceiver<LogFetchProgress> {
         let (watch_tx, watch_rx) = tokio::sync::mpsc::unbounded_channel();
@@ -340,7 +341,7 @@ impl LogEntryFetcher {
         watch_tx: &UnboundedSender<LogFetchProgress>,
         confirmation_delay: u64,
         contract: &ZgsFlow<Provider<RetryClient<Http>>>,
-        block_hash_cache: &Arc<RwLock<BTreeMap<u64, BlockHashAndSubmissionIndex>>>,
+        block_hash_cache: &Arc<RwLock<BTreeMap<u64, Option<BlockHashAndSubmissionIndex>>>>,
     ) -> Result<Option<(u64, H256, Option<Option<u64>>)>> {
         let latest_block_number = provider.get_block_number().await?.as_u64();
         debug!(
@@ -371,6 +372,7 @@ impl LogEntryFetcher {
                 from_block_number.saturating_sub(1),
                 watch_tx,
                 block_hash_cache,
+                provider,
             )
             .await?;
             return Ok(Some((parent_block_number, block_hash, None)));
@@ -500,6 +502,8 @@ impl LogEntryFetcher {
                 if let Err(e) = watch_tx.send(LogFetchProgress::SyncedBlock(*p)) {
                     warn!("send LogFetchProgress failed: {:?}", e);
                     return Ok(progress);
+                } else {
+                    block_hash_cache.write().await.insert(p.0, None);
                 }
             }
             for log in log_events.into_iter() {
@@ -524,15 +528,25 @@ async fn revert_one_block(
     block_hash: H256,
     block_number: u64,
     watch_tx: &UnboundedSender<LogFetchProgress>,
-    block_hash_cache: &Arc<RwLock<BTreeMap<u64, BlockHashAndSubmissionIndex>>>,
+    block_hash_cache: &Arc<RwLock<BTreeMap<u64, Option<BlockHashAndSubmissionIndex>>>>,
+    provider: &Provider<RetryClient<Http>>,
 ) -> Result<(u64, H256), anyhow::Error> {
     debug!("revert block {}, block hash {:?}", block_number, block_hash);
-    let block = block_hash_cache
-        .read()
-        .await
-        .get(&block_number)
-        .ok_or_else(|| anyhow!("None for block {}", block_number))?
-        .clone();
+    let block = loop {
+        if let Some(block) = block_hash_cache.read().await.get(&block_number) {
+            if let Some(v) = block {
+                break v.clone();
+            } else {
+                debug!(
+                    "block_hash_cache wait for SyncedBlock processed for {}",
+                    block_number
+                );
+                tokio::time::sleep(Duration::from_secs(RETRY_WAIT_MS)).await;
+            }
+        } else {
+            return Err(anyhow!("None for block {}", block_number));
+        }
+    };

     assert!(block_hash == block.block_hash);
     if let Some(reverted) = block.first_submission_index {
@@ -540,13 +554,18 @@ async fn revert_one_block(
     }

     let parent_block_number = block_number.saturating_sub(1);
-    let parent_block_hash = block_hash_cache
-        .read()
-        .await
-        .get(&parent_block_number)
-        .ok_or_else(|| anyhow!("None for block {}", parent_block_number))?
-        .clone()
-        .block_hash;
+    let parent_block_hash = match block_hash_cache.read().await.get(&parent_block_number) {
+        Some(v) => v.clone().as_ref().unwrap().block_hash,
+        _ => {
+            debug!("assume parent block {} is not reorged", parent_block_number);
+            provider
+                .get_block(parent_block_number)
+                .await?
+                .ok_or_else(|| anyhow!("None for block {}", parent_block_number))?
+                .hash
+                .ok_or_else(|| anyhow!("None block hash for block {}", parent_block_number))?
+        }
+    };

     let synced_block =
         LogFetchProgress::SyncedBlock((parent_block_number, parent_block_hash, None));
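The hunk above implements the third bullet of the commit message: when the parent block is missing from the cache, the code now assumes it was not reorged and asks the RPC provider for its hash instead of returning an error. The same call chain, lifted into a standalone ethers-rs sketch; the free function, its name, and the placeholder endpoint in `main` are illustrative, only the chain of calls mirrors the diff:

    // Fallback for a parent block absent from block_hash_cache. Two failure
    // modes are distinguished: `get_block` returning Ok(None) means the node
    // does not know the block at all, while a known-but-pending block can
    // still carry `hash: None` -- hence the two separate `ok_or_else` steps.
    use anyhow::anyhow;
    use ethers::prelude::{Http, Middleware, Provider, RetryClient, H256};

    async fn default_parent_block_hash(
        provider: &Provider<RetryClient<Http>>,
        parent_block_number: u64,
    ) -> anyhow::Result<H256> {
        provider
            .get_block(parent_block_number)
            .await? // transport / RPC error
            .ok_or_else(|| anyhow!("None for block {}", parent_block_number))?
            .hash
            .ok_or_else(|| anyhow!("None block hash for block {}", parent_block_number))
    }

    #[tokio::main]
    async fn main() -> anyhow::Result<()> {
        // Placeholder endpoint; point this at a real JSON-RPC node to run it.
        let provider = Provider::new_client("http://localhost:8545", 3, 500)?;
        println!("{:?}", default_parent_block_hash(&provider, 1).await?);
        Ok(())
    }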
@@ -41,7 +41,7 @@ pub struct LogSyncManager {
     /// To broadcast events to handle in advance.
     event_send: broadcast::Sender<LogSyncEvent>,

-    block_hash_cache: Arc<RwLock<BTreeMap<u64, BlockHashAndSubmissionIndex>>>,
+    block_hash_cache: Arc<RwLock<BTreeMap<u64, Option<BlockHashAndSubmissionIndex>>>>,
 }

 impl LogSyncManager {
@@ -85,6 +85,7 @@ impl LogSyncManager {
                 .await
                 .get_block_hashes()?
                 .into_iter()
+                .map(|(x, y)| (x, Some(y)))
                 .collect::<BTreeMap<_, _>>(),
         ));
         let mut log_sync_manager = Self {
@@ -196,7 +197,7 @@ impl LogSyncManager {
                 .get(&start_block_number)
             {
                 // special case avoid reorg
-                submission_idx = b.first_submission_index;
+                submission_idx = b.as_ref().unwrap().first_submission_index;
             }

             let parent_block_number = start_block_number.saturating_sub(1);
|
|||||||
.await
|
.await
|
||||||
.get(&parent_block_number)
|
.get(&parent_block_number)
|
||||||
{
|
{
|
||||||
Some(b) => b.block_hash,
|
Some(b) => b.as_ref().unwrap().block_hash,
|
||||||
_ => log_sync_manager
|
_ => log_sync_manager
|
||||||
.log_fetcher
|
.log_fetcher
|
||||||
.provider()
|
.provider()
|
||||||
@@ -345,10 +346,10 @@ impl LogSyncManager {
         if first_submission_index.is_some() {
             self.block_hash_cache.write().await.insert(
                 block_number,
-                BlockHashAndSubmissionIndex {
+                Some(BlockHashAndSubmissionIndex {
                     block_hash,
                     first_submission_index: first_submission_index.unwrap(),
-                },
+                }),
             );
         }