Mirror of https://github.com/0glabs/0g-storage-node.git, synced 2024-11-10 10:05:17 +00:00

Compare commits: adcc1a7f0d ... 4127333297 (4 commits)

Commits in this range (SHA1):
4127333297
b82183bf09
e75f58c1ee
0734ec9886
Changes to the log query stream (LogQueryState):

@@ -1,6 +1,6 @@
 use ethers::prelude::{Filter, JsonRpcClient, Log, Middleware, Provider, ProviderError, U64};
 use futures_core::stream::Stream;
-use jsonrpsee::tracing::trace;
+use jsonrpsee::tracing::{debug, error, trace};
 use std::future::Future;
 use std::time::Duration;
 use std::{
@@ -152,6 +152,7 @@ where
                 }
             }
             LogQueryState::LoadLogs((from_block, fut)) => {
+                debug!("LoadLogs: loading logs from block={:?}", from_block);
                 match futures_util::ready!(fut.as_mut().poll(ctx)) {
                     Ok(logs) => {
                         self.current_logs = VecDeque::from(logs);
@@ -163,6 +164,10 @@ where
                         if err.to_string().contains(msg) {
                             self.from_block = *from_block;
                             self.page_size /= 2;
+                            error!(
+                                "log_query: page size too large at {}, reducing page size to {}",
+                                self.from_block.unwrap(), self.page_size
+                            );
                             rewake_with_new_state!(ctx, self, LogQueryState::Consume);
                         }
                     }
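The last hunk above logs an error and halves page_size whenever the provider rejects a log-query range, then retries the same from_block with the smaller page. Below is a minimal standalone sketch of that shrink-and-retry pattern; the fetch_page stub, the PageResult type, and the 600-block cutoff are illustrative assumptions, not the node's actual API.

/// Stand-in result of one paginated log query against a hypothetical provider.
enum PageResult {
    Logs(Vec<String>),  // stand-in for Vec<Log>
    TooManyResults,     // provider rejected the range as too large
}

/// Illustrative stub: pretend ranges wider than 600 blocks are rejected.
fn fetch_page(from_block: u64, page_size: u64) -> PageResult {
    if page_size > 600 {
        PageResult::TooManyResults
    } else {
        PageResult::Logs(vec![format!(
            "logs for blocks {}..{}",
            from_block,
            from_block + page_size
        )])
    }
}

/// Fetch logs page by page, halving the page size whenever the provider
/// rejects a range and retrying the same from_block (mirrors the
/// `self.page_size /= 2` + rewake branch in the diff above).
fn fetch_all(mut from_block: u64, to_block: u64, mut page_size: u64) -> Vec<String> {
    let mut out = Vec::new();
    while from_block <= to_block {
        match fetch_page(from_block, page_size) {
            PageResult::Logs(mut logs) => {
                out.append(&mut logs);
                from_block += page_size; // advance only after a successful page
            }
            PageResult::TooManyResults => {
                // Shrink the window and retry the same starting block.
                page_size = (page_size / 2).max(1);
                eprintln!(
                    "page size too large at {}, reducing page size to {}",
                    from_block, page_size
                );
            }
        }
    }
    out
}

fn main() {
    let logs = fetch_all(0, 5_000, 2_048);
    println!("fetched {} pages of logs", logs.len());
}

Halving instead of failing lets the query converge on a page size the RPC endpoint will accept without restarting the whole scan.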
Changes to LogSyncManager:

@@ -5,7 +5,7 @@ use anyhow::{anyhow, bail, Result};
 use ethereum_types::H256;
 use ethers::{prelude::Middleware, types::BlockNumber};
 use futures::FutureExt;
-use jsonrpsee::tracing::{debug, error, trace, warn};
+use jsonrpsee::tracing::{debug, error, warn};
 use shared_types::{ChunkArray, Transaction};
 use std::collections::BTreeMap;
 use std::fmt::Debug;
@@ -56,7 +56,7 @@ impl LogSyncManager {
         store: Arc<dyn Store>,
     ) -> Result<(broadcast::Sender<LogSyncEvent>, oneshot::Receiver<()>)> {
         let next_tx_seq = store.next_tx_seq();
-
+        debug!("LogSyncManager spawn next_tx_seq: {}", next_tx_seq);
         let executor_clone = executor.clone();
         let mut shutdown_sender = executor.shutdown_sender();
 
@@ -251,8 +251,8 @@ impl LogSyncManager {
             }
             std::cmp::Ordering::Greater => {
                 error!(
-                    "Unexpected transaction seq: next={} get={}",
-                    self.next_tx_seq, tx.seq
+                    "Unexpected transaction tx={:?}",
+                    tx,
                 );
                 false
             }
@@ -298,7 +298,7 @@ impl LogSyncManager {
 
     async fn handle_data(&mut self, mut rx: UnboundedReceiver<LogFetchProgress>) -> Result<()> {
         while let Some(data) = rx.recv().await {
-            trace!("handle_data: data={:?}", data);
+            debug!("handle_data: data={:?}", data);
             match data {
                 LogFetchProgress::SyncedBlock((
                     block_number,
@@ -387,7 +387,7 @@ impl LogSyncManager {
                 }
             }
             self.data_cache.garbage_collect(self.next_tx_seq);
-            self.next_tx_seq += 1;
+            self.next_tx_seq = self.store.next_tx_seq();
             true
         }
     }
@@ -427,6 +427,7 @@ impl LogSyncManager {
                     &executor_clone,
                     Duration::from_millis(self.config.recover_query_delay),
                 );
+                self.next_tx_seq = self.store.next_tx_seq();
                 self.handle_data(recover_rx).await?;
             }
 
@@ -437,6 +438,7 @@ impl LogSyncManager {
             self.config.default_finalized_block_count,
             self.config.remove_finalized_block_interval_minutes,
         );
+
        Ok(())
    }
}
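A recurring change in this file swaps the locally incremented cursor (self.next_tx_seq += 1) for a re-read of the store's own counter (self.store.next_tx_seq()), so the in-memory sequence number follows what the store has actually persisted, including after the recovery path finishes. The toy Store below only illustrates that difference; its put_tx rule and field names are assumptions, not the node's real storage API.

use std::collections::BTreeMap;

/// Toy persistent store: transactions keyed by sequence number.
struct Store {
    txs: BTreeMap<u64, &'static str>,
}

impl Store {
    /// Next sequence number implied by persisted data
    /// (plays the role of store.next_tx_seq() in the diff above).
    fn next_tx_seq(&self) -> u64 {
        self.txs.keys().next_back().map(|s| *s + 1).unwrap_or(0)
    }

    /// Pretend persistence can be skipped, e.g. for an out-of-order tx.
    fn put_tx(&mut self, seq: u64, data: &'static str) -> bool {
        if seq == self.next_tx_seq() {
            self.txs.insert(seq, data);
            true
        } else {
            false // not persisted
        }
    }
}

fn main() {
    let mut store = Store { txs: BTreeMap::new() };
    let mut local_cursor = 0u64; // old style: incremented after every tx

    for (seq, data) in [(0, "a"), (2, "out-of-order"), (1, "b")] {
        let persisted = store.put_tx(seq, data);
        local_cursor += 1; // old behaviour: advance unconditionally
        let store_cursor = store.next_tx_seq(); // new behaviour: re-read the store
        println!(
            "put seq={} persisted={} local_cursor={} store_cursor={}",
            seq, persisted, local_cursor, store_cursor
        );
    }
}

With the local counter, one skipped write is enough for the cursor to drift ahead of the store permanently; re-reading the persisted counter keeps the two in lockstep.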