Change db_max_num_chunks to db_max_num_sectors. (#137)

* Change db_max_num_chunks to db_max_num_sectors.

* Update tests and config files.

* Revert contract change.
peilun-conflux committed 2024-07-29 22:31:19 +08:00 (committed by GitHub)
parent ae9c52c0e6 · commit dbd865fded
8 changed files with 14 additions and 14 deletions

View File

@@ -17,7 +17,7 @@ const PRUNE_THRESHOLD: f32 = 0.9;
 pub struct PrunerConfig {
     pub shard_config: ShardConfig,
     pub db_path: PathBuf,
-    pub max_num_chunks: usize,
+    pub max_num_sectors: usize,
     pub check_time: Duration,
     pub batch_size: usize,
     pub batch_wait_time: Duration,
@@ -25,7 +25,7 @@ pub struct PrunerConfig {
 impl PrunerConfig {
     fn start_prune_size(&self) -> u64 {
-        (self.max_num_chunks as f32 * PRUNE_THRESHOLD) as u64
+        (self.max_num_sectors as f32 * PRUNE_THRESHOLD) as u64
     }
 }
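For orientation (not part of this commit): with the `PRUNE_THRESHOLD` of 0.9 shown above, pruning is scheduled once the store reaches 90% of the configured sector cap. A minimal standalone Rust sketch of that calculation, using the sample value from the config files below; the free function and the printed example are illustrative only:

// Illustrative sketch, not from this commit: how the 0.9 threshold turns the
// configured sector cap into the size at which pruning starts.
const PRUNE_THRESHOLD: f32 = 0.9;

fn start_prune_size(max_num_sectors: usize) -> u64 {
    (max_num_sectors as f32 * PRUNE_THRESHOLD) as u64
}

fn main() {
    // With the sample value of 1_000_000_000 sectors (256 B each), pruning
    // would start at roughly 900 million sectors, i.e. about 230 GB of data.
    println!("prune starts at {} sectors", start_prune_size(1_000_000_000));
}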

View File

@@ -201,12 +201,12 @@ impl ZgsConfig {
     }
     pub fn pruner_config(&self) -> Result<Option<PrunerConfig>, String> {
-        if let Some(max_num_chunks) = self.db_max_num_chunks {
+        if let Some(max_num_sectors) = self.db_max_num_sectors {
             let shard_config = self.shard_config()?;
             Ok(Some(PrunerConfig {
                 shard_config,
                 db_path: self.db_dir.clone().into(),
-                max_num_chunks,
+                max_num_sectors,
                 check_time: Duration::from_secs(self.prune_check_time_s),
                 batch_size: self.prune_batch_size,
                 batch_wait_time: Duration::from_millis(self.prune_batch_wait_time_ms),

View File

@@ -63,7 +63,7 @@ build_config! {
     // db
     (db_dir, (String), "db".to_string())
-    (db_max_num_chunks, (Option<usize>), None)
+    (db_max_num_sectors, (Option<usize>), None)
     (prune_check_time_s, (u64), 60)
     (prune_batch_size, (usize), 1024)
     (prune_batch_wait_time_ms, (u64), 1000)

View File

@@ -194,11 +194,11 @@ mine_contract_address = "0x85F6722319538A805ED5733c5F4882d96F1C7384"
 #######################################################################
 # The max number of chunk entries to store in db.
 # Each entry is 256B, so the db size is roughly limited to
-# `256 * db_max_num_chunks` Bytes.
+# `256 * db_max_num_sectors` Bytes.
 # If this limit is reached, the node will update its `shard_position`
 # and store only half data.
 #
-# db_max_num_chunks = 1000000000
+# db_max_num_sectors = 1000000000
 # The format is <shard_id>/<shard_number>, where the shard number is 2^n.
 # This only applies if there is no stored shard config in db.

View File

@@ -194,11 +194,11 @@
 #######################################################################
 # The max number of chunk entries to store in db.
 # Each entry is 256B, so the db size is roughly limited to
-# `256 * db_max_num_chunks` Bytes.
+# `256 * db_max_num_sectors` Bytes.
 # If this limit is reached, the node will update its `shard_position`
 # and store only half data.
 #
-# db_max_num_chunks = 1000000000
+# db_max_num_sectors = 1000000000
 # The format is <shard_id>/<shard_number>, where the shard number is 2^n.
 # This only applies if there is no stored shard config in db.
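A quick check of the arithmetic in the comment above (illustrative only, not part of the commit); the local names are hypothetical, the numbers come from the config text:

// Back-of-the-envelope check of the config comment above (illustrative only).
fn main() {
    let db_max_num_sectors: u64 = 1_000_000_000; // sample value from the config
    let sector_size_bytes: u64 = 256; // "each entry is 256B"
    let cap_bytes = db_max_num_sectors * sector_size_bytes;
    println!("approximate db cap: {} GB", cap_bytes / 1_000_000_000); // 256 GB
    // Per the comment, reaching the cap makes the node halve its stored share
    // via `shard_position`, dropping the footprint to roughly 128 GB.
}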

View File

@@ -16,7 +16,7 @@ class MineTest(TestFramework):
         self.num_blockchain_nodes = 1
         self.num_nodes = 1
         self.zgs_node_configs[0] = {
-            "db_max_num_chunks": 2**30,
+            "db_max_num_sectors": 2**30,
             "miner_key": GENESIS_PRIV_KEY,
             "shard_position": "3 / 8",
         }

View File

@@ -12,7 +12,7 @@ class PrunerTest(TestFramework):
         self.num_blockchain_nodes = 1
         self.num_nodes = 1
         self.zgs_node_configs[0] = {
-            "db_max_num_chunks": 16 * 1024,
+            "db_max_num_sectors": 16 * 1024,
             "prune_check_time_s": 1,
             "prune_batch_wait_time_ms": 10,
         }
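The small values here are what let the test hit pruning quickly: 16 * 1024 sectors at 256 B each is only a 4 MiB cap, and with the 0.9 threshold the pruner starts evicting at roughly 14,745 sectors, checked once per second. A short illustrative sketch of that arithmetic (not test code):

// Illustrative arithmetic for the pruner test limits above (not test code).
fn main() {
    let max_num_sectors: u64 = 16 * 1024; // db_max_num_sectors in the test
    let cap_mib = max_num_sectors * 256 / (1024 * 1024);
    let trigger = (max_num_sectors as f32 * 0.9) as u64;
    // 4 MiB cap; pruning starts near 14,745 sectors, with a 1 s check interval
    // (prune_check_time_s) and a 10 ms wait between prune batches.
    println!("cap = {} MiB, prune trigger ≈ {} sectors", cap_mib, trigger);
}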

View File

@@ -12,15 +12,15 @@ class PrunerTest(TestFramework):
         self.num_blockchain_nodes = 1
         self.num_nodes = 4
         self.zgs_node_configs[0] = {
-            "db_max_num_chunks": 2 ** 30,
+            "db_max_num_sectors": 2 ** 30,
             "shard_position": "0/2"
         }
         self.zgs_node_configs[1] = {
-            "db_max_num_chunks": 2 ** 30,
+            "db_max_num_sectors": 2 ** 30,
             "shard_position": "1/2"
         }
         self.zgs_node_configs[3] = {
-            "db_max_num_chunks": 2 ** 30,
+            "db_max_num_sectors": 2 ** 30,
             "shard_position": "1/4"
         }
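For the shard positions above, the config comment earlier in this commit gives the format as <shard_id>/<shard_number> with the shard number a power of two, so each node presumably keeps a 1/shard_number slice of the data. A small illustrative sketch (not part of the tests) of what the three configured nodes cover:

// Illustrative only: fraction of data kept per test node, based on the
// <shard_id>/<shard_number> format described in the config comment.
fn shard_position(s: &str) -> Option<(u32, u32)> {
    let (id, num) = s.split_once('/')?;
    Some((id.trim().parse().ok()?, num.trim().parse().ok()?))
}

fn main() {
    for pos in ["0/2", "1/2", "1/4"] {
        if let Some((id, num)) = shard_position(pos) {
            // Nodes 0 and 1 each keep half of the data; node 3 keeps a quarter.
            println!("shard {id}/{num} stores 1/{num} of the data");
        }
    }
}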