Mirror of https://github.com/0glabs/0g-storage-node.git
Synced 2024-11-10 10:05:17 +00:00

Compare commits: 6fc43cc95f...c83541339f (7 commits)

Commits:
- c83541339f
- 29fcc415a6
- bf3694d138
- ec21c6fce4
- 7ef1a73c7c
- 9028a30e9d
- 3610b30bc6

@@ -238,6 +238,10 @@ impl PoraService
             return Err("too many mine shards");
         }

+        if puzzle.context.flow_length <= U256::one() {
+            return Err("no data submitted");
+        }
+
         if self.mine_range.shard_config.num_shard as u64 > puzzle.context.flow_length.as_u64() {
             return Err("Not enough flow length to shard");
         }

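The two guards reject a puzzle before any mining work starts: an empty flow has nothing to mine against, and a shard count larger than the flow length would leave shards covering no sectors. A minimal Python sketch of the same guard logic (the helper and its exceptions are illustrative, not project code):

    # Sketch of the pre-mining guards above (illustrative only, not project code).
    # flow_length: sectors currently in the flow; num_shard: number of mine shards.
    def check_puzzle(flow_length: int, num_shard: int) -> None:
        if flow_length <= 1:
            raise ValueError("no data submitted")  # empty flow: nothing to mine
        if num_shard > flow_length:
            raise ValueError("not enough flow length to shard")  # a shard would be empty
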
@@ -90,6 +90,14 @@ impl FlowStore
         }
         self.db.delete_batch_list(batch_list)
     }
+
+    pub fn get_raw_batch(&self, batch_index: u64) -> Result<Option<EntryBatch>> {
+        self.db.get_entry_batch(batch_index)
+    }
+
+    pub fn get_batch_root(&self, batch_index: u64) -> Result<Option<DataRoot>> {
+        self.db.get_batch_root(batch_index)
+    }
 }

 #[derive(Clone, Debug)]

@@ -575,6 +583,13 @@ impl FlowDBStore
         }
         Ok(self.kvdb.write(tx)?)
     }
+
+    fn get_batch_root(&self, batch_index: u64) -> Result<Option<DataRoot>> {
+        Ok(self
+            .kvdb
+            .get(COL_ENTRY_BATCH_ROOT, &batch_index.to_be_bytes())?
+            .map(|v| DataRoot::from_slice(&v)))
+    }
 }

 #[derive(DeriveEncode, DeriveDecode, Clone, Debug)]

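`get_raw_batch` and `get_batch_root` expose the on-disk batch and the root persisted at write time; the proof-generation hunk further down uses them to dump both when `entry_proof` fails. As a rough analogue, a consistency probe could compare a digest of the raw batch against the stored value (a toy sketch; the real store compares Merkle roots, and all names here are hypothetical):

    import hashlib

    # Toy consistency probe (hypothetical): compare a digest of the raw batch
    # bytes against the value persisted at write time.
    def batch_root_matches(raw_batch: bytes | None, db_root: bytes | None) -> bool:
        if raw_batch is None or db_root is None:
            return False  # a missing batch or missing stored root is itself a finding
        return hashlib.sha3_256(raw_batch).digest() == db_root
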
@@ -6,6 +6,7 @@ use std::mem;
 use tracing::error;
 use zgs_spec::{BYTES_PER_LOAD, BYTES_PER_SECTOR, SECTORS_PER_LOAD, SECTORS_PER_SEAL};

+#[derive(Debug)]
 pub enum EntryBatchData {
     Complete(Vec<u8>),
     /// All `PartialBatch`s are ordered based on `start_index`.

@@ -23,7 +23,7 @@ use super::SealAnswer;
 use chunk_data::EntryBatchData;
 use seal::SealInfo;

-#[derive(Encode, Decode)]
+#[derive(Debug, Encode, Decode)]
 pub struct EntryBatch {
     seal: SealInfo,
     // the inner data

@@ -19,7 +19,7 @@ pub struct SealContextInfo {
 type ChunkSealBitmap = WrappedBitmap<SEALS_PER_LOAD>;
 const_assert!(SEALS_PER_LOAD <= u128::BITS as usize);

-#[derive(Default, DeriveEncode, DeriveDecode)]
+#[derive(Debug, Default, DeriveEncode, DeriveDecode)]
 pub struct SealInfo {
     // a bitmap specify which sealing chunks have been sealed
     bitmap: ChunkSealBitmap,

@@ -776,7 +776,21 @@ impl LogManager
                     .gen_proof(flow_index as usize % PORA_CHUNK_SIZE)?,
             }
         };
-        entry_proof(&top_proof, &sub_proof)
+        let r = entry_proof(&top_proof, &sub_proof);
+        if r.is_err() {
+            let raw_batch = self.flow_store.get_raw_batch(seg_index as u64)?;
+            let db_root = self.flow_store.get_batch_root(seg_index as u64)?;
+            error!(
+                ?r,
+                ?raw_batch,
+                ?db_root,
+                ?seg_index,
+                "gen proof error: top_leaves={}, last={}",
+                merkle.pora_chunks_merkle.leaves(),
+                merkle.last_chunk_merkle.leaves()
+            );
+        }
+        r
     }

     #[instrument(skip(self, merkle))]

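The rewritten tail keeps the function's result unchanged: it binds the `Result`, logs the raw batch, stored root, and Merkle leaf counts only on the error path, then returns `r` as before. The same capture-log-propagate shape in Python (logger and helper names are illustrative):

    import logging

    logger = logging.getLogger("log_manager")

    # Capture/log/propagate (illustrative): compute the result, attach
    # diagnostics only when it failed, and re-raise the original error.
    def gen_proof_with_diagnostics(gen_proof, fetch_debug_state, seg_index):
        try:
            return gen_proof()
        except Exception:
            raw_batch, db_root = fetch_debug_state(seg_index)
            logger.error("gen proof error: seg_index=%s raw_batch=%r db_root=%r",
                         seg_index, raw_batch, db_root)
            raise
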
@@ -62,6 +62,15 @@ for ((i=0; i<$NUM_NODES; i++)) do
     CONFIG_TOML=$ROOT_DIR/node$i/config/config.toml
     sed -i '/seeds = /c\seeds = ""' $CONFIG_TOML
     sed -i 's/addr_book_strict = true/addr_book_strict = false/' $CONFIG_TOML
+
+    # Change block time to very small
+    sed -i '/timeout_propose = "3s"/c\timeout_propose = "300ms"' $CONFIG_TOML
+    sed -i '/timeout_propose_delta = "500ms"/c\timeout_propose_delta = "50ms"' $CONFIG_TOML
+    sed -i '/timeout_prevote = "1s"/c\timeout_prevote = "100ms"' $CONFIG_TOML
+    sed -i '/timeout_prevote_delta = "500ms"/c\timeout_prevote_delta = "50ms"' $CONFIG_TOML
+    sed -i '/timeout_precommit = "1s"/c\timeout_precommit = "100ms"' $CONFIG_TOML
+    sed -i '/timeout_precommit_delta = "500ms"/c\timeout_precommit_delta = "50ms"' $CONFIG_TOML
+    sed -i '/timeout_commit = "5s"/c\timeout_commit = "500ms"' $CONFIG_TOML
done

# Update persistent_peers in config.toml

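These overrides shrink every consensus timeout roughly tenfold, so the devnet commits blocks in well under a second instead of several seconds. On platforms without GNU `sed -i`, the same edits could be applied from Python (a sketch; the values are copied from the hunk above):

    import pathlib
    import re

    # Same timeout overrides as the sed lines above, applied from Python (sketch).
    OVERRIDES = {
        "timeout_propose": '"300ms"',
        "timeout_propose_delta": '"50ms"',
        "timeout_prevote": '"100ms"',
        "timeout_prevote_delta": '"50ms"',
        "timeout_precommit": '"100ms"',
        "timeout_precommit_delta": '"50ms"',
        "timeout_commit": '"500ms"',
    }

    def shrink_block_time(config_toml: str) -> None:
        path = pathlib.Path(config_toml)
        text = path.read_text()
        for key, value in OVERRIDES.items():
            # Replace the whole `key = ...` line, like sed's `/key = /c\` command.
            text = re.sub(rf"^{key} = .*$", f"{key} = {value}", text, flags=re.M)
        path.write_text(text)
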
@@ -13,7 +13,7 @@ class MineTest(TestFramework):
         self.zgs_node_configs[0] = {
             "miner_key": GENESIS_PRIV_KEY,
         }
-        self.mine_period = int(40 / self.block_time)
+        self.mine_period = int(45 / self.block_time)
         self.launch_wait_seconds = 15
         self.log.info("Contract Info: Est. block time %.2f, Mine period %d", self.block_time, self.mine_period)

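`mine_period` is a block count derived from a wall-clock target, so raising the constant from 40 to 45 lengthens each mining epoch by about five seconds regardless of the chain's block time. The conversion as a one-line sketch (the 0.5 s block time is an example value, not a project constant):

    # Convert a wall-clock epoch target into a block count (sketch).
    def mine_period_blocks(target_seconds: float, block_time_seconds: float) -> int:
        return int(target_seconds / block_time_seconds)

    assert mine_period_blocks(45, 0.5) == 90  # a 0.5 s block time gives 90 blocks
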
@@ -35,6 +35,11 @@ class MineTest(TestFramework):
         self.log.info("flow address: %s", self.contract.address())
         self.log.info("mine address: %s", self.mine_contract.address())

+        first_block = self.contract.first_block()
+        self.log.info("Current block number %d", int(blockchain.eth_blockNumber(), 16))
+        self.log.info("Flow deployment block number %d, epoch 1 start %d", first_block, first_block + self.mine_period)
+        wait_until(lambda: self.contract.epoch() >= 1, timeout=180)
+
         quality = int(2**256 / 100 / estimate_st_performance())
         self.mine_contract.set_quality(quality)

@@ -71,6 +76,8 @@ class MineTest(TestFramework):
         self.log.info("Wait for the third mine answer")
         wait_until(lambda: self.mine_contract.last_mined_epoch() == start_epoch + 3 and not self.mine_contract.can_submit(), timeout=180)

+        self.log.info("Current block number %d", int(blockchain.eth_blockNumber(), 16))
+

 if __name__ == "__main__":
     MineTest(blockchain_node_type=BlockChainNodeType.BSC).main()

@@ -21,7 +21,7 @@ class MineTest(TestFramework):
             "shard_position": "3 / 8",
         }
         self.enable_market = True
-        self.mine_period = int(45 / self.block_time)
+        self.mine_period = int(50 / self.block_time)
         self.launch_wait_seconds = 15
         self.log.info("Contract Info: Est. block time %.2f, Mine period %d", self.block_time, self.mine_period)

@@ -50,6 +50,10 @@ class MineTest(TestFramework):

         SECTORS_PER_PRICING = int(8 * ( 2 ** 30 ) / 256)

+        first_block = self.contract.first_block()
+        self.log.info("Current block number %d", int(blockchain.eth_blockNumber(), 16))
+        self.log.info("Flow deployment block number %d, epoch 1 start %d, wait for epoch 1 start", first_block, first_block + self.mine_period)
+        wait_until(lambda: self.contract.epoch() >= 1, timeout=180)

         self.log.info("Submit the actual data chunk (256 MB)")
         self.submit_data(b"\x11", int(SECTORS_PER_PRICING / 32))

@@ -98,6 +102,8 @@ class MineTest(TestFramework):

         assert_greater_than(secondReward, 100 * firstReward / (start_epoch + 1))

+        self.log.info("Current block number %d", int(blockchain.eth_blockNumber(), 16))
+

 if __name__ == "__main__":
     MineTest(blockchain_node_type=BlockChainNodeType.BSC).main()

@@ -7,6 +7,7 @@ from utility.run_all import run_all
 if __name__ == "__main__":
     run_all(
         test_dir = os.path.dirname(__file__),
-        slow_tests={"random_test.py", "same_root_test.py"},
+        slow_tests={"mine_test.py", "random_test.py", "same_root_test.py"},
         long_manual_tests={"fuzz_test.py"},
+        single_run_tests={"mine_with_market_test.py"},
     )

@@ -30,9 +30,9 @@ class BlockChainNodeType(Enum):
         if self == BlockChainNodeType.Conflux:
             return 0.5
         elif self == BlockChainNodeType.BSC:
-            return 25 / estimate_st_performance()
+            return 32 / estimate_st_performance()
         else:
-            return 3.0
+            return 5.0

 @unique
 class NodeType(Enum):

@@ -167,8 +167,8 @@ class TestFramework:
             # sync_blocks(self.blockchain_nodes)
         elif self.blockchain_node_type == BlockChainNodeType.ZG:
             # wait for the first block
-            self.log.debug("Wait 3 seconds for 0gchain node to generate first block")
-            time.sleep(3)
+            self.log.debug("Wait for 0gchain node to generate first block")
+            time.sleep(0.5)
             for node in self.blockchain_nodes:
                 wait_until(lambda: node.net_peerCount() == self.num_blockchain_nodes - 1)
                 wait_until(lambda: node.eth_blockNumber() is not None)

@@ -78,6 +78,7 @@ class ZGNode(BlockchainNode):
             "--rpc.laddr", "tcp://127.0.0.1:%s" % arrange_port(ZGNODE_PORT_CATEGORY_RPC, index),
             # overwrite pprof port: 6060
             "--rpc.pprof_laddr", "127.0.0.1:%s" % arrange_port(ZGNODE_PORT_CATEGORY_PPROF, index),
+            "--log_level", "debug"
         ]

         for k, v in updated_config.items():

@@ -50,11 +50,15 @@ def run_single_test(py, script, test_dir, index, port_min, port_max):
         )
     except subprocess.CalledProcessError as err:
         print_testcase_result(RED, CROSS, script, start_time)
-        print("Output of " + script + "\n" + err.output.decode("utf-8"), flush=True)
+        try:
+            print("Output of " + script + "\n" + err.output.decode("utf-8"), flush=True)
+        except UnicodeDecodeError:
+            print("Output of " + script + "\n", flush=True)
+            print(err.output)
         raise err
     print_testcase_result(BLUE, TICK, script, start_time)

-def run_all(test_dir: str, test_subdirs: list[str]=[], slow_tests: set[str]={}, long_manual_tests: set[str]={}):
+def run_all(test_dir: str, test_subdirs: list[str]=[], slow_tests: set[str]={}, long_manual_tests: set[str]={}, single_run_tests: set[str]={}):
     tmp_dir = os.path.join(test_dir, "tmp")
     if not os.path.exists(tmp_dir):
         os.makedirs(tmp_dir, exist_ok=True)

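Wrapping the decode keeps a failing test's output visible even when it contains bytes that are not valid UTF-8; the fallback prints the raw bytes object instead. An alternative with the same effect is lossy decoding, sketched below, which never raises but replaces undecodable bytes with U+FFFD:

    # Lossy-decoding alternative to catching UnicodeDecodeError (sketch).
    def safe_output(output: bytes) -> str:
        return output.decode("utf-8", errors="replace")

    print(safe_output(b"partial \xff utf-8"))  # the bad byte becomes U+FFFD
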
@@ -98,7 +102,7 @@ def run_all(test_dir: str, test_subdirs: list[str]=[], slow_tests: set[str]={},
     for file in os.listdir(subdir_path):
         if file.endswith("_test.py"):
             rel_path = os.path.join(subdir, file)
-            if rel_path not in slow_tests and rel_path not in long_manual_tests:
+            if rel_path not in slow_tests and rel_path not in long_manual_tests and rel_path not in single_run_tests:
                 TEST_SCRIPTS.append(rel_path)

     executor = ProcessPoolExecutor(max_workers=options.max_workers)

@@ -131,6 +135,18 @@
             print("CalledProcessError " + repr(err))
             failed.add(script)

+    # Run single tests one by one
+    for script in single_run_tests:
+        f = executor.submit(
+            run_single_test, py, script, test_dir, i, options.port_min, options.port_max
+        )
+        try:
+            f.result()
+        except subprocess.CalledProcessError as err:
+            print("CalledProcessError " + repr(err))
+            failed.add(script)
+        i += 1
+
     print("Elapsed: " + str(int(time.time() - start_time)) + " seconds", flush=True)

     if len(failed) > 0:

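Because each submitted future is awaited with `f.result()` before the next `executor.submit`, the `single_run_tests` never overlap one another, presumably so tests like `mine_with_market_test.py` do not contend with anything else. The serialization pattern in isolation (a sketch; the task callables are placeholders):

    from concurrent.futures import ProcessPoolExecutor

    # Submit-then-wait serializes tasks even on a multi-worker pool (sketch).
    def run_serially(executor: ProcessPoolExecutor, tasks):
        for task in tasks:
            executor.submit(task).result()  # block before submitting the next
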
@@ -194,9 +194,14 @@ def generate_merkle_tree_by_batch(data):


 def submit_data(client, data):
+    shard_config = client.rpc.zgs_getShardConfig()
+    shard_id = int(shard_config["shardId"])
+    num_shard = int(shard_config["numShard"])
+
     segments = data_to_segments(data)
-    for segment in segments:
-        client.zgs_upload_segment(segment)
+    for index, segment in enumerate(segments):
+        if index % num_shard == shard_id:
+            client.zgs_upload_segment(segment)
     return segments

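With sharded storage a node holds only the segments whose index falls in its shard, so the uploader now sends segment `i` only when `i % num_shard == shard_id`. For the `"shard_position": "3 / 8"` config used in the mine test above, that selects indices 3, 11, 19, and so on (a quick sketch over toy indices):

    # Which toy segment indices a node at shard position "3 / 8" stores (sketch).
    shard_id, num_shard = 3, 8
    print([i for i in range(20) if i % num_shard == shard_id])  # [3, 11, 19]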