Compare commits

...

7 Commits

Author          SHA1        Message                                                                           Date
Bo QIU          c83541339f  Merge ec21c6fce4 into 29fcc415a6                                                  2024-09-06 17:56:27 +08:00
peilun-conflux  29fcc415a6  Add log for proof generation errors. (#182)                                       2024-09-06 17:56:04 +08:00
bruno-valante   bf3694d138  Update mine test process to avoid random bugs on low-performance devices (#184)  2024-09-06 17:53:00 +08:00
boqiu           ec21c6fce4  run mine with market test standalone                                              2024-09-05 15:42:55 +08:00
boqiu           7ef1a73c7c  Fix utf-8 encoding err                                                            2024-09-05 15:25:53 +08:00
boqiu           9028a30e9d  upload segment in right shard id                                                  2024-09-05 14:54:00 +08:00
boqiu           3610b30bc6  Change zg chain block time in python tests                                        2024-09-05 11:21:19 +08:00
15 changed files with 94 additions and 15 deletions

View File

@@ -238,6 +238,10 @@ impl PoraService {
             return Err("too many mine shards");
         }

         if puzzle.context.flow_length <= U256::one() {
             return Err("no data submitted");
         }
+
+        if self.mine_range.shard_config.num_shard as u64 > puzzle.context.flow_length.as_u64() {
+            return Err("Not enough flow length to shard");
+        }
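For intuition: a shard mines every `num_shard`-th sector, so with more shards than flow entries some shard has nothing to mine at all. A minimal Python sketch of the same guards (illustrative names, not the node's actual API):

```python
# Illustrative sketch of the guards above; not the node's actual API.
def can_mine(num_shard: int, flow_length: int) -> bool:
    if flow_length <= 1:
        return False  # "no data submitted"
    if num_shard > flow_length:
        return False  # "Not enough flow length to shard"
    return True

assert can_mine(num_shard=8, flow_length=1024)
assert not can_mine(num_shard=8, flow_length=4)
```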

View File

@@ -90,6 +90,14 @@ impl FlowStore {
         }
         self.db.delete_batch_list(batch_list)
     }
+
+    pub fn get_raw_batch(&self, batch_index: u64) -> Result<Option<EntryBatch>> {
+        self.db.get_entry_batch(batch_index)
+    }
+
+    pub fn get_batch_root(&self, batch_index: u64) -> Result<Option<DataRoot>> {
+        self.db.get_batch_root(batch_index)
+    }
 }

 #[derive(Clone, Debug)]
@@ -575,6 +583,13 @@ impl FlowDBStore {
         }
         Ok(self.kvdb.write(tx)?)
     }
+
+    fn get_batch_root(&self, batch_index: u64) -> Result<Option<DataRoot>> {
+        Ok(self
+            .kvdb
+            .get(COL_ENTRY_BATCH_ROOT, &batch_index.to_be_bytes())?
+            .map(|v| DataRoot::from_slice(&v)))
+    }
 }

 #[derive(DeriveEncode, DeriveDecode, Clone, Debug)]
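A side note on the key encoding: writing the index with `to_be_bytes` makes the byte-wise key order used by the KV store match numeric order, which little-endian keys would not. A quick Python illustration of that property:

```python
# Big-endian keys sort byte-wise in the same order as the integers they
# encode; little-endian keys do not. (Illustration only.)
indices = [1, 255, 256, 4096]
be_keys = [i.to_bytes(8, "big") for i in indices]
le_keys = [i.to_bytes(8, "little") for i in indices]
assert sorted(be_keys) == be_keys  # numeric order preserved
assert sorted(le_keys) != le_keys  # e.g. 256 sorts before 255
```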

View File

@@ -6,6 +6,7 @@ use std::mem;
 use tracing::error;
 use zgs_spec::{BYTES_PER_LOAD, BYTES_PER_SECTOR, SECTORS_PER_LOAD, SECTORS_PER_SEAL};

+#[derive(Debug)]
 pub enum EntryBatchData {
     Complete(Vec<u8>),
     /// All `PartialBatch`s are ordered based on `start_index`.

View File

@@ -23,7 +23,7 @@ use super::SealAnswer;
 use chunk_data::EntryBatchData;
 use seal::SealInfo;

-#[derive(Encode, Decode)]
+#[derive(Debug, Encode, Decode)]
 pub struct EntryBatch {
     seal: SealInfo,
     // the inner data

View File

@@ -19,7 +19,7 @@ pub struct SealContextInfo {
 type ChunkSealBitmap = WrappedBitmap<SEALS_PER_LOAD>;
 const_assert!(SEALS_PER_LOAD <= u128::BITS as usize);

-#[derive(Default, DeriveEncode, DeriveDecode)]
+#[derive(Debug, Default, DeriveEncode, DeriveDecode)]
 pub struct SealInfo {
     // a bitmap specify which sealing chunks have been sealed
     bitmap: ChunkSealBitmap,

View File

@@ -776,7 +776,21 @@ impl LogManager {
                     .gen_proof(flow_index as usize % PORA_CHUNK_SIZE)?,
             }
         };
-        entry_proof(&top_proof, &sub_proof)
+        let r = entry_proof(&top_proof, &sub_proof);
+        if r.is_err() {
+            let raw_batch = self.flow_store.get_raw_batch(seg_index as u64)?;
+            let db_root = self.flow_store.get_batch_root(seg_index as u64)?;
+            error!(
+                ?r,
+                ?raw_batch,
+                ?db_root,
+                ?seg_index,
+                "gen proof error: top_leaves={}, last={}",
+                merkle.pora_chunks_merkle.leaves(),
+                merkle.last_chunk_merkle.leaves()
+            );
+        }
+        r
     }

     #[instrument(skip(self, merkle))]
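The log pairs each failed proof with the raw batch and the root persisted for `seg_index`, so a mismatch between the on-disk batch and the in-memory Merkle tree becomes visible. A sketch of the index arithmetic the proof path uses, with an assumed value for `PORA_CHUNK_SIZE` (the real constant is defined in the repo):

```python
PORA_CHUNK_SIZE = 1024  # assumed for illustration only; see the repo for the real value

def locate(flow_index: int) -> tuple[int, int]:
    seg_index = flow_index // PORA_CHUNK_SIZE  # which batch/root to fetch
    offset = flow_index % PORA_CHUNK_SIZE      # which leaf is proven inside it
    return seg_index, offset

assert locate(2049) == (2, 1)
```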

View File

@@ -62,6 +62,15 @@ for ((i=0; i<$NUM_NODES; i++)) do
    CONFIG_TOML=$ROOT_DIR/node$i/config/config.toml
    sed -i '/seeds = /c\seeds = ""' $CONFIG_TOML
    sed -i 's/addr_book_strict = true/addr_book_strict = false/' $CONFIG_TOML
+
+    # Change block time to very small
+    sed -i '/timeout_propose = "3s"/c\timeout_propose = "300ms"' $CONFIG_TOML
+    sed -i '/timeout_propose_delta = "500ms"/c\timeout_propose_delta = "50ms"' $CONFIG_TOML
+    sed -i '/timeout_prevote = "1s"/c\timeout_prevote = "100ms"' $CONFIG_TOML
+    sed -i '/timeout_prevote_delta = "500ms"/c\timeout_prevote_delta = "50ms"' $CONFIG_TOML
+    sed -i '/timeout_precommit = "1s"/c\timeout_precommit = "100ms"' $CONFIG_TOML
+    sed -i '/timeout_precommit_delta = "500ms"/c\timeout_precommit_delta = "50ms"' $CONFIG_TOML
+    sed -i '/timeout_commit = "5s"/c\timeout_commit = "500ms"' $CONFIG_TOML
done

# Update persistent_peers in config.toml
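These consensus timeouts bound how quickly the devnet commits blocks: a full round costs at most roughly the sum of the four timeouts, and on an idle single-validator chain the interval is dominated by `timeout_commit`. A back-of-envelope comparison, ignoring network and execution time:

```python
# Rough worst-case round time before and after the sed edits above.
new_ms = {"propose": 300, "prevote": 100, "precommit": 100, "commit": 500}
old_ms = {"propose": 3000, "prevote": 1000, "precommit": 1000, "commit": 5000}
print(sum(new_ms.values()) / 1000, "s vs", sum(old_ms.values()) / 1000, "s")  # 1.0 s vs 10.0 s
```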

View File

@@ -13,7 +13,7 @@ class MineTest(TestFramework):
         self.zgs_node_configs[0] = {
             "miner_key": GENESIS_PRIV_KEY,
         }
-        self.mine_period = int(40 / self.block_time)
+        self.mine_period = int(45 / self.block_time)
         self.launch_wait_seconds = 15

         self.log.info("Contract Info: Est. block time %.2f, Mine period %d", self.block_time, self.mine_period)
@@ -35,6 +35,11 @@ class MineTest(TestFramework):
         self.log.info("flow address: %s", self.contract.address())
         self.log.info("mine address: %s", self.mine_contract.address())

         first_block = self.contract.first_block()
+        self.log.info("Current block number %d", int(blockchain.eth_blockNumber(), 16))
+        self.log.info("Flow deployment block number %d, epoch 1 start %d", first_block, first_block + self.mine_period)
+        wait_until(lambda: self.contract.epoch() >= 1, timeout=180)
+
         quality = int(2**256 / 100 / estimate_st_performance())
         self.mine_contract.set_quality(quality)
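Blocking on `epoch() >= 1` before tuning the quality means mining starts from a known epoch boundary instead of racing the contract deployment. `wait_until` is the harness's polling helper; a minimal sketch of the pattern it presumably implements (the real utility may differ in signature and error reporting):

```python
import time

def wait_until(predicate, timeout=60, poll_interval=0.5):
    # Poll until predicate() is truthy or the deadline passes.
    deadline = time.time() + timeout
    while time.time() < deadline:
        if predicate():
            return
        time.sleep(poll_interval)
    raise TimeoutError("condition not met within %ss" % timeout)
```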
@@ -71,6 +76,8 @@ class MineTest(TestFramework):
         self.log.info("Wait for the third mine answer")
         wait_until(lambda: self.mine_contract.last_mined_epoch() == start_epoch + 3 and not self.mine_contract.can_submit(), timeout=180)

+        self.log.info("Current block number %d", int(blockchain.eth_blockNumber(), 16))
+

 if __name__ == "__main__":
     MineTest(blockchain_node_type=BlockChainNodeType.BSC).main()

View File

@@ -21,7 +21,7 @@ class MineTest(TestFramework):
             "shard_position": "3 / 8",
         }
         self.enable_market = True
-        self.mine_period = int(45 / self.block_time)
+        self.mine_period = int(50 / self.block_time)
         self.launch_wait_seconds = 15

         self.log.info("Contract Info: Est. block time %.2f, Mine period %d", self.block_time, self.mine_period)
@@ -50,6 +50,10 @@ class MineTest(TestFramework):
         SECTORS_PER_PRICING = int(8 * ( 2 ** 30 ) / 256)

         first_block = self.contract.first_block()
+        self.log.info("Current block number %d", int(blockchain.eth_blockNumber(), 16))
+        self.log.info("Flow deployment block number %d, epoch 1 start %d, wait for epoch 1 start", first_block, first_block + self.mine_period)
+        wait_until(lambda: self.contract.epoch() >= 1, timeout=180)
+
         self.log.info("Submit the actual data chunk (256 MB)")
         self.submit_data(b"\x11", int(SECTORS_PER_PRICING / 32))
@@ -98,6 +102,8 @@ class MineTest(TestFramework):
         assert_greater_than(secondReward, 100 * firstReward / (start_epoch + 1))

+        self.log.info("Current block number %d", int(blockchain.eth_blockNumber(), 16))
+

 if __name__ == "__main__":
     MineTest(blockchain_node_type=BlockChainNodeType.BSC).main()

View File

@@ -7,6 +7,7 @@ from utility.run_all import run_all
 if __name__ == "__main__":
     run_all(
         test_dir = os.path.dirname(__file__),
-        slow_tests={"random_test.py", "same_root_test.py"},
+        slow_tests={"mine_test.py", "random_test.py", "same_root_test.py"},
         long_manual_tests={"fuzz_test.py"},
+        single_run_tests={"mine_with_market_test.py"},
     )

View File

@@ -30,9 +30,9 @@ class BlockChainNodeType(Enum):
         if self == BlockChainNodeType.Conflux:
             return 0.5
         elif self == BlockChainNodeType.BSC:
-            return 25 / estimate_st_performance()
+            return 32 / estimate_st_performance()
         else:
-            return 3.0
+            return 5.0

 @unique
 class NodeType(Enum):
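`estimate_st_performance()` benchmarks the host, so the expected BSC block time now grows on slower machines, and the mine tests' derived periods shrink accordingly. Worked through with a hypothetical benchmark result of 16:

```python
perf = 16                           # assumed estimate_st_performance() result
block_time = 32 / perf              # 2.0 s (was 25 / perf ~= 1.56 s)
mine_period = int(45 / block_time)  # 22 blocks per ~45 s epoch
```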

View File

@@ -167,8 +167,8 @@ class TestFramework:
             # sync_blocks(self.blockchain_nodes)
         elif self.blockchain_node_type == BlockChainNodeType.ZG:
             # wait for the first block
-            self.log.debug("Wait 3 seconds for 0gchain node to generate first block")
-            time.sleep(3)
+            self.log.debug("Wait for 0gchain node to generate first block")
+            time.sleep(0.5)
         for node in self.blockchain_nodes:
             wait_until(lambda: node.net_peerCount() == self.num_blockchain_nodes - 1)
             wait_until(lambda: node.eth_blockNumber() is not None)

View File

@@ -78,6 +78,7 @@ class ZGNode(BlockchainNode):
             "--rpc.laddr", "tcp://127.0.0.1:%s" % arrange_port(ZGNODE_PORT_CATEGORY_RPC, index),
             # overwrite pprof port: 6060
             "--rpc.pprof_laddr", "127.0.0.1:%s" % arrange_port(ZGNODE_PORT_CATEGORY_PPROF, index),
+            "--log_level", "debug"
         ]

         for k, v in updated_config.items():

View File

@@ -50,11 +50,15 @@ def run_single_test(py, script, test_dir, index, port_min, port_max):
         )
     except subprocess.CalledProcessError as err:
         print_testcase_result(RED, CROSS, script, start_time)
-        print("Output of " + script + "\n" + err.output.decode("utf-8"), flush=True)
+        try:
+            print("Output of " + script + "\n" + err.output.decode("utf-8"), flush=True)
+        except UnicodeDecodeError:
+            print("Output of " + script + "\n", flush=True)
+            print(err.output)
         raise err
     print_testcase_result(BLUE, TICK, script, start_time)

-def run_all(test_dir: str, test_subdirs: list[str]=[], slow_tests: set[str]={}, long_manual_tests: set[str]={}):
+def run_all(test_dir: str, test_subdirs: list[str]=[], slow_tests: set[str]={}, long_manual_tests: set[str]={}, single_run_tests: set[str]={}):
     tmp_dir = os.path.join(test_dir, "tmp")
     if not os.path.exists(tmp_dir):
         os.makedirs(tmp_dir, exist_ok=True)
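The guarded decode matters because a failing test can emit arbitrary bytes; `bytes.decode("utf-8")` raises on invalid sequences, which previously replaced the real test failure with a UnicodeDecodeError. A tiny reproduction, including the `errors="replace"` alternative the patch could have used instead:

```python
raw = b"test output \xff\xfe"  # invalid UTF-8, as a crashing test might print
try:
    raw.decode("utf-8")
except UnicodeDecodeError:
    print(raw.decode("utf-8", errors="replace"))  # lossy, but never raises
```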
@@ -98,7 +102,7 @@ def run_all(test_dir: str, test_subdirs: list[str]=[], slow_tests: set[str]={},
                 for file in os.listdir(subdir_path):
                     if file.endswith("_test.py"):
                         rel_path = os.path.join(subdir, file)
-                        if rel_path not in slow_tests and rel_path not in long_manual_tests:
+                        if rel_path not in slow_tests and rel_path not in long_manual_tests and rel_path not in single_run_tests:
                             TEST_SCRIPTS.append(rel_path)

     executor = ProcessPoolExecutor(max_workers=options.max_workers)
@@ -131,6 +135,18 @@ def run_all(test_dir: str, test_subdirs: list[str]=[], slow_tests: set[str]={},
             print("CalledProcessError " + repr(err))
             failed.add(script)

+    # Run single tests one by one
+    for script in single_run_tests:
+        f = executor.submit(
+            run_single_test, py, script, test_dir, i, options.port_min, options.port_max
+        )
+        try:
+            f.result()
+        except subprocess.CalledProcessError as err:
+            print("CalledProcessError " + repr(err))
+            failed.add(script)
+        i += 1
+
     print("Elapsed: " + str(int(time.time() - start_time)) + " seconds", flush=True)

     if len(failed) > 0:
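Each `single_run_tests` script is submitted and awaited before the next, so these tests run serially and never overlap with each other. Usage mirrors the `run_all(...)` call in the earlier hunk:

```python
# How a caller opts scripts out of the parallel phase (names from the diff above).
run_all(
    test_dir=os.path.dirname(__file__),
    slow_tests={"mine_test.py", "random_test.py", "same_root_test.py"},
    long_manual_tests={"fuzz_test.py"},             # excluded from automatic runs
    single_run_tests={"mine_with_market_test.py"},  # executed serially at the end
)
```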

View File

@@ -194,9 +194,14 @@ def generate_merkle_tree_by_batch(data):

 def submit_data(client, data):
+    shard_config = client.rpc.zgs_getShardConfig()
+    shard_id = int(shard_config["shardId"])
+    num_shard = int(shard_config["numShard"])
+
     segments = data_to_segments(data)
-    for segment in segments:
-        client.zgs_upload_segment(segment)
+    for index, segment in enumerate(segments):
+        if index % num_shard == shard_id:
+            client.zgs_upload_segment(segment)
     return segments
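With this filter, an uploader only sends the segments its shard owns: segment `index` belongs to shard `index % numShard`. For the `"shard_position": "3 / 8"` node configured in the market mine test, that means segments 3, 11, 19, and so on:

```python
# Segment indices uploaded by a node with shardId=3, numShard=8,
# per the index % num_shard == shard_id filter above.
shard_id, num_shard = 3, 8
uploads = [i for i in range(32) if i % num_shard == shard_id]
assert uploads == [3, 11, 19, 27]
```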