fix: admin_getFileLocation; test: sync test (#141)

* feat: add all_shards in admin_getFileLocation

* fix: admin_getFileLocation

* test: improve sync test

* fix: lint

Author: MiniFrenchBread, 2024-07-23 15:47:44 +08:00, committed by GitHub
Parent: f0c3f2cfd0
Commit: 533bacb234
3 changed files with 19 additions and 3 deletions

@@ -190,6 +190,8 @@ impl RpcServer for RpcServerImpl {
         tx_seq: u64,
         all_shards: bool,
     ) -> RpcResult<Option<Vec<LocationInfo>>> {
+        info!("admin_getFileLocation()");
         let tx = match self.ctx.log_store.get_tx_by_seq_number(tx_seq).await? {
             Some(tx) => tx,
             None => {
@@ -225,7 +227,9 @@ impl RpcServer for RpcServerImpl {
                 shard_config: shard_config.unwrap(),
             })
             .collect();
-        if all_shards && all_shards_available(info.iter().map(|info| info.shard_config).collect()) {
+        if !all_shards || all_shards_available(info.iter().map(|info| info.shard_config).collect())
+        {
             Ok(Some(info))
         } else {
             Ok(None)
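
With this fix, the RPC returns the discovered locations unconditionally when all_shards is false, and only when the announced shard configs cover every shard when all_shards is true. The repository's all_shards_available helper performs that coverage check; the snippet below is only an illustrative sketch of the idea, not the node's implementation, assuming each peer's shard config is a (shard_id, num_shard) pair and that num_shard values are powers of two (so the largest count is a common multiple of the others):

    # Hedged sketch of an "all shards available" check (illustrative only).
    # A peer with shard config (shard_id, num_shard) stores chunks c where
    # c % num_shard == shard_id. Assuming power-of-two num_shard values, the
    # largest num_shard is a common multiple, so coverage can be checked
    # over the range 0..max_num_shard.
    def all_shards_available(shard_configs):
        if not shard_configs:
            return False
        max_num_shard = max(num_shard for _, num_shard in shard_configs)
        covered = set()
        for shard_id, num_shard in shard_configs:
            covered.update(range(shard_id % num_shard, max_num_shard, num_shard))
        return len(covered) == max_num_shard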

@@ -29,6 +29,9 @@ class SyncTest(TestFramework):
         client1 = self.nodes[0]
         client2 = self.nodes[1]
 
+        # stop client2, preventing it from receiving AnnounceFile
+        client2.shutdown()
+
         # Create submission
         chunk_data = random.randbytes(256 * 1024)
         data_root = self.__create_submission(chunk_data)
@@ -41,16 +44,22 @@ class SyncTest(TestFramework):
         segments = submit_data(client1, chunk_data)
         self.log.info("segments: %s", [(s["root"], s["index"], s["proof"]) for s in segments])
         wait_until(lambda: client1.zgs_get_file_info(data_root)["finalized"])
 
-        # File should not be auto sync on node 2
+        # restart client2
+        client2.start()
+        client2.wait_for_rpc_connection()
+
+        # File should not be auto sync on node 2 and there is no cached file locations
         wait_until(lambda: client2.zgs_get_file_info(data_root) is not None)
         time.sleep(3)
         assert_equal(client2.zgs_get_file_info(data_root)["finalized"], False)
+        assert(client2.admin_get_file_location(0) is None)
 
         # Trigger file sync by rpc
         assert(client2.admin_start_sync_file(0) is None)
         wait_until(lambda: client2.sync_status_is_completed_or_unknown(0))
         wait_until(lambda: client2.zgs_get_file_info(data_root)["finalized"])
+        assert(client2.admin_get_file_location(0) is not None)
 
         # Validate data
         assert_equal(

@@ -113,6 +113,9 @@ class ZgsNode(TestNode):
     def sync_status_is_completed_or_unknown(self, tx_seq):
         status = self.rpc.admin_getSyncStatus([tx_seq])
         return status == "Completed" or status == "unknown"
 
+    def admin_get_file_location(self, tx_seq, all_shards=True):
+        return self.rpc.admin_getFileLocation([tx_seq, all_shards])
+
     def clean_data(self):
         shutil.rmtree(os.path.join(self.data_dir, "db"))
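
The new admin_get_file_location helper simply forwards to the admin_getFileLocation JSON-RPC method with [tx_seq, all_shards] as parameters. For reference, a minimal sketch of the equivalent raw call over HTTP; the endpoint URL and port are placeholders for whatever address the test node exposes:

    import requests

    # Sketch of the raw JSON-RPC request behind admin_get_file_location.
    # The URL is a placeholder; params mirror the helper: [tx_seq, all_shards].
    payload = {
        "jsonrpc": "2.0",
        "id": 1,
        "method": "admin_getFileLocation",
        "params": [0, True],
    }
    resp = requests.post("http://127.0.0.1:5678", json=payload, timeout=10).json()
    # With all_shards=True the result stays null until the announced shard
    # configs cover every shard; otherwise it is a list of location entries.
    print(resp.get("result"))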