Mirror of https://github.com/0glabs/0g-storage-node.git (synced 2024-12-23 23:05:17 +00:00)

Support mining on sharded storage. (#64)

* Change PoraHash compute
* Change padSeed compute
* Refactor
* Support mining on sharded storage
* Detect single core performance and set correct params for test
* Fix clippy
* Fix an overflow bug

parent 193e154361
commit 2262bc3fb9
@@ -1 +1 @@
Subproject commit 6a9f52e8c10ff9b5cd7a5844c543c0951b97d395
Subproject commit b11385532c3c12688bb6a279262e8e87da91a553

Cargo.lock (generated, 1 change)
@@ -8374,6 +8374,7 @@ dependencies = [
 "tracing",
 "tracing-appender",
 "tracing-subscriber",
 "zgs_spec",
 "zgs_version",
]
@@ -13,6 +13,7 @@ exit-future = "0.2.0"
futures = "0.3.21"
file_location_cache = { path = "file_location_cache" }
zgs_version = { path = "../common/zgs_version" }
zgs_spec = { path = "../common/spec" }
log_entry_sync = { path = "./log_entry_sync" }
miner = { path = "./miner" }
network = { path = "./network" }
@@ -6,6 +6,95 @@ use ethers::providers::Middleware;
use ethers::providers::Provider;
use ethers::signers::LocalWallet;
use ethers::signers::Signer;
use zgs_spec::BYTES_PER_LOAD;

#[derive(Clone, Copy, Debug)]
pub struct ShardConfig {
    pub shard_group_load_chunks: usize,
    pub shard_id: usize,
    pub num_shard: usize,
}

impl Default for ShardConfig {
    fn default() -> Self {
        Self {
            shard_group_load_chunks: 1,
            shard_id: 0,
            num_shard: 1,
        }
    }
}
impl ShardConfig {
    pub fn new(
        shard_group_bytes: Option<usize>,
        shard_position: &Option<String>,
    ) -> Result<Option<Self>, String> {
        let (group_bytes, (id, num)) = match (shard_group_bytes, shard_position) {
            (None, None) => {
                return Ok(None);
            }
            (Some(bytes), Some(position)) => (bytes, Self::parse_position(position)?),
            _ => {
                return Err(
                    "`shard_group_bytes` and `shard_position` should be set simultaneously".into(),
                );
            }
        };

        if group_bytes < BYTES_PER_LOAD || group_bytes & (group_bytes - 1) != 0 {
            return Err(format!(
                "Incorrect shard group bytes: {}, should be power of two and >= {}",
                group_bytes, BYTES_PER_LOAD
            ));
        }

        let group_chunks = group_bytes / BYTES_PER_LOAD;

        if id >= num {
            return Err(format!(
                "Incorrect shard_id: expected [0, {}), actual {}",
                num, id
            ));
        }

        if group_chunks < num {
            return Err(format!("Incorrect shard_group_number: the shard group contains {} loading chunks, which cannot be divided into {} shards", group_chunks, num));
        }

        Ok(Some(ShardConfig {
            shard_group_load_chunks: group_chunks,
            shard_id: id,
            num_shard: num,
        }))
    }
    pub fn shard_chunks(&self) -> u64 {
        (self.shard_group_load_chunks / self.num_shard) as u64
    }

    pub fn shard_mask(&self) -> u64 {
        let x = self.shard_group_load_chunks as u64 - self.shard_chunks();
        !x
    }

    pub fn parse_position(input: &str) -> Result<(usize, usize), String> {
        let parts: Vec<&str> = input.trim().split('/').map(|s| s.trim()).collect();

        if parts.len() != 2 {
            return Err("Incorrect format, expected like: '0 / 8'".into());
        }

        let numerator = parts[0]
            .parse::<usize>()
            .map_err(|e| format!("Cannot parse shard position {:?}", e))?;
        let denominator = parts[1]
            .parse::<usize>()
            .map_err(|e| format!("Cannot parse shard position {:?}", e))?;

        Ok((numerator, denominator))
    }
}
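[Editor's note] To make the arithmetic above concrete, here is a minimal standalone sketch (not part of the commit) that reproduces the ShardConfig math for `shard_group_bytes = 4 MiB` and `shard_position = "3 / 8"`, the values used in the new test below. The `BYTES_PER_LOAD` constant is an assumption standing in for `zgs_spec::BYTES_PER_LOAD` (taken here as 256 KiB).

// Standalone sketch of the ShardConfig arithmetic; BYTES_PER_LOAD is an assumed value.
const BYTES_PER_LOAD: usize = 256 * 1024;

fn main() {
    let group_bytes: usize = 4 * 1024 * 1024; // shard_group_bytes = 4 MiB
    let (shard_id, num_shard) = (3u64, 8u64); // shard_position = "3 / 8"

    let group_chunks = (group_bytes / BYTES_PER_LOAD) as u64; // 16 load chunks per group
    let shard_chunks = group_chunks / num_shard;              // 2 chunks owned by this shard
    let shard_mask = !(group_chunks - shard_chunks);          // !(16 - 2) = ...1111_0001

    // RecallRange::load_position projects a seed-derived offset into the shard like this:
    let origin_offset = 189u64;
    let recall_offset = (origin_offset & shard_mask) | (shard_id * shard_chunks);
    assert_eq!(recall_offset % group_chunks, 7); // lands on chunk 6 or 7 of each 16-chunk group
    println!("chunks={group_chunks} per_shard={shard_chunks} mask={shard_mask:#x} offset={recall_offset}");
}

With these values the node only ever mines (and therefore only needs to seal and serve) load chunks 6 and 7 of every 16-chunk shard group.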
pub struct MinerConfig {
    pub(crate) miner_id: Option<H256>,

@@ -16,6 +105,7 @@ pub struct MinerConfig {
    pub(crate) submission_gas: Option<U256>,
    pub(crate) cpu_percentage: u64,
    pub(crate) iter_batch: usize,
    pub(crate) shard_config: Option<ShardConfig>,
}

pub type MineServiceMiddleware = SignerMiddleware<Provider<Http>, LocalWallet>;

@@ -31,6 +121,7 @@ impl MinerConfig {
        submission_gas: Option<U256>,
        cpu_percentage: u64,
        iter_batch: usize,
        shard_config: Option<ShardConfig>,
    ) -> Option<MinerConfig> {
        miner_key.map(|miner_key| MinerConfig {
            miner_id,

@@ -41,6 +132,7 @@ impl MinerConfig {
            submission_gas,
            cpu_percentage,
            iter_batch,
            shard_config,
        })
    }
@@ -9,13 +9,14 @@ mod loader;
mod mine;
mod miner_id;
pub mod pora;
mod recall_range;
mod sealer;
mod service;
mod submitter;
mod watcher;

pub use config::MinerConfig;
pub use config::{MinerConfig, ShardConfig};
pub use loader::PoraLoader;
pub use mine::CustomMineRange;
pub use mine::MineRangeConfig;
pub use miner_id::load_miner_id;
pub use service::{MineService, MinerMessage};
@@ -8,6 +8,8 @@ use tokio::time::{sleep, Duration, Instant};

use zgs_spec::{SECTORS_PER_LOAD, SECTORS_PER_MAX_MINING_RANGE, SECTORS_PER_PRICING};

use crate::recall_range::RecallRange;
use crate::ShardConfig;
use crate::{
    pora::{AnswerWithoutProof, Miner},
    watcher::MineContextMessage,

@@ -23,7 +25,7 @@ pub struct PoraService {
    loader: Arc<dyn PoraLoader>,

    puzzle: Option<PoraPuzzle>,
    mine_range: CustomMineRange,
    mine_range: MineRangeConfig,
    miner_id: H256,

    cpu_percentage: u64,

@@ -35,14 +37,15 @@ struct PoraPuzzle {
    target_quality: U256,
}
#[derive(Clone, Copy, Debug, Default)]
pub struct CustomMineRange {
pub struct MineRangeConfig {
    start_position: Option<u64>,
    end_position: Option<u64>,
    shard_config: Option<ShardConfig>,
}

impl CustomMineRange {
impl MineRangeConfig {
    #[inline]
    fn to_valid_range(self, context: &MineContext) -> Option<(u64, u64)> {
    fn to_valid_range(self, context: &MineContext) -> Option<RecallRange> {
        let self_start_position = self.start_position?;
        let self_end_position = self.end_position?;

@@ -57,7 +60,15 @@ impl CustomMineRange {
        let start_position = std::cmp::min(self_start_position, minable_length - mining_length);
        let start_position =
            (start_position / SECTORS_PER_PRICING as u64) * SECTORS_PER_PRICING as u64;
        Some((start_position, mining_length))

        Some(RecallRange {
            start_position,
            mining_length,
            shard_mask: self.shard_config.map_or(u64::MAX, |c| c.shard_mask()),
            shard_id: self
                .shard_config
                .map_or(0, |c| c.shard_id as u64 * c.shard_chunks()),
        })
    }

    #[inline]
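[Editor's note] The `map_or` defaults above matter: with no `ShardConfig` the range carries `shard_mask = u64::MAX` and `shard_id = 0`, so the projection done in `RecallRange::load_position` leaves the seed-derived offset untouched and mining behaves exactly as before. A tiny standalone sketch (plain `u64` math, not from the commit):

// Sketch: the unsharded defaults give the identity projection.
fn project(origin_offset: u64, shard_mask: u64, shard_id: u64) -> u64 {
    (origin_offset & shard_mask) | shard_id
}

fn main() {
    assert_eq!(project(12_345, u64::MAX, 0), 12_345); // no ShardConfig: offset unchanged
    assert_eq!(project(12_345, !14u64, 6) % 16, 7);   // the "3 / 8" shard from the example above
}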
@@ -86,9 +97,10 @@ impl PoraService {
    ) -> mpsc::UnboundedReceiver<AnswerWithoutProof> {
        let (mine_answer_sender, mine_answer_receiver) =
            mpsc::unbounded_channel::<AnswerWithoutProof>();
        let mine_range = CustomMineRange {
        let mine_range = MineRangeConfig {
            start_position: Some(0),
            end_position: Some(u64::MAX),
            shard_config: config.shard_config,
        };
        let pora = PoraService {
            mine_context_receiver,

@@ -180,19 +192,18 @@ impl PoraService {

    #[inline]
    fn as_miner(&self) -> Option<Miner> {
        match self.puzzle.as_ref() {
            Some(puzzle) => self.mine_range.to_valid_range(&puzzle.context).map(
                |(start_position, mining_length)| Miner {
                    start_position,
                    mining_length,
                    miner_id: &self.miner_id,
                    custom_mine_range: &self.mine_range,
                    context: &puzzle.context,
                    target_quality: &puzzle.target_quality,
                    loader: &*self.loader,
                },
            ),
            _ => None,
        }
        let puzzle = self.puzzle.as_ref()?;

        let range = self.mine_range.to_valid_range(&puzzle.context)?;
        (range.mining_length > 0).then_some(())?;

        Some(Miner {
            range,
            miner_id: &self.miner_id,
            mine_range_config: &self.mine_range,
            context: &puzzle.context,
            target_quality: &puzzle.target_quality,
            loader: &*self.loader,
        })
    }
}
@@ -1,4 +1,5 @@
use crate::{CustomMineRange, PoraLoader};
use crate::recall_range::RecallRange;
use crate::{MineRangeConfig, PoraLoader};
use blake2::{Blake2b512, Digest};
use contract_interface::zgs_flow::MineContext;
use ethereum_types::{H256, U256};

@@ -18,13 +19,12 @@ fn keccak(input: impl AsRef<[u8]>) -> [u8; KECCAK256_OUTPUT_BYTES] {
}

pub(crate) struct Miner<'a> {
    pub start_position: u64,
    pub mining_length: u64,
    pub range: RecallRange,
    pub miner_id: &'a H256,
    pub context: &'a MineContext,
    pub target_quality: &'a U256,
    pub loader: &'a dyn PoraLoader,
    pub custom_mine_range: &'a CustomMineRange,
    pub mine_range_config: &'a MineRangeConfig,
}
#[derive(Debug)]
pub struct AnswerWithoutProof {

@@ -32,8 +32,7 @@ pub struct AnswerWithoutProof {
    pub context_flow_root: H256,
    pub nonce: H256,
    pub miner_id: H256,
    pub start_position: u64,
    pub mining_length: u64,
    pub range: RecallRange,
    pub recall_position: u64,
    pub seal_offset: usize,
    pub sealed_data: [u8; BYTES_PER_SEAL],

@@ -59,24 +58,21 @@ impl<'a> Miner<'a> {
    }

    pub async fn iteration(&self, nonce: H256) -> Option<AnswerWithoutProof> {
        let (scratch_pad, recall_seed) = self.make_scratch_pad(&nonce);
        let ScratchPad {
            scratch_pad,
            recall_seed,
            pad_seed,
        } = self.make_scratch_pad(&nonce);

        if self.mining_length == 0 {
        if self.range.mining_length == 0 {
            return None;
        }

        let (_, recall_offset) = U256::from_big_endian(&recall_seed)
            .div_mod(U256::from((self.mining_length as usize) / SECTORS_PER_LOAD));
        let recall_offset = recall_offset.as_u64();
        if !self
            .custom_mine_range
            .is_covered(self.start_position + recall_offset * SECTORS_PER_LOAD as u64)
            .unwrap()
        {
        let recall_position = self.range.load_position(recall_seed)?;
        if !self.mine_range_config.is_covered(recall_position).unwrap() {
            trace!(
                "recall offset not in range: recall_offset={}, range={:?}",
                recall_offset,
                self.custom_mine_range
                "recall offset not in range: recall_offset={}",
                recall_position,
            );
            return None;
        }

@@ -86,7 +82,7 @@ impl<'a> Miner<'a> {
            avalibilities,
        } = self
            .loader
            .load_sealed_data(self.start_position / SECTORS_PER_LOAD as u64 + recall_offset)
            .load_sealed_data(recall_position / SECTORS_PER_LOAD as u64)
            .await?;

        let scratch_pad: [[u8; BYTES_PER_SEAL]; BYTES_PER_SCRATCHPAD / BYTES_PER_SEAL] =

@@ -104,8 +100,9 @@ impl<'a> Miner<'a> {
            *x ^= y;
        }

        let quality = self.pora(idx, &nonce, &sealed_data);
        if &quality <= self.target_quality {
        let quality = self.pora(idx, &sealed_data, pad_seed);
        let quality_scale = self.range.shard_mask.count_zeros();
        if quality << quality_scale <= *self.target_quality {
            debug!("Find a PoRA valid answer, quality: {}", quality);
            // Undo mix data when find a valid solition
            for (x, y) in sealed_data.iter_mut().zip(scratch_pad.iter()) {
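[Editor's note] The `quality_scale` line is what keeps sharded mining fair: the candidate quality is left-shifted by the number of zero bits in the shard mask before the comparison, so with power-of-two sharding a node that stores only 1/2^k of each shard group must find an answer 2^k times better. A hedged sketch of just that comparison (plain `u64` instead of the crate's `U256`, not from the commit):

// Sketch of the sharded difficulty check; the real code compares U256 values.
fn passes(quality: u64, shard_mask: u64, target_quality: u64) -> bool {
    let quality_scale = shard_mask.count_zeros(); // each zero bit halves the stored fraction
    (quality << quality_scale) <= target_quality
}

fn main() {
    let target = 1u64 << 20;
    assert!(passes(target, u64::MAX, target));    // full storage: unscaled comparison
    assert!(!passes(target, !14u64, target));     // 1/8 shard: the same answer no longer passes
    assert!(passes(target >> 3, !14u64, target)); // ...an 8x better answer does
}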
@@ -116,11 +113,8 @@ impl<'a> Miner<'a> {
            context_flow_root: self.context.flow_root.into(),
            nonce,
            miner_id: *self.miner_id,
            start_position: self.start_position,
            mining_length: self.mining_length,
            recall_position: self.start_position
                + recall_offset * SECTORS_PER_LOAD as u64
                + idx as u64 * SECTORS_PER_SEAL as u64,
            range: self.range,
            recall_position: recall_position + idx as u64 * SECTORS_PER_SEAL as u64,
            seal_offset: idx,
            sealed_data,
        });

@@ -129,25 +123,18 @@ impl<'a> Miner<'a> {
        None
    }

    fn make_scratch_pad(
        &self,
        nonce: &H256,
    ) -> ([u8; BYTES_PER_SCRATCHPAD], [u8; KECCAK256_OUTPUT_BYTES]) {
    fn make_scratch_pad(&self, nonce: &H256) -> ScratchPad {
        let mut digest: [u8; BLAKE2B_OUTPUT_BYTES] = {
            let mut hasher = Blake2b512::new();
            hasher.update(self.miner_id);
            hasher.update(nonce);
            hasher.update(self.context.digest);

            hasher.update([0u8; 24]);
            hasher.update(self.start_position.to_be_bytes());

            hasher.update([0u8; 24]);
            hasher.update(self.mining_length.to_be_bytes());

            hasher.update(self.range.digest());
            hasher.finalize().into()
        };

        let pad_seed = digest;

        let mut scratch_pad =
            [[0u8; BLAKE2B_OUTPUT_BYTES]; BYTES_PER_SCRATCHPAD / BLAKE2B_OUTPUT_BYTES];
        for scratch_pad_cell in scratch_pad.iter_mut() {
@@ -158,26 +145,27 @@ impl<'a> Miner<'a> {
        let scratch_pad: [u8; BYTES_PER_SCRATCHPAD] = unsafe { std::mem::transmute(scratch_pad) };
        let recall_seed: [u8; KECCAK256_OUTPUT_BYTES] = keccak(digest);

        (scratch_pad, recall_seed)
        ScratchPad {
            scratch_pad,
            recall_seed,
            pad_seed,
        }
    }

    #[inline]
    fn pora(&self, seal_index: usize, nonce: &H256, mixed_data: &[u8; BYTES_PER_SEAL]) -> U256 {
    fn pora(
        &self,
        seal_index: usize,
        mixed_data: &[u8; BYTES_PER_SEAL],
        pad_seed: [u8; BLAKE2B_OUTPUT_BYTES],
    ) -> U256 {
        let mut hasher = Blake2b512::new();
        hasher.update([0u8; 24]);
        hasher.update(seal_index.to_be_bytes());

        hasher.update(self.miner_id);
        hasher.update(nonce);
        hasher.update(self.context.digest);
        hasher.update(pad_seed);
        hasher.update([0u8; 32]);

        hasher.update([0u8; 24]);
        hasher.update(self.start_position.to_be_bytes());

        hasher.update([0u8; 24]);
        hasher.update(self.mining_length.to_be_bytes());

        hasher.update([0u8; 64]);
        hasher.update(mixed_data);

        let digest = hasher.finalize();

@@ -185,3 +173,9 @@ impl<'a> Miner<'a> {
        U256::from_big_endian(&digest[0..32])
    }
}

struct ScratchPad {
    scratch_pad: [u8; BYTES_PER_SCRATCHPAD],
    recall_seed: [u8; KECCAK256_OUTPUT_BYTES],
    pad_seed: [u8; BLAKE2B_OUTPUT_BYTES],
}
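[Editor's note] With this refactor the miner-specific inputs move out of `pora()` and into the pad seed: reading the interleaved old and new lines above, `make_scratch_pad` now appears to hash the miner id, the nonce, the context digest and `RecallRange::digest()` into a single Blake2b-512 output that seeds both the scratchpad and the final PoRA hash, while the separate start/mining-length words look like the removed lines. A hedged sketch of just that derivation (the collapsed scratchpad-filling loop is intentionally omitted; same `blake2` crate as the diff):

// Assumed reading of the new pad seed derivation shown above.
use blake2::{Blake2b512, Digest};

fn pad_seed(
    miner_id: &[u8; 32],
    nonce: &[u8; 32],
    context_digest: &[u8; 32],
    range_digest: &[u8; 32],
) -> [u8; 64] {
    let mut hasher = Blake2b512::new();
    hasher.update(miner_id);
    hasher.update(nonce);
    hasher.update(context_digest);
    hasher.update(range_digest); // replaces the old start_position / mining_length words
    hasher.finalize().into()
}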
node/miner/src/recall_range.rs (new file, 52 lines)
@@ -0,0 +1,52 @@
use ethereum_types::U256;
use tiny_keccak::{Hasher, Keccak};
use zgs_spec::SECTORS_PER_LOAD;

#[derive(PartialEq, Eq, Clone, Copy, Debug)]
pub struct RecallRange {
    pub start_position: u64,
    pub mining_length: u64,
    pub shard_mask: u64,
    pub shard_id: u64,
}

impl RecallRange {
    pub fn digest(&self) -> [u8; 32] {
        let mut hasher = Keccak::v256();
        hasher.update(&[0u8; 24]);
        hasher.update(&self.start_position.to_be_bytes());

        hasher.update(&[0u8; 24]);
        hasher.update(&self.mining_length.to_be_bytes());

        hasher.update(&[0u8; 24]);
        hasher.update(&self.shard_id.to_be_bytes());

        hasher.update(&[0u8; 24]);
        hasher.update(&self.shard_mask.to_be_bytes());

        let mut output = [0u8; 32];
        hasher.finalize(&mut output);
        output
    }

    pub fn load_position(&self, seed: [u8; 32]) -> Option<u64> {
        let (_, origin_recall_offset) = U256::from_big_endian(&seed)
            .div_mod(U256::from((self.mining_length as usize) / SECTORS_PER_LOAD));
        let origin_recall_offset = origin_recall_offset.as_u64();
        let recall_offset = (origin_recall_offset & self.shard_mask) | self.shard_id;

        Some(self.start_position + recall_offset * SECTORS_PER_LOAD as u64)
    }
}

impl From<RecallRange> for contract_interface::RecallRange {
    fn from(value: RecallRange) -> Self {
        Self {
            start_position: value.start_position.into(),
            mine_length: value.mining_length.into(),
            shard_mask: value.shard_mask,
            shard_id: value.shard_id,
        }
    }
}
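[Editor's note] Each field of `digest` is hashed as a 32-byte big-endian word (24 zero bytes followed by the 8-byte value), so the preimage is simply the four range fields laid out as uint256-style words. An equivalent standalone formulation, not part of the commit, using the same `tiny_keccak` API:

// Equivalent of RecallRange::digest: Keccak-256 over four 32-byte big-endian words.
use tiny_keccak::{Hasher, Keccak};

fn range_digest(start_position: u64, mining_length: u64, shard_id: u64, shard_mask: u64) -> [u8; 32] {
    let mut hasher = Keccak::v256();
    for word in [start_position, mining_length, shard_id, shard_mask] {
        let mut padded = [0u8; 32];
        padded[24..].copy_from_slice(&word.to_be_bytes()); // 24 zero bytes + 8-byte BE value
        hasher.update(&padded);
    }
    let mut output = [0u8; 32];
    hasher.finalize(&mut output);
    output
}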
@@ -93,8 +93,7 @@ impl Submitter {
            context_digest: mine_answer.context_digest.0,
            nonce: mine_answer.nonce.0,
            miner_id: mine_answer.miner_id.0,
            start_position: mine_answer.start_position.into(),
            mine_length: mine_answer.mining_length.into(),
            range: mine_answer.range.into(),
            recall_position: mine_answer.recall_position.into(),
            seal_offset: mine_answer.seal_offset.into(),
            sealed_context_digest: sealed_context_digest.digest,

@@ -105,13 +105,6 @@ impl MineContextWatcher {
    }

    async fn query_recent_context(&mut self) -> Result<(), String> {
        // let mut watcher = self
        //     .provider
        //     .watch_blocks()
        //     .await
        //     .expect("should success")
        //     .stream();
        // watcher.next().await
        let context_call = self.flow_contract.make_context_with_result();
        let epoch_call = self.mine_contract.last_mined_epoch();
        let quality_call = self.mine_contract.target_quality();

@@ -120,21 +113,18 @@ impl MineContextWatcher {
            try_join!(context_call.call(), epoch_call.call(), quality_call.call())
                .map_err(|e| format!("Failed to query mining context: {:?}", e))?;
        let report = if context.epoch > epoch && context.digest != EMPTY_HASH.0 {
            if context.block_digest == [0; 32] {
                warn!("Mine Context is not updated on time.");
                None
            } else {
                Some((context, quality))
            }
            Some((context, quality))
        } else {
            None
        };

        if report != self.last_report {
            self.mine_context_sender
                .send(report.clone())
                .map_err(|e| format!("Failed to send out the most recent mine context: {:?}", e))?;
        if report == self.last_report {
            return Ok(());
        }

        self.mine_context_sender
            .send(report.clone())
            .map_err(|e| format!("Failed to send out the most recent mine context: {:?}", e))?;
        self.last_report = report;

        Ok(())
@@ -3,7 +3,7 @@
use crate::ZgsConfig;
use ethereum_types::{H256, U256};
use log_entry_sync::{CacheConfig, ContractAddress, LogSyncConfig};
use miner::MinerConfig;
use miner::{MinerConfig, ShardConfig};
use network::NetworkConfig;
use rpc::RPCConfig;
use storage::StorageConfig;

@@ -142,6 +142,9 @@ impl ZgsConfig {
        let submission_gas = self.miner_submission_gas.map(U256::from);
        let cpu_percentage = self.miner_cpu_percentage;
        let iter_batch = self.mine_iter_batch_size;

        let shard_config = ShardConfig::new(self.shard_group_bytes, &self.shard_position)?;

        Ok(MinerConfig::new(
            miner_id,
            miner_key,

@@ -151,6 +154,7 @@ impl ZgsConfig {
            submission_gas,
            cpu_percentage,
            iter_batch,
            shard_config,
        ))
    }

@@ -66,6 +66,8 @@ build_config! {
    (miner_submission_gas, (Option<u64>), None)
    (miner_cpu_percentage, (u64), 100)
    (mine_iter_batch_size, (usize), 100)
    (shard_group_bytes, (Option<usize>), None)
    (shard_position, (Option<String>), None)
}

#[derive(Debug, Default, Deserialize)]
@@ -2,7 +2,7 @@
from test_framework.test_framework import TestFramework
from config.node_config import MINER_ID, GENESIS_PRIV_KEY
from utility.submission import create_submission, submit_data
from utility.utils import wait_until
from utility.utils import wait_until, estimate_st_performance
from test_framework.blockchain_node import BlockChainNodeType

@@ -13,8 +13,9 @@ class MineTest(TestFramework):
        self.zgs_node_configs[0] = {
            "miner_key": GENESIS_PRIV_KEY,
        }
        self.mine_period = int(45 / self.block_time)
        self.mine_period = int(20 / self.block_time)
        self.launch_wait_seconds = 15
        self.log.info("Contract Info: Est. block time %.2f, Mine period %d", self.block_time, self.mine_period)

    def submit_data(self, item, size):
        submissions_before = self.contract.num_submissions()

@@ -34,27 +35,34 @@ class MineTest(TestFramework):
        self.log.info("flow address: %s", self.contract.address())
        self.log.info("mine address: %s", self.mine_contract.address())

        quality = int(2**256 / 40960)
        quality = int(2**256 / 400 / estimate_st_performance())
        self.mine_contract.set_quality(quality)

        self.log.info("Submit the first data chunk")
        self.submit_data(b"\x11", 2000)

        start_epoch = self.contract.epoch()
        self.log.info("Submission done, current epoch is %d", start_epoch)

        self.log.info("Wait for the first mine context release")
        wait_until(lambda: int(blockchain.eth_blockNumber(), 16) > self.mine_period, timeout=180)
        wait_until(lambda: int(blockchain.eth_blockNumber(), 16) >= start_epoch + 1, timeout=180)
        self.contract.update_context()

        self.log.info("Wait for the first mine answer")
        wait_until(lambda: self.mine_contract.last_mined_epoch() == 1)

        self.log.info("Wait for the second mine context release")
        wait_until(lambda: int(blockchain.eth_blockNumber(), 16) > 2 * self.mine_period, timeout=180)
        wait_until(lambda: int(blockchain.eth_blockNumber(), 16) >= start_epoch + 2, timeout=180)
        self.contract.update_context()

        self.log.info("Wait for the second mine answer")
        wait_until(lambda: self.mine_contract.last_mined_epoch() == 2)

        self.nodes[0].miner_stop()
        self.log.info("Wait for the third mine context release")
        wait_until(lambda: int(blockchain.eth_blockNumber(), 16) > 3 * self.mine_period, timeout=180)
        wait_until(lambda: int(blockchain.eth_blockNumber(), 16) >= start_epoch + 3, timeout=180)
        self.contract.update_context()

        self.log.info("Submit the second data chunk")
        self.submit_data(b"\x22", 2000)
        # Now the storage node should have the latest flow, but the mining context is using an old one.

@@ -65,4 +73,4 @@ class MineTest(TestFramework):


if __name__ == "__main__":
    MineTest().main()
    MineTest(blockchain_node_type=BlockChainNodeType.BSC).main()
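[Editor's note] For scale, a hedged reading of the new target: assuming the PoRA quality is roughly uniform over [0, 2^256), a target of 2**256 / 400 / estimate_st_performance() means each mining iteration succeeds with probability about 1 / (400 * perf), i.e. roughly 40,000 expected iterations per answer on a machine that scores 100, which matches the commit's goal of scaling test parameters to the measured single-core performance rather than a fixed constant.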
@@ -2,9 +2,9 @@
from test_framework.test_framework import TestFramework
from config.node_config import MINER_ID, GENESIS_PRIV_KEY
from utility.submission import create_submission, submit_data
from utility.utils import wait_until, assert_equal, assert_greater_than
from utility.utils import wait_until, assert_equal, assert_greater_than, estimate_st_performance
from test_framework.blockchain_node import BlockChainNodeType

import time

import math

@@ -16,10 +16,13 @@ class MineTest(TestFramework):
        self.num_nodes = 1
        self.zgs_node_configs[0] = {
            "miner_key": GENESIS_PRIV_KEY,
            "shard_group_bytes": 4 * 1024 * 1024,
            "shard_position": "3 / 8",
        }
        self.enable_market = True
        self.mine_period = int(60 / self.block_time)
        self.mine_period = int(45 / self.block_time)
        self.launch_wait_seconds = 15
        self.log.info("Contract Info: Est. block time %.2f, Mine period %d", self.block_time, self.mine_period)

    def submit_data(self, item, size, no_submit = False):

@@ -42,7 +45,7 @@ class MineTest(TestFramework):
        self.log.info("flow address: %s", self.contract.address())
        self.log.info("mine address: %s", self.mine_contract.address())

        quality = int(2**256 / 4096)
        quality = int(2**256 / 5 / estimate_st_performance())
        self.mine_contract.set_quality(quality)

        SECTORS_PER_PRICING = int(8 * ( 2 ** 30 ) / 256)
@@ -52,17 +55,22 @@ class MineTest(TestFramework):

        self.log.info("Submit the data hash only (8 GB)")
        self.submit_data(b"\x11", int(SECTORS_PER_PRICING), no_submit=True)
        # wait_until(lambda: self.contract.epoch() >= 1, timeout=180)

        start_epoch = self.contract.epoch()

        self.log.info("Submission Done, epoch is %d, current block number %d", start_epoch, int(blockchain.eth_blockNumber(), 16))

        self.log.info("Sumission Done, Current block number %d", int(blockchain.eth_blockNumber(), 16))
        self.log.info("Wait for mine context release")
        wait_until(lambda: self.contract.get_mine_context()[0] > 0, timeout=180)
        wait_until(lambda: self.contract.epoch() >= start_epoch + 1, timeout=180)
        self.log.info("Current flow length: %d", self.contract.get_mine_context()[3])
        self.contract.update_context()

        self.log.info("Wait for mine answer")
        wait_until(lambda: self.mine_contract.last_mined_epoch() == 1)
        wait_until(lambda: self.mine_contract.last_mined_epoch() == start_epoch + 1)

        rewards = self.reward_contract.reward_distributes()
        assert_equal(len(self.reward_contract.reward_distributes()), 1)
        assert_equal(len(self.reward_contract.reward_distributes()), start_epoch + 1)
        firstReward = rewards[0].args.amount
        self.log.info("Received reward %d Gwei", firstReward / (10**9))

@@ -70,22 +78,25 @@ class MineTest(TestFramework):
        self.log.info("Donation Done")
        self.log.info("Submit the data hash only (8 GB)")
        self.submit_data(b"\x11", int(SECTORS_PER_PRICING), no_submit=True)
        self.log.info("Sumission Done, Current block number %d", int(blockchain.eth_blockNumber(), 16))
        current_epoch = self.contract.epoch()
        assert_equal(current_epoch, start_epoch + 1);
        self.log.info("Sumission Done, epoch is %d, current block number %d", self.contract.epoch(), int(blockchain.eth_blockNumber(), 16))

        self.log.info("Wait for mine context release")
        wait_until(lambda: self.contract.get_mine_context()[0] > 1, timeout=180)
        wait_until(lambda: self.contract.epoch() >= start_epoch + 2, timeout=180)
        self.contract.update_context()

        self.log.info("Wait for mine answer")
        wait_until(lambda: self.mine_contract.last_mined_epoch() == 2)
        wait_until(lambda: self.mine_contract.last_mined_epoch() == start_epoch + 2)
        rewards = self.reward_contract.reward_distributes()
        assert_equal(len(self.reward_contract.reward_distributes()), 2)
        assert_equal(len(self.reward_contract.reward_distributes()), start_epoch + 2)
        secondReward = rewards[1].args.amount
        self.log.info("Received reward %d Gwei", secondReward / (10**9))

        assert_greater_than(secondReward, 100 * firstReward)
        assert_greater_than(secondReward, 100 * firstReward / (start_epoch + 1))


if __name__ == "__main__":
    MineTest().main()
    MineTest(blockchain_node_type=BlockChainNodeType.BSC).main()
@@ -18,6 +18,7 @@ from utility.simple_rpc_proxy import SimpleRpcProxy
from utility.utils import (
    initialize_config,
    wait_until,
    estimate_st_performance
)
from test_framework.contracts import load_contract_metadata

@@ -32,7 +33,7 @@ class BlockChainNodeType(Enum):
        if self == BlockChainNodeType.Conflux:
            return 0.5
        elif self == BlockChainNodeType.BSC:
            return 0.25
            return 25 / estimate_st_performance()
        else:
            return 3.0
@@ -27,7 +27,7 @@ class ContractProxy:
        assert node_idx < len(self.blockchain_nodes)

        contract = self._get_contract(node_idx)
        return getattr(contract.functions, fn_name)(**args).transact(TX_PARAMS)
        return getattr(contract.functions, fn_name)(**args).transact(copy(TX_PARAMS))

    def _logs(self, event_name, node_idx, **args):
        assert node_idx < len(self.blockchain_nodes)

@@ -37,7 +37,7 @@ class ContractProxy:
        return getattr(contract.events, event_name).create_filter(fromBlock =0, toBlock="latest").get_all_entries()

    def transfer(self, value, node_idx = 0):
        tx_params = TX_PARAMS
        tx_params = copy(TX_PARAMS)
        tx_params["value"] = value

        contract = self._get_contract(node_idx)

@@ -53,7 +53,7 @@ class FlowContractProxy(ContractProxy):
    ):
        assert node_idx < len(self.blockchain_nodes)

        combined_tx_prarams = TX_PARAMS
        combined_tx_prarams = copy(TX_PARAMS)

        if tx_prarams is not None:
            combined_tx_prarams.update(tx_prarams)

@@ -77,7 +77,10 @@ class FlowContractProxy(ContractProxy):
        return self._call("firstBlock", node_idx)

    def epoch(self, node_idx=0):
        return self._call("epoch", node_idx)
        return self.get_mine_context(node_idx)[0]

    def update_context(self, node_idx=0):
        return self._send("makeContext", node_idx)

    def get_mine_context(self, node_idx=0):
        return self._call("makeContextWithResult", node_idx)
@@ -4,6 +4,7 @@ import os
import platform
import rtoml
import time
import sha3

from config.node_config import ZGS_CONFIG
from eth_utils import encode_hex

@@ -130,3 +131,12 @@ def assert_greater_than(thing1, thing2):
def assert_greater_than_or_equal(thing1, thing2):
    if thing1 < thing2:
        raise AssertionError("%s < %s" % (str(thing1), str(thing2)))

# 14900K has the performance point 100
def estimate_st_performance():
    hasher = sha3.keccak_256()
    input = b"\xcc" * (1<<26)
    start_time = time.perf_counter()
    hasher.update(input)
    digest = hasher.hexdigest()
    return 10 / (time.perf_counter() - start_time)
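[Editor's note] Reading the benchmark above: the buffer is 1 << 26 bytes = 64 MiB, so a machine that Keccak-hashes it in 0.1 s returns 10 / 0.1 = 100, the calibration point the comment attributes to a 14900K; on that reference machine the BSC block-time estimate introduced earlier becomes 25 / 100 = 0.25 s, matching the previously hard-coded value it replaces.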