Feat(e2e): support multiple aggregators in the e2e tests #2378
base: main
Changes from all commits
@@ -143,7 +143,6 @@ impl AggregatorRuntime { | |
info!(self.logger, "→ Trying to transition to READY"; "last_time_point" => ?last_time_point); | ||
|
||
let can_try_transition_from_idle_to_ready = if self.config.is_slave { | ||
println!("Checking if slave aggregator is at the same epoch as master"); | ||
self.runner | ||
.is_slave_aggregator_at_same_epoch_as_master(&last_time_point) | ||
.await? | ||
|
@@ -265,18 +264,19 @@ impl AggregatorRuntime { | |
self.runner | ||
.update_stake_distribution(&new_time_point) | ||
.await?; | ||
if self.config.is_slave { | ||
self.runner | ||
.synchronize_slave_aggregator_signer_registration() | ||
.await?; | ||
} | ||
self.runner.inform_new_epoch(new_time_point.epoch).await?; | ||
|
||
self.runner.upkeep(new_time_point.epoch).await?; | ||
self.runner | ||
.open_signer_registration_round(&new_time_point) | ||
.await?; | ||
self.runner.update_epoch_settings().await?; | ||
if self.config.is_slave { | ||
self.runner | ||
.synchronize_slave_aggregator_signer_registration() | ||
.await?; | ||
// Needed to recompute epoch data for the next signing round on the slave | ||
self.runner.inform_new_epoch(new_time_point.epoch).await?; | ||
} | ||
self.runner.precompute_epoch_data().await?; | ||
} | ||
|
||
|
@@ -940,7 +940,7 @@ mod tests { | |
runner | ||
.expect_inform_new_epoch() | ||
.with(predicate::eq(new_time_point_clone.clone().epoch)) | ||
.once() | ||
.times(2) | ||
Why do we need to change the number of calls?
||
.returning(|_| Ok(())); | ||
runner | ||
.expect_update_epoch_settings() | ||
|
This test is really heavy to read and to execute (16 s on the GitHub Actions CI).
|
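As background for the `.times(2)` expectation questioned a few lines above: with mockall, `.times(2)` makes the test fail unless the mocked method is called exactly twice, where `once()` allowed only a single call. A standalone sketch with an invented trait (not the aggregator's real runner):

```rust
use mockall::{automock, predicate};

// Invented trait for illustration only; not the real AggregatorRunner.
#[automock]
trait Runner {
    fn inform_new_epoch(&self, epoch: u64) -> Result<(), String>;
}

fn main() {
    let mut runner = MockRunner::new();
    runner
        .expect_inform_new_epoch()
        .with(predicate::eq(42u64))
        // Fails on drop unless exactly two calls with epoch 42 happen.
        .times(2)
        .returning(|_| Ok(()));

    runner.inform_new_epoch(42).unwrap();
    runner.inform_new_epoch(42).unwrap();
}
```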
Large diffs are not rendered by default.
@@ -2,6 +2,7 @@ use crate::{ | |
p2p::{Peer, PeerEvent}, | ||
repeater::MessageRepeater, | ||
}; | ||
use clap::ValueEnum; | ||
use libp2p::Multiaddr; | ||
use mithril_common::{ | ||
logging::LoggerExtensions, | ||
|
@@ -11,9 +12,37 @@ use mithril_common::{ | |
}; | ||
use slog::{debug, info, Logger}; | ||
use std::{net::SocketAddr, sync::Arc, time::Duration}; | ||
use strum::Display; | ||
use tokio::sync::mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender}; | ||
use warp::Filter; | ||
|
||
/// Signer relay mode | ||
/// | ||
/// The relay mode defines how the relay will behave when it receives a message | ||
#[derive(Debug, Clone, Display, PartialEq, Eq, ValueEnum)] | ||
#[strum(serialize_all = "mixed_case")] | ||
pub enum SignerRelayMode { | ||
/// Passthrough relay mode | ||
/// | ||
/// In this mode, the relay will only call the aggregator with the message received | ||
Passthrough, | ||
/// P2P relay mode | ||
/// | ||
/// In this mode, the relay will publish the message received to the P2P network | ||
P2P, | ||
} | ||
|
||
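For context on how this enum reaches the relay's command line, here is a self-contained sketch of parsing a `ValueEnum` with clap 4 (assuming clap's `derive` feature and strum's `derive` feature); the `RelayArgs` struct is hypothetical and only the derives mirror the diff above:

```rust
use clap::{Parser, ValueEnum};
use strum::Display;

// Same derives and variants as the diff above, reproduced for the example.
#[derive(Debug, Clone, Display, PartialEq, Eq, ValueEnum)]
#[strum(serialize_all = "mixed_case")]
enum SignerRelayMode {
    Passthrough,
    P2P,
}

// Hypothetical argument struct, not the real mithril-relay CLI definition.
#[derive(Parser)]
struct RelayArgs {
    /// How signer registrations are relayed.
    #[arg(long, value_enum)]
    signer_registration_mode: SignerRelayMode,
    /// How signature registrations are relayed.
    #[arg(long, value_enum)]
    signature_registration_mode: SignerRelayMode,
}

fn main() {
    let args = RelayArgs::parse();
    // The Display impl comes from strum's `mixed_case` serialization.
    println!(
        "signer: {}, signature: {}",
        args.signer_registration_mode, args.signature_registration_mode
    );
}
```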
struct HTTPServerConfiguration<'a> { | ||
server_port: &'a u16, | ||
signer_registration_mode: SignerRelayMode, | ||
signature_registration_mode: SignerRelayMode, | ||
aggregator_endpoint: &'a str, | ||
signer_tx: UnboundedSender<RegisterSignerMessage>, | ||
signature_tx: UnboundedSender<RegisterSignatureMessage>, | ||
signer_repeater: Arc<MessageRepeater<RegisterSignerMessage>>, | ||
logger: &'a Logger, | ||
} | ||
|
||
/// A relay for a Mithril signer | ||
pub struct SignerRelay { | ||
server: TestHttpServer, | ||
|
@@ -29,12 +58,14 @@ impl SignerRelay { | |
pub async fn start( | ||
address: &Multiaddr, | ||
server_port: &u16, | ||
signer_registration_mode: &SignerRelayMode, | ||
signature_registration_mode: &SignerRelayMode, | ||
aggregator_endpoint: &str, | ||
signer_repeater_delay: &Duration, | ||
logger: &Logger, | ||
) -> StdResult<Self> { | ||
let relay_logger = logger.new_with_component_name::<Self>(); | ||
debug!(relay_logger, "Starting..."); | ||
debug!(relay_logger, "Starting..."; "signer_registration_mode" => ?signer_registration_mode, "signature_registration_mode" => ?signature_registration_mode); | ||
let (signature_tx, signature_rx) = unbounded_channel::<RegisterSignatureMessage>(); | ||
let (signer_tx, signer_rx) = unbounded_channel::<RegisterSignerMessage>(); | ||
let signer_repeater = Arc::new(MessageRepeater::new( | ||
|
@@ -43,14 +74,16 @@ impl SignerRelay { | |
logger, | ||
)); | ||
let peer = Peer::new(address).start().await?; | ||
let server = Self::start_http_server( | ||
let server = Self::start_http_server(&HTTPServerConfiguration { | ||
server_port, | ||
signer_registration_mode: signer_registration_mode.to_owned(), | ||
signature_registration_mode: signature_registration_mode.to_owned(), | ||
aggregator_endpoint, | ||
signer_tx, | ||
signature_tx, | ||
signer_repeater.clone(), | ||
logger, | ||
) | ||
signer_tx: signer_tx.clone(), | ||
signature_tx: signature_tx.clone(), | ||
signer_repeater: signer_repeater.clone(), | ||
logger: &relay_logger, | ||
}) | ||
.await; | ||
info!(relay_logger, "Listening on"; "address" => ?server.address()); | ||
|
||
|
@@ -64,44 +97,55 @@ impl SignerRelay { | |
}) | ||
} | ||
|
||
async fn start_http_server( | ||
server_port: &u16, | ||
aggregator_endpoint: &str, | ||
signer_tx: UnboundedSender<RegisterSignerMessage>, | ||
signature_tx: UnboundedSender<RegisterSignatureMessage>, | ||
signer_repeater: Arc<MessageRepeater<RegisterSignerMessage>>, | ||
logger: &Logger, | ||
) -> TestHttpServer { | ||
let server_logger = logger.new_with_name("http_server"); | ||
async fn start_http_server(configuration: &HTTPServerConfiguration<'_>) -> TestHttpServer { | ||
let server_logger = configuration.logger.new_with_name("http_server"); | ||
test_http_server_with_socket_address( | ||
warp::path::end() | ||
.and(warp::get()) | ||
.and(middlewares::with_logger(&server_logger)) | ||
.and(middlewares::with_aggregator_endpoint( | ||
aggregator_endpoint.to_string(), | ||
configuration.aggregator_endpoint.to_string(), | ||
)) | ||
.and_then(handlers::aggregator_features_handler) | ||
.or(warp::path("register-signatures") | ||
.and(warp::post()) | ||
.and(warp::body::json()) | ||
.and(middlewares::with_signer_relay_mode( | ||
configuration.signature_registration_mode.clone(), | ||
)) | ||
.and(middlewares::with_aggregator_endpoint( | ||
configuration.aggregator_endpoint.to_string(), | ||
)) | ||
.and(middlewares::with_logger(&server_logger)) | ||
.and(middlewares::with_transmitter(signature_tx)) | ||
.and(middlewares::with_transmitter( | ||
configuration.signature_tx.clone(), | ||
)) | ||
.and_then(handlers::register_signatures_handler)) | ||
.or(warp::path("register-signer") | ||
.and(warp::post()) | ||
.and(warp::body::json()) | ||
.and(middlewares::with_signer_relay_mode( | ||
configuration.signer_registration_mode.clone(), | ||
)) | ||
.and(middlewares::with_aggregator_endpoint( | ||
configuration.aggregator_endpoint.to_string(), | ||
)) | ||
.and(middlewares::with_logger(&server_logger)) | ||
.and(middlewares::with_transmitter(signer_tx)) | ||
.and(middlewares::with_repeater(signer_repeater.clone())) | ||
.and(middlewares::with_transmitter( | ||
configuration.signer_tx.clone(), | ||
)) | ||
.and(middlewares::with_repeater( | ||
configuration.signer_repeater.clone(), | ||
)) | ||
.and_then(handlers::register_signer_handler)) | ||
.or(warp::path("epoch-settings") | ||
.and(warp::get()) | ||
.and(middlewares::with_logger(&server_logger)) | ||
.and(middlewares::with_aggregator_endpoint( | ||
aggregator_endpoint.to_string(), | ||
configuration.aggregator_endpoint.to_string(), | ||
)) | ||
.and_then(handlers::epoch_settings_handler)), | ||
([0, 0, 0, 0], *server_port).into(), | ||
([0, 0, 0, 0], *configuration.server_port).into(), | ||
) | ||
} | ||
|
||
|
@@ -173,6 +217,8 @@ mod middlewares { | |
|
||
use crate::repeater::MessageRepeater; | ||
|
||
use super::SignerRelayMode; | ||
|
||
pub fn with_logger( | ||
logger: &slog::Logger, | ||
) -> impl Filter<Extract = (slog::Logger,), Error = Infallible> + Clone { | ||
|
@@ -197,6 +243,12 @@ mod middlewares { | |
) -> impl Filter<Extract = (String,), Error = Infallible> + Clone { | ||
warp::any().map(move || aggregator_endpoint.clone()) | ||
} | ||
|
||
pub fn with_signer_relay_mode( | ||
signer_relay_mode: SignerRelayMode, | ||
) -> impl Filter<Extract = (SignerRelayMode,), Error = Infallible> + Clone { | ||
warp::any().map(move || signer_relay_mode.clone()) | ||
} | ||
} | ||
|
||
mod handlers { | ||
|
@@ -205,10 +257,12 @@ mod handlers { | |
use slog::{debug, Logger}; | ||
use std::{convert::Infallible, sync::Arc}; | ||
use tokio::sync::mpsc::UnboundedSender; | ||
use warp::http::StatusCode; | ||
use warp::{http::StatusCode, reply::WithStatus}; | ||
|
||
use crate::repeater; | ||
|
||
use super::SignerRelayMode; | ||
|
||
pub async fn aggregator_features_handler( | ||
logger: Logger, | ||
aggregator_endpoint: String, | ||
|
@@ -223,40 +277,66 @@ mod handlers { | |
|
||
pub async fn register_signer_handler( | ||
register_signer_message: RegisterSignerMessage, | ||
signer_relay_mode: SignerRelayMode, | ||
aggregator_endpoint: String, | ||
logger: Logger, | ||
tx: UnboundedSender<RegisterSignerMessage>, | ||
repeater: Arc<repeater::MessageRepeater<RegisterSignerMessage>>, | ||
) -> Result<impl warp::Reply, Infallible> { | ||
debug!(logger, "Serve HTTP route /register-signer"; "register_signer_message" => #?register_signer_message); | ||
|
||
repeater.set_message(register_signer_message.clone()).await; | ||
match tx.send(register_signer_message) { | ||
Ok(_) => Ok(Box::new(warp::reply::with_status( | ||
"".to_string(), | ||
StatusCode::CREATED, | ||
))), | ||
Err(err) => Ok(Box::new(warp::reply::with_status( | ||
format!("{err:?}"), | ||
StatusCode::INTERNAL_SERVER_ERROR, | ||
))), | ||
debug!(logger, "Serve HTTP route /register-signer"; "signer_relay_mode" => ?signer_relay_mode, "register_signer_message" => #?register_signer_message,); | ||
match signer_relay_mode { | ||
SignerRelayMode::P2P => { | ||
repeater.set_message(register_signer_message.clone()).await; | ||
match tx.send(register_signer_message) { | ||
The match on tx.send looks like technical plumbing (except perhaps the StatusCode returned on success); we could extract it into a function.
||
Ok(_) => Ok(Box::new(warp::reply::with_status( | ||
"".to_string(), | ||
StatusCode::CREATED, | ||
))), | ||
Err(err) => Ok(Box::new(warp::reply::with_status( | ||
format!("{err:?}"), | ||
StatusCode::INTERNAL_SERVER_ERROR, | ||
))), | ||
} | ||
} | ||
SignerRelayMode::Passthrough => { | ||
There is no test in this file covering this code.
||
let response = reqwest::Client::new() | ||
.post(format!("{aggregator_endpoint}/register-signer")) | ||
.json(®ister_signer_message) | ||
.send() | ||
.await; | ||
reply_response(logger, response).await | ||
} | ||
} | ||
} | ||
|
||
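A minimal sketch of the extraction suggested above, reusable by both register_signer_handler and register_signatures_handler; the helper name and placement are assumptions, only the warp/tokio types come from the existing code:

```rust
use std::convert::Infallible;

use tokio::sync::mpsc::error::SendError;
use warp::{http::StatusCode, reply::WithStatus};

/// Hypothetical helper: turn the result of `tx.send(...)` into the HTTP reply
/// that both P2P branches currently build inline.
fn reply_send_result<M>(
    send_result: Result<(), SendError<M>>,
) -> Result<Box<WithStatus<String>>, Infallible> {
    match send_result {
        Ok(()) => Ok(Box::new(warp::reply::with_status(
            String::new(),
            StatusCode::CREATED,
        ))),
        Err(err) => Ok(Box::new(warp::reply::with_status(
            format!("{err:?}"),
            StatusCode::INTERNAL_SERVER_ERROR,
        ))),
    }
}
```

With this, the P2P arm of each handler reduces to `reply_send_result(tx.send(message))`, keeping the `repeater.set_message(...)` call before it in the signer case.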
pub async fn register_signatures_handler( | ||
register_signature_message: RegisterSignatureMessage, | ||
signer_relay_mode: SignerRelayMode, | ||
aggregator_endpoint: String, | ||
logger: Logger, | ||
tx: UnboundedSender<RegisterSignatureMessage>, | ||
) -> Result<impl warp::Reply, Infallible> { | ||
debug!(logger, "Serve HTTP route /register-signatures"; "register_signature_message" => #?register_signature_message); | ||
match tx.send(register_signature_message) { | ||
Ok(_) => Ok(Box::new(warp::reply::with_status( | ||
"".to_string(), | ||
StatusCode::CREATED, | ||
))), | ||
Err(err) => Ok(Box::new(warp::reply::with_status( | ||
format!("{err:?}"), | ||
StatusCode::INTERNAL_SERVER_ERROR, | ||
))), | ||
debug!(logger, "Serve HTTP route /register-signatures"; "signer_relay_mode" => ?signer_relay_mode, "register_signature_message" => #?register_signature_message); | ||
|
||
match signer_relay_mode { | ||
SignerRelayMode::P2P => match tx.send(register_signature_message) { | ||
Extract the
||
Ok(_) => Ok(Box::new(warp::reply::with_status( | ||
"".to_string(), | ||
StatusCode::CREATED, | ||
))), | ||
Err(err) => Ok(Box::new(warp::reply::with_status( | ||
format!("{err:?}"), | ||
StatusCode::INTERNAL_SERVER_ERROR, | ||
))), | ||
}, | ||
SignerRelayMode::Passthrough => { | ||
let response = reqwest::Client::new() | ||
.post(format!("{aggregator_endpoint}/register-signatures")) | ||
.json(®ister_signature_message) | ||
.send() | ||
.await; | ||
reply_response(logger, response).await | ||
} | ||
} | ||
} | ||
|
||
|
@@ -275,7 +355,7 @@ mod handlers { | |
pub async fn reply_response( | ||
logger: Logger, | ||
response: Result<Response, Error>, | ||
) -> Result<impl warp::Reply, Infallible> { | ||
) -> Result<Box<WithStatus<String>>, Infallible> { | ||
match response { | ||
Ok(response) => match StatusCode::from_u16(response.status().into()) { | ||
Ok(status) => match response.text().await { | ||
|
@@ -21,6 +21,7 @@ clap = { version = "4.5.28", features = ["derive"] } | |
indicatif = { version = "0.17.11", features = ["tokio"] } | ||
mithril-common = { path = "../../mithril-common", features = ["full"] } | ||
mithril-doc = { path = "../../internal/mithril-doc" } | ||
mithril-relay = { path = "../../mithril-relay" } | ||
I'm puzzled over making the e2e runner depend on other binary crates: is keeping the argument configuration always correct worth the burden of a dependency as large as this one? And why limit it to the relay? By the same logic we should do this for the aggregator and the signer too, making the e2e build far heavier and longer for little gain.
||
reqwest = { version = "0.12.12", features = ["json"] } | ||
serde = { version = "1.0.217", features = ["derive"] } | ||
serde_json = "1.0.138" | ||
|
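One lighter-weight alternative to the dependency questioned above, purely as a sketch: keep a tiny local copy of the mode names in the e2e crate instead of depending on mithril-relay. The exact strings accepted by the relay binary's CLI are an assumption here, which is precisely the sync risk this trade-off accepts.

```rust
/// Local stand-in for mithril_relay::SignerRelayMode, kept in the e2e crate.
/// Trade-off: no compile-time guarantee that these strings stay in sync with
/// the relay's clap definition.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum RelaySignerMode {
    Passthrough,
    P2P,
}

impl RelaySignerMode {
    /// Value passed on the relay's command line (assumed spelling).
    pub fn to_arg(self) -> &'static str {
        match self {
            Self::Passthrough => "passthrough",
            Self::P2P => "p2p",
        }
    }
}
```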
Large diffs are not rendered by default.
@@ -1,22 +1,29 @@ | ||
use crate::assertions; | ||
use crate::MithrilInfrastructure; | ||
use std::sync::Arc; | ||
|
||
use anyhow::anyhow; | ||
use tokio::sync::RwLock; | ||
use tokio::task::JoinSet; | ||
|
||
use mithril_common::{ | ||
chain_observer::ChainObserver, | ||
entities::{Epoch, SignedEntityTypeDiscriminants}, | ||
StdResult, | ||
}; | ||
|
||
pub struct Spec<'a> { | ||
pub infrastructure: &'a mut MithrilInfrastructure, | ||
use crate::{assertions, Aggregator, MithrilInfrastructure}; | ||
|
||
pub struct Spec { | ||
pub infrastructure: Arc<RwLock<Option<MithrilInfrastructure>>>, | ||
The fact that the
||
is_signing_cardano_transactions: bool, | ||
is_signing_cardano_stake_distribution: bool, | ||
is_signing_cardano_database: bool, | ||
next_era: Option<String>, | ||
regenesis_on_era_switch: bool, | ||
} | ||
|
||
impl<'a> Spec<'a> { | ||
impl Spec { | ||
pub fn new( | ||
infrastructure: &'a mut MithrilInfrastructure, | ||
infrastructure: Arc<RwLock<Option<MithrilInfrastructure>>>, | ||
signed_entity_types: Vec<String>, | ||
next_era: Option<String>, | ||
regenesis_on_era_switch: bool, | ||
|
@@ -43,224 +50,271 @@ impl<'a> Spec<'a> { | |
} | ||
} | ||
|
||
pub async fn run(&mut self) -> StdResult<()> { | ||
let aggregator_endpoint = self.infrastructure.aggregator().endpoint(); | ||
assertions::wait_for_enough_immutable(self.infrastructure.aggregator().db_directory()) | ||
.await?; | ||
let start_epoch = self | ||
.infrastructure | ||
.chain_observer() | ||
.get_current_epoch() | ||
.await? | ||
.unwrap_or_default(); | ||
pub async fn run(self) -> StdResult<()> { | ||
let mut join_set = JoinSet::new(); | ||
let spec = Arc::new(self); | ||
let infrastructure_guard = spec.infrastructure.read().await; | ||
let infrastructure = infrastructure_guard | ||
.as_ref() | ||
.ok_or(anyhow!("No infrastructure found"))?; | ||
let aggregators = infrastructure_guard | ||
.as_ref() | ||
.ok_or(anyhow!("No infrastructure found"))? | ||
.aggregators(); | ||
|
||
// Transfer some funds on the devnet to have some Cardano transactions to sign. | ||
// This step needs to be executed early in the process so that the transactions are available | ||
// for signing in the penultimate immutable chunk before the end of the test. | ||
// As we get closer to the tip of the chain when signing, we'll be able to relax this constraint. | ||
assertions::transfer_funds(self.infrastructure.devnet()).await?; | ||
assertions::transfer_funds(infrastructure.devnet()).await?; | ||
|
||
for index in 0..aggregators.len() { | ||
let spec_clone = spec.clone(); | ||
join_set.spawn(async move { | ||
let infrastructure_guard = spec_clone.infrastructure.read().await; | ||
let infrastructure = infrastructure_guard | ||
.as_ref() | ||
.ok_or(anyhow!("No infrastructure found"))?; | ||
|
||
spec_clone | ||
.run_scenario( | ||
infrastructure.aggregator(index), | ||
infrastructure.chain_observer(index), | ||
infrastructure, | ||
) | ||
.await | ||
}); | ||
} | ||
|
||
while let Some(res) = join_set.join_next().await { | ||
res??; | ||
} | ||
|
||
Ok(()) | ||
} | ||
|
||
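Aside on the `res??` in the loop above: `join_next()` yields `Option<Result<T, JoinError>>`, so the first `?` propagates a panicked or cancelled task and the second propagates the scenario's own error. A standalone sketch, assuming tokio with the `macros` and `rt-multi-thread` features:

```rust
use tokio::task::JoinSet;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let mut join_set: JoinSet<Result<(), String>> = JoinSet::new();
    join_set.spawn(async { Ok(()) });
    join_set.spawn(async { Err("scenario failed".to_string()) });

    while let Some(res) = join_set.join_next().await {
        // First level: JoinError (panic or cancellation of the spawned task).
        let scenario_result = res?;
        // Second level: the error returned by the scenario itself.
        if let Err(e) = scenario_result {
            eprintln!("scenario error: {e}");
        }
    }
    Ok(())
}
```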
pub async fn run_scenario( | ||
&self, | ||
aggregator: &Aggregator, | ||
chain_observer: Arc<dyn ChainObserver>, | ||
infrastructure: &MithrilInfrastructure, | ||
) -> StdResult<()> { | ||
assertions::wait_for_enough_immutable(aggregator).await?; | ||
let start_epoch = chain_observer | ||
.get_current_epoch() | ||
.await? | ||
.unwrap_or_default(); | ||
|
||
// Wait 4 epochs after start epoch for the aggregator to be able to bootstrap a genesis certificate | ||
let mut target_epoch = start_epoch + 4; | ||
assertions::wait_for_target_epoch( | ||
self.infrastructure.chain_observer(), | ||
aggregator, | ||
chain_observer.clone(), | ||
target_epoch, | ||
"minimal epoch for the aggregator to be able to bootstrap genesis certificate" | ||
.to_string(), | ||
) | ||
.await?; | ||
assertions::bootstrap_genesis_certificate(self.infrastructure.aggregator_mut()).await?; | ||
assertions::wait_for_epoch_settings(&aggregator_endpoint).await?; | ||
assertions::bootstrap_genesis_certificate(aggregator).await?; | ||
assertions::wait_for_epoch_settings(aggregator).await?; | ||
|
||
// Wait 2 epochs before changing stake distribution, so that we use at least one original stake distribution | ||
target_epoch += 2; | ||
assertions::wait_for_target_epoch( | ||
self.infrastructure.chain_observer(), | ||
aggregator, | ||
chain_observer.clone(), | ||
target_epoch, | ||
"epoch after which the stake distribution will change".to_string(), | ||
) | ||
.await?; | ||
let delegation_round = 1; | ||
assertions::delegate_stakes_to_pools(self.infrastructure.devnet(), delegation_round) | ||
.await?; | ||
|
||
if aggregator.is_first() { | ||
// Delegate some stakes to pools | ||
let delegation_round = 1; | ||
assertions::delegate_stakes_to_pools(infrastructure.devnet(), delegation_round).await?; | ||
} | ||
|
||
// Wait 2 epochs before changing protocol parameters | ||
target_epoch += 2; | ||
assertions::wait_for_target_epoch( | ||
self.infrastructure.chain_observer(), | ||
aggregator, | ||
chain_observer.clone(), | ||
target_epoch, | ||
"epoch after which the protocol parameters will change".to_string(), | ||
) | ||
.await?; | ||
assertions::update_protocol_parameters(self.infrastructure.aggregator_mut()).await?; | ||
assertions::update_protocol_parameters(aggregator).await?; | ||
|
||
// Wait 6 epochs after protocol parameters update, so that we make sure that we use new protocol parameters as well as new stake distribution a few times | ||
target_epoch += 6; | ||
assertions::wait_for_target_epoch( | ||
self.infrastructure.chain_observer(), | ||
aggregator, | ||
chain_observer.clone(), | ||
target_epoch, | ||
"epoch after which the certificate chain will be long enough to catch most common troubles with stake distribution and protocol parameters".to_string(), | ||
) | ||
.await?; | ||
|
||
// Verify that artifacts are produced and signed correctly | ||
let mut target_epoch = self.verify_artifacts_production(target_epoch).await?; | ||
let mut target_epoch = self | ||
.verify_artifacts_production(target_epoch, aggregator, infrastructure) | ||
.await?; | ||
|
||
// Verify that artifacts are produced and signed correctly after era switch | ||
if let Some(next_era) = &self.next_era { | ||
// Switch to next era | ||
self.infrastructure | ||
.register_switch_to_next_era(next_era) | ||
.await?; | ||
if aggregator.is_first() { | ||
infrastructure.register_switch_to_next_era(next_era).await?; | ||
} | ||
target_epoch += 5; | ||
assertions::wait_for_target_epoch( | ||
self.infrastructure.chain_observer(), | ||
aggregator, | ||
chain_observer.clone(), | ||
target_epoch, | ||
"epoch after which the era switch will have triggered".to_string(), | ||
) | ||
.await?; | ||
|
||
// Proceed to a re-genesis of the certificate chain | ||
if self.regenesis_on_era_switch { | ||
assertions::bootstrap_genesis_certificate(self.infrastructure.aggregator_mut()) | ||
.await?; | ||
assertions::bootstrap_genesis_certificate(aggregator).await?; | ||
target_epoch += 5; | ||
assertions::wait_for_target_epoch( | ||
self.infrastructure.chain_observer(), | ||
aggregator, | ||
chain_observer.clone(), | ||
target_epoch, | ||
"epoch after which the re-genesis on era switch will be completed".to_string(), | ||
) | ||
.await?; | ||
} | ||
|
||
// Verify that artifacts are produced and signed correctly | ||
self.verify_artifacts_production(target_epoch).await?; | ||
self.verify_artifacts_production(target_epoch, aggregator, infrastructure) | ||
.await?; | ||
} | ||
|
||
Ok(()) | ||
} | ||
|
||
async fn verify_artifacts_production(&self, target_epoch: Epoch) -> StdResult<Epoch> { | ||
let aggregator_endpoint = self.infrastructure.aggregator().endpoint(); | ||
async fn verify_artifacts_production( | ||
&self, | ||
target_epoch: Epoch, | ||
aggregator: &Aggregator, | ||
infrastructure: &MithrilInfrastructure, | ||
) -> StdResult<Epoch> { | ||
let expected_epoch_min = target_epoch - 3; | ||
// Verify that mithril stake distribution artifacts are produced and signed correctly | ||
{ | ||
let hash = | ||
assertions::assert_node_producing_mithril_stake_distribution(&aggregator_endpoint) | ||
.await?; | ||
assertions::assert_node_producing_mithril_stake_distribution(aggregator).await?; | ||
let certificate_hash = assertions::assert_signer_is_signing_mithril_stake_distribution( | ||
&aggregator_endpoint, | ||
aggregator, | ||
&hash, | ||
expected_epoch_min, | ||
) | ||
.await?; | ||
assertions::assert_is_creating_certificate_with_enough_signers( | ||
&aggregator_endpoint, | ||
aggregator, | ||
&certificate_hash, | ||
self.infrastructure.signers().len(), | ||
infrastructure.signers().len(), | ||
) | ||
.await?; | ||
let mut client = self.infrastructure.build_client()?; | ||
let mut client = infrastructure.build_client(aggregator).await?; | ||
assertions::assert_client_can_verify_mithril_stake_distribution(&mut client, &hash) | ||
.await?; | ||
} | ||
|
||
// Verify that snapshot artifacts are produced and signed correctly | ||
{ | ||
let digest = assertions::assert_node_producing_snapshot(&aggregator_endpoint).await?; | ||
let digest = assertions::assert_node_producing_snapshot(aggregator).await?; | ||
let certificate_hash = assertions::assert_signer_is_signing_snapshot( | ||
&aggregator_endpoint, | ||
aggregator, | ||
&digest, | ||
expected_epoch_min, | ||
) | ||
.await?; | ||
|
||
assertions::assert_is_creating_certificate_with_enough_signers( | ||
&aggregator_endpoint, | ||
aggregator, | ||
&certificate_hash, | ||
self.infrastructure.signers().len(), | ||
infrastructure.signers().len(), | ||
) | ||
.await?; | ||
|
||
let mut client = self.infrastructure.build_client()?; | ||
let mut client = infrastructure.build_client(aggregator).await?; | ||
assertions::assert_client_can_verify_snapshot(&mut client, &digest).await?; | ||
} | ||
|
||
// Verify that Cardano database snapshot artifacts are produced and signed correctly | ||
if self.is_signing_cardano_database { | ||
let hash = | ||
assertions::assert_node_producing_cardano_database_snapshot(&aggregator_endpoint) | ||
.await?; | ||
assertions::assert_node_producing_cardano_database_snapshot(aggregator).await?; | ||
let certificate_hash = assertions::assert_signer_is_signing_cardano_database_snapshot( | ||
&aggregator_endpoint, | ||
aggregator, | ||
&hash, | ||
expected_epoch_min, | ||
) | ||
.await?; | ||
|
||
assertions::assert_is_creating_certificate_with_enough_signers( | ||
&aggregator_endpoint, | ||
aggregator, | ||
&certificate_hash, | ||
self.infrastructure.signers().len(), | ||
infrastructure.signers().len(), | ||
) | ||
.await?; | ||
|
||
assertions::assert_node_producing_cardano_database_digests_map(&aggregator_endpoint) | ||
.await?; | ||
assertions::assert_node_producing_cardano_database_digests_map(aggregator).await?; | ||
|
||
let mut client = self.infrastructure.build_client()?; | ||
let mut client = infrastructure.build_client(aggregator).await?; | ||
assertions::assert_client_can_verify_cardano_database(&mut client, &hash).await?; | ||
} | ||
|
||
// Verify that Cardano transactions artifacts are produced and signed correctly | ||
if self.is_signing_cardano_transactions { | ||
let hash = assertions::assert_node_producing_cardano_transactions(&aggregator_endpoint) | ||
.await?; | ||
let hash = assertions::assert_node_producing_cardano_transactions(aggregator).await?; | ||
let certificate_hash = assertions::assert_signer_is_signing_cardano_transactions( | ||
&aggregator_endpoint, | ||
aggregator, | ||
&hash, | ||
expected_epoch_min, | ||
) | ||
.await?; | ||
|
||
assertions::assert_is_creating_certificate_with_enough_signers( | ||
&aggregator_endpoint, | ||
aggregator, | ||
&certificate_hash, | ||
self.infrastructure.signers().len(), | ||
infrastructure.signers().len(), | ||
) | ||
.await?; | ||
|
||
let transaction_hashes = self | ||
.infrastructure | ||
let transaction_hashes = infrastructure | ||
.devnet() | ||
.mithril_payments_transaction_hashes()?; | ||
let mut client = self.infrastructure.build_client()?; | ||
let mut client = infrastructure.build_client(aggregator).await?; | ||
assertions::assert_client_can_verify_transactions(&mut client, transaction_hashes) | ||
.await?; | ||
} | ||
|
||
// Verify that Cardano stake distribution artifacts are produced and signed correctly | ||
if self.is_signing_cardano_stake_distribution { | ||
{ | ||
let (hash, epoch) = assertions::assert_node_producing_cardano_stake_distribution( | ||
&aggregator_endpoint, | ||
) | ||
.await?; | ||
let (hash, epoch) = | ||
assertions::assert_node_producing_cardano_stake_distribution(aggregator) | ||
.await?; | ||
let certificate_hash = | ||
assertions::assert_signer_is_signing_cardano_stake_distribution( | ||
&aggregator_endpoint, | ||
aggregator, | ||
&hash, | ||
expected_epoch_min, | ||
) | ||
.await?; | ||
assertions::assert_is_creating_certificate_with_enough_signers( | ||
&aggregator_endpoint, | ||
aggregator, | ||
&certificate_hash, | ||
self.infrastructure.signers().len(), | ||
infrastructure.signers().len(), | ||
) | ||
.await?; | ||
|
||
let mut client = self.infrastructure.build_client()?; | ||
let mut client = infrastructure.build_client(aggregator).await?; | ||
assertions::assert_client_can_verify_cardano_stake_distribution( | ||
&mut client, | ||
&hash, | ||
|
Large diffs are not rendered by default.
Can you explain how this change helps to stabilize the e2e tests? I'm quite puzzled over the fact that we need to call runner.inform_new_epoch twice.

From what I understand this doesn't impact the methods called between the inform_new_epoch calls:
- the runner.upkeep call should not be impacted
- open_signer_registration_round does nothing on a slave
- update_epoch_settings should not be impacted, as the data registered by the epoch service (protocol parameters and transactions signing config) don't depend on the master aggregator

The functional impacts should be:
- the next_signers in the interval between the two inform_new_epoch calls
- the inform_epoch calls will be done without needing a roundtrip to the master aggregator

Is the last point the problem on fast networks? Maybe the synchronizer should be able to "edit" the next signers in the epoch_service instead?
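A purely hypothetical sketch of the alternative raised in that last sentence, where the synchronizer patches the next signers in the epoch service instead of triggering a second inform_new_epoch; every type and method name below is invented for illustration and is not the mithril-aggregator API:

```rust
use anyhow::Result;

/// Invented stand-in for the aggregator's epoch service.
trait EpochService {
    /// Replace only the next signers for the upcoming signing round.
    fn update_next_signers(&mut self, next_signers: Vec<String>) -> Result<()>;
}

/// Invented stand-in for the slave-side signer registration synchronizer.
struct SlaveSignerSynchronizer;

impl SlaveSignerSynchronizer {
    /// After fetching registrations from the master, patch the epoch service
    /// in place rather than recomputing all epoch data a second time.
    fn synchronize(
        &self,
        signers_from_master: Vec<String>,
        epoch_service: &mut dyn EpochService,
    ) -> Result<()> {
        epoch_service.update_next_signers(signers_from_master)
    }
}
```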