test: add tests for calculating block timestamp #395

Merged · 4 commits · Apr 25, 2024
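This PR threads a timer value through `Provider::new` and `ProviderData::new`, typed against a new `TimeSinceEpoch` trait whose production implementation is `CurrentTime`. The `time` module itself (`crates/edr_provider/src/time.rs`) is not among the rendered hunks below; judging from how the timer is used in `data.rs` (`timer.since_epoch()`, `timer.since(fork_block_timestamp)`) and the newly added `auto_impl` dependency, a minimal sketch of what it plausibly contains follows. The exact signatures, trait bounds, and `auto_impl` targets are assumptions, not the file's actual contents.

```rust
use std::time::{SystemTime, SystemTimeError, UNIX_EPOCH};

use auto_impl::auto_impl;

/// Abstraction over wall-clock time so tests can inject a deterministic clock.
/// (Bounds and the auto_impl targets are guesses based on the diff.)
#[auto_impl(&, Box, Arc)]
pub trait TimeSinceEpoch: Send + Sync + 'static {
    /// Seconds elapsed since the UNIX epoch.
    fn since_epoch(&self) -> u64;

    /// Seconds elapsed since `time`, failing if `time` lies in the future.
    fn since(&self, time: SystemTime) -> Result<u64, SystemTimeError>;
}

/// Production clock backed by `SystemTime::now()`.
#[derive(Clone, Copy, Debug, Default)]
pub struct CurrentTime;

impl TimeSinceEpoch for CurrentTime {
    fn since_epoch(&self) -> u64 {
        SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .expect("current time must be after UNIX epoch")
            .as_secs()
    }

    fn since(&self, time: SystemTime) -> Result<u64, SystemTimeError> {
        SystemTime::now().duration_since(time).map(|d| d.as_secs())
    }
}
```

With this shape, the `SystemTime::now()` calls removed from `data.rs` below become `self.timer.since_epoch()` and `timer.since(...)`, which is what lets a test substitute a fixed clock.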
1 change: 1 addition & 0 deletions Cargo.lock

(Generated lockfile; diff not rendered.)

30 changes: 18 additions & 12 deletions crates/edr_napi/src/provider.rs
@@ -3,7 +3,7 @@ mod config;
use std::sync::Arc;

use edr_eth::remote::jsonrpc;
use edr_provider::InvalidRequestReason;
use edr_provider::{time::CurrentTime, InvalidRequestReason};
use napi::{tokio::runtime, Env, JsFunction, JsObject, Status};
use napi_derive::napi;

@@ -52,17 +52,23 @@ impl Provider {
edr_provider::Logger::is_enabled(&*logger),
))?;

let result = edr_provider::Provider::new(runtime, logger, subscriber_callback, config)
.map_or_else(
|error| Err(napi::Error::new(Status::GenericFailure, error.to_string())),
|provider| {
Ok(Provider {
provider: Arc::new(provider),
#[cfg(feature = "scenarios")]
scenario_file,
})
},
);
let result = edr_provider::Provider::new(
runtime,
logger,
subscriber_callback,
config,
CurrentTime,
)
.map_or_else(
|error| Err(napi::Error::new(Status::GenericFailure, error.to_string())),
|provider| {
Ok(Provider {
provider: Arc::new(provider),
#[cfg(feature = "scenarios")]
scenario_file,
})
},
);

deferred.resolve(|_env| result);
Ok::<_, napi::Error>(())
1 change: 1 addition & 0 deletions crates/edr_provider/Cargo.toml
@@ -6,6 +6,7 @@ edition = "2021"
[dependencies]
alloy-sol-types = { version = "0.5.1", default-features = false, features = ["std"]}
anyhow = { version = "1.0.75", optional = true }
auto_impl = { version = "1.2", default-features = false }
dyn-clone = { version = "1.0.13", default-features = false }
edr_defaults = { version = "0.3.5", path = "../edr_defaults" }
edr_eth = { version = "0.3.5", path = "../edr_eth", features = ["rand"] }
9 changes: 6 additions & 3 deletions crates/edr_provider/src/console_log.rs
@@ -60,15 +60,18 @@ pub(crate) mod tests {
};
use edr_evm::hex;

use crate::data::ProviderData;
use crate::{data::ProviderData, time::TimeSinceEpoch};

pub struct ConsoleLogTransaction {
pub transaction: TransactionRequestAndSender,
pub expected_call_data: Bytes,
}

pub fn deploy_console_log_contract<LoggerErrorT: Debug + Send + Sync + 'static>(
provider_data: &mut ProviderData<LoggerErrorT>,
pub fn deploy_console_log_contract<
LoggerErrorT: Debug + Send + Sync + 'static,
TimerT: Clone + TimeSinceEpoch,
>(
provider_data: &mut ProviderData<LoggerErrorT, TimerT>,
) -> anyhow::Result<ConsoleLogTransaction> {
// Compiled with solc 0.8.17, without optimizations
/*
41 changes: 22 additions & 19 deletions crates/edr_provider/src/data.rs
@@ -67,6 +67,7 @@ use crate::{
pending::BlockchainWithPending,
requests::hardhat::rpc_types::{ForkConfig, ForkMetadata},
snapshot::Snapshot,
time::{CurrentTime, TimeSinceEpoch},
MiningConfig, ProviderConfig, ProviderError, SubscriptionEvent, SubscriptionEventData,
SyncSubscriberCallback,
};
@@ -119,7 +120,7 @@ pub enum CreationError {
RpcClient(#[from] RpcClientError),
}

pub struct ProviderData<LoggerErrorT: Debug> {
pub struct ProviderData<LoggerErrorT: Debug, TimerT: Clone + TimeSinceEpoch = CurrentTime> {
runtime_handle: runtime::Handle,
initial_config: ProviderConfig,
blockchain: Box<dyn SyncBlockchain<BlockchainError, StateError>>,
@@ -150,6 +151,7 @@ pub struct ProviderData<LoggerErrorT: Debug> {
logger: Box<dyn SyncLogger<BlockchainError = BlockchainError, LoggerError = LoggerErrorT>>,
impersonated_accounts: HashSet<Address>,
subscriber_callback: Box<dyn SyncSubscriberCallback>,
timer: TimerT,
call_override: Option<Arc<dyn SyncCallOverride>>,
// We need the Arc to let us avoid returning references to the cache entries which need &mut
// self to get.
@@ -158,13 +160,14 @@
block_number_to_state_id: BTreeMap<u64, StateId>,
}

impl<LoggerErrorT: Debug> ProviderData<LoggerErrorT> {
impl<LoggerErrorT: Debug, TimerT: Clone + TimeSinceEpoch> ProviderData<LoggerErrorT, TimerT> {
pub fn new(
runtime_handle: runtime::Handle,
logger: Box<dyn SyncLogger<BlockchainError = BlockchainError, LoggerError = LoggerErrorT>>,
subscriber_callback: Box<dyn SyncSubscriberCallback>,
call_override: Option<Arc<dyn SyncCallOverride>>,
config: ProviderConfig,
timer: TimerT,
) -> Result<Self, CreationError> {
let InitialAccounts {
local_accounts,
@@ -180,7 +183,7 @@ impl<LoggerErrorT: Debug> ProviderData<LoggerErrorT> {
prev_randao_generator,
block_time_offset_seconds,
next_block_base_fee_per_gas,
} = create_blockchain_and_state(runtime_handle.clone(), &config, genesis_accounts)?;
} = create_blockchain_and_state(runtime_handle.clone(), &config, &timer, genesis_accounts)?;

let max_cached_states = std::env::var(EDR_MAX_CACHED_STATES_ENV_VAR).map_or_else(
|err| match err {
@@ -250,6 +253,7 @@ impl<LoggerErrorT: Debug> ProviderData<LoggerErrorT> {
logger,
impersonated_accounts: HashSet::new(),
subscriber_callback,
timer,
call_override,
block_state_cache,
current_state_id,
@@ -271,6 +275,7 @@ impl<LoggerErrorT: Debug> ProviderData<LoggerErrorT> {
self.subscriber_callback.clone(),
self.call_override.clone(),
config,
self.timer.clone(),
)?;

std::mem::swap(self, &mut reset_instance);
@@ -1133,7 +1138,7 @@ impl<LoggerErrorT: Debug> ProviderData<LoggerErrorT> {
}

let mine_block_with_interval =
|data: &mut ProviderData<LoggerErrorT>,
|data: &mut ProviderData<LoggerErrorT, TimerT>,
mined_blocks: &mut Vec<DebugMineBlockResult<BlockchainError>>|
-> Result<(), ProviderError<LoggerErrorT>> {
let previous_timestamp = mined_blocks
@@ -1966,8 +1971,7 @@ impl<LoggerErrorT: Debug> ProviderData<LoggerErrorT> {
let latest_block_header = latest_block.header();

let current_timestamp =
i64::try_from(SystemTime::now().duration_since(UNIX_EPOCH)?.as_secs())
.expect("timestamp too large");
i64::try_from(self.timer.since_epoch()).expect("timestamp too large");

let (mut block_timestamp, mut new_offset) = if let Some(timestamp) = timestamp {
timestamp.checked_sub(latest_block_header.timestamp).ok_or(
@@ -2158,7 +2162,10 @@ impl StateId {
}
}

fn block_time_offset_seconds(config: &ProviderConfig) -> Result<i64, CreationError> {
fn block_time_offset_seconds(
config: &ProviderConfig,
timer: &impl TimeSinceEpoch,
) -> Result<i64, CreationError> {
config.initial_date.map_or(Ok(0), |initial_date| {
let initial_timestamp = i64::try_from(
initial_date
@@ -2168,13 +2175,8 @@ fn block_time_offset_seconds(config: &ProviderConfig) -> Result<i64, CreationErr
)
.expect("initial date must be representable as i64");

let current_timestamp = i64::try_from(
SystemTime::now()
.duration_since(UNIX_EPOCH)
.expect("current time must be after UNIX epoch")
.as_secs(),
)
.expect("Current timestamp must be representable as i64");
let current_timestamp = i64::try_from(timer.since_epoch())
.expect("Current timestamp must be representable as i64");

Ok(initial_timestamp - current_timestamp)
})
@@ -2194,6 +2196,7 @@ struct BlockchainAndState {
fn create_blockchain_and_state(
runtime: runtime::Handle,
config: &ProviderConfig,
timer: &impl TimeSinceEpoch,
mut genesis_accounts: HashMap<Address, Account>,
) -> Result<BlockchainAndState, CreationError> {
let mut prev_randao_generator = RandomHashGenerator::with_seed(edr_defaults::MIX_HASH_SEED);
Expand Down Expand Up @@ -2292,10 +2295,9 @@ fn create_blockchain_and_state(
.timestamp,
);

let elapsed_time = SystemTime::now()
.duration_since(fork_block_timestamp)
.expect("current time must be after fork block")
.as_secs();
let elapsed_time = timer
.since(fork_block_timestamp)
.expect("current time must be after fork block");

-i64::try_from(elapsed_time)
.expect("Elapsed time since fork block must be representable as i64")
@@ -2368,7 +2370,7 @@ fn create_blockchain_and_state(
.state_at_block_number(0, irregular_state.state_overrides())
.expect("Genesis state must exist");

let block_time_offset_seconds = block_time_offset_seconds(config)?;
let block_time_offset_seconds = block_time_offset_seconds(config, timer)?;

Ok(BlockchainAndState {
fork_metadata: None,
@@ -2479,6 +2481,7 @@ pub(crate) mod test_utils {
subscription_callback_noop,
None,
config.clone(),
CurrentTime,
)?;

provider_data.impersonate_account(impersonated_account);
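The new tests themselves are not visible in the rendered hunks, but the extra `TimerT` parameter on `ProviderData` is what enables them: a test can construct the provider data with a frozen clock instead of `CurrentTime`, making `block_time_offset_seconds` (the signed difference between the configured `initial_date` and "now", e.g. an initial date 100 seconds in the past yields -100) and the mined-block timestamp computation deterministic. A hypothetical test double, reusing the assumed trait shape sketched earlier:

```rust
use std::time::{Duration, SystemTime, SystemTimeError, UNIX_EPOCH};

/// Hypothetical test double: a clock pinned to a fixed number of seconds
/// since the UNIX epoch.
#[derive(Clone, Copy, Debug)]
struct FrozenTime {
    now: u64,
}

impl TimeSinceEpoch for FrozenTime {
    fn since_epoch(&self) -> u64 {
        self.now
    }

    fn since(&self, time: SystemTime) -> Result<u64, SystemTimeError> {
        // Reconstruct the frozen instant and measure from the given time.
        let frozen = UNIX_EPOCH + Duration::from_secs(self.now);
        frozen.duration_since(time).map(|duration| duration.as_secs())
    }
}
```

Passing such a value as the last argument to `ProviderData::new` (where `test_utils` above passes `CurrentTime`) pins every timestamp the provider derives from the clock.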
6 changes: 1 addition & 5 deletions crates/edr_provider/src/error.rs
@@ -1,5 +1,5 @@
use core::fmt::Debug;
use std::{num::TryFromIntError, time::SystemTimeError};
use std::num::TryFromIntError;

use alloy_sol_types::{ContractError, SolInterface};
use edr_eth::{
@@ -151,9 +151,6 @@ pub enum ProviderError<LoggerErrorT> {
/// State error
#[error(transparent)]
State(#[from] StateError),
/// System time error
#[error(transparent)]
SystemTime(#[from] SystemTimeError),
/// Timestamp lower than previous timestamp
#[error("Timestamp {proposed} is lower than the previous block's timestamp {previous}")]
TimestampLowerThanPrevious { proposed: u64, previous: u64 },
@@ -242,7 +239,6 @@ impl<LoggerErrorT: Debug> From<ProviderError<LoggerErrorT>> for jsonrpc::Error {
ProviderError::SetNextPrevRandaoUnsupported { .. } => INVALID_INPUT,
ProviderError::Signature(_) => INVALID_INPUT,
ProviderError::State(_) => INVALID_INPUT,
ProviderError::SystemTime(_) => INVALID_INPUT,
ProviderError::TimestampLowerThanPrevious { .. } => INVALID_INPUT,
ProviderError::TimestampEqualsPrevious { .. } => INVALID_INPUT,
ProviderError::TransactionFailed(_) => INVALID_INPUT,
13 changes: 8 additions & 5 deletions crates/edr_provider/src/interval.rs
@@ -8,7 +8,7 @@ use tokio::{
time::Instant,
};

use crate::{data::ProviderData, IntervalConfig, ProviderError};
use crate::{data::ProviderData, time::TimeSinceEpoch, IntervalConfig, ProviderError};

/// Type for interval mining on a separate thread.
pub struct IntervalMiner<LoggerErrorT: Debug> {
@@ -24,10 +24,10 @@ struct Inner<LoggerErrorT: Debug> {
}

impl<LoggerErrorT: Debug + Send + Sync + 'static> IntervalMiner<LoggerErrorT> {
pub fn new(
pub fn new<TimerT: Clone + TimeSinceEpoch>(
runtime: runtime::Handle,
config: IntervalConfig,
data: Arc<Mutex<ProviderData<LoggerErrorT>>>,
data: Arc<Mutex<ProviderData<LoggerErrorT, TimerT>>>,
) -> Self {
let (cancellation_sender, cancellation_receiver) = oneshot::channel();
let background_task = runtime
@@ -44,9 +44,12 @@ impl<LoggerErrorT: Debug + Send + Sync + 'static> IntervalMiner<LoggerErrorT> {
}

#[cfg_attr(feature = "tracing", tracing::instrument(skip_all))]
async fn interval_mining_loop<LoggerErrorT: Debug + Send + Sync + 'static>(
async fn interval_mining_loop<
LoggerErrorT: Debug + Send + Sync + 'static,
TimerT: Clone + TimeSinceEpoch,
>(
config: IntervalConfig,
data: Arc<Mutex<ProviderData<LoggerErrorT>>>,
data: Arc<Mutex<ProviderData<LoggerErrorT, TimerT>>>,
mut cancellation_receiver: oneshot::Receiver<()>,
) -> Result<(), ProviderError<LoggerErrorT>> {
let mut now = Instant::now();
19 changes: 13 additions & 6 deletions crates/edr_provider/src/lib.rs
@@ -15,6 +15,8 @@ mod subscribe;
/// Utilities for testing
#[cfg(any(test, feature = "test-utils"))]
pub mod test_utils;
/// Types for temporal operations
pub mod time;

use core::fmt::Debug;
use std::sync::Arc;
@@ -25,6 +27,7 @@ use logger::SyncLogger;
use mock::SyncCallOverride;
use parking_lot::Mutex;
use requests::{eth::handle_set_interval_mining, hardhat::rpc_types::ResetProviderConfig};
use time::{CurrentTime, TimeSinceEpoch};
use tokio::{runtime, sync::Mutex as AsyncMutex, task};

pub use self::{
@@ -93,29 +96,33 @@ pub struct ResponseWithTraces {
/// }
/// }
/// ```
pub struct Provider<LoggerErrorT: Debug> {
data: Arc<AsyncMutex<ProviderData<LoggerErrorT>>>,
pub struct Provider<LoggerErrorT: Debug, TimerT: Clone + TimeSinceEpoch = CurrentTime> {
data: Arc<AsyncMutex<ProviderData<LoggerErrorT, TimerT>>>,
/// Interval miner runs in the background, if enabled. It holds the data
/// mutex, so it needs to internally check for cancellation/self-destruction
/// while async-awaiting the lock to avoid a deadlock.
interval_miner: Arc<Mutex<Option<IntervalMiner<LoggerErrorT>>>>,
runtime: runtime::Handle,
}

impl<LoggerErrorT: Debug + Send + Sync + 'static> Provider<LoggerErrorT> {
impl<LoggerErrorT: Debug + Send + Sync + 'static, TimerT: Clone + TimeSinceEpoch>
Provider<LoggerErrorT, TimerT>
{
/// Constructs a new instance.
pub fn new(
runtime: runtime::Handle,
logger: Box<dyn SyncLogger<BlockchainError = BlockchainError, LoggerError = LoggerErrorT>>,
subscriber_callback: Box<dyn SyncSubscriberCallback>,
config: ProviderConfig,
timer: TimerT,
) -> Result<Self, CreationError> {
let data = ProviderData::new(
runtime.clone(),
logger,
subscriber_callback,
None,
config.clone(),
timer,
)?;
let data = Arc::new(AsyncMutex::new(data));

@@ -167,7 +174,7 @@ impl<LoggerErrorT: Debug + Send + Sync + 'static> Provider<LoggerErrorT> {
/// Handles a batch of JSON requests for an execution provider.
fn handle_batch_request(
&self,
data: &mut ProviderData<LoggerErrorT>,
data: &mut ProviderData<LoggerErrorT, TimerT>,
request: Vec<MethodInvocation>,
) -> Result<ResponseWithTraces, ProviderError<LoggerErrorT>> {
let mut results = Vec::new();
@@ -185,7 +192,7 @@ impl<LoggerErrorT: Debug + Send + Sync + 'static> Provider<LoggerErrorT> {

fn handle_single_request(
&self,
data: &mut ProviderData<LoggerErrorT>,
data: &mut ProviderData<LoggerErrorT, TimerT>,
request: MethodInvocation,
) -> Result<ResponseWithTraces, ProviderError<LoggerErrorT>> {
let method_name = if data.logger_mut().is_enabled() {
@@ -443,7 +450,7 @@ impl<LoggerErrorT: Debug + Send + Sync + 'static> Provider<LoggerErrorT> {

fn reset(
&self,
data: &mut ProviderData<LoggerErrorT>,
data: &mut ProviderData<LoggerErrorT, TimerT>,
config: Option<ResetProviderConfig>,
) -> Result<bool, ProviderError<LoggerErrorT>> {
let mut interval_miner = self.interval_miner.lock();
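A note on the `lib.rs` signature changes: because the timer parameter defaults to `CurrentTime` (`TimerT: Clone + TimeSinceEpoch = CurrentTime`), downstream type annotations that only name the logger error keep compiling unchanged; only code that constructs a provider has to supply a timer value, as the `edr_napi` hunk above does. A small illustration with placeholder types (neither `MyLoggerError` nor `FrozenTime` exists in the codebase):

```rust
#[derive(Debug)]
struct MyLoggerError;

// Shorthand for Provider<MyLoggerError, CurrentTime> thanks to the default parameter.
type ProductionProvider = edr_provider::Provider<MyLoggerError>;

// Tests can spell out a fake clock explicitly.
type TestProvider = edr_provider::Provider<MyLoggerError, FrozenTime>;
```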