4 changes: 4 additions & 0 deletions crates/apollo_l1_provider/src/l1_provider.rs
@@ -435,8 +435,12 @@ impl L1Provider {
rejected_transactions: txs_snapshot.rejected,
rejected_staged_transactions: txs_snapshot.rejected_staged,
committed_transactions: txs_snapshot.committed,
cancellation_started_on_l2: txs_snapshot.cancellation_started_on_l2,
cancelled_on_l2: txs_snapshot.cancelled_on_l2,
consumed: txs_snapshot.consumed,
l1_provider_state: self.state.as_str().to_string(),
current_height: self.current_height,
number_of_txs_in_records: self.tx_manager.records.len(),
})
}
}
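For orientation, here is a minimal sketch of the snapshot type that these fields populate, as it might look after this change. The field names are taken from the hunk above; the type name, the element types, and anything outside the visible hunk are assumptions rather than the crate's actual definition.

```rust
// Hypothetical sketch of the value returned by `get_l1_provider_snapshot`;
// only the field names visible in the hunk above come from the diff.
use starknet_api::block::BlockNumber;
use starknet_api::transaction::TransactionHash;

#[derive(Clone, Debug)]
pub struct L1ProviderSnapshot {
    // ...fields preceding the visible hunk are omitted...
    pub rejected_transactions: Vec<TransactionHash>,
    pub rejected_staged_transactions: Vec<TransactionHash>,
    pub committed_transactions: Vec<TransactionHash>,
    // Buckets for the two cancellation phases, asserted on by the new flow tests.
    pub cancellation_started_on_l2: Vec<TransactionHash>,
    pub cancelled_on_l2: Vec<TransactionHash>,
    pub consumed: Vec<TransactionHash>,
    pub l1_provider_state: String,
    pub current_height: BlockNumber,
    // Total number of records held by the transaction manager.
    pub number_of_txs_in_records: usize,
}
```

The cancellation flow test below asserts directly on `cancellation_started_on_l2`, `cancelled_on_l2`, and `number_of_txs_in_records`.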
2 changes: 1 addition & 1 deletion crates/apollo_l1_provider/src/transaction_manager.rs
Expand Up @@ -111,7 +111,6 @@ impl TransactionManager {
let validation_status = self.with_record(tx_hash, |record| {
// If the current time affects the state, update state now.
record.update_time_based_state(unix_now, policy);

if !record.is_validatable() {
match record.state {
TransactionState::Committed => {
@@ -261,6 +260,7 @@ impl TransactionManager {
TransactionState::CancelledOnL2 => {
snapshot.cancelled_on_l2.push(tx_hash);
}
// TODO(guyn): add a CancellationFinalizedOnL1 state.
TransactionState::Consumed => {
snapshot.consumed.push(tx_hash);
}
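The hunks above touch the record state machine: `validate` calls `update_time_based_state` before checking `is_validatable`, and the snapshot code sorts records into buckets by `TransactionState`. The cancellation flow test below relies on records being re-evaluated against the clock lazily, which is why it checks the snapshot only after calling `validate`. A minimal, self-contained sketch of that idea follows; the state variant with a timestamp payload, the policy field, and the record struct are hypothetical, while the method names and the `CancelledOnL2` state come from the diff.

```rust
// Hypothetical, self-contained sketch; not the crate's actual types.
// It illustrates a lazy, clock-driven transition from "cancellation started"
// to "cancelled on L2" once the timelock has elapsed.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum TransactionState {
    // Placeholder for the states not involved in this sketch.
    Pending,
    CancellationStartedOnL2 { requested_at: u64 },
    CancelledOnL2,
}

struct TimelockPolicy {
    // Seconds a cancellation must wait on L2 before taking effect (assumed name).
    l2_cancellation_timelock_secs: u64,
}

struct TransactionRecord {
    state: TransactionState,
}

impl TransactionRecord {
    // Called from paths such as `validate`; nothing else re-reads the clock,
    // which is why the tests check the snapshot only after validating.
    fn update_time_based_state(&mut self, unix_now: u64, policy: &TimelockPolicy) {
        if let TransactionState::CancellationStartedOnL2 { requested_at } = self.state {
            if unix_now >= requested_at + policy.l2_cancellation_timelock_secs {
                self.state = TransactionState::CancelledOnL2;
            }
        }
    }

    // Cancelled records can no longer be validated (or proposed).
    fn is_validatable(&self) -> bool {
        !matches!(self.state, TransactionState::CancelledOnL2)
    }
}
```

In the test, the `validate` call issued after sleeping past `TIMELOCK_DURATION` is what flips the record into the cancelled bucket, which is why the snapshot assertions come after it.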
123 changes: 123 additions & 0 deletions crates/apollo_l1_provider/tests/flow_test_cancellation.rs
@@ -0,0 +1,123 @@
mod utils;
use std::time::Duration;

use apollo_l1_provider_types::{
InvalidValidationStatus,
L1ProviderClient,
SessionState,
ValidationStatus,
};
use starknet_api::block::BlockNumber;
use utils::{
send_cancellation_finalization,
send_cancellation_request,
send_message_from_l1_to_l2,
setup_anvil_base_layer,
setup_scraper_and_provider,
CALL_DATA,
CALL_DATA_2,
COOLDOWN_DURATION,
TARGET_L2_HEIGHT,
TIMELOCK_DURATION,
};

#[tokio::test]
async fn new_l1_handler_tx_propose_validate_cancellation_timelock() {
// Setup.

// Setup the base layer.
let base_layer = setup_anvil_base_layer().await;

let (l2_hash, nonce) = send_message_from_l1_to_l2(&base_layer, CALL_DATA).await;

let l1_provider_client = setup_scraper_and_provider(&base_layer).await;

// Test.
let next_block_height = BlockNumber(TARGET_L2_HEIGHT.0 + 1);

// Check that we can validate this message.
l1_provider_client.start_block(SessionState::Validate, next_block_height).await.unwrap();
assert_eq!(
l1_provider_client.validate(l2_hash, next_block_height).await.unwrap(),
ValidationStatus::Validated
);

// Wait until the cooldown is over before sending the cancellation request.
tokio::time::sleep(COOLDOWN_DURATION + Duration::from_secs(1)).await;

send_cancellation_request(&base_layer, CALL_DATA, nonce).await;

// Leave enough time for the cancellation request to be scraped and sent to the provider.
tokio::time::sleep(COOLDOWN_DURATION + Duration::from_secs(1)).await;

// Cancellation marked as started on L2.
let snapshot = l1_provider_client.get_l1_provider_snapshot().await.unwrap();
assert!(snapshot.cancellation_started_on_l2.contains(&l2_hash));
assert_eq!(snapshot.number_of_txs_in_records, 1);

// Should still be able to validate.
l1_provider_client.start_block(SessionState::Validate, next_block_height).await.unwrap();
assert_eq!(
l1_provider_client.validate(l2_hash, next_block_height).await.unwrap(),
ValidationStatus::Validated
);

// Should not be able to propose.
let n_txs = 1;
l1_provider_client.start_block(SessionState::Propose, next_block_height).await.unwrap();
let txs = l1_provider_client.get_txs(n_txs, next_block_height).await.unwrap();
assert!(txs.is_empty());

// Sleep at least one second more than the timelock to make sure we are not failing due to
// fractional seconds.
tokio::time::sleep(TIMELOCK_DURATION + Duration::from_secs(1)).await;

// Should no longer be able to validate.
l1_provider_client.start_block(SessionState::Validate, next_block_height).await.unwrap();
assert_eq!(
l1_provider_client.validate(l2_hash, next_block_height).await.unwrap(),
ValidationStatus::Invalid(InvalidValidationStatus::CancelledOnL2)
);

// Still cannot propose.
l1_provider_client.start_block(SessionState::Propose, next_block_height).await.unwrap();
let txs = l1_provider_client.get_txs(n_txs, next_block_height).await.unwrap();
assert!(txs.is_empty());

// Cancellation on L2 has taken effect; we can no longer propose or validate this tx.
// Must check the snapshot only AFTER we try to validate, since that triggers an update of the
// record state.
let snapshot = l1_provider_client.get_l1_provider_snapshot().await.unwrap();
assert!(!snapshot.cancellation_started_on_l2.contains(&l2_hash));
assert!(snapshot.cancelled_on_l2.contains(&l2_hash));
assert_eq!(snapshot.number_of_txs_in_records, 1);

send_cancellation_finalization(&base_layer, CALL_DATA, nonce).await;

// Sleep at least one second more than the cooldown to make sure we are not failing due to
// fractional seconds.
tokio::time::sleep(COOLDOWN_DURATION + Duration::from_secs(1)).await;

// TODO(guyn): check that the event gets deleted, after we add that functionality.

// Check that the scraper and provider are still working.
let (new_l2_hash, _nonce) = send_message_from_l1_to_l2(&base_layer, CALL_DATA_2).await;
assert_ne!(new_l2_hash, l2_hash);

// Wait for another scraping.
tokio::time::sleep(COOLDOWN_DURATION + Duration::from_secs(1)).await;

// Check that we can validate this message.
l1_provider_client.start_block(SessionState::Validate, next_block_height).await.unwrap();
assert_eq!(
l1_provider_client.validate(new_l2_hash, next_block_height).await.unwrap(),
ValidationStatus::Validated
);

// The first tx is still cancelled.
// TODO(guyn): after we implement cancellation deletion, update this to "not found".
assert_eq!(
l1_provider_client.validate(l2_hash, next_block_height).await.unwrap(),
ValidationStatus::Invalid(InvalidValidationStatus::CancelledOnL2)
);
}
57 changes: 57 additions & 0 deletions crates/apollo_l1_provider/tests/flow_test_one_tx.rs
@@ -0,0 +1,57 @@
mod utils;
use std::time::Duration;

use apollo_l1_provider_types::{L1ProviderClient, SessionState, ValidationStatus};
use starknet_api::block::BlockNumber;
use utils::{
send_message_from_l1_to_l2,
setup_anvil_base_layer,
setup_scraper_and_provider,
CALL_DATA,
COOLDOWN_DURATION,
TARGET_L2_HEIGHT,
};

#[tokio::test]
async fn new_l1_handler_tx_propose_validate_cooldown() {
// Setup.

// Setup the base layer.
let base_layer = setup_anvil_base_layer().await;

let (l2_hash, _nonce) = send_message_from_l1_to_l2(&base_layer, CALL_DATA).await;

let l1_provider_client = setup_scraper_and_provider(&base_layer).await;

// Test.
let next_block_height = BlockNumber(TARGET_L2_HEIGHT.0 + 1);

// Check that we can validate this message even though no time has passed.
l1_provider_client.start_block(SessionState::Validate, next_block_height).await.unwrap();
assert_eq!(
l1_provider_client.validate(l2_hash, next_block_height).await.unwrap(),
ValidationStatus::Validated
);

// Test that we do not propose anything before the cooldown is over.
l1_provider_client.start_block(SessionState::Propose, next_block_height).await.unwrap();
let n_txs = 1;
let txs = l1_provider_client.get_txs(n_txs, next_block_height).await.unwrap();
assert!(txs.is_empty());

// Sleep at least one second more than the cooldown to make sure we are not failing due to
// fractional seconds.
tokio::time::sleep(COOLDOWN_DURATION + Duration::from_secs(1)).await;

// Test that we propose after the cooldown is over.
l1_provider_client.start_block(SessionState::Propose, next_block_height).await.unwrap();
let txs = l1_provider_client.get_txs(n_txs, next_block_height).await.unwrap();
assert!(!txs.is_empty());

// Check that we can validate this message after the cooldown, too.
l1_provider_client.start_block(SessionState::Validate, next_block_height).await.unwrap();
assert_eq!(
l1_provider_client.validate(l2_hash, next_block_height).await.unwrap(),
ValidationStatus::Validated
);
}
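The test above exercises an asymmetry: a freshly scraped L1 handler transaction can be validated immediately, but `get_txs` only returns it for proposal once `COOLDOWN_DURATION` has elapsed. A minimal sketch of how such a gate could look; the struct and field names are hypothetical, and only the constant and the propose/validate behaviour come from the test.

```rust
// Hypothetical sketch of a proposal cooldown gate; not the crate's actual logic.
struct TransactionRecord {
    // Unix time (seconds) at which the transaction was scraped from L1 (assumed field).
    scraped_at: u64,
}

struct CooldownPolicy {
    // Seconds a scraped transaction must wait before it may be proposed (assumed field).
    proposal_cooldown_secs: u64,
}

impl TransactionRecord {
    // Validation is not time-gated in this sketch, matching the test's
    // "validate even though no time has passed" assertion.
    fn is_validatable(&self) -> bool {
        true
    }

    // Proposal (i.e. being returned from `get_txs`) waits out the cooldown,
    // matching the empty result before the sleep and the non-empty one after it.
    fn is_proposable(&self, unix_now: u64, policy: &CooldownPolicy) -> bool {
        unix_now >= self.scraped_at + policy.proposal_cooldown_secs
    }
}
```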