feat(l2): implement batch endpoint #3374

Merged: 22 commits, Jul 1, 2025

2 changes: 2 additions & 0 deletions cmd/ethrex/l2/command.rs
@@ -442,6 +442,8 @@ impl Command {
deposit_logs_hash: H256::zero(),
message_hashes,
blobs_bundle: BlobsBundle::empty(),
commit_tx: None,
verify_tx: None,
};

// Store batch info in L2 storage
7 changes: 6 additions & 1 deletion crates/common/types/batch.rs
@@ -1,14 +1,19 @@
use serde::Serialize;

use crate::H256;

use super::BlobsBundle;

#[derive(Clone)]
#[derive(Clone, Serialize)]
pub struct Batch {
pub number: u64,
pub first_block: u64,
pub last_block: u64,
pub state_root: H256,
pub deposit_logs_hash: H256,
pub message_hashes: Vec<H256>,
#[serde(skip_serializing)]
pub blobs_bundle: BlobsBundle,
pub commit_tx: Option<H256>,
pub verify_tx: Option<H256>,
}
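
For reference, a minimal, self-contained sketch of what the new derive does; the field types here are simplified stand-ins, not the real H256 and BlobsBundle:

```rust
use serde::Serialize;

// Trimmed stand-in for the real `Batch`, only to illustrate the serde attributes.
#[derive(Clone, Serialize)]
struct Batch {
    number: u64,
    first_block: u64,
    last_block: u64,
    #[serde(skip_serializing)]
    blobs_bundle: Vec<u8>, // never appears in the serialized output
    commit_tx: Option<String>, // serialized as null while unset
    verify_tx: Option<String>,
}

fn main() {
    let batch = Batch {
        number: 1,
        first_block: 0,
        last_block: 10,
        blobs_bundle: vec![0u8; 4],
        commit_tx: Some("0xabc123".to_string()),
        verify_tx: None,
    };
    // Prints every field except blobs_bundle; verify_tx shows up as null.
    println!("{}", serde_json::to_string_pretty(&batch).unwrap());
}
```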
126 changes: 97 additions & 29 deletions crates/l2/based/block_fetcher.rs
@@ -203,44 +203,27 @@ async fn fetch(state: &mut BlockFetcherState) -> Result<(), BlockFetcherError> {
"Node is {l2_batches_behind} batches behind. Last batch number known: {last_l2_batch_number_known}, last committed batch number: {last_l2_committed_batch_number}"
);

let batch_committed_logs = get_logs(state).await?;
let (batch_committed_logs, batch_verified_logs) = get_logs(state).await?;

let mut missing_batches_logs =
filter_logs(&batch_committed_logs, last_l2_batch_number_known).await?;

missing_batches_logs.sort_by_key(|(_log, batch_number)| *batch_number);

for (batch_committed_log, batch_number) in missing_batches_logs {
let batch_commit_tx_calldata = state
.eth_client
.get_transaction_by_hash(batch_committed_log.transaction_hash)
.await?
.ok_or(BlockFetcherError::InternalError(format!(
"Failed to get the receipt for transaction {:x}",
batch_committed_log.transaction_hash
)))?
.data;

let batch = decode_batch_from_calldata(&batch_commit_tx_calldata)?;

store_batch(state, &batch).await?;

seal_batch(state, &batch, batch_number).await?;
}
process_committed_logs(batch_committed_logs, state, last_l2_batch_number_known).await?;
process_verified_logs(batch_verified_logs, state).await?;
}

info!("Node is up to date");

Ok(())
}

/// Fetch logs from the L1 chain for the BatchCommitted event.
/// Fetch logs from the L1 chain for the BatchCommitted and BatchVerified events.
/// This function fetches logs, starting from the last fetched block number (aka the last block that was processed)
/// and going up to the current block number.
async fn get_logs(state: &mut BlockFetcherState) -> Result<Vec<RpcLog>, BlockFetcherError> {
async fn get_logs(
state: &mut BlockFetcherState,
) -> Result<(Vec<RpcLog>, Vec<RpcLog>), BlockFetcherError> {
let last_l1_block_number = state.eth_client.get_block_number().await?;

let mut batch_committed_logs = Vec::new();
let mut batch_verified_logs = Vec::new();
while state.last_l1_block_fetched < last_l1_block_number {
let new_last_l1_fetched_block = min(
state.last_l1_block_fetched + state.fetch_block_step,
@@ -254,7 +237,7 @@ async fn get_logs(state: &mut BlockFetcherState) -> Result<Vec<RpcLog>, BlockFet
);

// Fetch logs from the L1 chain for the BatchCommitted event.
let logs = state
let committed_logs = state
.eth_client
.get_logs(
state.last_l1_block_fetched + 1,
@@ -264,13 +247,64 @@ async fn get_logs(state: &mut BlockFetcherState) -> Result<Vec<RpcLog>, BlockFet
)
.await?;

// Fetch logs from the L1 chain for the BatchVerified event.
let verified_logs = state
.eth_client
.get_logs(
state.last_l1_block_fetched + 1,
new_last_l1_fetched_block,
state.on_chain_proposer_address,
keccak(b"BatchVerified(uint256)"),
)
.await?;

// Update the last L1 block fetched.
state.last_l1_block_fetched = new_last_l1_fetched_block;

batch_committed_logs.extend_from_slice(&logs);
batch_committed_logs.extend_from_slice(&committed_logs);
batch_verified_logs.extend_from_slice(&verified_logs);
}

Ok(batch_committed_logs)
Ok((batch_committed_logs, batch_verified_logs))
}
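
For reference, the topic passed to `get_logs` is just the keccak-256 hash of the event's canonical signature, so the filter only matches BatchVerified logs. A minimal stand-alone sketch, assuming the `keccak-hash` crate (the codebase's own `keccak` helper may come from elsewhere):

```rust
use keccak_hash::keccak;

fn main() {
    // topic0 of an emitted Solidity event is keccak256 of its canonical signature.
    let verified_topic = keccak(b"BatchVerified(uint256)");
    println!("BatchVerified topic0: {verified_topic:#x}");
}
```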

/// Process the logs from the event `BatchCommitted`.
/// Gets the committed batches that are missing in the local store from the logs,
/// and seals the batch in the rollup store.
async fn process_committed_logs(
batch_committed_logs: Vec<RpcLog>,
state: &mut BlockFetcherState,
last_l2_batch_number_known: u64,
) -> Result<(), BlockFetcherError> {
let mut missing_batches_logs =
Contributor: Consider returning a BTreeMap<BatchNumber, Log> instead of a Vec with tuples if possible. This will save us the call to sorting here.

Contributor Author: This part is simply a refactor to also process verified_logs. I just copied and pasted what was done before. We can review this in the future.

filter_logs(&batch_committed_logs, last_l2_batch_number_known).await?;

missing_batches_logs.sort_by_key(|(_log, batch_number)| *batch_number);

for (batch_committed_log, batch_number) in missing_batches_logs {
let batch_commit_tx_calldata = state
.eth_client
.get_transaction_by_hash(batch_committed_log.transaction_hash)
.await?
.ok_or(BlockFetcherError::InternalError(format!(
"Failed to get the receipt for transaction {:x}",
batch_committed_log.transaction_hash
)))?
.data;

let batch = decode_batch_from_calldata(&batch_commit_tx_calldata)?;

store_batch(state, &batch).await?;

seal_batch(
state,
&batch,
batch_number,
batch_committed_log.transaction_hash,
)
.await?;
}
Ok(())
}
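
For context on the review note above, a minimal sketch of the BTreeMap shape the reviewer suggests; it is generic over the log type (the real code uses RpcLog and U256 batch numbers), and iteration order replaces the explicit `sort_by_key`:

```rust
use std::collections::BTreeMap;

// Hypothetical variant of the reviewer's suggestion: keying the missing logs by
// batch number keeps them ordered, so no separate sort step is needed.
fn into_ordered<L>(logs: Vec<(L, u64)>) -> BTreeMap<u64, L> {
    logs.into_iter()
        .map(|(log, batch_number)| (batch_number, log))
        .collect()
}

fn main() {
    let unordered = vec![("commit-log-c", 3), ("commit-log-a", 1), ("commit-log-b", 2)];
    // BTreeMap iteration is in ascending key order: batches 1, 2, 3.
    for (batch_number, log) in into_ordered(unordered) {
        println!("batch {batch_number}: {log}");
    }
}
```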

/// Given the logs from the event `BatchCommitted`,
@@ -391,8 +425,9 @@ async fn seal_batch(
state: &mut BlockFetcherState,
batch: &[Block],
batch_number: U256,
commit_tx: H256,
) -> Result<(), BlockFetcherError> {
let batch = get_batch(state, batch, batch_number).await?;
let batch = get_batch(state, batch, batch_number, commit_tx).await?;

state.rollup_store.seal_batch(batch).await?;

@@ -453,6 +488,7 @@ async fn get_batch(
state: &mut BlockFetcherState,
batch: &[Block],
batch_number: U256,
commit_tx: H256,
) -> Result<Batch, BlockFetcherError> {
let deposits: Vec<PrivilegedL2Transaction> = batch
.iter()
@@ -539,5 +575,37 @@ async fn get_batch(
deposit_logs_hash,
message_hashes: get_batch_message_hashes(state, batch).await?,
blobs_bundle,
commit_tx: Some(commit_tx),
verify_tx: None,
})
}

/// Process the logs from the event `BatchVerified`.
/// Gets the batch number from the logs and stores the verify transaction hash in the rollup store
async fn process_verified_logs(
batch_verified_logs: Vec<RpcLog>,
state: &mut BlockFetcherState,
) -> Result<(), BlockFetcherError> {
for batch_verified_log in batch_verified_logs {
let batch_number = U256::from_big_endian(
batch_verified_log
.log
.topics
.get(1)
.ok_or(BlockFetcherError::InternalError(
"Failed to get verified batch number from BatchVerified log".to_string(),
))?
.as_bytes(),
);

let verify_tx_hash = batch_verified_log.transaction_hash;

state
.rollup_store
.store_verify_tx_by_batch(batch_number.as_u64(), verify_tx_hash)
.await?;

info!("Stored verify transaction hash {verify_tx_hash:#x} for batch {batch_number}");
}
Ok(())
}
102 changes: 102 additions & 0 deletions crates/l2/networking/rpc/l2/batch.rs
@@ -0,0 +1,102 @@
use ethrex_common::types::{BlockHash, batch::Batch};
use ethrex_storage::Store;
use serde::Serialize;
use serde_json::Value;
use tracing::info;

use crate::{
rpc::{RpcApiContext, RpcHandler},
utils::RpcErr,
};

#[derive(Serialize)]
pub struct RpcBatch {
#[serde(flatten)]
pub batch: Batch,
#[serde(skip_serializing_if = "Option::is_none")]
pub block_hashes: Option<Vec<BlockHash>>,
Contributor: If block hashes can be none, why not simply use an empty vec?

Contributor Author: The block_hashes field is optional. You can choose whether to request them using a boolean parameter, similar to this. IMO, an empty vec could indicate that the batch is empty.

}
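
For reference on the exchange above, a self-contained sketch with simplified stand-in types of how `flatten` and `skip_serializing_if` shape the response: when block hashes are not requested, the field is dropped from the JSON entirely rather than sent as null or an empty array.

```rust
use serde::Serialize;

// Simplified stand-ins; the real types are Batch and Vec<BlockHash>.
#[derive(Serialize)]
struct BatchStub {
    number: u64,
    first_block: u64,
    last_block: u64,
}

#[derive(Serialize)]
struct RpcBatchStub {
    #[serde(flatten)]
    batch: BatchStub,
    #[serde(skip_serializing_if = "Option::is_none")]
    block_hashes: Option<Vec<String>>,
}

fn main() {
    let without_hashes = RpcBatchStub {
        batch: BatchStub { number: 1, first_block: 0, last_block: 10 },
        block_hashes: None,
    };
    // The inner batch fields are lifted to the top level and block_hashes is omitted:
    // {"number":1,"first_block":0,"last_block":10}
    println!("{}", serde_json::to_string(&without_hashes).unwrap());
}
```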

impl RpcBatch {
pub async fn build(batch: Batch, block_hashes: bool, store: &Store) -> Result<Self, RpcErr> {
let block_hashes = if block_hashes {
Some(get_block_hashes(
batch.first_block,
batch.last_block,
store,
)?)
} else {
None
};

Ok(RpcBatch {
batch,
block_hashes,
})
}
}

fn get_block_hashes(
first_block: u64,
last_block: u64,
store: &Store,
) -> Result<Vec<BlockHash>, RpcErr> {
let mut block_hashes = Vec::new();
for block_number in first_block..=last_block {
let header = store
.get_block_header(block_number)?
.ok_or(RpcErr::Internal(format!(
"Failed to retrieve block header for block number {block_number}"
)))?;
let hash = header.hash();
block_hashes.push(hash);
}
Ok(block_hashes)
}

pub struct GetBatchByBatchNumberRequest {
pub batch_number: u64,
pub block_hashes: bool,
}

impl RpcHandler for GetBatchByBatchNumberRequest {
fn parse(params: &Option<Vec<Value>>) -> Result<GetBatchByBatchNumberRequest, RpcErr> {
let params = params.as_ref().ok_or(ethrex_rpc::RpcErr::BadParams(
"No params provided".to_owned(),
))?;
if params.len() != 2 {
return Err(ethrex_rpc::RpcErr::BadParams(
"Expected 2 params".to_owned(),
))?;
};
// Parse BatchNumber
let hex_str = serde_json::from_value::<String>(params[0].clone())
.map_err(|e| ethrex_rpc::RpcErr::BadParams(e.to_string()))?;

// Check that the BatchNumber is 0x prefixed
let hex_str = hex_str
.strip_prefix("0x")
.ok_or(ethrex_rpc::RpcErr::BadHexFormat(0))?;

// Parse hex string
let batch_number =
u64::from_str_radix(hex_str, 16).map_err(|_| ethrex_rpc::RpcErr::BadHexFormat(0))?;

let block_hashes = serde_json::from_value(params[1].clone())?;

Ok(GetBatchByBatchNumberRequest {
batch_number,
block_hashes,
})
}

async fn handle(&self, context: RpcApiContext) -> Result<Value, RpcErr> {
info!("Requested batch with number: {}", self.batch_number);
let Some(batch) = context.rollup_store.get_batch(self.batch_number).await? else {
return Ok(Value::Null);
};
let rpc_batch = RpcBatch::build(batch, self.block_hashes, &context.l1_ctx.storage).await?;

serde_json::to_value(&rpc_batch).map_err(|error| RpcErr::Internal(error.to_string()))
}
}
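
A quick sketch of a request body for the new method, matching the parsing above: the first param is a 0x-prefixed hex batch number and the second toggles block hashes (the request id is an arbitrary placeholder):

```rust
use serde_json::json;

fn main() {
    // JSON-RPC body for ethrex_getBatchByNumber; send it to the node's L2 RPC endpoint.
    let request = json!({
        "jsonrpc": "2.0",
        "id": 1,
        "method": "ethrex_getBatchByNumber",
        "params": ["0x1", true]
    });
    println!("{request}");
}
```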
1 change: 1 addition & 0 deletions crates/l2/networking/rpc/l2/mod.rs
@@ -1,2 +1,3 @@
pub mod batch;
pub mod l1_message;
pub mod transaction;
2 changes: 2 additions & 0 deletions crates/l2/networking/rpc/rpc.rs
@@ -1,3 +1,4 @@
use crate::l2::batch::GetBatchByBatchNumberRequest;
use crate::l2::l1_message::GetL1MessageProof;
use crate::utils::{RpcErr, RpcNamespace, resolve_namespace};
use axum::extract::State;
@@ -207,6 +208,7 @@ pub async fn map_l2_requests(req: &RpcRequest, context: RpcApiContext) -> Result
match req.method.as_str() {
"ethrex_sendTransaction" => SponsoredTx::call(req, context).await,
"ethrex_getMessageProof" => GetL1MessageProof::call(req, context).await,
"ethrex_getBatchByNumber" => GetBatchByBatchNumberRequest::call(req, context).await,
unknown_ethrex_l2_method => {
Err(ethrex_rpc::RpcErr::MethodNotFound(unknown_ethrex_l2_method.to_owned()).into())
}
7 changes: 7 additions & 0 deletions crates/l2/sequencer/l1_committer.rs
@@ -214,6 +214,8 @@ async fn commit_next_batch_to_l1(state: &mut CommitterState) -> Result<(), Commi
deposit_logs_hash,
message_hashes,
blobs_bundle,
commit_tx: None,
verify_tx: None,
};

state.rollup_store.seal_batch(batch.clone()).await?;
@@ -252,6 +254,11 @@ async fn commit_next_batch_to_l1(state: &mut CommitterState) -> Result<(), Commi
});
);

state
.rollup_store
.store_commit_tx_by_batch(batch.number, commit_tx_hash)
.await?;

info!(
"Commitment sent for batch {}, with tx hash {commit_tx_hash:#x}.",
batch.number
5 changes: 5 additions & 0 deletions crates/l2/sequencer/l1_proof_sender.rs
@@ -343,6 +343,11 @@ pub async fn send_proof_to_contract(
)
.await?;

state
.rollup_store
.store_verify_tx_by_batch(batch_number, verify_tx_hash)
.await?;

info!(
?batch_number,
?verify_tx_hash,
8 changes: 8 additions & 0 deletions crates/l2/sequencer/l1_proof_verifier.rs
@@ -185,6 +185,14 @@ impl L1ProofVerifier {
)
.await?;

// Store the verify transaction hash for each batch that was aggregated.
for i in 0..aggregated_proofs_count {
let batch_number = first_batch_number + i;
self.rollup_store
.store_verify_tx_by_batch(batch_number, verify_tx_hash)
.await?;
}

Ok(Some(verify_tx_hash))
Comment on lines +188 to 196

Contributor: It would be better to create a function that takes a range of batch numbers and stores them all in a single transaction.

Contributor Author: Created #3432!

}
