diff --git a/AGENTS.md b/AGENTS.md index dcf787e..ade6071 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -77,6 +77,7 @@ Primary objective in this phase: make sequencer behavior, safety checks, and per - Storage model is append-oriented; avoid mutable status flags for open/closed entities. - Open batch/frame are derived by “latest row” convention. - A frame’s leading direct-input prefix is derivable from `sequenced_l2_txs` plus `frames.safe_block`. +- `direct_inputs` contains only L1 app direct input **bodies**. InputBox payload first byte: **0x00** = direct input (tag stripped, body stored and executed), **0x01** = batch submission (for scheduler, not stored), **others** = discarded (invalid/garbage). The input reader only accepts 0x00-tagged payloads and stores `payload[1..]`. - Safe cursor/head values should be derived from persisted facts when possible, not duplicated as mutable fields. - Replay/catch-up must use persisted ordering plus persisted frame fee (`frames.fee`) to mirror inclusion semantics. - Included user-op identity is constrained by `UNIQUE(sender, nonce)`. 
diff --git a/Cargo.lock b/Cargo.lock index b8f554c..96b22cd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3765,10 +3765,12 @@ name = "sequencer" version = "0.1.0" dependencies = [ "alloy", + "alloy-network-primitives", "alloy-primitives", "alloy-sol-types", "app-core", "async-recursion", + "async-trait", "axum", "cartesi-rollups-contracts", "clap", diff --git a/benchmarks/src/runtime.rs b/benchmarks/src/runtime.rs index 84f06ec..be69303 100644 --- a/benchmarks/src/runtime.rs +++ b/benchmarks/src/runtime.rs @@ -70,6 +70,9 @@ impl ManagedSequencer { .open(log_path.as_path())?; let stderr_log = stdout_log.try_clone()?; + let batch_submitter_key = default_private_keys().first().cloned().unwrap_or_else(|| { + "0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80".to_string() + }); let mut child = Command::new(config.sequencer_bin.as_str()) .arg("--http-addr") .arg(http_addr) @@ -81,6 +84,8 @@ impl ManagedSequencer { .arg(domain.chain_id.to_string()) .arg("--domain-verifying-contract") .arg(domain.verifying_contract.to_string()) + .arg("--batch-submitter-private-key") + .arg(&batch_submitter_key) .env("RUST_LOG", DEFAULT_SEQUENCER_RUST_LOG) .stdout(Stdio::from(stdout_log)) .stderr(Stdio::from(stderr_log)) diff --git a/examples/app-core/src/application/wallet.rs b/examples/app-core/src/application/wallet.rs index e051a60..35e9bec 100644 --- a/examples/app-core/src/application/wallet.rs +++ b/examples/app-core/src/application/wallet.rs @@ -155,7 +155,10 @@ impl Application for WalletApp { Ok(()) } - fn execute_direct_input(&mut self, _payload: &[u8]) -> Result<(), AppError> { + fn execute_direct_input( + &mut self, + _input: &sequencer_core::l2_tx::DirectInput, + ) -> Result<(), AppError> { self.executed_input_count = self.executed_input_count.saturating_add(1); Ok(()) } diff --git a/examples/canonical-app/src/scheduler/core.rs b/examples/canonical-app/src/scheduler/core.rs index 213bac5..832f566 100644 --- a/examples/canonical-app/src/scheduler/core.rs +++ 
b/examples/canonical-app/src/scheduler/core.rs @@ -6,6 +6,7 @@ use alloy_sol_types::Eip712Domain; use alloy_sol_types::SolStruct; use sequencer_core::application::Application; use sequencer_core::batch::{Batch, Frame, WireUserOp}; +use sequencer_core::l2_tx::DirectInput; use std::collections::VecDeque; pub const SEQUENCER_ADDRESS: Address = address!("0x1111111111111111111111111111111111111111"); @@ -46,10 +47,12 @@ pub struct Scheduler { app: A, config: SchedulerConfig, direct_q: VecDeque, + next_expected_batch_nonce: u64, } #[derive(Debug, Clone, PartialEq, Eq)] struct QueuedDirectInput { + sender: Address, payload: Vec, inclusion_block: u64, } @@ -60,6 +63,7 @@ impl Scheduler { app, config, direct_q: VecDeque::new(), + next_expected_batch_nonce: 0, } } @@ -74,6 +78,7 @@ impl Scheduler { if input.sender != self.config.sequencer_address { self.direct_q.push_back(QueuedDirectInput { + sender: input.sender, payload: input.payload, inclusion_block: input.inclusion_block, }); @@ -93,7 +98,12 @@ impl Scheduler { return ProcessOutcome::BatchInvalid; }; + if batch.nonce != self.next_expected_batch_nonce { + return ProcessOutcome::BatchInvalid; + } + let Some((frame_head, frame_tail)) = batch.frames.split_first() else { + self.next_expected_batch_nonce = batch.nonce + 1; return ProcessOutcome::BatchExecuted; }; @@ -115,6 +125,7 @@ impl Scheduler { self.execute_frame_user_ops(domain, frame); } + self.next_expected_batch_nonce = batch.nonce + 1; ProcessOutcome::BatchExecuted } @@ -171,7 +182,12 @@ impl Scheduler { break; } let queued = self.direct_q.pop_front().expect("queue front must exist"); - if let Err(err) = self.app.execute_direct_input(queued.payload.as_slice()) { + let input = DirectInput { + sender: queued.sender, + block_number: queued.inclusion_block, + payload: queued.payload, + }; + if let Err(err) = self.app.execute_direct_input(&input) { eprintln!("scheduler failed to execute drained direct input: {err}"); } } @@ -184,7 +200,12 @@ impl Scheduler { 
self.config.max_wait_blocks, current_block, ) { - let status = self.app.execute_direct_input(front.payload.as_slice()); + let input = DirectInput { + sender: front.sender, + block_number: front.inclusion_block, + payload: front.payload.clone(), + }; + let status = self.app.execute_direct_input(&input); if let Err(err) = status { eprintln!("scheduler failed to execute overdue direct input: {err}"); } @@ -332,9 +353,9 @@ mod tests { fn execute_direct_input( &mut self, - payload: &[u8], + input: &DirectInput, ) -> Result<(), sequencer_core::application::AppError> { - let marker = payload.first().copied().unwrap_or(0); + let marker = input.payload.first().copied().unwrap_or(0); self.executed.push(RecordedTx::Direct(marker)); Ok(()) } @@ -411,7 +432,7 @@ mod tests { } #[test] - fn batch_drains_safe_directs_before_executing_user_ops() { + fn batch_drains_safe_inputs_before_executing_user_ops() { let mut scheduler = Scheduler::new( RecordingApp::default(), SchedulerConfig { @@ -428,6 +449,7 @@ mod tests { let signing_key = SigningKey::from_bytes((&[1_u8; 32]).into()).expect("signing key"); let batch = Batch { + nonce: 0, frames: vec![Frame { user_ops: vec![sign_wire_user_op( &test_domain(), @@ -465,6 +487,7 @@ mod tests { scheduler.process_input(direct_input(1, 1)); let signing_key = SigningKey::from_bytes((&[2_u8; 32]).into()).expect("signing key"); let batch = Batch { + nonce: 0, frames: vec![Frame { user_ops: vec![sign_wire_user_op( &test_domain(), @@ -498,6 +521,7 @@ mod tests { scheduler.process_input(direct_input(1, 9)); let signing_key = SigningKey::from_bytes((&[3_u8; 32]).into()).expect("signing key"); let stale_batch = Batch { + nonce: 0, frames: vec![Frame { user_ops: vec![sign_wire_user_op( &test_domain(), @@ -529,6 +553,7 @@ mod tests { let signing_key_a = SigningKey::from_bytes((&[4_u8; 32]).into()).expect("signing key a"); let signing_key_b = SigningKey::from_bytes((&[5_u8; 32]).into()).expect("signing key b"); let invalid = Batch { + nonce: 0, frames: 
vec![ Frame { user_ops: vec![sign_wire_user_op( @@ -574,6 +599,7 @@ mod tests { let signing_key = SigningKey::from_bytes((&[6_u8; 32]).into()).expect("signing key"); let invalid = Batch { + nonce: 0, frames: vec![Frame { user_ops: vec![sign_wire_user_op( &test_domain(), @@ -607,6 +633,7 @@ mod tests { scheduler.process_input(direct_input(10, 1)); scheduler.process_input(direct_input(11, 2)); let batch = Batch { + nonce: 0, frames: vec![Frame { user_ops: vec![], safe_block: 10, @@ -679,6 +706,7 @@ mod tests { ); let batch = Batch { + nonce: 0, frames: vec![Frame { user_ops: vec![WireUserOp { nonce: 0, @@ -717,6 +745,7 @@ mod tests { let valid = sign_wire_user_op(&test_domain(), &signing_key, 0, 10, vec![4]); let batch = Batch { + nonce: 0, frames: vec![ Frame { user_ops: vec![bad_nonce], @@ -758,7 +787,10 @@ mod tests { }, ); - let batch = Batch { frames: vec![] }; + let batch = Batch { + nonce: 0, + frames: vec![], + }; assert_eq!( scheduler.process_input(batch_input(10, batch)), @@ -784,6 +816,7 @@ mod tests { address!("0x3333333333333333333333333333333333333333"), ); let batch = Batch { + nonce: 0, frames: vec![Frame { user_ops: vec![sign_wire_user_op( &batch_domain, diff --git a/justfile b/justfile index 9333e51..3992a23 100644 --- a/justfile +++ b/justfile @@ -12,10 +12,12 @@ check-all-targets: test: cargo test --workspace +# Run sequencer tests sequentially so partition static config (init) is not shared across parallel tests. 
test-sequencer: - cargo test -p sequencer --lib + cargo test -p sequencer --lib -- --test-threads=1 cargo test -p sequencer --test e2e_sequencer -- --test-threads=1 cargo test -p sequencer --test ws_broadcaster -- --test-threads=1 + cargo test -p sequencer --test batch_submitter_integration -- --test-threads=1 bench target="all": just -f benchmarks/justfile {{target}} diff --git a/sequencer-core/src/application/mod.rs b/sequencer-core/src/application/mod.rs index 150b624..a2f7e84 100644 --- a/sequencer-core/src/application/mod.rs +++ b/sequencer-core/src/application/mod.rs @@ -1,6 +1,7 @@ // (c) Cartesi and individual authors (see AUTHORS) // SPDX-License-Identifier: Apache-2.0 (see LICENSE) +use crate::l2_tx::DirectInput; use crate::l2_tx::ValidUserOp; use crate::user_op::UserOp; use alloy_primitives::{Address, U256}; @@ -92,7 +93,7 @@ pub trait Application: Send { Ok(ExecutionOutcome::Included) } - fn execute_direct_input(&mut self, _payload: &[u8]) -> Result<(), AppError> { + fn execute_direct_input(&mut self, _input: &DirectInput) -> Result<(), AppError> { Ok(()) } diff --git a/sequencer-core/src/batch.rs b/sequencer-core/src/batch.rs index b126352..1bd4725 100644 --- a/sequencer-core/src/batch.rs +++ b/sequencer-core/src/batch.rs @@ -4,8 +4,17 @@ use crate::user_op::UserOp; use ssz_derive::{Decode, Encode}; +/// Tag byte for InputBox payloads that are L1 app direct inputs (e.g. deposits). +/// L1/app must post such inputs as `0x00 || body`. Only these are stored (body only) and executed. +pub const INPUT_TAG_DIRECT_INPUT: u8 = 0x00; + +/// Batch submissions are sent as raw `ssz(Batch)` with no tag; classification at L1 is by +/// attempting SSZ decode, and at the rollup by msg_sender. + #[derive(Debug, Clone, PartialEq, Eq, Encode, Decode)] pub struct Batch { + /// Batch index (nonce) for deduplication and ordering at the scheduler. 
+ pub nonce: u64, pub frames: Vec, } @@ -35,3 +44,24 @@ impl WireUserOp { } } } + +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct BatchForSubmission { + pub batch_index: u64, + pub created_at_ms: u64, + pub batch: Batch, +} + +impl BatchForSubmission { + /// Encode the batch for the scheduler as a single SSZ payload. + /// + /// Payload is `ssz(Batch { nonce: batch_index, frames })`. The scheduler decodes this + /// and uses `batch.nonce` for deduplication; classification at the rollup is by msg_sender. + pub fn encode_for_scheduler(&self) -> Vec { + let batch = Batch { + nonce: self.batch_index, + frames: self.batch.frames.clone(), + }; + ssz::Encode::as_ssz_bytes(&batch) + } +} diff --git a/sequencer-core/src/broadcast.rs b/sequencer-core/src/broadcast.rs index 84f92f4..9c11abd 100644 --- a/sequencer-core/src/broadcast.rs +++ b/sequencer-core/src/broadcast.rs @@ -15,6 +15,8 @@ pub enum BroadcastTxMessage { }, DirectInput { offset: u64, + sender: String, + block_number: u64, payload: String, }, } @@ -37,6 +39,8 @@ impl BroadcastTxMessage { }, SequencedL2Tx::Direct(direct) => Self::DirectInput { offset, + sender: direct.sender.to_string(), + block_number: direct.block_number, payload: alloy_primitives::hex::encode_prefixed(direct.payload.as_slice()), }, } diff --git a/sequencer-core/src/l2_tx.rs b/sequencer-core/src/l2_tx.rs index 7a2a63c..02452cd 100644 --- a/sequencer-core/src/l2_tx.rs +++ b/sequencer-core/src/l2_tx.rs @@ -5,6 +5,8 @@ use alloy_primitives::Address; #[derive(Debug, Clone, PartialEq, Eq)] pub struct DirectInput { + pub sender: Address, + pub block_number: u64, pub payload: Vec, } diff --git a/sequencer/Cargo.toml b/sequencer/Cargo.toml index 4343aa4..7edb129 100644 --- a/sequencer/Cargo.toml +++ b/sequencer/Cargo.toml @@ -23,13 +23,15 @@ rusqlite = { version = "0.38.0", features = ["bundled"] } rusqlite_migration = "2.3.0" alloy-primitives = { version = "1.4.1", features = ["serde", "k256"] } alloy-sol-types = "1.4.1" -alloy = { version = 
"1.0", features = ["contract", "network", "reqwest", "rpc-types", "sol-types", "node-bindings"] } +alloy = { version = "1.0", features = ["contract", "network", "reqwest", "rpc-types", "sol-types", "node-bindings", "signer-local", "signers"] } +alloy-network-primitives = "1.7" thiserror = "1" ssz = { package = "ethereum_ssz", version = "0.10" } ssz_derive = { package = "ethereum_ssz_derive", version = "0.10" } clap = { version = "4", features = ["derive", "env"] } async-recursion = "1" cartesi-rollups-contracts = "=2.2.0" +async-trait = "0.1" [dev-dependencies] futures-util = "0.3" diff --git a/sequencer/src/api/tx.rs b/sequencer/src/api/tx.rs index 8f10dd9..dad6617 100644 --- a/sequencer/src/api/tx.rs +++ b/sequencer/src/api/tx.rs @@ -89,6 +89,7 @@ mod tests { crate::l2_tx_feed::L2TxFeedConfig { idle_poll_interval: std::time::Duration::from_millis(2), page_size: 64, + batch_submitter_address: None, }, ); diff --git a/sequencer/src/batch_submitter/batch_poster.rs b/sequencer/src/batch_submitter/batch_poster.rs new file mode 100644 index 0000000..5cdc05e --- /dev/null +++ b/sequencer/src/batch_submitter/batch_poster.rs @@ -0,0 +1,225 @@ +// (c) Cartesi and individual authors (see AUTHORS) +// SPDX-License-Identifier: Apache-2.0 (see LICENSE) + +use alloy::providers::Provider; +use async_trait::async_trait; +use cartesi_rollups_contracts::input_box::InputBox; +use sequencer_core::batch::Batch; +use thiserror::Error; + +use crate::partition::{decode_evm_advance_input, get_input_added_events}; + +pub type TxHash = alloy_primitives::B256; + +#[derive(Debug, Clone)] +pub struct BatchPosterConfig { + pub l1_submit_address: alloy_primitives::Address, + pub app_address: alloy_primitives::Address, + pub batch_submitter_address: alloy_primitives::Address, + pub start_block: u64, + pub confirmation_depth: u64, + /// Error codes that trigger `get_logs` retries with a shorter block range. 
+ pub long_block_range_error_codes: Vec, +} + +#[derive(Debug, Error)] +pub enum BatchPosterError { + #[error("provider/transport: {0}")] + Provider(String), +} + +#[async_trait] +pub trait BatchPoster: Send + Sync { + async fn submit_batch(&self, payload: Vec) -> Result; + + async fn observed_submitted_batch_nonces( + &self, + from_block: u64, + ) -> Result, BatchPosterError>; +} + +#[derive(Clone)] +pub struct EthereumBatchPoster { + provider: P, + config: BatchPosterConfig, +} + +impl
<P> EthereumBatchPoster<P>
+where + P: Provider + Send + Sync + Clone + 'static, +{ + pub fn new(provider: P, config: BatchPosterConfig) -> Self { + Self { provider, config } + } +} + +#[async_trait] +impl
<P> BatchPoster for EthereumBatchPoster<P>
+where + P: Provider + Send + Sync + Clone + 'static, +{ + async fn submit_batch(&self, payload: Vec) -> Result { + let input_box = InputBox::new(self.config.l1_submit_address, &self.provider); + let pending = input_box + .addInput(self.config.app_address, payload.into()) + .send() + .await + .map_err(|err| BatchPosterError::Provider(err.to_string()))?; + let tx_hash = *pending.tx_hash(); + + pending + .with_required_confirmations(self.config.confirmation_depth.saturating_add(1)) + .watch() + .await + .map_err(|err| BatchPosterError::Provider(err.to_string()))?; + + Ok(tx_hash) + } + + async fn observed_submitted_batch_nonces( + &self, + from_block: u64, + ) -> Result, BatchPosterError> { + let latest = self + .provider + .get_block_number() + .await + .map_err(|err| BatchPosterError::Provider(err.to_string()))?; + let end_block = latest.saturating_sub(self.config.confirmation_depth); + let start_block = from_block.max(self.config.start_block); + if start_block > end_block { + return Ok(Vec::new()); + } + + let events = get_input_added_events( + &self.provider, + self.config.app_address, + &self.config.l1_submit_address, + start_block, + end_block, + self.config.long_block_range_error_codes.as_slice(), + ) + .await + .map_err(|errs| { + BatchPosterError::Provider( + errs.into_iter() + .next() + .map(|e| e.to_string()) + .unwrap_or_default(), + ) + })?; + + let mut observed_nonces = Vec::new(); + for (event, _log) in events { + let evm_advance = decode_evm_advance_input(event.input.as_ref()) + .map_err(BatchPosterError::Provider)?; + if evm_advance.msgSender != self.config.batch_submitter_address { + continue; + } + let batch: Batch = ssz::Decode::from_ssz_bytes(evm_advance.payload.as_ref()) + .map_err(|err| BatchPosterError::Provider(format!("{err:?}")))?; + observed_nonces.push(batch.nonce); + } + + Ok(observed_nonces) + } +} + +#[cfg(test)] +pub(crate) mod mock { + use super::{Batch, BatchPoster, BatchPosterError, TxHash}; + use async_trait::async_trait; + use 
std::sync::Mutex; + + #[derive(Debug)] + pub struct MockBatchPoster { + pub submissions: Mutex>, + pub fail_submit: Mutex, + pub observed_submitted_nonces: Mutex>, + pub observed_submitted_error: Mutex>, + pub last_from_block: Mutex>, + } + + impl MockBatchPoster { + pub fn new() -> Self { + Self { + submissions: Mutex::new(Vec::new()), + fail_submit: Mutex::new(false), + observed_submitted_nonces: Mutex::new(Vec::new()), + observed_submitted_error: Mutex::new(None), + last_from_block: Mutex::new(None), + } + } + + pub fn submissions(&self) -> Vec<(u64, usize)> { + self.submissions.lock().expect("lock").clone() + } + + pub fn set_observed_submitted_nonces(&self, value: Vec) { + *self.observed_submitted_nonces.lock().expect("lock") = value; + } + + pub fn set_observed_submitted_error(&self, value: Option<&str>) { + *self.observed_submitted_error.lock().expect("lock") = value.map(str::to_string); + } + + pub fn last_from_block(&self) -> Option { + *self.last_from_block.lock().expect("lock") + } + } + + #[async_trait] + impl BatchPoster for MockBatchPoster { + async fn submit_batch(&self, payload: Vec) -> Result { + if *self.fail_submit.lock().expect("lock") { + return Err(BatchPosterError::Provider("mock submit fail".into())); + } + let batch_index = ssz::Decode::from_ssz_bytes(payload.as_ref()) + .map(|b: Batch| b.nonce) + .unwrap_or(0); + self.submissions + .lock() + .expect("lock") + .push((batch_index, payload.len())); + Ok(TxHash::ZERO) + } + + async fn observed_submitted_batch_nonces( + &self, + from_block: u64, + ) -> Result, BatchPosterError> { + *self.last_from_block.lock().expect("lock") = Some(from_block); + if let Some(err) = self.observed_submitted_error.lock().expect("lock").clone() { + return Err(BatchPosterError::Provider(err)); + } + let configured = self.observed_submitted_nonces.lock().expect("lock").clone(); + if !configured.is_empty() { + return Ok(configured); + } + Ok(self + .submissions + .lock() + .expect("lock") + .iter() + .map(|(idx, _)| 
*idx) + .collect()) + } + } +} + +#[cfg(test)] +mod tests { + use super::{BatchPoster, mock::MockBatchPoster}; + + #[tokio::test] + async fn mock_poster_tracks_requested_suffix_start_block() { + let poster = MockBatchPoster::new(); + let observed = poster + .observed_submitted_batch_nonces(42) + .await + .expect("observe submitted batches"); + + assert!(observed.is_empty()); + assert_eq!(poster.last_from_block(), Some(42)); + } +} diff --git a/sequencer/src/batch_submitter/config.rs b/sequencer/src/batch_submitter/config.rs new file mode 100644 index 0000000..6b0fd48 --- /dev/null +++ b/sequencer/src/batch_submitter/config.rs @@ -0,0 +1,19 @@ +// (c) Cartesi and individual authors (see AUTHORS) +// SPDX-License-Identifier: Apache-2.0 (see LICENSE) + +use std::time::Duration; + +/// Batch-submitter-specific options. L1 RPC URL and InputBox address are shared with the +/// input reader and come from the same discovery at startup (see `L1Config` in `config`). +/// These fields are parsed as part of `RunConfig` and passed through at runtime. +#[derive(Debug, Clone)] +pub struct BatchSubmitterConfig { + /// How often the submitter polls for new work when idle. + pub idle_poll_interval_ms: u64, +} + +impl BatchSubmitterConfig { + pub fn idle_poll_interval(&self) -> Duration { + Duration::from_millis(self.idle_poll_interval_ms) + } +} diff --git a/sequencer/src/batch_submitter/mod.rs b/sequencer/src/batch_submitter/mod.rs new file mode 100644 index 0000000..7b33556 --- /dev/null +++ b/sequencer/src/batch_submitter/mod.rs @@ -0,0 +1,18 @@ +// (c) Cartesi and individual authors (see AUTHORS) +// SPDX-License-Identifier: Apache-2.0 (see LICENSE) + +//! Batch submitter: posts closed batches to L1 with at-least-once semantics. +//! +//! The batch index is used as the batch nonce (id). The scheduler checks that nonces are +//! strictly increasing and invalidates otherwise, so duplicates are deduplicated at the +//! scheduler level. 
See `worker` for the wake → read S → compare → submit → sleep loop. + +mod batch_poster; +mod config; +mod worker; + +pub use batch_poster::{ + BatchPoster, BatchPosterConfig, BatchPosterError, EthereumBatchPoster, TxHash, +}; +pub use config::BatchSubmitterConfig; +pub use worker::{BatchSubmitter, BatchSubmitterError, TickOutcome}; diff --git a/sequencer/src/batch_submitter/worker.rs b/sequencer/src/batch_submitter/worker.rs new file mode 100644 index 0000000..b38a2ee --- /dev/null +++ b/sequencer/src/batch_submitter/worker.rs @@ -0,0 +1,389 @@ +// (c) Cartesi and individual authors (see AUTHORS) +// SPDX-License-Identifier: Apache-2.0 (see LICENSE) + +//! Batch submitter worker: at-least-once submission to L1, deduplicated by the scheduler. +//! +//! The worker is intentionally stateless with respect to submitted-batch progress. +//! On each tick it derives the highest submitted batch nonce from L1, compares that +//! with locally closed batches, submits the first missing batch if any, then loops. + +use std::sync::Arc; +use std::time::Duration; + +use alloy_primitives::Address; +use sequencer_core::batch::Batch; +use thiserror::Error; +use tracing::warn; + +use crate::batch_submitter::{BatchPoster, BatchPosterError, BatchSubmitterConfig, TxHash}; +use crate::shutdown::ShutdownSignal; +use crate::storage::{Storage, StorageOpenError}; + +#[derive(Debug, Error)] +pub enum BatchSubmitterError { + #[error(transparent)] + OpenStorage(#[from] StorageOpenError), + #[error(transparent)] + Storage(#[from] rusqlite::Error), + #[error("batch submitter join error: {0}")] + Join(String), + #[error("failed to decode stored safe batch input: {0}")] + StoredBatchDecode(String), + #[error(transparent)] + Poster(#[from] BatchPosterError), +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum TickOutcome { + Idle, + Submitted { batch_index: u64, tx_hash: TxHash }, +} + +pub struct BatchSubmitter { + db_path: String, + batch_submitter_address: Address, + poster: Arc

, + idle_poll_interval: Duration, + shutdown: ShutdownSignal, +} + +impl BatchSubmitter

{ + pub fn new( + db_path: impl Into, + batch_submitter_address: Address, + poster: Arc

, + shutdown: ShutdownSignal, + config: BatchSubmitterConfig, + ) -> Self { + Self { + db_path: db_path.into(), + batch_submitter_address, + poster, + idle_poll_interval: config.idle_poll_interval(), + shutdown, + } + } + + pub fn start( + self, + ) -> Result>, StorageOpenError> { + let _ = Storage::open_read_only(self.db_path.as_str())?; + Ok(tokio::spawn(async move { self.run_forever().await })) + } + + async fn run_forever(self) -> Result<(), BatchSubmitterError> { + loop { + if self.shutdown.is_shutdown_requested() { + return Ok(()); + } + + match self.tick_once().await { + Ok(TickOutcome::Submitted { .. }) => continue, + Ok(TickOutcome::Idle) => {} + Err(BatchSubmitterError::Poster(source)) => { + warn!(error = %source, "batch submitter tick failed, will retry"); + } + Err(err) => return Err(err), + } + + tokio::select! { + _ = self.shutdown.wait_for_shutdown() => return Ok(()), + _ = tokio::time::sleep(self.idle_poll_interval) => {} + } + } + } + + pub(crate) async fn tick_once(&self) -> Result { + let latest_batch_opt = self.load_latest_batch_index().await?; + let Some(latest_batch_index) = latest_batch_opt else { + return Ok(TickOutcome::Idle); + }; + + if latest_batch_index == 0 { + return Ok(TickOutcome::Idle); + } + + let last_closed = latest_batch_index - 1; + let next_expected = { + let (safe_block, safe_observed_nonces) = self.load_safe_observed_batch_nonces().await?; + let safe_next_expected = advance_expected_batch_nonce(0, safe_observed_nonces); + + let recent_observed_nonces = self + .poster + .observed_submitted_batch_nonces(safe_block.saturating_add(1)) + .await?; + advance_expected_batch_nonce(safe_next_expected, recent_observed_nonces) + }; + let latest_submitted = next_expected.checked_sub(1); + let first_to_submit = latest_submitted.map(|s| s + 1).unwrap_or(0); + if first_to_submit > last_closed { + return Ok(TickOutcome::Idle); + } + if first_to_submit < last_closed { + let pending_batches = last_closed - first_to_submit + 1; + warn!( + 
first_to_submit, + last_closed, pending_batches, "multiple closed batches are pending submission" + ); + } + + let batch = self.load_batch_for_submission(first_to_submit).await?; + let tx_hash = self + .poster + .submit_batch(batch.encode_for_scheduler()) + .await?; + + Ok(TickOutcome::Submitted { + batch_index: first_to_submit, + tx_hash, + }) + } + + async fn load_latest_batch_index(&self) -> Result, BatchSubmitterError> { + let db_path = self.db_path.clone(); + tokio::task::spawn_blocking(move || { + let mut storage = Storage::open_read_only(&db_path)?; + storage + .latest_batch_index() + .map_err(BatchSubmitterError::from) + }) + .await + .map_err(|err| BatchSubmitterError::Join(err.to_string()))? + } + + async fn load_safe_observed_batch_nonces( + &self, + ) -> Result<(u64, Vec), BatchSubmitterError> { + let db_path = self.db_path.clone(); + let batch_submitter_address = self.batch_submitter_address; + let (safe_block, payloads) = tokio::task::spawn_blocking(move || { + let mut storage = Storage::open_read_only(&db_path)?; + storage + .load_safe_input_payloads_for_sender(batch_submitter_address) + .map_err(BatchSubmitterError::from) + }) + .await + .map_err(|err| BatchSubmitterError::Join(err.to_string()))??; + + let mut observed_nonces = Vec::with_capacity(payloads.len()); + for payload in payloads { + let batch: Batch = ssz::Decode::from_ssz_bytes(payload.as_ref()) + .map_err(|err| BatchSubmitterError::StoredBatchDecode(format!("{err:?}")))?; + observed_nonces.push(batch.nonce); + } + + Ok((safe_block, observed_nonces)) + } + + async fn load_batch_for_submission( + &self, + batch_index: u64, + ) -> Result { + let db_path = self.db_path.clone(); + tokio::task::spawn_blocking(move || { + let mut storage = Storage::open_read_only(&db_path)?; + storage + .load_batch_for_submission(batch_index) + .map_err(BatchSubmitterError::from) + }) + .await + .map_err(|err| BatchSubmitterError::Join(err.to_string()))? 
+ } +} + +fn advance_expected_batch_nonce( + mut expected: u64, + observed_nonces: impl IntoIterator, +) -> u64 { + for nonce in observed_nonces { + if nonce == expected { + expected = expected.saturating_add(1); + } + } + expected +} + +#[cfg(test)] +mod tests { + use std::sync::Arc; + + use alloy_primitives::Address; + + use crate::batch_submitter::{ + BatchSubmitterConfig, BatchSubmitterError, TickOutcome, batch_poster::mock::MockBatchPoster, + }; + use crate::shutdown::ShutdownSignal; + use crate::storage::{SafeInputRange, Storage, StoredSafeInput}; + use tempfile::TempDir; + + const SQLITE_SYNCHRONOUS_PRAGMA: &str = "NORMAL"; + const BATCH_SUBMITTER_ADDRESS: Address = Address::repeat_byte(0x11); + + fn temp_db(name: &str) -> (TempDir, String) { + let dir = tempfile::Builder::new() + .prefix(format!("sequencer-batch-submitter-{name}-").as_str()) + .tempdir() + .expect("create temporary test directory"); + let path = dir.path().join("sequencer.sqlite"); + (dir, path.to_string_lossy().into_owned()) + } + + fn seed_two_closed_batches(db_path: &str) { + let mut storage = Storage::open(db_path, SQLITE_SYNCHRONOUS_PRAGMA).expect("open storage"); + let mut head = storage + .initialize_open_state(0, SafeInputRange::empty_at(0)) + .expect("initialize open state"); + let next_safe = head.safe_block; + storage + .close_frame_and_batch(&mut head, next_safe) + .expect("close batch 0"); + storage + .close_frame_and_batch(&mut head, next_safe) + .expect("close batch 1"); + storage + .close_frame_and_batch(&mut head, next_safe) + .expect("close batch 2"); + } + + fn seed_safe_submitted_batches(db_path: &str, safe_block: u64, nonces: &[u64]) { + let mut storage = Storage::open(db_path, SQLITE_SYNCHRONOUS_PRAGMA).expect("open storage"); + let inputs: Vec<_> = nonces + .iter() + .map(|nonce| StoredSafeInput { + sender: BATCH_SUBMITTER_ADDRESS, + payload: ssz::Encode::as_ssz_bytes(&sequencer_core::batch::Batch { + nonce: *nonce, + frames: Vec::new(), + }), + block_number: 
safe_block, + }) + .collect(); + storage + .append_safe_inputs(safe_block, inputs.as_slice()) + .expect("append safe submitted batches"); + } + + #[tokio::test] + async fn tick_once_submits_first_missing_closed_batch() { + let (_dir, path) = temp_db("tick-submits"); + seed_two_closed_batches(&path); + + let mock = Arc::new(MockBatchPoster::new()); + let config = BatchSubmitterConfig { + idle_poll_interval_ms: 1000, + }; + let submitter = super::BatchSubmitter::new( + path.clone(), + BATCH_SUBMITTER_ADDRESS, + mock.clone(), + ShutdownSignal::default(), + config, + ); + + let outcome = submitter.tick_once().await.expect("tick once"); + assert_eq!( + outcome, + TickOutcome::Submitted { + batch_index: 0, + tx_hash: alloy_primitives::B256::ZERO + } + ); + + let submissions = mock.submissions(); + assert_eq!(submissions.len(), 1); + assert_eq!(submissions[0].0, 0); + } + + #[tokio::test] + async fn tick_once_submits_nothing_when_already_caught_up() { + let (_dir, path) = temp_db("tick-caught-up"); + seed_two_closed_batches(&path); + seed_safe_submitted_batches(&path, 10, &[0, 1]); + + let mock = Arc::new(MockBatchPoster::new()); + mock.set_observed_submitted_nonces(vec![2]); + let config = BatchSubmitterConfig { + idle_poll_interval_ms: 1000, + }; + let submitter = super::BatchSubmitter::new( + path.clone(), + BATCH_SUBMITTER_ADDRESS, + mock.clone(), + ShutdownSignal::default(), + config, + ); + + let outcome = submitter.tick_once().await.expect("tick once"); + assert_eq!(outcome, TickOutcome::Idle); + assert!(mock.submissions().is_empty()); + assert_eq!(mock.last_from_block(), Some(11)); + } + + #[tokio::test] + async fn tick_once_combines_safe_prefix_with_recent_chain_suffix() { + let (_dir, path) = temp_db("tick-combines-prefix-and-suffix"); + seed_two_closed_batches(&path); + seed_safe_submitted_batches(&path, 10, &[0]); + + let mock = Arc::new(MockBatchPoster::new()); + mock.set_observed_submitted_nonces(vec![1]); + let submitter = super::BatchSubmitter::new( + 
path.clone(), + BATCH_SUBMITTER_ADDRESS, + mock.clone(), + ShutdownSignal::default(), + BatchSubmitterConfig { + idle_poll_interval_ms: 1000, + }, + ); + + let outcome = submitter.tick_once().await.expect("tick once"); + assert_eq!( + outcome, + TickOutcome::Submitted { + batch_index: 2, + tx_hash: alloy_primitives::B256::ZERO + } + ); + assert_eq!(mock.last_from_block(), Some(11)); + } + + #[tokio::test] + async fn tick_once_propagates_poster_errors() { + let (_dir, path) = temp_db("tick-poster-error"); + seed_two_closed_batches(&path); + + let mock = Arc::new(MockBatchPoster::new()); + mock.set_observed_submitted_error(Some("rpc fail")); + let submitter = super::BatchSubmitter::new( + path, + BATCH_SUBMITTER_ADDRESS, + mock, + ShutdownSignal::default(), + BatchSubmitterConfig { + idle_poll_interval_ms: 1000, + }, + ); + + let err = submitter + .tick_once() + .await + .expect_err("poster error should propagate"); + assert!(matches!(err, BatchSubmitterError::Poster(_))); + } + + #[test] + fn advance_expected_batch_nonce_matches_scheduler_nonce_rule() { + assert_eq!(super::advance_expected_batch_nonce(0, Vec::::new()), 0); + assert_eq!(super::advance_expected_batch_nonce(0, vec![0, 1, 2]), 3); + assert_eq!(super::advance_expected_batch_nonce(0, vec![0, 2, 3]), 1); + assert_eq!(super::advance_expected_batch_nonce(0, vec![1, 2, 3]), 0); + assert_eq!(super::advance_expected_batch_nonce(0, vec![0, 1, 1, 2]), 3); + assert_eq!( + super::advance_expected_batch_nonce(0, vec![6, 4, 3, 2, 2, 0, 1]), + 2 + ); + assert_eq!(super::advance_expected_batch_nonce(0, vec![0, 2, 1]), 2); + assert_eq!(super::advance_expected_batch_nonce(2, vec![2, 3]), 4); + } +} diff --git a/sequencer/src/config.rs b/sequencer/src/config.rs index f7b47d6..05956b5 100644 --- a/sequencer/src/config.rs +++ b/sequencer/src/config.rs @@ -3,7 +3,7 @@ use alloy_primitives::{Address, U256}; use alloy_sol_types::Eip712Domain; -use clap::Parser; +use clap::{ArgGroup, Parser}; pub const DOMAIN_NAME: &str = 
"CartesiAppSequencer"; pub const DOMAIN_VERSION: &str = "1"; @@ -11,17 +11,57 @@ pub const DOMAIN_VERSION: &str = "1"; const DEFAULT_HTTP_ADDR: &str = "127.0.0.1:3000"; const DEFAULT_DB_PATH: &str = "sequencer.db"; -/// `-32005` Infura -/// `-32600`, `-32602` Alchemy -/// `-32616` QuickNode -const DEFAULT_LONG_BLOCK_RANGE_ERROR_CODES: &[&str] = &["-32005", "-32600", "-32602", "-32616"]; +/// Shared L1 / InputBox configuration used by both the input reader and the batch submitter. +/// +/// Built once at startup from `RunConfig` plus the discovered InputBox address, so RPC URL, +/// InputBox address, and app (verifying contract) address are defined in a single place and +/// not duplicated across component configs. +#[derive(Debug, Clone)] +pub struct L1Config { + /// L1 Ethereum RPC URL (e.g. for reading safe blocks and posting batch inputs). + pub eth_rpc_url: String, + /// InputBox contract address (same contract for ingesting direct inputs and for submitting batches). + pub input_box_address: Address, + /// Application / verifying contract address (used to discover InputBox and filter inputs). + pub app_address: Address, + /// Hex-encoded private key used by the batch submitter for posting batches to L1. + /// + /// `RunConfig` is responsible for resolving whether this comes from an inline + /// value or a key file; by the time `L1Config` is constructed this is always + /// the fully resolved private key. + pub batch_submitter_private_key: String, + /// EOA address of the batch submitter (derived from `batch_submitter_private_key`). + /// Inputs from this sender are batch submissions; all others are direct inputs. 
+ pub batch_submitter_address: Address, +} #[derive(Debug, Clone, Parser)] #[command( name = "sequencer", about = "Deterministic sequencer prototype with low-latency soft confirmations", version, - after_help = "Examples:\n sequencer --eth-rpc-url http://127.0.0.1:8545 --domain-chain-id 31337 --domain-verifying-contract 0x1111111111111111111111111111111111111111\n sequencer --http-addr 0.0.0.0:3000 --db-path ./sequencer.db --eth-rpc-url https://eth.example --domain-chain-id 1 --domain-verifying-contract 0x4444444444444444444444444444444444444444" + after_help = "\ +Examples: + sequencer \\ + --eth-rpc-url http://127.0.0.1:8545 \\ + --domain-chain-id 31337 \\ + --domain-verifying-contract 0x1111111111111111111111111111111111111111 \\ + --batch-submitter-private-key 0x0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + + sequencer \\ + --http-addr 0.0.0.0:3000 \\ + --db-path ./sequencer.db \\ + --eth-rpc-url https://eth.example \\ + --domain-chain-id 1 \\ + --domain-verifying-contract 0x4444444444444444444444444444444444444444 \\ + --batch-submitter-private-key 0x0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef\ +", + group( + ArgGroup::new("batch_submitter_key_source") + .args(&["batch_submitter_private_key", "batch_submitter_private_key_file"]) + .required(true) + .multiple(false) + ) )] pub struct RunConfig { #[arg(long, env = "SEQ_HTTP_ADDR", default_value = DEFAULT_HTTP_ADDR, value_parser = parse_non_empty_string)] @@ -31,12 +71,46 @@ pub struct RunConfig { #[arg(long, env = "SEQ_ETH_RPC_URL", value_parser = parse_non_empty_string)] pub eth_rpc_url: String, /// Error codes that trigger `get_logs` retries with a shorter block range. 
- #[arg(long, env = "SEQ_LONG_BLOCK_RANGE_ERROR_CODES", value_delimiter = ',', default_values = DEFAULT_LONG_BLOCK_RANGE_ERROR_CODES)] + #[arg(long, env = "SEQ_LONG_BLOCK_RANGE_ERROR_CODES", value_delimiter = ',', default_values = crate::partition::DEFAULT_LONG_BLOCK_RANGE_ERROR_CODES)] pub long_block_range_error_codes: Vec<String>, #[arg(long, env = "SEQ_DOMAIN_CHAIN_ID")] pub domain_chain_id: u64, #[arg(long, env = "SEQ_DOMAIN_VERIFYING_CONTRACT", value_parser = parse_address)] pub domain_verifying_contract: Address, + /// Hex-encoded private key used by the batch submitter for posting batches to L1. + /// Exactly one of this or `batch_submitter_private_key_file` must be set when a signer + /// is desired. + #[arg( + long, + env = "SEQ_BATCH_SUBMITTER_PRIVATE_KEY", + group = "batch_submitter_key_source" + )] + pub batch_submitter_private_key: Option<String>, + /// Path to a file whose first line contains the batch submitter private key. The clap + /// argument group rejects setting both this and `batch_submitter_private_key` at once. + #[arg( + long, + env = "SEQ_BATCH_SUBMITTER_PRIVATE_KEY_FILE", + group = "batch_submitter_key_source" + )] + pub batch_submitter_private_key_file: Option<String>, + + /// How often the batch submitter polls for new work when idle. + #[arg( + long, + env = "SEQ_BATCH_SUBMITTER_IDLE_POLL_INTERVAL_MS", + default_value = "5000" + )] + pub batch_submitter_idle_poll_interval_ms: u64, + + /// Number of blocks behind Latest that the batch submitter treats as confirmed. + /// The submitter scans only up to `Latest - depth`, and waits for the same depth after posting.
+ #[arg( + long, + env = "SEQ_BATCH_SUBMITTER_CONFIRMATION_DEPTH", + default_value = "0" + )] + pub batch_submitter_confirmation_depth: u64, } impl RunConfig { @@ -80,7 +154,12 @@ mod tests { #[test] fn run_config_requires_deployment_domain_inputs() { - let err = RunConfig::try_parse_from(["sequencer"]).expect_err("domain inputs are required"); + let err = RunConfig::try_parse_from([ + "sequencer", + "--batch-submitter-private-key", + "0x0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef", + ]) + .expect_err("domain inputs are required"); let message = err.to_string(); assert!(message.contains("--eth-rpc-url")); @@ -98,6 +177,8 @@ mod tests { "31337", "--domain-verifying-contract", "0x1111111111111111111111111111111111111111", + "--batch-submitter-private-key", + "0x0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef", ]) .expect("parse run config"); @@ -122,6 +203,8 @@ mod tests { "31337", "--domain-verifying-contract", "0x1111111111111111111111111111111111111111", + "--batch-submitter-private-key", + "0x0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef", ]) .expect("parse run config"); diff --git a/sequencer/src/inclusion_lane/catch_up.rs b/sequencer/src/inclusion_lane/catch_up.rs index 9f5e48d..f269322 100644 --- a/sequencer/src/inclusion_lane/catch_up.rs +++ b/sequencer/src/inclusion_lane/catch_up.rs @@ -1,6 +1,8 @@ // (c) Cartesi and individual authors (see AUTHORS) // SPDX-License-Identifier: Apache-2.0 (see LICENSE) +use alloy_primitives::Address; + use crate::storage::Storage; use sequencer_core::application::Application; use sequencer_core::l2_tx::SequencedL2Tx; @@ -12,16 +14,23 @@ const DEFAULT_CATCH_UP_PAGE_SIZE: usize = 256; pub(super) fn catch_up_application( app: &mut impl Application, storage: &mut Storage, + batch_submitter_address: Address, ) -> Result<(), CatchUpError> { - catch_up_application_paged(app, storage, DEFAULT_CATCH_UP_PAGE_SIZE) + catch_up_application_paged( + app, + storage, + 
batch_submitter_address, + DEFAULT_CATCH_UP_PAGE_SIZE, + ) } pub(super) fn catch_up_application_paged( app: &mut impl Application, storage: &mut Storage, + batch_submitter_address: Address, page_size: usize, ) -> Result<(), CatchUpError> { - let mut next_offset = app.executed_input_count(); + let mut next_offset = 0; let page_size = page_size.max(1); loop { @@ -37,7 +46,7 @@ pub(super) fn catch_up_application_paged( } for item in replay { - replay_sequenced_l2_tx(app, item)?; + replay_sequenced_l2_tx(app, batch_submitter_address, item)?; next_offset = next_offset.saturating_add(1); } } @@ -45,6 +54,7 @@ pub(super) fn catch_up_application_paged( fn replay_sequenced_l2_tx( app: &mut impl Application, + batch_submitter_address: Address, item: SequencedL2Tx, ) -> Result<(), CatchUpError> { match item { @@ -54,10 +64,16 @@ fn replay_sequenced_l2_tx( reason: err.to_string(), }) } - SequencedL2Tx::Direct(direct) => app - .execute_direct_input(direct.payload.as_slice()) - .map_err(|err| CatchUpError::ReplayDirectInputInternal { - reason: err.to_string(), - }), + SequencedL2Tx::Direct(direct) => { + if direct.sender == batch_submitter_address { + return Ok(()); + } + + app.execute_direct_input(&direct).map_err(|err| { + CatchUpError::ReplayDirectInputInternal { + reason: err.to_string(), + } + }) + } } } diff --git a/sequencer/src/inclusion_lane/config.rs b/sequencer/src/inclusion_lane/config.rs index 2fa73be..5ea70bc 100644 --- a/sequencer/src/inclusion_lane/config.rs +++ b/sequencer/src/inclusion_lane/config.rs @@ -3,19 +3,21 @@ use std::time::Duration; +use alloy_primitives::Address; use sequencer_core::application::Application; use sequencer_core::user_op::SignedUserOp; const DEFAULT_MAX_USER_OPS_PER_CHUNK: usize = 1024; -const DEFAULT_SAFE_DIRECT_BUFFER_CAPACITY: usize = 2048; +const DEFAULT_SAFE_INPUT_BUFFER_CAPACITY: usize = 2048; const DEFAULT_MAX_BATCH_OPEN: Duration = Duration::from_secs(2 * 60 * 60); const DEFAULT_MAX_BATCH_USER_OP_BYTES: usize = 1_048_576; // 1 
MiB const DEFAULT_IDLE_POLL_INTERVAL: Duration = Duration::from_millis(2); #[derive(Debug, Clone, Copy)] pub struct InclusionLaneConfig { + pub batch_submitter_address: Address, pub max_user_ops_per_chunk: usize, - pub safe_direct_buffer_capacity: usize, + pub safe_input_buffer_capacity: usize, pub max_batch_open: Duration, // Soft threshold for batch rotation. @@ -31,10 +33,11 @@ pub struct InclusionLaneConfig { } impl InclusionLaneConfig { - pub fn for_app() -> Self { + pub fn for_app(batch_submitter_address: Address) -> Self { Self { + batch_submitter_address, max_user_ops_per_chunk: DEFAULT_MAX_USER_OPS_PER_CHUNK, - safe_direct_buffer_capacity: DEFAULT_SAFE_DIRECT_BUFFER_CAPACITY, + safe_input_buffer_capacity: DEFAULT_SAFE_INPUT_BUFFER_CAPACITY, max_batch_open: DEFAULT_MAX_BATCH_OPEN, max_batch_user_op_bytes: DEFAULT_MAX_BATCH_USER_OP_BYTES .max(SignedUserOp::max_batch_metadata() + A::MAX_METHOD_PAYLOAD_BYTES), diff --git a/sequencer/src/inclusion_lane/error.rs b/sequencer/src/inclusion_lane/error.rs index afb2407..03333db 100644 --- a/sequencer/src/inclusion_lane/error.rs +++ b/sequencer/src/inclusion_lane/error.rs @@ -13,13 +13,13 @@ pub enum InclusionLaneError { #[source] source: CatchUpError, }, - #[error("cannot load next undrained direct-input index")] + #[error("cannot load next undrained safe-input index")] LoadNextUndrainedDirectInputIndex { #[source] source: rusqlite::Error, }, - #[error("cannot load safe direct inputs")] - LoadSafeDirectInputs { + #[error("cannot load safe inputs")] + LoadSafeInputs { #[source] source: rusqlite::Error, }, diff --git a/sequencer/src/inclusion_lane/lane.rs b/sequencer/src/inclusion_lane/lane.rs index 54523a2..d9f0fcb 100644 --- a/sequencer/src/inclusion_lane/lane.rs +++ b/sequencer/src/inclusion_lane/lane.rs @@ -8,8 +8,9 @@ use tokio::sync::mpsc; use tokio::task::JoinHandle; use crate::shutdown::ShutdownSignal; -use crate::storage::{DirectInputRange, Storage, StoredDirectInput, WriteHead}; +use 
crate::storage::{SafeInputRange, Storage, StoredSafeInput, WriteHead}; use sequencer_core::application::{AppError, Application, ExecutionOutcome}; +use sequencer_core::l2_tx::DirectInput; use sequencer_core::user_op::SignedUserOp; use super::catch_up::catch_up_application; @@ -52,8 +53,8 @@ impl InclusionLane { fn run_forever(&mut self) -> Result<(), InclusionLaneError> { self.run_catch_up()?; let mut included = Vec::with_capacity(self.config.max_user_ops_per_chunk.max(1)); - let mut safe_directs = Vec::with_capacity(self.config.safe_direct_buffer_capacity.max(1)); - let mut lane_state = self.load_or_initialize_lane_state(&mut safe_directs)?; + let mut safe_inputs = Vec::with_capacity(self.config.safe_input_buffer_capacity.max(1)); + let mut lane_state = self.load_or_initialize_lane_state(&mut safe_inputs)?; loop { if self.shutdown.is_shutdown_requested() { @@ -62,7 +63,7 @@ impl InclusionLane { } let advanced_safe_frontier = - self.maybe_advance_safe_frontier(&mut lane_state, &mut safe_directs)?; + self.maybe_advance_safe_frontier(&mut lane_state, &mut safe_inputs)?; let included_user_op_count = self.process_user_op_chunk(&mut lane_state.head, &mut included)?; @@ -77,20 +78,24 @@ impl InclusionLane { } fn run_catch_up(&mut self) -> Result<(), InclusionLaneError> { - catch_up_application(&mut self.app, &mut self.storage) - .map_err(|source| InclusionLaneError::CatchUp { source }) + catch_up_application( + &mut self.app, + &mut self.storage, + self.config.batch_submitter_address, + ) + .map_err(|source| InclusionLaneError::CatchUp { source }) } fn load_or_initialize_lane_state( &mut self, - safe_directs: &mut Vec, + safe_inputs: &mut Vec, ) -> Result { let next_safe_input_index = self .storage - .load_next_undrained_direct_input_index() + .load_next_undrained_safe_input_index() .map_err(|source| InclusionLaneError::LoadNextUndrainedDirectInputIndex { source })?; - let last_drained_direct_range = DirectInputRange::empty_at(next_safe_input_index); + let 
last_drained_direct_range = SafeInputRange::empty_at(next_safe_input_index); if let Some(head) = self .storage .load_open_state() @@ -105,16 +110,16 @@ impl InclusionLane { let frontier = self .storage .load_safe_frontier() - .map_err(|source| InclusionLaneError::LoadSafeDirectInputs { source })?; + .map_err(|source| InclusionLaneError::LoadSafeInputs { source })?; assert!( frontier.end_exclusive >= last_drained_direct_range.end_exclusive, - "safe direct-input head regressed during lane initialization: safe_end={}, next={}", + "safe-input head regressed during lane initialization: safe_end={}, next={}", frontier.end_exclusive, last_drained_direct_range.end_exclusive ); let leading_direct_range = last_drained_direct_range.advance_to(frontier.end_exclusive); - self.execute_safe_direct_inputs_range(leading_direct_range, safe_directs)?; + self.execute_safe_inputs_range(leading_direct_range, safe_inputs)?; let head = self .storage .initialize_open_state(frontier.safe_block, leading_direct_range) @@ -153,15 +158,15 @@ impl InclusionLane { fn maybe_advance_safe_frontier( &mut self, lane_state: &mut LaneState, - safe_directs: &mut Vec, + safe_inputs: &mut Vec, ) -> Result { let frontier = self .storage .load_safe_frontier() - .map_err(|source| InclusionLaneError::LoadSafeDirectInputs { source })?; + .map_err(|source| InclusionLaneError::LoadSafeInputs { source })?; assert!( frontier.end_exclusive >= lane_state.last_drained_direct_range.end_exclusive, - "safe direct-input head regressed: safe_end={}, next={}", + "safe-input head regressed: safe_end={}, next={}", frontier.end_exclusive, lane_state.last_drained_direct_range.end_exclusive ); @@ -172,7 +177,7 @@ impl InclusionLane { let leading_direct_range = lane_state .last_drained_direct_range .advance_to(frontier.end_exclusive); - self.execute_safe_direct_inputs_range(leading_direct_range, safe_directs)?; + self.execute_safe_inputs_range(leading_direct_range, safe_inputs)?; self.close_frame_only( &mut lane_state.head, 
frontier.safe_block, @@ -195,19 +200,19 @@ impl InclusionLane { }) } - fn execute_safe_direct_inputs_range( + fn execute_safe_inputs_range( &mut self, - direct_range: DirectInputRange, - chunk: &mut Vec, - ) -> Result { - let max_chunk_len = self.config.safe_direct_buffer_capacity.max(1) as u64; + direct_range: SafeInputRange, + chunk: &mut Vec, + ) -> Result { + let max_chunk_len = self.config.safe_input_buffer_capacity.max(1) as u64; let mut chunk_start = direct_range.start_inclusive; while chunk_start < direct_range.end_exclusive { let chunk_end_exclusive = direct_range .end_exclusive .min(chunk_start.saturating_add(max_chunk_len)); - self.load_safe_direct_inputs_chunk(chunk_start, chunk_end_exclusive, chunk)?; - self.execute_safe_direct_inputs_chunk(chunk.as_slice())?; + self.load_safe_inputs_chunk(chunk_start, chunk_end_exclusive, chunk)?; + self.execute_safe_inputs_chunk(chunk.as_slice())?; chunk_start = chunk_end_exclusive; } @@ -228,32 +233,41 @@ impl InclusionLane { &mut self, head: &mut WriteHead, next_safe_block: u64, - leading_direct_range: DirectInputRange, + leading_direct_range: SafeInputRange, ) -> Result<(), InclusionLaneError> { self.storage .close_frame_only(head, next_safe_block, leading_direct_range) .map_err(|source| InclusionLaneError::CloseFrameRotate { source }) } - fn load_safe_direct_inputs_chunk( + fn load_safe_inputs_chunk( &mut self, start_inclusive: u64, end_exclusive: u64, - chunk: &mut Vec, + chunk: &mut Vec, ) -> Result<(), InclusionLaneError> { chunk.clear(); self.storage .fill_safe_inputs(start_inclusive, end_exclusive, chunk) - .map_err(|source| InclusionLaneError::LoadSafeDirectInputs { source }) + .map_err(|source| InclusionLaneError::LoadSafeInputs { source }) } - fn execute_safe_direct_inputs_chunk( + fn execute_safe_inputs_chunk( &mut self, - chunk: &[StoredDirectInput], + chunk: &[StoredSafeInput], ) -> Result<(), InclusionLaneError> { for input in chunk { + if input.sender == self.config.batch_submitter_address { + 
continue; + } + let direct_input = DirectInput { + sender: input.sender, + block_number: input.block_number, + payload: input.payload.clone(), + }; + self.app - .execute_direct_input(input.payload.as_slice()) + .execute_direct_input(&direct_input) .map_err(|source| InclusionLaneError::ExecuteDirectInput { source })?; } Ok(()) @@ -357,6 +371,6 @@ fn user_op_count_to_bytes(user_op_count: u64) -> u64 { } struct LaneState { - last_drained_direct_range: DirectInputRange, + last_drained_direct_range: SafeInputRange, head: WriteHead, } diff --git a/sequencer/src/inclusion_lane/tests.rs b/sequencer/src/inclusion_lane/tests.rs index 17e6801..c6358c3 100644 --- a/sequencer/src/inclusion_lane/tests.rs +++ b/sequencer/src/inclusion_lane/tests.rs @@ -2,6 +2,8 @@ // SPDX-License-Identifier: Apache-2.0 (see LICENSE) use std::collections::HashMap; +use std::sync::Arc; +use std::sync::atomic::{AtomicU64, Ordering}; use std::time::{Duration, SystemTime}; use alloy_primitives::{Address, Signature, U256}; @@ -11,9 +13,9 @@ use tempfile::TempDir; use tokio::sync::{mpsc, oneshot}; use crate::shutdown::ShutdownSignal; -use crate::storage::{DirectInputRange, Storage, StoredDirectInput}; +use crate::storage::{SafeInputRange, Storage, StoredSafeInput}; use sequencer_core::application::{AppError, Application, InvalidReason}; -use sequencer_core::l2_tx::{SequencedL2Tx, ValidUserOp}; +use sequencer_core::l2_tx::{DirectInput, SequencedL2Tx, ValidUserOp}; use sequencer_core::user_op::{SignedUserOp, UserOp}; use super::catch_up::catch_up_application_paged; @@ -54,7 +56,7 @@ impl Application for TestApp { Ok(()) } - fn execute_direct_input(&mut self, _payload: &[u8]) -> Result<(), AppError> { + fn execute_direct_input(&mut self, _input: &DirectInput) -> Result<(), AppError> { self.executed_input_count = self.executed_input_count.saturating_add(1); Ok(()) } @@ -71,8 +73,15 @@ struct TestDb { #[derive(Debug, Clone, PartialEq, Eq)] enum ReplayEvent { - UserOp { sender: Address, data: Vec }, - 
DirectInput(Vec<u8>), + UserOp { + sender: Address, + data: Vec<u8>, + }, + DirectInput { + sender: Address, + block_number: u64, + payload: Vec<u8>, + }, +} struct ReplayRecordingApp { @@ -80,6 +89,40 @@ struct ReplayRecordingApp { replayed: Vec<ReplayEvent>, } +struct SharedCountingApp { + executed_direct_inputs: Arc<AtomicU64>, +} + +impl Application for SharedCountingApp { + const MAX_METHOD_PAYLOAD_BYTES: usize = WALLET_MAX_METHOD_PAYLOAD_BYTES; + + fn current_user_nonce(&self, _sender: Address) -> u32 { + 0 + } + + fn current_user_balance(&self, _sender: Address) -> U256 { + U256::MAX + } + + fn validate_user_op( + &self, + _sender: Address, + _user_op: &UserOp, + _current_fee: u64, + ) -> Result<(), InvalidReason> { + Ok(()) + } + + fn execute_valid_user_op(&mut self, _user_op: &ValidUserOp) -> Result<(), AppError> { + Ok(()) + } + + fn execute_direct_input(&mut self, _input: &DirectInput) -> Result<(), AppError> { + self.executed_direct_inputs.fetch_add(1, Ordering::SeqCst); + Ok(()) + } +} + impl ReplayRecordingApp { fn with_executed_input_count(executed_input_count: u64) -> Self { Self { @@ -124,9 +167,12 @@ impl Application for ReplayRecordingApp { Ok(()) } - fn execute_direct_input(&mut self, payload: &[u8]) -> Result<(), AppError> { - self.replayed - .push(ReplayEvent::DirectInput(payload.to_vec())); + fn execute_direct_input(&mut self, input: &DirectInput) -> Result<(), AppError> { + self.replayed.push(ReplayEvent::DirectInput { + sender: input.sender, + block_number: input.block_number, + payload: input.payload.clone(), + }); self.executed_input_count = self.executed_input_count.saturating_add(1); Ok(()) } @@ -150,8 +196,9 @@ fn temp_db(name: &str) -> TestDb { fn default_test_config() -> InclusionLaneConfig { InclusionLaneConfig { + batch_submitter_address: Address::from_slice(&[0xff; 20]), max_user_ops_per_chunk: 16, - safe_direct_buffer_capacity: 16, + safe_input_buffer_capacity: 16, max_batch_open: Duration::MAX, max_batch_user_op_bytes: 1_000_000_000, idle_poll_interval:
Duration::from_millis(2), @@ -212,7 +259,7 @@ fn make_pending_user_op( fn seed_replay_fixture(db_path: &str) -> Vec { let mut storage = Storage::open(db_path, "NORMAL").expect("open storage"); let mut head = storage - .initialize_open_state(0, DirectInputRange::empty_at(0)) + .initialize_open_state(0, SafeInputRange::empty_at(0)) .expect("initialize open state"); let user_op_a = make_pending_user_op(0x51).0; @@ -221,16 +268,17 @@ fn seed_replay_fixture(db_path: &str) -> Vec { .append_user_ops_chunk(&mut head, &[user_op_a, user_op_b]) .expect("append first frame user ops"); storage - .append_safe_direct_inputs( + .append_safe_inputs( 10, - &[StoredDirectInput { + &[StoredSafeInput { + sender: Address::ZERO, payload: vec![0xaa], block_number: 10, }], ) .expect("append first direct input"); storage - .close_frame_only(&mut head, 10, DirectInputRange::new(0, 1)) + .close_frame_only(&mut head, 10, SafeInputRange::new(0, 1)) .expect("close first frame"); let user_op_c = make_pending_user_op(0x53).0; @@ -238,29 +286,31 @@ fn seed_replay_fixture(db_path: &str) -> Vec { .append_user_ops_chunk(&mut head, &[user_op_c]) .expect("append second frame user op"); storage - .append_safe_direct_inputs( + .append_safe_inputs( 20, - &[StoredDirectInput { + &[StoredSafeInput { + sender: Address::ZERO, payload: vec![0xbb], block_number: 20, }], ) .expect("append second direct input"); storage - .close_frame_only(&mut head, 20, DirectInputRange::new(1, 2)) + .close_frame_only(&mut head, 20, SafeInputRange::new(1, 2)) .expect("close second frame"); storage - .append_safe_direct_inputs( + .append_safe_inputs( 30, - &[StoredDirectInput { + &[StoredSafeInput { + sender: Address::ZERO, payload: vec![0xcc], block_number: 30, }], ) .expect("append third direct input"); storage - .close_frame_only(&mut head, 30, DirectInputRange::new(2, 3)) + .close_frame_only(&mut head, 30, SafeInputRange::new(2, 3)) .expect("close third frame"); vec![ @@ -272,13 +322,25 @@ fn seed_replay_fixture(db_path: &str) 
-> Vec { sender: Address::from_slice(&[0x52; 20]), data: vec![0x52; 4], }, - ReplayEvent::DirectInput(vec![0xaa]), + ReplayEvent::DirectInput { + sender: Address::ZERO, + block_number: 10, + payload: vec![0xaa], + }, ReplayEvent::UserOp { sender: Address::from_slice(&[0x53; 20]), data: vec![0x53; 4], }, - ReplayEvent::DirectInput(vec![0xbb]), - ReplayEvent::DirectInput(vec![0xcc]), + ReplayEvent::DirectInput { + sender: Address::ZERO, + block_number: 20, + payload: vec![0xbb], + }, + ReplayEvent::DirectInput { + sender: Address::ZERO, + block_number: 30, + payload: vec![0xcc], + }, ] } @@ -295,7 +357,7 @@ fn read_frame_direct_count(db_path: &str, batch_index: i64, frame_in_batch: i64) "SELECT COUNT(*) FROM sequenced_l2_txs WHERE batch_index = ?1 AND frame_in_batch = ?2 - AND direct_input_index IS NOT NULL", + AND safe_input_index IS NOT NULL", params![batch_index, frame_in_batch], |row| row.get(0), ) @@ -356,9 +418,10 @@ async fn direct_inputs_close_frame_and_persist_drain() { Storage::open(db.path.as_str(), "NORMAL").expect("open feeder storage"); feeder_storage - .append_safe_direct_inputs( + .append_safe_inputs( 10, - &[StoredDirectInput { + &[StoredSafeInput { + sender: Address::ZERO, payload: vec![0xaa], block_number: 10, }], @@ -376,24 +439,85 @@ async fn direct_inputs_close_frame_and_persist_drain() { assert_eq!(frames_count, 2); } +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn sequenced_safe_inputs_are_drained_but_not_executed() { + let db = temp_db("sequenced-safe-inputs-skip"); + let batch_submitter_address = Address::from([0xfe; 20]); + let executed_direct_inputs = Arc::new(AtomicU64::new(0)); + let storage = Storage::open(db.path.as_str(), "NORMAL").expect("open storage"); + let shutdown = ShutdownSignal::default(); + let (tx, lane_handle) = InclusionLane::start( + 128, + shutdown.clone(), + SharedCountingApp { + executed_direct_inputs: executed_direct_inputs.clone(), + }, + storage, + InclusionLaneConfig { + 
batch_submitter_address, + ..default_test_config() + }, + ); + let initialized = wait_until(Duration::from_secs(2), || { + let mut storage = Storage::open(db.path.as_str(), "NORMAL").expect("open storage"); + storage + .load_open_state() + .expect("load open state") + .is_some() + }) + .await; + assert!(initialized, "lane should initialize open state"); + + let mut feeder_storage = + Storage::open(db.path.as_str(), "NORMAL").expect("open feeder storage"); + feeder_storage + .append_safe_inputs( + 10, + &[StoredSafeInput { + sender: batch_submitter_address, + payload: vec![0xaa], + block_number: 10, + }], + ) + .expect("append safe batch-submitter input"); + + let drained = wait_until(Duration::from_secs(2), || { + read_frame_direct_count(db.path.as_str(), 0, 1) == 1 + }) + .await; + drop(tx); + shutdown_lane(&shutdown, lane_handle).await; + + assert!( + drained, + "expected sequenced safe input to be drained into frame 1" + ); + assert_eq!( + executed_direct_inputs.load(Ordering::SeqCst), + 0, + "batch-submitter safe input should be skipped by the local app" + ); +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn direct_inputs_are_paginated_by_buffer_capacity() { let db = temp_db("directs-pagination"); let mut config = default_test_config(); - config.safe_direct_buffer_capacity = 2; + config.safe_input_buffer_capacity = 2; let (_tx, shutdown, lane_handle) = start_lane(db.path.as_str(), config).await; let mut feeder_storage = Storage::open(db.path.as_str(), "NORMAL").expect("open feeder storage"); let mut directs = Vec::new(); for index in 0..5_u64 { - directs.push(StoredDirectInput { - payload: vec![index as u8], + directs.push(StoredSafeInput { + sender: Address::ZERO, + payload: vec![0x10 + index as u8], block_number: 10, }); } feeder_storage - .append_safe_direct_inputs(10, directs.as_slice()) + .append_safe_inputs(10, directs.as_slice()) .expect("append safe direct inputs"); let drained = wait_until(Duration::from_secs(2), || { @@ -408,16 
+532,17 @@ async fn direct_inputs_are_paginated_by_buffer_capacity() { } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] -async fn safe_directs_already_available_are_sequenced_before_later_user_ops() { +async fn safe_inputs_already_available_are_sequenced_before_later_user_ops() { let db = temp_db("directs-before-later-userops"); let (tx, shutdown, lane_handle) = start_lane(db.path.as_str(), default_test_config()).await; let mut feeder_storage = Storage::open(db.path.as_str(), "NORMAL").expect("open feeder storage"); feeder_storage - .append_safe_direct_inputs( + .append_safe_inputs( 10, - &[StoredDirectInput { + &[StoredSafeInput { + sender: Address::ZERO, payload: vec![0xaa], block_number: 10, }], @@ -565,23 +690,25 @@ fn catch_up_replays_multiple_pages() { let mut storage = Storage::open(db.path.as_str(), "NORMAL").expect("open storage"); let mut app = ReplayRecordingApp::default(); - catch_up_application_paged(&mut app, &mut storage, 2).expect("catch up in pages"); + catch_up_application_paged(&mut app, &mut storage, Address::from([0xff; 20]), 2) + .expect("catch up in pages"); assert_eq!(app.replayed, expected); assert_eq!(app.executed_input_count(), expected.len() as u64); } #[test] -fn catch_up_starts_from_executed_input_count_offset() { +fn catch_up_replays_from_storage_even_when_app_reports_executed_inputs() { let db = temp_db("catch-up-offset"); let expected = seed_replay_fixture(db.path.as_str()); let mut storage = Storage::open(db.path.as_str(), "NORMAL").expect("open storage"); let mut app = ReplayRecordingApp::with_executed_input_count(3); - catch_up_application_paged(&mut app, &mut storage, 2).expect("catch up from offset"); + catch_up_application_paged(&mut app, &mut storage, Address::from([0xff; 20]), 2) + .expect("catch up from storage"); - assert_eq!(app.replayed, expected[3..].to_vec()); - assert_eq!(app.executed_input_count(), expected.len() as u64); + assert_eq!(app.replayed, expected); + assert_eq!(app.executed_input_count(), 3 + 
expected.len() as u64); } #[test] @@ -591,7 +718,8 @@ fn catch_up_handles_mixed_user_ops_and_direct_inputs_across_page_boundary() { let mut storage = Storage::open(db.path.as_str(), "NORMAL").expect("open storage"); let mut app = ReplayRecordingApp::default(); - catch_up_application_paged(&mut app, &mut storage, 4).expect("catch up across page boundary"); + catch_up_application_paged(&mut app, &mut storage, Address::from([0xff; 20]), 4) + .expect("catch up across page boundary"); assert_eq!(app.replayed, expected); } @@ -603,7 +731,7 @@ fn catch_up_load_error_reports_offset() { Storage::open_without_migrations(db.path.as_str(), "NORMAL").expect("open raw storage"); let mut app = ReplayRecordingApp::default(); - let err = catch_up_application_paged(&mut app, &mut storage, 2) + let err = catch_up_application_paged(&mut app, &mut storage, Address::from([0xff; 20]), 2) .expect_err("catch up should fail without schema"); assert!(matches!(err, CatchUpError::LoadReplay { offset: 0, .. })); diff --git a/sequencer/src/input_reader/mod.rs b/sequencer/src/input_reader/mod.rs index 6c05d97..46fc0d9 100644 --- a/sequencer/src/input_reader/mod.rs +++ b/sequencer/src/input_reader/mod.rs @@ -1,10 +1,9 @@ // (c) Cartesi and individual authors (see AUTHORS) // SPDX-License-Identifier: Apache-2.0 (see LICENSE) -//! Reads safe (direct) inputs from a reference source (e.g. InputBox contract) and appends them +//! Reads safe InputBox inputs from a reference source (e.g. InputBox contract) and appends them //! to sequencer storage. Minimal design: no epochs or consensus; flat contiguous indices only. 
-mod logs; mod reader; pub use reader::{InputReader, InputReaderConfig, InputReaderError}; diff --git a/sequencer/src/input_reader/reader.rs b/sequencer/src/input_reader/reader.rs index 6dafabd..f1155c0 100644 --- a/sequencer/src/input_reader/reader.rs +++ b/sequencer/src/input_reader/reader.rs @@ -6,25 +6,24 @@ use std::time::Duration; use alloy::eips::BlockNumberOrTag::Safe; use alloy::providers::Provider; use alloy::providers::ProviderBuilder; -use alloy_primitives::Address; +use alloy_primitives::{Address, U256}; use cartesi_rollups_contracts::application::Application; use cartesi_rollups_contracts::input_box::InputBox; use tokio::task::JoinHandle; use tracing::{info, warn}; -use super::logs::get_input_added_events; +use crate::partition::{decode_evm_advance_input, get_input_added_events}; use crate::shutdown::ShutdownSignal; -use crate::storage::{Storage, StorageOpenError, StoredDirectInput}; +use crate::storage::{Storage, StorageOpenError, StoredSafeInput}; const SQLITE_SYNCHRONOUS_PRAGMA: &str = "NORMAL"; #[derive(Debug, Clone)] pub struct InputReaderConfig { pub rpc_url: String, - pub input_box_address: Address, - pub app_address_filter: Address, - pub genesis_block: u64, + pub app_address: Address, pub poll_interval: Duration, + /// Error codes that trigger `get_logs` retries with a shorter block range. 
pub long_block_range_error_codes: Vec, } @@ -42,39 +41,32 @@ pub enum InputReaderError { pub struct InputReader { config: InputReaderConfig, + input_box_address: Address, + genesis_block: u64, db_path: String, shutdown: ShutdownSignal, } impl InputReader { - pub async fn discover_input_box( - rpc_url: &str, - application_address: Address, - ) -> Result { + pub async fn new( + db_path: impl Into, + shutdown: ShutdownSignal, + config: InputReaderConfig, + ) -> Result { let provider = ProviderBuilder::new() - .connect(rpc_url) + .connect(config.rpc_url.as_str()) .await .map_err(|e| InputReaderError::Provider(e.to_string()))?; - let application = Application::new(application_address, &provider); + let application = Application::new(config.app_address, &provider); let data_availability = application .getDataAvailability() .call() .await .map_err(|e| InputReaderError::Provider(e.to_string()))?; + let input_box_address = decode_input_box_address(&data_availability)?; - decode_input_box_address(&data_availability) - } - - pub async fn discover_input_box_deployment_block( - rpc_url: &str, - input_box_address: Address, - ) -> Result { - let provider = ProviderBuilder::new() - .connect(rpc_url) - .await - .map_err(|e| InputReaderError::Provider(e.to_string()))?; let input_box = InputBox::new(input_box_address, &provider); - input_box + let genesis_block = input_box .getDeploymentBlockNumber() .call() .await @@ -84,39 +76,54 @@ impl InputReader { InputReaderError::Provider( "input box deployment block number did not fit into u64".to_string(), ) - }) + })?; + + Ok(Self::from_parts( + config, + input_box_address, + genesis_block, + db_path.into(), + shutdown, + )) } - pub fn new(config: InputReaderConfig, db_path: String, shutdown: ShutdownSignal) -> Self { + fn from_parts( + config: InputReaderConfig, + input_box_address: Address, + genesis_block: u64, + db_path: String, + shutdown: ShutdownSignal, + ) -> Self { Self { config, + input_box_address, + genesis_block, db_path, 
shutdown, } } - pub fn start( - db_path: &str, - config: InputReaderConfig, - shutdown: ShutdownSignal, - ) -> Result>, StorageOpenError> { - let _ = Storage::open(db_path, SQLITE_SYNCHRONOUS_PRAGMA)?; - let reader = Self::new(config, db_path.to_string(), shutdown); - Ok(tokio::spawn(async move { reader.run_forever().await })) + pub fn input_box_address(&self) -> Address { + self.input_box_address } - pub async fn sync_to_current_safe_head( - db_path: &str, - config: InputReaderConfig, - ) -> Result<(), InputReaderError> { - let mut reader = Self::new(config, db_path.to_string(), ShutdownSignal::default()); - reader.bootstrap_safe_head().await?; + pub fn genesis_block(&self) -> u64 { + self.genesis_block + } + + pub fn start(self) -> Result>, StorageOpenError> { + let _ = Storage::open(self.db_path.as_str(), SQLITE_SYNCHRONOUS_PRAGMA)?; + Ok(tokio::spawn(async move { self.run_forever().await })) + } + + pub async fn sync_to_current_safe_head(&mut self) -> Result<(), InputReaderError> { + self.bootstrap_safe_head().await?; let provider = ProviderBuilder::new() - .connect(reader.config.rpc_url.as_str()) + .connect(self.config.rpc_url.as_str()) .await .map_err(|e| InputReaderError::Provider(e.to_string()))?; - reader.advance_once(&provider).await + self.advance_once(&provider).await } async fn run_forever(mut self) -> Result<(), InputReaderError> { @@ -154,6 +161,8 @@ impl InputReader { let current_safe_block = latest_safe_block(provider).await?; let previous_safe_block = self.current_safe_block().await?; + // If our persisted safe head is already at the current safe frontier, + // there is nothing new to scan. 
if current_safe_block <= previous_safe_block { return Ok(()); } @@ -161,11 +170,11 @@ impl InputReader { let start_block = previous_safe_block + 1; let events = get_input_added_events( provider, - self.config.app_address_filter, - &self.config.input_box_address, + self.config.app_address, + &self.input_box_address, start_block, current_safe_block, - &self.config.long_block_range_error_codes, + self.config.long_block_range_error_codes.as_slice(), ) .await .map_err(|errs| { @@ -178,19 +187,26 @@ impl InputReader { )) })?; - let batch: Vec = events - .into_iter() - .map(|(event, log)| { - let block_number = log.block_number.ok_or_else(|| { - InputReaderError::Provider("InputAdded log missing block_number".to_string()) - })?; - - Ok(StoredDirectInput { - payload: event.input.to_vec(), - block_number, - }) - }) - .collect::, InputReaderError>>()?; + let mut batch = Vec::with_capacity(events.len()); + for (event, log) in events { + let block_number = log.block_number.ok_or_else(|| { + InputReaderError::Provider("InputAdded log missing block_number".to_string()) + })?; + let evm_advance = decode_evm_advance_input(event.input.as_ref()) + .map_err(InputReaderError::Provider)?; + assert_eq!( + evm_advance.blockNumber, + U256::from(block_number), + "InputAdded block number mismatch: log={block_number}, payload={}", + evm_advance.blockNumber + ); + + batch.push(StoredSafeInput { + sender: evm_advance.msgSender, + payload: evm_advance.payload.into(), + block_number, + }); + } info!( block_range = %format!("{}..={}", start_block, current_safe_block), @@ -198,8 +214,7 @@ impl InputReader { "appending safe inputs" ); - self.append_safe_direct_inputs(current_safe_block, batch) - .await + self.append_safe_inputs(current_safe_block, batch).await } async fn current_safe_block(&self) -> Result { @@ -214,7 +229,7 @@ impl InputReader { async fn bootstrap_safe_head(&self) -> Result<(), InputReaderError> { let db_path = self.db_path.clone(); - let minimum_safe_block = 
self.config.genesis_block.saturating_sub(1); + let minimum_safe_block = self.genesis_block.saturating_sub(1); tokio::task::spawn_blocking(move || { let mut storage = Storage::open(&db_path, SQLITE_SYNCHRONOUS_PRAGMA)?; storage @@ -225,16 +240,16 @@ impl InputReader { .map_err(|err| InputReaderError::Join(err.to_string()))? } - async fn append_safe_direct_inputs( + async fn append_safe_inputs( &self, current_safe_block: u64, - batch: Vec, + batch: Vec, ) -> Result<(), InputReaderError> { let db_path = self.db_path.clone(); tokio::task::spawn_blocking(move || { let mut storage = Storage::open(&db_path, SQLITE_SYNCHRONOUS_PRAGMA)?; storage - .append_safe_direct_inputs(current_safe_block, &batch) + .append_safe_inputs(current_safe_block, &batch) .map_err(InputReaderError::from) }) .await @@ -268,6 +283,27 @@ mod tests { use alloy::node_bindings::Anvil; use tempfile::NamedTempFile; + fn test_reader( + db_path: String, + rpc_url: String, + genesis_block: u64, + poll_interval: Duration, + shutdown: ShutdownSignal, + ) -> InputReader { + InputReader::from_parts( + InputReaderConfig { + rpc_url, + app_address: Address::ZERO, + poll_interval, + long_block_range_error_codes: Vec::new(), + }, + Address::ZERO, + genesis_block, + db_path, + shutdown, + ) + } + fn require_anvil_tests() -> bool { std::env::var_os("RUN_ANVIL_TESTS").is_some() } @@ -276,19 +312,14 @@ mod tests { async fn start_then_request_shutdown_joins_with_ok() { let db_file = NamedTempFile::new().expect("temp file"); let shutdown = ShutdownSignal::default(); - let handle = InputReader::start( - db_file.path().to_string_lossy().as_ref(), - InputReaderConfig { - rpc_url: "http://127.0.0.1:0".to_string(), - input_box_address: Address::ZERO, - app_address_filter: Address::ZERO, - genesis_block: 0, - poll_interval: Duration::from_millis(20), - long_block_range_error_codes: vec![], - }, + let reader = test_reader( + db_file.path().to_string_lossy().into_owned(), + "http://127.0.0.1:0".to_string(), + 0, + 
Duration::from_millis(20), shutdown.clone(), - ) - .expect("start input reader"); + ); + let handle = reader.start().expect("start input reader"); shutdown.request_shutdown(); let join_result = tokio::time::timeout(Duration::from_secs(2), handle).await; @@ -309,19 +340,14 @@ mod tests { let anvil = Anvil::default().block_time(1).timeout(30_000).spawn(); let shutdown = ShutdownSignal::default(); let db_file = NamedTempFile::new().expect("temp file"); - let handle = InputReader::start( - db_file.path().to_string_lossy().as_ref(), - InputReaderConfig { - rpc_url: anvil.endpoint_url().to_string(), - input_box_address: Address::ZERO, - app_address_filter: Address::ZERO, - genesis_block: 0, - poll_interval: Duration::from_millis(50), - long_block_range_error_codes: vec![], - }, + let reader = test_reader( + db_file.path().to_string_lossy().into_owned(), + anvil.endpoint_url().to_string(), + 0, + Duration::from_millis(50), shutdown.clone(), - ) - .expect("start input reader"); + ); + let handle = reader.start().expect("start input reader"); tokio::time::sleep(Duration::from_millis(200)).await; shutdown.request_shutdown(); @@ -343,16 +369,11 @@ mod tests { let anvil = Anvil::default().block_time(1).timeout(30_000).spawn(); let db_file = NamedTempFile::new().expect("temp file"); - let mut reader = InputReader::new( - InputReaderConfig { - rpc_url: anvil.endpoint_url().to_string(), - input_box_address: Address::ZERO, - app_address_filter: Address::ZERO, - genesis_block: 0, - poll_interval: Duration::from_secs(1), - long_block_range_error_codes: vec![], - }, + let mut reader = test_reader( db_file.path().to_string_lossy().into_owned(), + anvil.endpoint_url().to_string(), + 0, + Duration::from_secs(1), ShutdownSignal::default(), ); let provider = alloy::providers::ProviderBuilder::new() @@ -378,16 +399,11 @@ mod tests { async fn advance_once_with_genesis_block_uses_genesis_as_effective_prev() { let db_file = NamedTempFile::new().expect("temp file"); let genesis_block = 2_u64; 
- let reader = InputReader::new( - InputReaderConfig { - rpc_url: "http://127.0.0.1:0".to_string(), - input_box_address: Address::ZERO, - app_address_filter: Address::ZERO, - genesis_block, - poll_interval: Duration::from_secs(1), - long_block_range_error_codes: vec![], - }, + let reader = test_reader( db_file.path().to_string_lossy().into_owned(), + "http://127.0.0.1:0".to_string(), + genesis_block, + Duration::from_secs(1), ShutdownSignal::default(), ); @@ -404,19 +420,15 @@ mod tests { async fn sync_to_current_safe_head_with_genesis_block_bootstraps_safe_head() { let db_file = NamedTempFile::new().expect("temp file"); let genesis_block = 5_u64; + let mut reader = test_reader( + db_file.path().to_string_lossy().into_owned(), + "http://127.0.0.1:0".to_string(), + genesis_block, + Duration::from_secs(1), + ShutdownSignal::default(), + ); - let result = InputReader::sync_to_current_safe_head( - db_file.path().to_string_lossy().as_ref(), - InputReaderConfig { - rpc_url: "http://127.0.0.1:0".to_string(), - input_box_address: Address::ZERO, - app_address_filter: Address::ZERO, - genesis_block, - poll_interval: Duration::from_secs(1), - long_block_range_error_codes: vec![], - }, - ) - .await; + let result = reader.sync_to_current_safe_head().await; assert!(matches!(result, Err(InputReaderError::Provider(_)))); @@ -442,19 +454,14 @@ mod tests { let db_path = db_file.path().to_string_lossy().into_owned(); let mut storage = Storage::open(&db_path, SQLITE_SYNCHRONOUS_PRAGMA).expect("open storage"); storage - .append_safe_direct_inputs(1000, &[]) + .append_safe_inputs(1000, &[]) .expect("set safe head ahead of chain"); - let mut reader = InputReader::new( - InputReaderConfig { - rpc_url: anvil.endpoint_url().to_string(), - input_box_address: Address::ZERO, - app_address_filter: Address::ZERO, - genesis_block: 0, - poll_interval: Duration::from_secs(1), - long_block_range_error_codes: vec![], - }, + let mut reader = test_reader( db_path, + anvil.endpoint_url().to_string(), + 
0, + Duration::from_secs(1), ShutdownSignal::default(), ); let provider = alloy::providers::ProviderBuilder::new() diff --git a/sequencer/src/l2_tx_feed/feed.rs b/sequencer/src/l2_tx_feed/feed.rs index abece8a..15c5e49 100644 --- a/sequencer/src/l2_tx_feed/feed.rs +++ b/sequencer/src/l2_tx_feed/feed.rs @@ -3,7 +3,9 @@ use std::time::Duration; +use alloy_primitives::Address; pub use sequencer_core::broadcast::BroadcastTxMessage; +use sequencer_core::l2_tx::SequencedL2Tx; use tokio::sync::mpsc; use super::{SubscribeError, SubscriptionError}; @@ -14,6 +16,7 @@ use crate::storage::Storage; pub struct L2TxFeedConfig { pub idle_poll_interval: Duration, pub page_size: usize, + pub batch_submitter_address: Option

, } #[derive(Clone)] @@ -21,6 +24,7 @@ pub struct L2TxFeed { db_path: String, page_size: usize, idle_poll_interval: Duration, + batch_submitter_address: Option
, shutdown: ShutdownSignal, } @@ -41,6 +45,7 @@ impl Default for L2TxFeedConfig { Self { idle_poll_interval: DEFAULT_IDLE_POLL_INTERVAL, page_size: DEFAULT_PAGE_SIZE, + batch_submitter_address: None, } } } @@ -51,6 +56,7 @@ impl L2TxFeed { db_path, page_size: config.page_size.max(1), idle_poll_interval: config.idle_poll_interval, + batch_submitter_address: config.batch_submitter_address, shutdown, } } @@ -74,12 +80,14 @@ impl L2TxFeed { let db_path = self.db_path.clone(); let page_size = self.page_size; let idle_poll_interval = self.idle_poll_interval; + let batch_submitter_address = self.batch_submitter_address; let shutdown = self.shutdown.clone(); let task = tokio::task::spawn_blocking(move || { run_subscription( db_path.as_str(), page_size, idle_poll_interval, + batch_submitter_address, from_offset, shutdown, events_tx, @@ -130,6 +138,7 @@ fn run_subscription( db_path: &str, page_size: usize, idle_poll_interval: Duration, + batch_submitter_address: Option
, from_offset: u64, shutdown: ShutdownSignal, events_tx: mpsc::Sender, @@ -160,6 +169,11 @@ fn run_subscription( return Ok(()); } + if should_filter_from_broadcast(&tx, batch_submitter_address) { + next_offset = next_offset.saturating_add(1); + continue; + } + let event = BroadcastTxMessage::from_offset_and_tx(next_offset, tx); next_offset = next_offset.saturating_add(1); if events_tx.blocking_send(event).is_err() { @@ -168,3 +182,14 @@ fn run_subscription( } } } + +fn should_filter_from_broadcast( + tx: &SequencedL2Tx, + batch_submitter_address: Option
, +) -> bool { + matches!( + (tx, batch_submitter_address), + (SequencedL2Tx::Direct(direct), Some(batch_submitter_address)) + if direct.sender == batch_submitter_address + ) +} diff --git a/sequencer/src/l2_tx_feed/tests.rs b/sequencer/src/l2_tx_feed/tests.rs index 3a6e6ec..d93c8ed 100644 --- a/sequencer/src/l2_tx_feed/tests.rs +++ b/sequencer/src/l2_tx_feed/tests.rs @@ -10,7 +10,7 @@ use tokio::sync::oneshot; use super::{BroadcastTxMessage, L2TxFeed, L2TxFeedConfig, SubscribeError}; use crate::inclusion_lane::{PendingUserOp, SequencerError}; use crate::shutdown::ShutdownSignal; -use crate::storage::{DirectInputRange, Storage, StoredDirectInput}; +use crate::storage::{SafeInputRange, Storage, StoredSafeInput}; use sequencer_core::l2_tx::{DirectInput, SequencedL2Tx, ValidUserOp}; use sequencer_core::user_op::UserOp; @@ -36,12 +36,16 @@ fn broadcast_direct_input_serializes_with_hex_payload() { let msg = BroadcastTxMessage::from_offset_and_tx( 9, SequencedL2Tx::Direct(DirectInput { + sender: Address::ZERO, + block_number: 42, payload: vec![0xcc, 0xdd], }), ); let json = serde_json::to_string(&msg).expect("serialize"); assert!(json.contains("\"kind\":\"direct_input\"")); assert!(json.contains("\"offset\":9")); + assert!(json.contains("\"sender\":\"0x0000000000000000000000000000000000000000\"")); + assert!(json.contains("\"block_number\":42")); assert!(json.contains("\"payload\":\"0xccdd\"")); } @@ -86,6 +90,41 @@ async fn subscription_replays_existing_rows_in_order() { subscription.finish().await.expect("finish subscription"); } +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn subscription_filters_batch_submitter_safe_inputs() { + let db = test_db("filters-batch-submitter-inputs"); + let batch_submitter_address = Address::from([0xfe; 20]); + seed_ordered_txs_with_sender(db.path.as_str(), batch_submitter_address); + let feed = L2TxFeed::new( + db.path.clone(), + ShutdownSignal::default(), + L2TxFeedConfig { + idle_poll_interval: 
Duration::from_millis(2), + page_size: 64, + batch_submitter_address: Some(batch_submitter_address), + }, + ); + + let mut subscription = feed.subscribe_from(0, u64::MAX).expect("subscribe"); + let first = tokio::time::timeout(Duration::from_secs(1), subscription.recv()) + .await + .expect("wait first event") + .expect("first event"); + + assert!(matches!( + first, + BroadcastTxMessage::UserOp { offset: 0, .. } + )); + + let no_second = tokio::time::timeout(Duration::from_millis(50), subscription.recv()).await; + assert!( + no_second.is_err(), + "filtered batch-submitter input should not be broadcast" + ); + + subscription.finish().await.expect("finish subscription"); +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn shutdown_signal_closes_subscription() { let db = test_db("shutdown-closes"); @@ -113,6 +152,7 @@ fn test_feed(db_path: &str, shutdown: ShutdownSignal) -> L2TxFeed { L2TxFeedConfig { idle_poll_interval: Duration::from_millis(2), page_size: 64, + batch_submitter_address: None, }, ) } @@ -127,9 +167,13 @@ fn test_db(label: &str) -> TestDb { } fn seed_ordered_txs(db_path: &str) { + seed_ordered_txs_with_sender(db_path, Address::ZERO); +} + +fn seed_ordered_txs_with_sender(db_path: &str, direct_sender: Address) { let mut storage = Storage::open(db_path, "NORMAL").expect("open storage"); let mut head = storage - .initialize_open_state(0, DirectInputRange::empty_at(0)) + .initialize_open_state(0, SafeInputRange::empty_at(0)) .expect("initialize open state"); let (respond_to, _recv) = oneshot::channel::>(); @@ -151,16 +195,17 @@ fn seed_ordered_txs(db_path: &str) { .append_user_ops_chunk(&mut head, &[pending]) .expect("append user-op chunk"); storage - .append_safe_direct_inputs( + .append_safe_inputs( 10, - &[StoredDirectInput { + &[StoredSafeInput { + sender: direct_sender, payload: vec![0xaa], block_number: 10, }], ) .expect("append direct input"); storage - .close_frame_only(&mut head, 10, DirectInputRange::new(0, 1)) + 
.close_frame_only(&mut head, 10, SafeInputRange::new(0, 1)) .expect("close frame with one drained direct input"); } diff --git a/sequencer/src/lib.rs b/sequencer/src/lib.rs index 935cb88..17b1257 100644 --- a/sequencer/src/lib.rs +++ b/sequencer/src/lib.rs @@ -6,10 +6,12 @@ //! Flow: API -> inclusion lane -> SQLite -> catch-up replay. //! The inclusion lane is the single writer that defines execution order. pub mod api; +pub mod batch_submitter; pub mod config; pub mod inclusion_lane; pub mod input_reader; pub mod l2_tx_feed; +pub mod partition; mod runtime; pub mod shutdown; pub mod storage; diff --git a/sequencer/src/input_reader/logs.rs b/sequencer/src/partition.rs similarity index 60% rename from sequencer/src/input_reader/logs.rs rename to sequencer/src/partition.rs index d85953e..0e39d7e 100644 --- a/sequencer/src/input_reader/logs.rs +++ b/sequencer/src/partition.rs @@ -1,16 +1,28 @@ // (c) Cartesi and individual authors (see AUTHORS) // SPDX-License-Identifier: Apache-2.0 (see LICENSE) +//! Block-range partition retry: when a log query fails with a "long block range" RPC error, +//! split the range in half and retry. Shared by the input reader (safe-head advancement) +//! and the batch submitter (latest batch submitted scan). +//! +//! This module is stateless: callers pass the retry error codes explicitly. There is no +//! global mutable state; `RunConfig` owns the codes and passes them down via configs. + +/// Default RPC error codes that trigger partition retry (e.g. Infura -32005, Alchemy -32600/-32602, QuickNode -32616). 
+pub const DEFAULT_LONG_BLOCK_RANGE_ERROR_CODES: &[&str] = &["-32005", "-32600", "-32602", "-32616"]; + use alloy::contract::Error as ContractError; use alloy::contract::Event; use alloy::providers::Provider; +use alloy::sol_types::SolCall; use alloy::sol_types::SolEvent; use alloy_primitives::Address; use async_recursion::async_recursion; use cartesi_rollups_contracts::input_box::InputBox::InputAdded; +use cartesi_rollups_contracts::inputs::Inputs::EvmAdvanceCall; #[async_recursion] -pub(crate) async fn get_input_added_events( +pub async fn get_input_added_events( provider: &impl Provider, app_address_filter: Address, input_box_address: &Address, @@ -73,13 +85,21 @@ fn should_retry_with_partition(err: &ContractError, codes: &[String]) -> bool { error_message_matches_retry_codes(&format!("{err:?}"), codes) } -pub(crate) fn error_message_matches_retry_codes(error_message: &str, codes: &[String]) -> bool { +pub fn error_message_matches_retry_codes(error_message: &str, codes: &[String]) -> bool { codes.iter().any(|c| error_message.contains(c)) } +pub fn decode_evm_advance_input(input: &[u8]) -> Result { + EvmAdvanceCall::abi_decode(input).map_err(|err| err.to_string()) +} + #[cfg(test)] mod tests { - use super::error_message_matches_retry_codes; + use alloy_primitives::{U256, address}; + use alloy_sol_types::SolCall; + use cartesi_rollups_contracts::inputs::Inputs::EvmAdvanceCall; + + use super::{decode_evm_advance_input, error_message_matches_retry_codes}; #[test] fn error_message_matches_retry_codes_returns_true_when_message_contains_code() { @@ -101,4 +121,27 @@ mod tests { )); assert!(!error_message_matches_retry_codes("ok", &[])); } + + #[test] + fn decode_evm_advance_input_round_trips() { + let encoded = EvmAdvanceCall { + chainId: U256::from(31337_u64), + appContract: address!("0x1111111111111111111111111111111111111111"), + msgSender: address!("0x2222222222222222222222222222222222222222"), + blockNumber: U256::from(99_u64), + blockTimestamp: 
U256::from(1234_u64), + prevRandao: U256::from(7_u64), + index: U256::from(3_u64), + payload: vec![0xaa, 0xbb].into(), + } + .abi_encode(); + + let decoded = decode_evm_advance_input(encoded.as_slice()).expect("decode evm advance"); + assert_eq!( + decoded.msgSender, + address!("0x2222222222222222222222222222222222222222") + ); + assert_eq!(decoded.blockNumber, U256::from(99_u64)); + assert_eq!(decoded.payload.as_ref(), &[0xaa, 0xbb]); + } } diff --git a/sequencer/src/runtime.rs b/sequencer/src/runtime.rs index 7b4f3f6..246a26e 100644 --- a/sequencer/src/runtime.rs +++ b/sequencer/src/runtime.rs @@ -5,7 +5,9 @@ use thiserror::Error; use tracing::warn; use crate::api::{self, ApiConfig}; -use crate::config::RunConfig; +use crate::batch_submitter::{BatchPosterConfig, EthereumBatchPoster}; +use crate::batch_submitter::{BatchSubmitter, BatchSubmitterConfig, BatchSubmitterError}; +use crate::config::{L1Config, RunConfig}; use crate::inclusion_lane::{InclusionLane, InclusionLaneConfig, InclusionLaneError}; use crate::input_reader::{InputReader, InputReaderConfig, InputReaderError}; use crate::l2_tx_feed::{L2TxFeed, L2TxFeedConfig}; @@ -54,6 +56,18 @@ pub enum RunError { #[source] source: tokio::task::JoinError, }, + #[error("batch submitter stopped unexpectedly")] + BatchSubmitterStoppedUnexpectedly, + #[error("batch submitter exited: {source}")] + BatchSubmitter { + #[source] + source: BatchSubmitterError, + }, + #[error("batch submitter join error: {source}")] + BatchSubmitterJoin { + #[source] + source: tokio::task::JoinError, + }, } enum FirstExit { @@ -61,6 +75,7 @@ enum FirstExit { Server(RunError), InclusionLane(RunError), InputReader(RunError), + BatchSubmitter(RunError), } pub async fn run(app: A, config: RunConfig) -> Result<(), RunError> @@ -69,28 +84,60 @@ where { let domain = config.build_domain(); let shutdown = ShutdownSignal::default(); - let input_box_address = - InputReader::discover_input_box(&config.eth_rpc_url, config.domain_verifying_contract) - 
.await - .map_err(|source| RunError::InputReader { source })?; - let input_reader_genesis_block = - InputReader::discover_input_box_deployment_block(&config.eth_rpc_url, input_box_address) - .await - .map_err(|source| RunError::InputReader { source })?; - let input_reader_config = - build_input_reader_config(&config, input_box_address, input_reader_genesis_block); - InputReader::sync_to_current_safe_head(&config.db_path, input_reader_config.clone()) + + // Single L1/InputBox config shared by input reader and batch submitter (no duplicate RPC URL or addresses). + // Resolve batch-submitter private key (exactly one of inline or file is required at the CLI layer). + let batch_submitter_private_key = if let Some(file) = &config.batch_submitter_private_key_file { + let contents = + std::fs::read_to_string(file).map_err(|e| RunError::Io(std::io::Error::other(e)))?; + contents.lines().next().unwrap_or("").trim().to_string() + } else { + config + .batch_submitter_private_key + .clone() + .expect("batch submitter private key is required by CLI arg group") + }; + + let batch_submitter_address = { + use alloy::signers::local::PrivateKeySigner; + use std::str::FromStr; + PrivateKeySigner::from_str(&batch_submitter_private_key) + .map_err(|e| RunError::Io(std::io::Error::other(e.to_string())))? 
+ .address() + }; + let mut input_reader = InputReader::new( + config.db_path.clone(), + shutdown.clone(), + InputReaderConfig { + rpc_url: config.eth_rpc_url.clone(), + app_address: config.domain_verifying_contract, + poll_interval: INPUT_READER_POLL_INTERVAL, + long_block_range_error_codes: config.long_block_range_error_codes.clone(), + }, + ) + .await + .map_err(|source| RunError::InputReader { source })?; + let input_reader_genesis_block = input_reader.genesis_block(); + let l1_config = L1Config { + eth_rpc_url: config.eth_rpc_url.clone(), + input_box_address: input_reader.input_box_address(), + app_address: config.domain_verifying_contract, + batch_submitter_private_key, + batch_submitter_address, + }; + input_reader + .sync_to_current_safe_head() .await .map_err(|source| RunError::InputReader { source })?; tracing::info!( http_addr = %config.http_addr, db_path = %config.db_path, - eth_rpc_url = %config.eth_rpc_url, - input_box_address = %input_box_address, + eth_rpc_url = %l1_config.eth_rpc_url, + input_box_address = %l1_config.input_box_address, input_reader_genesis_block, domain_chain_id = config.domain_chain_id, - domain_verifying_contract = %config.domain_verifying_contract, + domain_verifying_contract = %l1_config.app_address, "starting sequencer" ); @@ -100,15 +147,40 @@ where shutdown.clone(), app, storage, - InclusionLaneConfig::for_app::(), + InclusionLaneConfig::for_app::(l1_config.batch_submitter_address), + ); + let mut input_reader_handle = input_reader.start()?; + + // Batch submitter uses the same L1 config (InputBox address and RPC URL) as the input reader. 
+ let batch_submitter_config = BatchSubmitterConfig { + idle_poll_interval_ms: config.batch_submitter_idle_poll_interval_ms, + }; + let poster_config = BatchPosterConfig { + l1_submit_address: l1_config.input_box_address, + app_address: l1_config.app_address, + batch_submitter_address: l1_config.batch_submitter_address, + start_block: input_reader_genesis_block, + confirmation_depth: config.batch_submitter_confirmation_depth, + long_block_range_error_codes: config.long_block_range_error_codes, + }; + let provider = build_batch_submitter_provider(&l1_config).await?; + let poster = std::sync::Arc::new(EthereumBatchPoster::new(provider, poster_config)); + let submitter = BatchSubmitter::new( + config.db_path.clone(), + l1_config.batch_submitter_address, + poster, + shutdown.clone(), + batch_submitter_config, ); - let mut input_reader_handle = - InputReader::start(&config.db_path, input_reader_config, shutdown.clone())?; + let mut batch_submitter_handle = submitter.start().map_err(RunError::OpenStorage)?; let tx_feed = L2TxFeed::new( config.db_path.clone(), shutdown.clone(), - L2TxFeedConfig::default(), + L2TxFeedConfig { + batch_submitter_address: Some(l1_config.batch_submitter_address), + ..L2TxFeedConfig::default() + }, ); let mut server_task = api::start( @@ -140,6 +212,9 @@ where reader_result = &mut input_reader_handle => { FirstExit::InputReader(map_input_reader_exit(reader_result)) } + submitter_result = &mut batch_submitter_handle => { + FirstExit::BatchSubmitter(map_batch_submitter_exit(submitter_result)) + } }; begin_runtime_shutdown(&shutdown); @@ -148,6 +223,7 @@ where server_task, inclusion_lane_handle, input_reader_handle, + batch_submitter_handle, ) .await } @@ -160,10 +236,12 @@ async fn wait_for_clean_shutdown( server_task: tokio::task::JoinHandle>, inclusion_lane_handle: tokio::task::JoinHandle>, input_reader_handle: tokio::task::JoinHandle>, + batch_submitter_handle: tokio::task::JoinHandle>, ) -> Result<(), RunError> { 
wait_for_server_shutdown(server_task).await?; wait_for_lane_shutdown(inclusion_lane_handle).await?; wait_for_input_reader_shutdown(input_reader_handle).await?; + wait_for_batch_submitter_shutdown(batch_submitter_handle).await?; Ok(()) } @@ -172,12 +250,17 @@ async fn finish_runtime( server_task: tokio::task::JoinHandle>, inclusion_lane_handle: tokio::task::JoinHandle>, input_reader_handle: tokio::task::JoinHandle>, + batch_submitter_handle: tokio::task::JoinHandle>, ) -> Result<(), RunError> { match first_exit { FirstExit::Signal(signal_error) => { - let shutdown_result = - wait_for_clean_shutdown(server_task, inclusion_lane_handle, input_reader_handle) - .await; + let shutdown_result = wait_for_clean_shutdown( + server_task, + inclusion_lane_handle, + input_reader_handle, + batch_submitter_handle, + ) + .await; match (signal_error, shutdown_result) { (Some(err), _) => Err(err), (None, Ok(())) => Ok(()), @@ -193,6 +276,10 @@ async fn finish_runtime( "input reader", wait_for_input_reader_shutdown(input_reader_handle).await, ); + log_cleanup_result( + "batch submitter", + wait_for_batch_submitter_shutdown(batch_submitter_handle).await, + ); Err(primary) } FirstExit::InclusionLane(primary) => { @@ -201,6 +288,10 @@ async fn finish_runtime( "input reader", wait_for_input_reader_shutdown(input_reader_handle).await, ); + log_cleanup_result( + "batch submitter", + wait_for_batch_submitter_shutdown(batch_submitter_handle).await, + ); Err(primary) } FirstExit::InputReader(primary) => { @@ -209,6 +300,22 @@ async fn finish_runtime( "inclusion lane", wait_for_lane_shutdown(inclusion_lane_handle).await, ); + log_cleanup_result( + "batch submitter", + wait_for_batch_submitter_shutdown(batch_submitter_handle).await, + ); + Err(primary) + } + FirstExit::BatchSubmitter(primary) => { + log_cleanup_result("server", wait_for_server_shutdown(server_task).await); + log_cleanup_result( + "inclusion lane", + wait_for_lane_shutdown(inclusion_lane_handle).await, + ); + log_cleanup_result( 
+ "input reader", + wait_for_input_reader_shutdown(input_reader_handle).await, + ); Err(primary) } } @@ -244,6 +351,16 @@ async fn wait_for_input_reader_shutdown( } } +async fn wait_for_batch_submitter_shutdown( + batch_submitter_handle: tokio::task::JoinHandle>, +) -> Result<(), RunError> { + match batch_submitter_handle.await { + Ok(Ok(())) => Ok(()), + Ok(Err(source)) => Err(RunError::BatchSubmitter { source }), + Err(source) => Err(RunError::BatchSubmitterJoin { source }), + } +} + fn map_server_exit(result: Result, tokio::task::JoinError>) -> RunError { match result { Ok(Ok(())) => RunError::ServerStoppedUnexpectedly, @@ -272,18 +389,13 @@ fn map_input_reader_exit( } } -fn build_input_reader_config( - config: &RunConfig, - input_box_address: alloy_primitives::Address, - genesis_block: u64, -) -> InputReaderConfig { - InputReaderConfig { - rpc_url: config.eth_rpc_url.clone(), - input_box_address, - app_address_filter: config.domain_verifying_contract, - genesis_block, - poll_interval: INPUT_READER_POLL_INTERVAL, - long_block_range_error_codes: config.long_block_range_error_codes.clone(), +fn map_batch_submitter_exit( + result: Result, tokio::task::JoinError>, +) -> RunError { + match result { + Ok(Ok(())) => RunError::BatchSubmitterStoppedUnexpectedly, + Ok(Err(source)) => RunError::BatchSubmitter { source }, + Err(source) => RunError::BatchSubmitterJoin { source }, } } @@ -292,3 +404,20 @@ fn log_cleanup_result(component: &str, result: Result<(), RunError>) { warn!(component, error = %err, "component shutdown after primary failure also errored"); } } + +async fn build_batch_submitter_provider( + l1: &L1Config, +) -> Result { + use alloy::providers::ProviderBuilder; + use alloy::signers::local::PrivateKeySigner; + use std::str::FromStr; + + let signer = PrivateKeySigner::from_str(&l1.batch_submitter_private_key) + .map_err(|e| std::io::Error::other(e.to_string()))?; + + ProviderBuilder::new() + .wallet(signer) + .connect(l1.eth_rpc_url.as_str()) + .await + 
.map_err(|e| std::io::Error::other(e.to_string())) +} diff --git a/sequencer/src/storage/db.rs b/sequencer/src/storage/db.rs index 0f4fb67..7537a2c 100644 --- a/sequencer/src/storage/db.rs +++ b/sequencer/src/storage/db.rs @@ -6,18 +6,24 @@ use rusqlite_migration::{M, Migrations}; use std::time::{Duration, SystemTime, UNIX_EPOCH}; use super::sql::{ - sql_count_user_ops_for_frame, sql_insert_direct_inputs_batch, sql_insert_open_batch, - sql_insert_open_batch_with_index, sql_insert_open_frame, + sql_count_user_ops_for_frame, sql_insert_open_batch, sql_insert_open_batch_with_index, + sql_insert_open_frame, sql_insert_safe_inputs_batch, sql_insert_sequenced_direct_inputs_for_frame, sql_insert_user_ops_and_sequenced_batch, + sql_select_frames_for_batch, sql_select_latest_batch_index, sql_select_latest_batch_with_user_op_count, sql_select_latest_frame_in_batch_for_batch, - sql_select_max_direct_input_index, sql_select_ordered_l2_tx_count, - sql_select_ordered_l2_txs_from_offset, sql_select_ordered_l2_txs_page_from_offset, - sql_select_recommended_fee, sql_select_safe_block, sql_select_safe_inputs_range, - sql_select_total_drained_direct_inputs, sql_update_recommended_fee, sql_update_safe_block, + sql_select_max_safe_input_index, sql_select_ordered_l2_tx_count, + sql_select_ordered_l2_txs_for_batch, sql_select_ordered_l2_txs_from_offset, + sql_select_ordered_l2_txs_page_from_offset, sql_select_recommended_fee, sql_select_safe_block, + sql_select_safe_input_payloads_for_sender, sql_select_safe_inputs_range, + sql_select_total_drained_direct_inputs, sql_select_user_ops_for_frame, + sql_update_recommended_fee, sql_update_safe_block, +}; +use super::{ + FrameHeader, SafeFrontier, SafeInputRange, StorageOpenError, StoredSafeInput, WriteHead, }; -use super::{DirectInputRange, SafeFrontier, StorageOpenError, StoredDirectInput, WriteHead}; use crate::inclusion_lane::PendingUserOp; use alloy_primitives::Address; +use sequencer_core::batch::{Batch, BatchForSubmission, Frame as 
BatchFrame, WireUserOp}; use sequencer_core::l2_tx::{DirectInput, SequencedL2Tx, ValidUserOp}; const MIGRATION_0001_SCHEMA: &str = include_str!("migrations/0001_schema.sql"); @@ -81,13 +87,13 @@ impl Storage { Ok(()) } - pub fn load_next_undrained_direct_input_index(&mut self) -> Result { + pub fn load_next_undrained_safe_input_index(&mut self) -> Result { let value = sql_select_total_drained_direct_inputs(&self.conn)?; Ok(i64_to_u64(value)) } pub fn safe_input_end_exclusive(&mut self) -> Result { - let value = sql_select_max_direct_input_index(&self.conn)?; + let value = sql_select_max_safe_input_index(&self.conn)?; Ok(match value { Some(last_index) => i64_to_u64(last_index).saturating_add(1), None => 0, @@ -119,7 +125,7 @@ impl Storage { .conn .transaction_with_behavior(TransactionBehavior::Deferred)?; let safe_block = query_current_safe_block(&tx)?; - let end_exclusive = query_latest_direct_input_index_exclusive(&tx)?; + let end_exclusive = query_latest_safe_input_index_exclusive(&tx)?; tx.commit()?; Ok(SafeFrontier { safe_block, @@ -127,11 +133,24 @@ impl Storage { }) } + pub fn load_safe_input_payloads_for_sender( + &mut self, + sender: Address, + ) -> Result<(u64, Vec>)> { + let tx = self + .conn + .transaction_with_behavior(TransactionBehavior::Deferred)?; + let safe_block = query_current_safe_block(&tx)?; + let payloads = sql_select_safe_input_payloads_for_sender(&tx, sender.as_slice())?; + tx.commit()?; + Ok((safe_block, payloads)) + } + pub fn fill_safe_inputs( &mut self, from_inclusive: u64, to_exclusive: u64, - out: &mut Vec, + out: &mut Vec, ) -> Result<()> { assert!( from_inclusive <= to_exclusive, @@ -150,7 +169,7 @@ impl Storage { let mut fetched_count = 0_u64; for (offset, row) in rows.into_iter().enumerate() { - let index = i64_to_u64(row.direct_input_index); + let index = i64_to_u64(row.safe_input_index); let expected = from_inclusive.saturating_add(offset as u64); assert_eq!( @@ -158,7 +177,8 @@ impl Storage { "non-contiguous safe-input index: 
expected {expected}, found {index}" ); - out.push(StoredDirectInput { + out.push(StoredSafeInput { + sender: Address::from_slice(row.sender.as_slice()), payload: row.payload, block_number: i64_to_u64(row.block_number), }); @@ -174,10 +194,10 @@ impl Storage { Ok(()) } - pub fn append_safe_direct_inputs( + pub fn append_safe_inputs( &mut self, safe_block: u64, - inputs: &[StoredDirectInput], + inputs: &[StoredSafeInput], ) -> Result<()> { let tx = self .conn @@ -190,11 +210,11 @@ impl Storage { ); assert!( safe_block > current_safe_block || inputs.is_empty(), - "safe block must advance when appending new safe direct inputs" + "safe block must advance when appending new safe inputs" ); - let next_expected = query_latest_direct_input_index_exclusive(&tx)?; - sql_insert_direct_inputs_batch(&tx, next_expected, inputs)?; + let next_expected = query_latest_safe_input_index_exclusive(&tx)?; + sql_insert_safe_inputs_batch(&tx, next_expected, inputs)?; let changed_rows = sql_update_safe_block(&tx, u64_to_i64(safe_block))?; if changed_rows != 1 { return Err(rusqlite::Error::StatementChangedRows(changed_rows)); @@ -216,7 +236,7 @@ impl Storage { pub fn initialize_open_state( &mut self, safe_block: u64, - leading_direct_range: DirectInputRange, + leading_direct_range: SafeInputRange, ) -> Result { let tx = self .conn @@ -290,7 +310,7 @@ impl Storage { &mut self, head: &mut WriteHead, next_safe_block: u64, - leading_direct_range: DirectInputRange, + leading_direct_range: SafeInputRange, ) -> Result<()> { let tx = self .conn @@ -377,6 +397,78 @@ impl Storage { let value = sql_select_ordered_l2_tx_count(&self.conn)?; Ok(i64_to_u64(value)) } + + pub fn latest_batch_index(&mut self) -> Result> { + let value = sql_select_latest_batch_index(&self.conn)?; + Ok(value.map(i64_to_u64)) + } + + pub fn load_frames_for_batch(&mut self, batch_index: u64) -> Result> { + let rows = sql_select_frames_for_batch(&self.conn, u64_to_i64(batch_index))?; + Ok(rows + .into_iter() + .map(|row| 
FrameHeader { + frame_in_batch: i64_to_u32(row.frame_in_batch), + fee: i64_to_u64(row.fee), + safe_block: i64_to_u64(row.safe_block), + }) + .collect()) + } + + pub fn load_ordered_l2_txs_for_batch( + &mut self, + batch_index: u64, + ) -> Result> { + let rows = sql_select_ordered_l2_txs_for_batch(&self.conn, u64_to_i64(batch_index))?; + Ok(decode_ordered_l2_txs(rows)) + } + + pub fn load_batch_for_submission(&mut self, batch_index: u64) -> Result { + let created_at_ms: i64 = self.conn.query_row( + "SELECT created_at_ms FROM batches WHERE batch_index = ?1 LIMIT 1", + [u64_to_i64(batch_index)], + |row| row.get(0), + )?; + + let frame_headers = self.load_frames_for_batch(batch_index)?; + let mut frames = Vec::with_capacity(frame_headers.len()); + + for header in frame_headers { + let rows = sql_select_user_ops_for_frame( + &self.conn, + u64_to_i64(batch_index), + i64::from(header.frame_in_batch), + )?; + + let user_ops = rows + .into_iter() + .map(|row| WireUserOp { + nonce: i64_to_u32(row.nonce), + max_fee: i64_to_u32(row.max_fee), + data: row.data, + signature: row.sig, + }) + .collect(); + + frames.push(BatchFrame { + user_ops, + safe_block: header.safe_block, + fee_price: header.fee, + }); + } + + let batch = Batch { + nonce: batch_index, + frames, + }; + let created_at_ms_u64 = created_at_ms.max(0) as u64; + + Ok(BatchForSubmission { + batch_index, + created_at_ms: created_at_ms_u64, + batch, + }) + } } fn decode_ordered_l2_txs(rows: Vec) -> Vec { @@ -400,6 +492,15 @@ fn decode_ordered_l2_txs(rows: Vec) -> Vec Result { - let value = sql_select_max_direct_input_index(tx)?; +fn query_latest_safe_input_index_exclusive(tx: &Connection) -> Result { + let value = sql_select_max_safe_input_index(tx)?; Ok(match value { Some(last_index) => i64_to_u64(last_index).saturating_add(1), None => 0, @@ -514,7 +615,7 @@ fn persist_frame_direct_sequence( tx: &Transaction<'_>, batch_index: u64, frame_in_batch: u32, - drained_direct_range: DirectInputRange, + drained_direct_range: 
SafeInputRange, ) -> Result<()> { sql_insert_sequenced_direct_inputs_for_frame( tx, @@ -592,8 +693,10 @@ fn i64_to_u32(value: i64) -> u32 { #[cfg(test)] mod tests { + use alloy_primitives::Address; + use super::Storage; - use crate::storage::{DirectInputRange, StoredDirectInput}; + use crate::storage::{SafeInputRange, StoredSafeInput}; use sequencer_core::l2_tx::SequencedL2Tx; use tempfile::TempDir; @@ -628,7 +731,7 @@ mod tests { ); let head_a = storage - .initialize_open_state(0, DirectInputRange::empty_at(0)) + .initialize_open_state(0, SafeInputRange::empty_at(0)) .expect("initialize open state"); let head_b = storage .load_open_state() @@ -643,7 +746,7 @@ mod tests { let mut head_c = head_b; let next_safe_block = head_c.safe_block; storage - .close_frame_only(&mut head_c, next_safe_block, DirectInputRange::empty_at(0)) + .close_frame_only(&mut head_c, next_safe_block, SafeInputRange::empty_at(0)) .expect("rotate within same batch"); assert_eq!(head_c.batch_index, head_b.batch_index); assert_eq!(head_c.frame_in_batch, 1); @@ -666,7 +769,7 @@ mod tests { storage.set_recommended_fee(7).expect("set recommended fee"); let mut head = storage - .initialize_open_state(0, DirectInputRange::empty_at(0)) + .initialize_open_state(0, SafeInputRange::empty_at(0)) .expect("initialize open state"); let next_safe_block = head.safe_block; storage @@ -682,29 +785,27 @@ mod tests { let db = temp_db("replay-order"); let mut storage = Storage::open(db.path.as_str(), "NORMAL").expect("open storage"); let head = storage - .initialize_open_state(0, DirectInputRange::empty_at(0)) + .initialize_open_state(0, SafeInputRange::empty_at(0)) .expect("initialize open state"); let drained = vec![ - StoredDirectInput { + StoredSafeInput { + sender: Address::ZERO, payload: vec![0xaa], block_number: 10, }, - StoredDirectInput { + StoredSafeInput { + sender: Address::ZERO, payload: vec![0xbb], block_number: 10, }, ]; storage - .append_safe_direct_inputs(10, drained.as_slice()) + 
.append_safe_inputs(10, drained.as_slice()) .expect("insert direct inputs"); let mut head = head; storage - .close_frame_only( - &mut head, - 10, - DirectInputRange::new(0, drained.len() as u64), - ) + .close_frame_only(&mut head, 10, SafeInputRange::new(0, drained.len() as u64)) .expect("close frame with directs"); let replay = storage.load_ordered_l2_txs_from(0).expect("load replay"); @@ -720,44 +821,42 @@ mod tests { } #[test] - fn next_undrained_direct_input_index_is_derived_from_sequenced_directs() { + fn next_undrained_safe_input_index_is_derived_from_sequenced_directs() { let db = temp_db("safe-cursor"); let mut storage = Storage::open(db.path.as_str(), "NORMAL").expect("open storage"); assert_eq!( storage - .load_next_undrained_direct_input_index() + .load_next_undrained_safe_input_index() .expect("empty cursor"), 0 ); let head = storage - .initialize_open_state(0, DirectInputRange::empty_at(0)) + .initialize_open_state(0, SafeInputRange::empty_at(0)) .expect("initialize open state"); let drained = vec![ - StoredDirectInput { - payload: vec![0x01], + StoredSafeInput { + sender: Address::ZERO, + payload: vec![0x00], block_number: 10, }, - StoredDirectInput { + StoredSafeInput { + sender: Address::ZERO, payload: vec![0x02], block_number: 10, }, ]; storage - .append_safe_direct_inputs(10, drained.as_slice()) + .append_safe_inputs(10, drained.as_slice()) .expect("insert direct inputs"); let mut head = head; storage - .close_frame_only( - &mut head, - 10, - DirectInputRange::new(0, drained.len() as u64), - ) + .close_frame_only(&mut head, 10, SafeInputRange::new(0, drained.len() as u64)) .expect("close frame with directs"); assert_eq!( storage - .load_next_undrained_direct_input_index() + .load_next_undrained_safe_input_index() .expect("derived cursor"), 2 ); @@ -776,17 +875,19 @@ mod tests { assert!(out.is_empty()); let inserted = vec![ - StoredDirectInput { + StoredSafeInput { + sender: Address::ZERO, payload: vec![0xa0], block_number: 10, }, - 
StoredDirectInput { + StoredSafeInput { + sender: Address::ZERO, payload: vec![0xb1], block_number: 10, }, ]; storage - .append_safe_direct_inputs(10, inserted.as_slice()) + .append_safe_inputs(10, inserted.as_slice()) .expect("insert safe directs"); assert_eq!(storage.safe_input_end_exclusive().expect("safe head"), 2); @@ -825,7 +926,7 @@ mod tests { let mut storage = Storage::open(db.path.as_str(), "NORMAL").expect("open storage"); let head = storage - .initialize_open_state(12, DirectInputRange::empty_at(0)) + .initialize_open_state(12, SafeInputRange::empty_at(0)) .expect("initialize open state"); assert_eq!(head.batch_index, 0); @@ -840,4 +941,74 @@ mod tests { assert_eq!(loaded.frame_in_batch, 0); assert_eq!(loaded.safe_block, 12); } + + #[test] + fn batch_for_submission_builds_from_storage() { + let db = temp_db("batch-for-submission"); + let mut storage = Storage::open(db.path.as_str(), "NORMAL").expect("open storage"); + + let head = storage + .initialize_open_state(12, SafeInputRange::empty_at(0)) + .expect("initialize open state"); + assert_eq!(head.batch_index, 0); + + let batch = storage + .load_batch_for_submission(0) + .expect("load batch for submission"); + + assert_eq!(batch.batch_index, 0); + assert_eq!(batch.batch.frames.len(), 1); + let frame = &batch.batch.frames[0]; + assert!(frame.user_ops.is_empty()); + assert_eq!(frame.safe_block, 12); + assert_eq!(frame.fee_price, 0); + assert!(batch.created_at_ms > 0); + } + + #[test] + fn batch_level_helpers_expose_latest_index_frames_and_txs() { + let db = temp_db("batch-level-helpers"); + let mut storage = Storage::open(db.path.as_str(), "NORMAL").expect("open storage"); + + // Before initialization there should be no batches. + assert!( + storage + .latest_batch_index() + .expect("query latest batch nonce on empty db") + .is_none() + ); + + // Initialize first batch/frame and append some data. 
+ let mut head = storage + .initialize_open_state(0, SafeInputRange::empty_at(0)) + .expect("initialize open state"); + + // Close current batch and move to next so batch 0 becomes closed. + let next_safe_block = head.safe_block; + storage + .close_frame_and_batch(&mut head, next_safe_block) + .expect("close batch and rotate"); + + // Latest batch nonce should now be 1 (open), with batch 0 closed. + let latest = storage + .latest_batch_index() + .expect("query latest batch nonce") + .expect("latest batch should exist"); + assert_eq!(latest, 1); + + // Batch 0 should still have at least one frame header. + let frames = storage + .load_frames_for_batch(0) + .expect("load frames for batch 0"); + assert!(!frames.is_empty()); + + // Ordered L2 txs for batch 0 should be queryable (even if empty). + let txs = storage + .load_ordered_l2_txs_for_batch(0) + .expect("load l2 txs for batch 0"); + assert!( + txs.is_empty(), + "fresh batch should not have sequenced txs yet" + ); + } } diff --git a/sequencer/src/storage/migrations/0001_schema.sql b/sequencer/src/storage/migrations/0001_schema.sql index 0587652..2d47551 100644 --- a/sequencer/src/storage/migrations/0001_schema.sql +++ b/sequencer/src/storage/migrations/0001_schema.sql @@ -29,8 +29,9 @@ CREATE TABLE IF NOT EXISTS user_ops ( UNIQUE(sender, nonce) ); -CREATE TABLE IF NOT EXISTS direct_inputs ( - direct_input_index INTEGER PRIMARY KEY, +CREATE TABLE IF NOT EXISTS safe_inputs ( + safe_input_index INTEGER PRIMARY KEY, + sender BLOB NOT NULL CHECK (length(sender) = 20), payload BLOB NOT NULL, -- Block number of the chain block where this direct input was included (e.g. InputAdded event block). block_number INTEGER NOT NULL CHECK (block_number >= 0) @@ -45,26 +46,26 @@ CREATE TABLE IF NOT EXISTS sequenced_l2_txs ( -- User-op branch: references user_ops(..., pos_in_frame). user_op_pos_in_frame INTEGER, - -- Direct-input branch: references direct_inputs(direct_input_index). 
- direct_input_index INTEGER, + -- Direct-input branch: references safe_inputs(safe_input_index). + safe_input_index INTEGER, FOREIGN KEY(batch_index, frame_in_batch) REFERENCES frames(batch_index, frame_in_batch), FOREIGN KEY(batch_index, frame_in_batch, user_op_pos_in_frame) REFERENCES user_ops(batch_index, frame_in_batch, pos_in_frame), - FOREIGN KEY(direct_input_index) - REFERENCES direct_inputs(direct_input_index), + FOREIGN KEY(safe_input_index) + REFERENCES safe_inputs(safe_input_index), -- XOR invariant: row is either a sequenced user-op OR a drained direct input. CHECK ( - (user_op_pos_in_frame IS NOT NULL AND direct_input_index IS NULL) OR - (user_op_pos_in_frame IS NULL AND direct_input_index IS NOT NULL) + (user_op_pos_in_frame IS NOT NULL AND safe_input_index IS NULL) OR + (user_op_pos_in_frame IS NULL AND safe_input_index IS NOT NULL) ), -- At most one sequenced user-op row for each user-op key. UNIQUE(batch_index, frame_in_batch, user_op_pos_in_frame), -- A direct input can only be sequenced once. - UNIQUE(direct_input_index) + UNIQUE(safe_input_index) ); CREATE INDEX IF NOT EXISTS idx_sequenced_l2_txs_frame diff --git a/sequencer/src/storage/mod.rs b/sequencer/src/storage/mod.rs index 9b3011a..31fc29f 100644 --- a/sequencer/src/storage/mod.rs +++ b/sequencer/src/storage/mod.rs @@ -10,23 +10,24 @@ use thiserror::Error; pub use db::Storage; #[derive(Debug, Clone, PartialEq, Eq)] -pub struct StoredDirectInput { +pub struct StoredSafeInput { + pub sender: alloy_primitives::Address, pub payload: Vec, /// Chain block number where this input was included (e.g. InputAdded event block). 
pub block_number: u64, } #[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub struct DirectInputRange { +pub struct SafeInputRange { pub start_inclusive: u64, pub end_exclusive: u64, } -impl DirectInputRange { +impl SafeInputRange { pub fn new(start_inclusive: u64, end_exclusive: u64) -> Self { assert!( end_exclusive >= start_inclusive, - "direct-input range must be half-open and non-negative: start={start_inclusive}, end={end_exclusive}" + "safe-input range must be half-open and non-negative: start={start_inclusive}, end={end_exclusive}" ); Self { start_inclusive, @@ -53,6 +54,13 @@ pub struct SafeFrontier { pub end_exclusive: u64, } +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub struct FrameHeader { + pub frame_in_batch: u32, + pub fee: u64, + pub safe_block: u64, +} + #[derive(Debug, Error)] pub enum StorageOpenError { #[error(transparent)] diff --git a/sequencer/src/storage/queries/insert_sequenced_direct_input.sql b/sequencer/src/storage/queries/insert_sequenced_direct_input.sql index 0c34ee2..b382c5a 100644 --- a/sequencer/src/storage/queries/insert_sequenced_direct_input.sql +++ b/sequencer/src/storage/queries/insert_sequenced_direct_input.sql @@ -2,5 +2,5 @@ INSERT INTO sequenced_l2_txs ( batch_index, frame_in_batch, user_op_pos_in_frame, - direct_input_index + safe_input_index ) VALUES (?1, ?2, NULL, ?3) diff --git a/sequencer/src/storage/queries/insert_sequenced_user_op.sql b/sequencer/src/storage/queries/insert_sequenced_user_op.sql index 53eb402..1f9e485 100644 --- a/sequencer/src/storage/queries/insert_sequenced_user_op.sql +++ b/sequencer/src/storage/queries/insert_sequenced_user_op.sql @@ -2,5 +2,5 @@ INSERT INTO sequenced_l2_txs ( batch_index, frame_in_batch, user_op_pos_in_frame, - direct_input_index + safe_input_index ) VALUES (?1, ?2, ?3, NULL) diff --git a/sequencer/src/storage/queries/select_ordered_l2_txs_for_batch.sql b/sequencer/src/storage/queries/select_ordered_l2_txs_for_batch.sql new file mode 100644 index 0000000..3dd8361 --- /dev/null 
+++ b/sequencer/src/storage/queries/select_ordered_l2_txs_for_batch.sql @@ -0,0 +1,23 @@ +SELECT + CASE WHEN s.user_op_pos_in_frame IS NOT NULL THEN 0 ELSE 1 END AS kind, + CASE + WHEN s.user_op_pos_in_frame IS NOT NULL THEN u.sender + WHEN s.safe_input_index IS NOT NULL THEN d.sender + ELSE NULL + END AS sender, + CASE WHEN s.user_op_pos_in_frame IS NOT NULL THEN u.data ELSE NULL END AS data, + CASE WHEN s.user_op_pos_in_frame IS NOT NULL THEN f.fee ELSE NULL END AS fee, + CASE WHEN s.safe_input_index IS NOT NULL THEN d.payload ELSE NULL END AS payload, + CASE WHEN s.safe_input_index IS NOT NULL THEN d.block_number ELSE NULL END AS block_number +FROM sequenced_l2_txs s +LEFT JOIN user_ops u + ON u.batch_index = s.batch_index + AND u.frame_in_batch = s.frame_in_batch + AND u.pos_in_frame = s.user_op_pos_in_frame +LEFT JOIN frames f + ON f.batch_index = s.batch_index + AND f.frame_in_batch = s.frame_in_batch +LEFT JOIN safe_inputs d + ON d.safe_input_index = s.safe_input_index +WHERE s.batch_index = ?1 +ORDER BY s.offset ASC diff --git a/sequencer/src/storage/queries/select_ordered_l2_txs_from_offset.sql b/sequencer/src/storage/queries/select_ordered_l2_txs_from_offset.sql index 271fcb8..5c3d52a 100644 --- a/sequencer/src/storage/queries/select_ordered_l2_txs_from_offset.sql +++ b/sequencer/src/storage/queries/select_ordered_l2_txs_from_offset.sql @@ -1,9 +1,14 @@ SELECT CASE WHEN s.user_op_pos_in_frame IS NOT NULL THEN 0 ELSE 1 END AS kind, - CASE WHEN s.user_op_pos_in_frame IS NOT NULL THEN u.sender ELSE NULL END AS sender, + CASE + WHEN s.user_op_pos_in_frame IS NOT NULL THEN u.sender + WHEN s.safe_input_index IS NOT NULL THEN d.sender + ELSE NULL + END AS sender, CASE WHEN s.user_op_pos_in_frame IS NOT NULL THEN u.data ELSE NULL END AS data, CASE WHEN s.user_op_pos_in_frame IS NOT NULL THEN f.fee ELSE NULL END AS fee, - CASE WHEN s.direct_input_index IS NOT NULL THEN d.payload ELSE NULL END AS payload + CASE WHEN s.safe_input_index IS NOT NULL THEN d.payload 
ELSE NULL END AS payload, + CASE WHEN s.safe_input_index IS NOT NULL THEN d.block_number ELSE NULL END AS block_number FROM sequenced_l2_txs s LEFT JOIN user_ops u ON u.batch_index = s.batch_index @@ -12,7 +17,7 @@ LEFT JOIN user_ops u LEFT JOIN frames f ON f.batch_index = s.batch_index AND f.frame_in_batch = s.frame_in_batch -LEFT JOIN direct_inputs d - ON d.direct_input_index = s.direct_input_index +LEFT JOIN safe_inputs d + ON d.safe_input_index = s.safe_input_index WHERE s.offset > ?1 ORDER BY s.offset ASC diff --git a/sequencer/src/storage/queries/select_ordered_l2_txs_page_from_offset.sql b/sequencer/src/storage/queries/select_ordered_l2_txs_page_from_offset.sql index 3f752ff..9b3d8a6 100644 --- a/sequencer/src/storage/queries/select_ordered_l2_txs_page_from_offset.sql +++ b/sequencer/src/storage/queries/select_ordered_l2_txs_page_from_offset.sql @@ -1,9 +1,14 @@ SELECT CASE WHEN s.user_op_pos_in_frame IS NOT NULL THEN 0 ELSE 1 END AS kind, - CASE WHEN s.user_op_pos_in_frame IS NOT NULL THEN u.sender ELSE NULL END AS sender, + CASE + WHEN s.user_op_pos_in_frame IS NOT NULL THEN u.sender + WHEN s.safe_input_index IS NOT NULL THEN d.sender + ELSE NULL + END AS sender, CASE WHEN s.user_op_pos_in_frame IS NOT NULL THEN u.data ELSE NULL END AS data, CASE WHEN s.user_op_pos_in_frame IS NOT NULL THEN f.fee ELSE NULL END AS fee, - CASE WHEN s.direct_input_index IS NOT NULL THEN d.payload ELSE NULL END AS payload + CASE WHEN s.safe_input_index IS NOT NULL THEN d.payload ELSE NULL END AS payload, + CASE WHEN s.safe_input_index IS NOT NULL THEN d.block_number ELSE NULL END AS block_number FROM sequenced_l2_txs s LEFT JOIN user_ops u ON u.batch_index = s.batch_index @@ -12,8 +17,8 @@ LEFT JOIN user_ops u LEFT JOIN frames f ON f.batch_index = s.batch_index AND f.frame_in_batch = s.frame_in_batch -LEFT JOIN direct_inputs d - ON d.direct_input_index = s.direct_input_index +LEFT JOIN safe_inputs d + ON d.safe_input_index = s.safe_input_index WHERE s.offset > ?1 ORDER BY 
s.offset ASC LIMIT ?2 diff --git a/sequencer/src/storage/queries/select_safe_inputs_range.sql b/sequencer/src/storage/queries/select_safe_inputs_range.sql index 281decd..3d82d7e 100644 --- a/sequencer/src/storage/queries/select_safe_inputs_range.sql +++ b/sequencer/src/storage/queries/select_safe_inputs_range.sql @@ -1,4 +1,4 @@ -SELECT direct_input_index, payload, block_number -FROM direct_inputs -WHERE direct_input_index >= ?1 AND direct_input_index < ?2 -ORDER BY direct_input_index ASC +SELECT safe_input_index, sender, payload, block_number +FROM safe_inputs +WHERE safe_input_index >= ?1 AND safe_input_index < ?2 +ORDER BY safe_input_index ASC diff --git a/sequencer/src/storage/sql.rs b/sequencer/src/storage/sql.rs index 5e5cbe8..1710753 100644 --- a/sequencer/src/storage/sql.rs +++ b/sequencer/src/storage/sql.rs @@ -4,10 +4,12 @@ use rusqlite::{Connection, Result, Row, Transaction, params}; use std::time::{SystemTime, UNIX_EPOCH}; -use super::{DirectInputRange, StoredDirectInput}; +use super::{SafeInputRange, StoredSafeInput}; use crate::inclusion_lane::PendingUserOp; const SQL_SELECT_SAFE_INPUTS_RANGE: &str = include_str!("queries/select_safe_inputs_range.sql"); +const SQL_SELECT_SAFE_INPUT_PAYLOADS_FOR_SENDER: &str = + "SELECT payload FROM safe_inputs WHERE sender = ?1 ORDER BY safe_input_index ASC"; const SQL_SELECT_ORDERED_L2_TXS_FROM_OFFSET: &str = include_str!("queries/select_ordered_l2_txs_from_offset.sql"); const SQL_SELECT_ORDERED_L2_TXS_PAGE_FROM_OFFSET: &str = @@ -18,14 +20,17 @@ const SQL_SELECT_LATEST_FRAME_IN_BATCH_FOR_BATCH: &str = include_str!("queries/select_latest_frame_in_batch_for_batch.sql"); const SQL_SELECT_USER_OP_COUNT_FOR_FRAME: &str = include_str!("queries/select_user_op_count_for_frame.sql"); -const SQL_SELECT_MAX_DIRECT_INPUT_INDEX: &str = "SELECT MAX(direct_input_index) FROM direct_inputs"; +const SQL_SELECT_ORDERED_L2_TXS_FOR_BATCH: &str = + include_str!("queries/select_ordered_l2_txs_for_batch.sql"); +const 
SQL_SELECT_LATEST_BATCH_INDEX: &str = "SELECT MAX(batch_index) FROM batches"; +const SQL_SELECT_USER_OPS_FOR_FRAME: &str = "SELECT nonce, max_fee, data, sig FROM user_ops WHERE batch_index = ?1 AND frame_in_batch = ?2 ORDER BY pos_in_frame ASC"; +const SQL_SELECT_MAX_SAFE_INPUT_INDEX: &str = "SELECT MAX(safe_input_index) FROM safe_inputs"; const SQL_SELECT_ORDERED_L2_TX_COUNT: &str = "SELECT COUNT(*) FROM sequenced_l2_txs"; const SQL_SELECT_RECOMMENDED_FEE: &str = "SELECT fee FROM recommended_fees WHERE singleton_id = 0 LIMIT 1"; const SQL_SELECT_SAFE_BLOCK: &str = "SELECT block_number FROM l1_safe_head WHERE singleton_id = 0 LIMIT 1"; -const SQL_INSERT_DIRECT_INPUT: &str = - "INSERT INTO direct_inputs (direct_input_index, payload, block_number) VALUES (?1, ?2, ?3)"; +const SQL_INSERT_SAFE_INPUT: &str = "INSERT INTO safe_inputs (safe_input_index, sender, payload, block_number) VALUES (?1, ?2, ?3, ?4)"; const SQL_INSERT_USER_OP: &str = include_str!("queries/insert_user_op.sql"); const SQL_INSERT_SEQUENCED_USER_OP: &str = include_str!("queries/insert_sequenced_user_op.sql"); const SQL_INSERT_SEQUENCED_DIRECT_INPUT: &str = @@ -34,7 +39,6 @@ const SQL_UPDATE_RECOMMENDED_FEE: &str = "UPDATE recommended_fees SET fee = ?1 WHERE singleton_id = 0"; const SQL_UPDATE_SAFE_BLOCK: &str = "UPDATE l1_safe_head SET block_number = ?1 WHERE singleton_id = 0"; - #[derive(Debug, Clone)] pub(super) struct OrderedL2TxRow { pub kind: i64, @@ -42,23 +46,48 @@ pub(super) struct OrderedL2TxRow { pub data: Option>, pub fee: Option, pub payload: Option>, + pub block_number: Option, } #[derive(Debug, Clone)] pub(super) struct SafeInputRow { - pub direct_input_index: i64, + pub safe_input_index: i64, + pub sender: Vec, pub payload: Vec, pub block_number: i64, } +#[derive(Debug, Clone)] +pub(super) struct FrameHeaderRow { + pub frame_in_batch: i64, + pub fee: i64, + pub safe_block: i64, +} + +#[derive(Debug, Clone)] +pub(super) struct FrameUserOpRow { + pub nonce: i64, + pub max_fee: i64, + pub 
data: Vec, + pub sig: Vec, +} + pub(super) fn sql_select_total_drained_direct_inputs(conn: &Connection) -> Result { - const SQL: &str = "SELECT COUNT(*) FROM sequenced_l2_txs WHERE direct_input_index IS NOT NULL"; + const SQL: &str = "SELECT COUNT(*) FROM sequenced_l2_txs WHERE safe_input_index IS NOT NULL"; conn.query_row(SQL, [], |row| row.get(0)) } -pub(super) fn sql_select_max_direct_input_index(conn: &Connection) -> Result> { +pub(super) fn sql_select_max_safe_input_index(conn: &Connection) -> Result> { + conn.query_row( + SQL_SELECT_MAX_SAFE_INPUT_INDEX, + [], + convert_row_to_optional_i64, + ) +} + +pub(super) fn sql_select_latest_batch_index(conn: &Connection) -> Result> { conn.query_row( - SQL_SELECT_MAX_DIRECT_INPUT_INDEX, + SQL_SELECT_LATEST_BATCH_INDEX, [], convert_row_to_optional_i64, ) @@ -93,19 +122,52 @@ pub(super) fn sql_select_safe_inputs_range( mapped.collect() } -pub(super) fn sql_insert_direct_inputs_batch( +pub(super) fn sql_select_safe_input_payloads_for_sender( + conn: &Connection, + sender: &[u8], +) -> Result>> { + let mut stmt = conn.prepare_cached(SQL_SELECT_SAFE_INPUT_PAYLOADS_FOR_SENDER)?; + let mapped = stmt.query_map(params![sender], |row| row.get(0))?; + mapped.collect() +} + +pub(super) fn sql_select_frames_for_batch( + conn: &Connection, + batch_index: i64, +) -> Result> { + const SQL: &str = "SELECT frame_in_batch, fee, safe_block FROM frames WHERE batch_index = ?1 ORDER BY frame_in_batch ASC"; + let mut stmt = conn.prepare_cached(SQL)?; + let mapped = stmt.query_map(params![batch_index], convert_row_to_frame_header_row)?; + mapped.collect() +} + +pub(super) fn sql_select_user_ops_for_frame( + conn: &Connection, + batch_index: i64, + frame_in_batch: i64, +) -> Result> { + let mut stmt = conn.prepare_cached(SQL_SELECT_USER_OPS_FOR_FRAME)?; + let mapped = stmt.query_map( + params![batch_index, frame_in_batch], + convert_row_to_frame_user_op_row, + )?; + mapped.collect() +} + +pub(super) fn sql_insert_safe_inputs_batch( tx: 
&Transaction<'_>, start_index: u64, - direct_inputs: &[StoredDirectInput], + safe_inputs: &[StoredSafeInput], ) -> Result<()> { - if direct_inputs.is_empty() { + if safe_inputs.is_empty() { return Ok(()); } - let mut stmt = tx.prepare_cached(SQL_INSERT_DIRECT_INPUT)?; - for (offset, input) in direct_inputs.iter().enumerate() { + let mut stmt = tx.prepare_cached(SQL_INSERT_SAFE_INPUT)?; + for (offset, input) in safe_inputs.iter().enumerate() { stmt.execute(params![ u64_to_i64(start_index.saturating_add(offset as u64)), + input.sender.as_slice(), input.payload.as_slice(), u64_to_i64(input.block_number) ])?; @@ -153,18 +215,18 @@ pub(super) fn sql_insert_sequenced_direct_inputs( tx: &Transaction<'_>, batch_index: i64, frame_in_batch: i64, - direct_range: DirectInputRange, + direct_range: SafeInputRange, ) -> Result<()> { if direct_range.is_empty() { return Ok(()); } let mut stmt = tx.prepare_cached(SQL_INSERT_SEQUENCED_DIRECT_INPUT)?; - for direct_input_index in direct_range.start_inclusive..direct_range.end_exclusive { + for safe_input_index in direct_range.start_inclusive..direct_range.end_exclusive { stmt.execute(params![ batch_index, frame_in_batch, - u64_to_i64(direct_input_index), + u64_to_i64(safe_input_index), ])?; } Ok(()) @@ -179,6 +241,15 @@ pub(super) fn sql_select_ordered_l2_txs_from_offset( mapped.collect() } +pub(super) fn sql_select_ordered_l2_txs_for_batch( + conn: &Connection, + batch_index: i64, +) -> Result> { + let mut stmt = conn.prepare_cached(SQL_SELECT_ORDERED_L2_TXS_FOR_BATCH)?; + let mapped = stmt.query_map(params![batch_index], convert_row_to_ordered_l2_tx_row)?; + mapped.collect() +} + pub(super) fn sql_select_ordered_l2_txs_page_from_offset( conn: &Connection, offset: i64, @@ -230,7 +301,7 @@ pub(super) fn sql_insert_sequenced_direct_inputs_for_frame( tx: &Transaction<'_>, batch_index: i64, frame_in_batch: i64, - direct_range: DirectInputRange, + direct_range: SafeInputRange, ) -> Result<()> { sql_insert_sequenced_direct_inputs(tx, 
batch_index, frame_in_batch, direct_range) } @@ -270,9 +341,27 @@ fn convert_row_to_optional_i64(row: &Row<'_>) -> Result> { fn convert_row_to_safe_input_row(row: &Row<'_>) -> Result { Ok(SafeInputRow { - direct_input_index: row.get(0)?, - payload: row.get(1)?, - block_number: row.get(2)?, + safe_input_index: row.get(0)?, + sender: row.get(1)?, + payload: row.get(2)?, + block_number: row.get(3)?, + }) +} + +fn convert_row_to_frame_header_row(row: &Row<'_>) -> Result { + Ok(FrameHeaderRow { + frame_in_batch: row.get(0)?, + fee: row.get(1)?, + safe_block: row.get(2)?, + }) +} + +fn convert_row_to_frame_user_op_row(row: &Row<'_>) -> Result { + Ok(FrameUserOpRow { + nonce: row.get(0)?, + max_fee: row.get(1)?, + data: row.get(2)?, + sig: row.get(3)?, }) } @@ -283,6 +372,7 @@ fn convert_row_to_ordered_l2_tx_row(row: &Row<'_>) -> Result { data: row.get(2)?, fee: row.get(3)?, payload: row.get(4)?, + block_number: row.get(5)?, }) } @@ -305,19 +395,21 @@ fn u64_to_i64(value: u64) -> i64 { #[cfg(test)] mod tests { use super::{ - SQL_INSERT_DIRECT_INPUT, SQL_INSERT_SEQUENCED_DIRECT_INPUT, SQL_INSERT_SEQUENCED_USER_OP, - SQL_INSERT_USER_OP, sql_insert_direct_inputs_batch, sql_insert_open_batch, - sql_insert_open_batch_with_index, sql_insert_open_frame, + FrameHeaderRow, SQL_INSERT_SAFE_INPUT, SQL_INSERT_SEQUENCED_DIRECT_INPUT, + SQL_INSERT_SEQUENCED_USER_OP, SQL_INSERT_USER_OP, sql_insert_open_batch, + sql_insert_open_batch_with_index, sql_insert_open_frame, sql_insert_safe_inputs_batch, sql_insert_sequenced_direct_inputs_for_frame, sql_insert_user_ops_and_sequenced_batch, - sql_select_latest_batch_with_user_op_count, sql_select_max_direct_input_index, + sql_select_frames_for_batch, sql_select_latest_batch_index, + sql_select_latest_batch_with_user_op_count, sql_select_max_safe_input_index, sql_select_ordered_l2_tx_count, sql_select_ordered_l2_txs_from_offset, sql_select_ordered_l2_txs_page_from_offset, sql_select_recommended_fee, sql_select_safe_block, 
sql_select_safe_inputs_range, - sql_select_total_drained_direct_inputs, sql_update_recommended_fee, sql_update_safe_block, + sql_select_total_drained_direct_inputs, sql_select_user_ops_for_frame, + sql_update_recommended_fee, sql_update_safe_block, }; use crate::inclusion_lane::PendingUserOp; use crate::storage::db::Storage; - use crate::storage::{DirectInputRange, StoredDirectInput}; + use crate::storage::{SafeInputRange, StoredSafeInput}; use alloy_primitives::{Address, Signature}; use rusqlite::{Connection, params}; use sequencer_core::user_op::{SignedUserOp, UserOp}; @@ -365,22 +457,22 @@ mod tests { 0 ); assert_eq!( - sql_select_max_direct_input_index(&conn).expect("query max direct input"), + sql_select_max_safe_input_index(&conn).expect("query max direct input"), None ); conn.execute( - SQL_INSERT_DIRECT_INPUT, - params![0_i64, vec![0xaa_u8], 10_i64], + SQL_INSERT_SAFE_INPUT, + params![0_i64, vec![0x11_u8; 20], vec![0xaa_u8], 10_i64], ) .expect("insert direct input 0"); conn.execute( - SQL_INSERT_DIRECT_INPUT, - params![1_i64, vec![0xbb_u8], 11_i64], + SQL_INSERT_SAFE_INPUT, + params![1_i64, vec![0x22_u8; 20], vec![0xbb_u8], 11_i64], ) .expect("insert direct input 1"); assert_eq!( - sql_select_max_direct_input_index(&conn).expect("query max direct input"), + sql_select_max_safe_input_index(&conn).expect("query max direct input"), Some(1) ); @@ -400,7 +492,7 @@ mod tests { let tx = conn.transaction().expect("start tx"); assert_eq!( - sql_select_max_direct_input_index(&tx).expect("query max direct input in tx"), + sql_select_max_safe_input_index(&tx).expect("query max direct input in tx"), Some(1) ); } @@ -410,18 +502,18 @@ mod tests { let conn = setup_conn(); conn.execute( - SQL_INSERT_DIRECT_INPUT, - params![0_i64, vec![0xaa_u8], 10_i64], + SQL_INSERT_SAFE_INPUT, + params![0_i64, vec![0x11_u8; 20], vec![0xaa_u8], 10_i64], ) .expect("insert direct input 0"); conn.execute( - SQL_INSERT_DIRECT_INPUT, - params![1_i64, vec![0xbb_u8], 11_i64], + 
SQL_INSERT_SAFE_INPUT, + params![1_i64, vec![0x22_u8; 20], vec![0xbb_u8], 11_i64], ) .expect("insert direct input 1"); conn.execute( - SQL_INSERT_DIRECT_INPUT, - params![2_i64, vec![0xcc_u8], 12_i64], + SQL_INSERT_SAFE_INPUT, + params![2_i64, vec![0x33_u8; 20], vec![0xcc_u8], 12_i64], ) .expect("insert direct input 2"); @@ -430,8 +522,8 @@ mod tests { let rows = sql_select_safe_inputs_range(&conn, 0, 2).expect("query non-empty interval"); assert_eq!(rows.len(), 2); - assert_eq!(rows[0].direct_input_index, 0); - assert_eq!(rows[1].direct_input_index, 1); + assert_eq!(rows[0].safe_input_index, 0); + assert_eq!(rows[1].safe_input_index, 1); } #[test] @@ -455,8 +547,8 @@ mod tests { ) .expect("insert user op"); conn.execute( - SQL_INSERT_DIRECT_INPUT, - params![0_i64, vec![0xaa_u8], 10_i64], + SQL_INSERT_SAFE_INPUT, + params![0_i64, vec![0x11_u8; 20], vec![0xaa_u8], 10_i64], ) .expect("insert direct input"); conn.execute(SQL_INSERT_SEQUENCED_USER_OP, params![0_i64, 0_i64, 0_i64]) @@ -492,6 +584,87 @@ mod tests { assert!(matches!(err, rusqlite::Error::QueryReturnedNoRows)); } + #[test] + fn latest_batch_index_and_frames_for_batch_helpers_work() { + let mut conn = setup_conn(); + // No batches yet. + assert_eq!( + sql_select_latest_batch_index(&conn).expect("query latest batch nonce"), + None + ); + + // Seed batch 0 / frame 0, then batch 1 / frame 0. 
+ seed_open_batch0_frame0(&mut conn); + { + let tx = conn.transaction().expect("start tx"); + sql_insert_open_batch(&tx, 456).expect("insert batch 1"); + let next_batch = tx.last_insert_rowid(); + sql_insert_open_frame(&tx, next_batch, 0, 456, 3, 5) + .expect("insert frame 0 for batch 1"); + tx.commit().expect("commit tx"); + } + + let latest = sql_select_latest_batch_index(&conn) + .expect("query latest batch nonce") + .expect("latest batch should exist"); + assert_eq!(latest, 1); + + let frames = sql_select_frames_for_batch(&conn, 1).expect("query frames for batch 1"); + assert_eq!(frames.len(), 1); + let FrameHeaderRow { + frame_in_batch, + fee, + safe_block, + } = frames[0].clone(); + assert_eq!(frame_in_batch, 0); + assert_eq!(fee, 3); + assert_eq!(safe_block, 5); + } + + #[test] + fn user_ops_for_frame_helper_returns_ordered_rows() { + let mut conn = setup_conn(); + seed_open_batch0_frame0(&mut conn); + + // Insert two user-ops with different pos_in_frame values. + conn.execute( + SQL_INSERT_USER_OP, + params![ + 0_i64, + 0_i64, + 1_i64, + vec![0x10_u8; 20], + 0_i64, + 1_i64, + vec![0x01_u8], + vec![0x55_u8; 65], + 0_i64 + ], + ) + .expect("insert first user op"); + conn.execute( + SQL_INSERT_USER_OP, + params![ + 0_i64, + 0_i64, + 0_i64, + vec![0x20_u8; 20], + 1_i64, + 2_i64, + vec![0x02_u8], + vec![0x66_u8; 65], + 0_i64 + ], + ) + .expect("insert second user op"); + + let rows = sql_select_user_ops_for_frame(&conn, 0, 0).expect("query user ops for frame"); + assert_eq!(rows.len(), 2); + // Ordered by pos_in_frame ASC: nonce 1 comes from pos 0, then nonce 0 from pos 1. 
+ assert_eq!(rows[0].nonce, 1); + assert_eq!(rows[1].nonce, 0); + } + #[test] fn open_batch_and_frame_insert_helpers_work() { let mut conn = setup_conn(); @@ -537,17 +710,19 @@ mod tests { seed_open_batch0_frame0(&mut conn); let tx = conn.transaction().expect("start tx"); - let direct_inputs = vec![ - StoredDirectInput { + let safe_inputs = vec![ + StoredSafeInput { + sender: Address::ZERO, payload: vec![0xaa_u8], block_number: 10, }, - StoredDirectInput { + StoredSafeInput { + sender: Address::ZERO, payload: vec![0xbb_u8], block_number: 11, }, ]; - sql_insert_direct_inputs_batch(&tx, 0, direct_inputs.as_slice()) + sql_insert_safe_inputs_batch(&tx, 0, safe_inputs.as_slice()) .expect("insert direct inputs batch"); let user_ops = vec![ @@ -561,14 +736,14 @@ mod tests { &tx, 0, 0, - DirectInputRange::new(0, direct_inputs.len() as u64), + SafeInputRange::new(0, safe_inputs.len() as u64), ) .expect("insert sequenced direct inputs batch"); tx.commit().expect("commit tx"); let direct_inputs_count: i64 = conn - .query_row("SELECT COUNT(*) FROM direct_inputs", [], |row| row.get(0)) + .query_row("SELECT COUNT(*) FROM safe_inputs", [], |row| row.get(0)) .expect("count direct inputs"); let user_ops_count: i64 = conn .query_row("SELECT COUNT(*) FROM user_ops", [], |row| row.get(0)) diff --git a/sequencer/tests/batch_submitter_integration.rs b/sequencer/tests/batch_submitter_integration.rs new file mode 100644 index 0000000..945ab7a --- /dev/null +++ b/sequencer/tests/batch_submitter_integration.rs @@ -0,0 +1,126 @@ +// (c) Cartesi and individual authors (see AUTHORS) +// SPDX-License-Identifier: Apache-2.0 (see LICENSE) + +//! Integration tests for the batch submitter: worker loop with real storage and mock poster. 
+ +use std::sync::Arc; +use std::time::Duration; + +use alloy_primitives::Address; +use async_trait::async_trait; +use sequencer::batch_submitter::{BatchPoster, BatchPosterError, TxHash}; +use sequencer::batch_submitter::{BatchSubmitter, BatchSubmitterConfig}; +use sequencer::shutdown::ShutdownSignal; +use sequencer::storage::{SafeInputRange, Storage}; +use sequencer_core::batch::Batch; +use tempfile::TempDir; + +const BATCH_SUBMITTER_ADDRESS: Address = Address::repeat_byte(0x11); + +/// Minimal mock for integration tests: records submissions. +struct TestMock { + submissions: std::sync::Mutex>, +} + +impl TestMock { + fn new() -> Arc { + Arc::new(Self { + submissions: std::sync::Mutex::new(Vec::new()), + }) + } + fn submissions(&self) -> Vec<(u64, usize)> { + self.submissions.lock().expect("lock").clone() + } +} + +#[async_trait] +impl BatchPoster for TestMock { + async fn submit_batch(&self, payload: Vec) -> Result { + let batch_index = ssz::Decode::from_ssz_bytes(payload.as_slice()) + .map(|b: Batch| b.nonce) + .unwrap_or(0); + self.submissions + .lock() + .expect("lock") + .push((batch_index, payload.len())); + Ok(TxHash::ZERO) + } + + async fn observed_submitted_batch_nonces( + &self, + _from_block: u64, + ) -> Result, BatchPosterError> { + Ok(self + .submissions + .lock() + .expect("lock") + .iter() + .map(|(nonce, _)| *nonce) + .collect()) + } +} + +const SQLITE_SYNCHRONOUS_PRAGMA: &str = "NORMAL"; + +fn temp_db(name: &str) -> (TempDir, String) { + let dir = tempfile::Builder::new() + .prefix(format!("sequencer-batch-submitter-it-{name}-").as_str()) + .tempdir() + .expect("create temporary test directory"); + let path = dir.path().join("sequencer.sqlite"); + (dir, path.to_string_lossy().into_owned()) +} + +/// Seeds storage so batches 0, 1, and 2 are closed and batch 3 is open. 
+fn seed_two_closed_batches(db_path: &str) { + let mut storage = Storage::open(db_path, SQLITE_SYNCHRONOUS_PRAGMA).expect("open storage"); + let mut head = storage + .initialize_open_state(0, SafeInputRange::empty_at(0)) + .expect("initialize open state"); + let next_safe = head.safe_block; + storage + .close_frame_and_batch(&mut head, next_safe) + .expect("close batch 0"); + storage + .close_frame_and_batch(&mut head, next_safe) + .expect("close batch 1"); + storage + .close_frame_and_batch(&mut head, next_safe) + .expect("close batch 2"); +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn submitter_loop_submits_closed_batches_then_exits_on_shutdown() { + let (_dir, path) = temp_db("loop-submits"); + seed_two_closed_batches(&path); + + let mock = TestMock::new(); + let shutdown = ShutdownSignal::default(); + let config = BatchSubmitterConfig { + idle_poll_interval_ms: 5000, + }; + let submitter = BatchSubmitter::new( + path, + BATCH_SUBMITTER_ADDRESS, + mock.clone(), + shutdown.clone(), + config, + ); + let handle = submitter.start().expect("start batch submitter"); + + // Allow at least one tick to run (worker may submit batches 0, 1, and 2 in one tick). 
+ tokio::time::sleep(Duration::from_millis(200)).await; + + shutdown.request_shutdown(); + let _ = tokio::time::timeout(Duration::from_secs(2), handle).await; + + let submissions = mock.submissions(); + assert!( + submissions.len() >= 3, + "submitter should have submitted at least batch 0, 1, and 2, got {:?}", + submissions + ); + assert_eq!(submissions[0].0, 0, "first submission should be batch 0"); + assert_eq!(submissions[1].0, 1, "second submission should be batch 1"); + assert_eq!(submissions[2].0, 2, "third submission should be batch 2"); +} diff --git a/sequencer/tests/e2e_sequencer.rs b/sequencer/tests/e2e_sequencer.rs index bb79e0f..dd82fed 100644 --- a/sequencer/tests/e2e_sequencer.rs +++ b/sequencer/tests/e2e_sequencer.rs @@ -13,10 +13,12 @@ use futures_util::StreamExt; use k256::ecdsa::SigningKey; use k256::ecdsa::signature::hazmat::PrehashSigner; use sequencer::api::{self, ApiConfig}; -use sequencer::inclusion_lane::{InclusionLane, InclusionLaneConfig, PendingUserOp}; +use sequencer::inclusion_lane::{ + InclusionLane, InclusionLaneConfig, InclusionLaneError, PendingUserOp, +}; use sequencer::l2_tx_feed::{L2TxFeed, L2TxFeedConfig}; use sequencer::shutdown::ShutdownSignal; -use sequencer::storage::{DirectInputRange, Storage, StoredDirectInput}; +use sequencer::storage::{SafeInputRange, Storage, StoredSafeInput}; use sequencer_core::api::{TxRequest, TxResponse, WsTxMessage}; use sequencer_core::l2_tx::SequencedL2Tx; use sequencer_core::user_op::UserOp; @@ -425,8 +427,9 @@ async fn start_full_server_with_max_body( WalletApp::new(WalletConfig), storage, InclusionLaneConfig { + batch_submitter_address: Address::from([0xff; 20]), max_user_ops_per_chunk: 32, - safe_direct_buffer_capacity: 32, + safe_input_buffer_capacity: 32, max_batch_open: Duration::from_secs(60 * 60), max_batch_user_op_bytes: 1_048_576, idle_poll_interval: Duration::from_millis(2), @@ -439,6 +442,7 @@ async fn start_full_server_with_max_body( L2TxFeedConfig { idle_poll_interval: 
Duration::from_millis(2), page_size: 64, + batch_submitter_address: None, }, ); @@ -489,6 +493,7 @@ async fn start_api_only_server( L2TxFeedConfig { idle_poll_interval: Duration::from_millis(2), page_size: 64, + batch_submitter_address: None, }, ); let server_task = api::start_on_listener( @@ -520,9 +525,11 @@ async fn shutdown_runtime(mut runtime: FullServerRuntime) { .await .expect("wait for inclusion lane") .expect("join inclusion lane task"); + let ok = + lane_result.is_ok() || matches!(lane_result, Err(InclusionLaneError::ChannelClosed)); assert!( - lane_result.is_ok(), - "expected clean shutdown, got {lane_result:?}" + ok, + "expected clean shutdown (Ok or ChannelClosed), got {lane_result:?}" ); } if let Some(task) = runtime.server_task.take() { @@ -541,7 +548,7 @@ fn bootstrap_open_frame_fee_zero(db_path: &str) { let mut storage = Storage::open(db_path, "NORMAL").expect("open storage"); storage.set_recommended_fee(0).expect("set recommended fee"); let head = storage - .initialize_open_state(0, DirectInputRange::empty_at(0)) + .initialize_open_state(0, SafeInputRange::empty_at(0)) .expect("initialize open state"); assert_eq!(head.frame_fee, 0); } @@ -568,9 +575,10 @@ fn make_valid_request(domain: &Eip712Domain) -> TxRequest { fn seed_safe_direct_input(db_path: &str, safe_block: u64, payload: Vec) { let mut storage = Storage::open(db_path, "NORMAL").expect("open storage"); storage - .append_safe_direct_inputs( + .append_safe_inputs( safe_block, - &[StoredDirectInput { + &[StoredSafeInput { + sender: Address::ZERO, payload, block_number: safe_block, }], @@ -611,8 +619,21 @@ fn assert_ws_message_matches_tx( assert_eq!(fee, expected.fee); assert_eq!(decode_hex_prefixed(data.as_str()), expected.data.as_slice()); } - (WsTxMessage::DirectInput { offset, payload }, SequencedL2Tx::Direct(expected)) => { + ( + WsTxMessage::DirectInput { + offset, + sender, + block_number, + payload, + }, + SequencedL2Tx::Direct(expected), + ) => { assert_eq!(offset, expected_offset); + 
assert_eq!( + decode_hex_prefixed(sender.as_str()), + expected.sender.as_slice() + ); + assert_eq!(block_number, expected.block_number); assert_eq!( decode_hex_prefixed(payload.as_str()), expected.payload.as_slice() diff --git a/sequencer/tests/ws_broadcaster.rs b/sequencer/tests/ws_broadcaster.rs index e36a0b7..5b25f4f 100644 --- a/sequencer/tests/ws_broadcaster.rs +++ b/sequencer/tests/ws_broadcaster.rs @@ -12,7 +12,7 @@ use sequencer::api::{self, ApiConfig, WS_CATCHUP_WINDOW_EXCEEDED_REASON}; use sequencer::inclusion_lane::{PendingUserOp, SequencerError}; use sequencer::l2_tx_feed::{L2TxFeed, L2TxFeedConfig}; use sequencer::shutdown::ShutdownSignal; -use sequencer::storage::{DirectInputRange, Storage, StoredDirectInput}; +use sequencer::storage::{SafeInputRange, Storage, StoredSafeInput}; use sequencer_core::api::WsTxMessage; use sequencer_core::l2_tx::SequencedL2Tx; use sequencer_core::user_op::{SignedUserOp, UserOp}; @@ -306,7 +306,7 @@ async fn ws_subscribe_closes_on_oversized_inbound_message() { fn seed_ordered_txs(db_path: &str) { let mut storage = Storage::open(db_path, "NORMAL").expect("open storage"); let mut head = storage - .initialize_open_state(0, DirectInputRange::empty_at(0)) + .initialize_open_state(0, SafeInputRange::empty_at(0)) .expect("initialize open state"); let (respond_to, _recv) = oneshot::channel::>(); @@ -328,16 +328,17 @@ fn seed_ordered_txs(db_path: &str) { .append_user_ops_chunk(&mut head, &[pending]) .expect("append user-op chunk"); storage - .append_safe_direct_inputs( + .append_safe_inputs( 10, - &[StoredDirectInput { + &[StoredSafeInput { + sender: Address::ZERO, payload: vec![0xaa], block_number: 10, }], ) .expect("append direct input"); storage - .close_frame_only(&mut head, 10, DirectInputRange::new(0, 1)) + .close_frame_only(&mut head, 10, SafeInputRange::new(0, 1)) .expect("close frame with one drained direct input"); } @@ -355,9 +356,10 @@ fn append_drained_direct_input(db_path: &str, payload: Vec) { 
.safe_input_end_exclusive() .expect("read next direct input index"); storage - .append_safe_direct_inputs( + .append_safe_inputs( safe_block, - &[StoredDirectInput { + &[StoredSafeInput { + sender: Address::ZERO, payload, block_number: safe_block, }], @@ -367,7 +369,7 @@ fn append_drained_direct_input(db_path: &str, payload: Vec) { .close_frame_only( &mut head, safe_block, - DirectInputRange::new(next_direct_index, next_direct_index.saturating_add(1)), + SafeInputRange::new(next_direct_index, next_direct_index.saturating_add(1)), ) .expect("close frame with one drained direct input"); } @@ -416,6 +418,7 @@ async fn start_test_server_with_limits( L2TxFeedConfig { idle_poll_interval: Duration::from_millis(2), page_size: 64, + batch_submitter_address: None, }, ); let task = api::start_on_listener( @@ -538,8 +541,21 @@ fn assert_ws_message_matches_tx( assert_eq!(fee, expected.fee); assert_eq!(decode_hex_prefixed(data.as_str()), expected.data.as_slice()); } - (WsTxMessage::DirectInput { offset, payload }, SequencedL2Tx::Direct(expected)) => { + ( + WsTxMessage::DirectInput { + offset, + sender, + block_number, + payload, + }, + SequencedL2Tx::Direct(expected), + ) => { assert_eq!(offset, expected_offset); + assert_eq!( + decode_hex_prefixed(sender.as_str()), + expected.sender.as_slice() + ); + assert_eq!(block_number, expected.block_number); assert_eq!( decode_hex_prefixed(payload.as_str()), expected.payload.as_slice()