From dce8d840f1b6e17b6f8eef9f20888dd287e82cec Mon Sep 17 00:00:00 2001 From: tac0turtle Date: Fri, 6 Mar 2026 13:21:49 +0100 Subject: [PATCH 1/4] review flow and align parts --- bin/testapp/src/eth_eoa.rs | 50 +++++-- bin/testapp/src/sim_testing.rs | 48 ++++--- bin/testapp/tests/authentication_tests.rs | 60 +++++++++ crates/app/node/src/lib.rs | 89 ++++++++++++- crates/app/server/src/dev.rs | 44 +++++-- crates/app/tx/eth/src/mempool.rs | 5 +- crates/rpc/chain-index/src/index.rs | 15 ++- crates/rpc/chain-index/src/integration.rs | 152 +++++++++++++++++----- crates/rpc/chain-index/src/provider.rs | 1 + crates/rpc/chain-index/src/types.rs | 4 +- crates/rpc/types/src/log.rs | 12 +- 11 files changed, 389 insertions(+), 91 deletions(-) diff --git a/bin/testapp/src/eth_eoa.rs b/bin/testapp/src/eth_eoa.rs index d1263d3..c60ab96 100644 --- a/bin/testapp/src/eth_eoa.rs +++ b/bin/testapp/src/eth_eoa.rs @@ -18,11 +18,12 @@ use evolve_core::account_impl; #[account_impl(EthEoaAccount)] pub mod eth_eoa_account { + use core::cmp::Ordering; use evolve_authentication::auth_interface::AuthenticationInterface; use evolve_collections::item::Item; use evolve_core::{AccountId, Environment, Message, SdkResult}; use evolve_macros::{exec, init, query}; - use evolve_tx_eth::TxContext; + use evolve_tx_eth::{TxContext, ERR_NONCE_TOO_HIGH, ERR_NONCE_TOO_LOW}; /// An Ethereum-compatible externally owned account. 
/// @@ -53,6 +54,33 @@ pub mod eth_eoa_account { } } + fn increment_nonce(&self, env: &mut dyn Environment) -> SdkResult<()> { + self.nonce + .update(|v| Ok(v.unwrap_or_default().saturating_add(1)), env)?; + Ok(()) + } + + fn authenticate_canonical_tx( + &self, + tx: &TxContext, + expected_address: [u8; 20], + env: &mut dyn Environment, + ) -> SdkResult<()> { + let sender_address: [u8; 20] = tx.sender_address().into(); + if sender_address != expected_address { + return Err(evolve_core::ErrorCode::new(0x51)); // Sender mismatch + } + + let current_nonce = self.nonce.may_get(env)?.unwrap_or(0); + match tx.nonce().cmp(¤t_nonce) { + Ordering::Less => return Err(ERR_NONCE_TOO_LOW), + Ordering::Greater => return Err(ERR_NONCE_TOO_HIGH), + Ordering::Equal => {} + } + + self.increment_nonce(env) + } + /// Initialize the account with an Ethereum address. #[init] pub fn initialize( @@ -86,7 +114,8 @@ pub mod eth_eoa_account { /// /// For TxContext (Ethereum transactions): /// 1. Verifies the recovered sender address matches this account's address - /// 2. Increments nonce + /// 2. Verifies the transaction nonce matches the stored account nonce + /// 3. Increments nonce on success /// /// For other transaction types: /// - Just increments nonce (test mode, no signature verification) @@ -94,28 +123,23 @@ pub mod eth_eoa_account { fn authenticate(&self, tx: Message, env: &mut dyn Environment) -> SdkResult<()> { let expected_address = self.eth_address.may_get(env)?.unwrap_or([0u8; 20]); + if let Ok(mempool_tx) = tx.get::() { + return self.authenticate_canonical_tx(&mempool_tx, expected_address, env); + } + if let Ok(sender_address) = tx.get::<[u8; 20]>() { if sender_address != expected_address { return Err(evolve_core::ErrorCode::new(0x51)); // Sender mismatch } - // Fast path: validator passes sender AccountId directly. } else if let Ok(sender_id) = tx.get::() { + // Fast path: validator passes sender AccountId directly. 
if sender_id != env.whoami() { return Err(evolve_core::ErrorCode::new(0x51)); // Sender mismatch } - // Backward-compatible fallback for older validator payloads. - } else if let Ok(mempool_tx) = tx.get::() { - let sender_bytes: [u8; 20] = mempool_tx.sender_address().into(); - if sender_bytes != expected_address { - return Err(evolve_core::ErrorCode::new(0x51)); // Sender mismatch - } } // For other tx types, skip verification (test mode) - // Increment nonce - self.nonce.update(|v| Ok(v.unwrap_or_default() + 1), env)?; - - Ok(()) + self.increment_nonce(env) } } } diff --git a/bin/testapp/src/sim_testing.rs b/bin/testapp/src/sim_testing.rs index f33d4fe..1d182aa 100644 --- a/bin/testapp/src/sim_testing.rs +++ b/bin/testapp/src/sim_testing.rs @@ -43,6 +43,7 @@ pub struct SimTestApp { const SIM_CHAIN_ID: u64 = 1337; const MAX_SIGNING_KEY_ATTEMPTS: usize = 16; +const DEFAULT_TEST_BLOCK_GAS_LIMIT: u64 = 30_000_000; fn keccak256(data: &[u8]) -> [u8; 32] { let mut keccak = Keccak::v256(); @@ -460,10 +461,10 @@ impl SimTestApp { Ok(alloy_primitives::B256::from(tx_id)) } - fn take_mempool_batch(&mut self, max_txs: usize) -> (Vec<[u8; 32]>, Vec) { + fn propose_mempool_batch(&mut self, max_txs: usize) -> (Vec<[u8; 32]>, Vec) { let selected = { let mut pool = self.mempool.blocking_write(); - pool.select(max_txs) + pool.propose(DEFAULT_TEST_BLOCK_GAS_LIMIT, max_txs).0 }; let mut tx_hashes = Vec::with_capacity(selected.len()); @@ -475,12 +476,9 @@ impl SimTestApp { (tx_hashes, transactions) } - fn remove_many_from_mempool(&mut self, tx_hashes: &[[u8; 32]]) { - if tx_hashes.is_empty() { - return; - } + fn finalize_mempool_batch(&mut self, executed_tx_hashes: &[[u8; 32]]) { let mut pool = self.mempool.blocking_write(); - pool.remove_many(tx_hashes); + pool.finalize(executed_tx_hashes); } fn produce_block_internal( @@ -498,10 +496,15 @@ impl SimTestApp { } pub fn produce_block_from_mempool(&mut self, max_txs: usize) -> BlockResult { - let (tx_hashes, transactions) = 
self.take_mempool_batch(max_txs); + let (tx_hashes, transactions) = self.propose_mempool_batch(max_txs); let height = self.sim.time().block_height(); let result = self.produce_block_internal(height, transactions, true); - self.remove_many_from_mempool(&tx_hashes); + let executed: Vec<_> = tx_hashes + .iter() + .copied() + .take(result.tx_results.len()) + .collect(); + self.finalize_mempool_batch(&executed); result } @@ -627,9 +630,14 @@ impl SimTestApp { for raw_tx in &txs { app.submit_raw_tx(raw_tx).expect("submit tx"); } - let (tx_hashes, transactions) = app.take_mempool_batch(max_txs); + let (tx_hashes, transactions) = app.propose_mempool_batch(max_txs); let result = app.produce_block_internal(height, transactions, false); - app.remove_many_from_mempool(&tx_hashes); + let executed: Vec<_> = tx_hashes + .iter() + .copied() + .take(result.tx_results.len()) + .collect(); + app.finalize_mempool_batch(&executed); result }, |app| app.sim.advance_block(), @@ -651,9 +659,14 @@ impl SimTestApp { for raw_tx in &txs { app.submit_raw_tx(raw_tx).expect("submit tx"); } - let (tx_hashes, transactions) = app.take_mempool_batch(max_txs); + let (tx_hashes, transactions) = app.propose_mempool_batch(max_txs); let result = app.produce_block_internal(height, transactions, false); - app.remove_many_from_mempool(&tx_hashes); + let executed: Vec<_> = tx_hashes + .iter() + .copied() + .take(result.tx_results.len()) + .collect(); + app.finalize_mempool_batch(&executed); result }, |app| app.sim.advance_block(), @@ -684,11 +697,16 @@ impl SimTestApp { app.submit_raw_tx(raw_tx).expect("submit tx"); } - let (tx_hashes, transactions) = app.take_mempool_batch(max_txs); + let (tx_hashes, transactions) = app.propose_mempool_batch(max_txs); let block = Block::for_testing(height, transactions); let result = app.apply_block_with_trace(&block, &mut builder); - app.remove_many_from_mempool(&tx_hashes); + let executed: Vec<_> = tx_hashes + .iter() + .copied() + .take(result.tx_results.len()) + 
.collect(); + app.finalize_mempool_batch(&executed); result }, |app| app.sim.advance_block(), diff --git a/bin/testapp/tests/authentication_tests.rs b/bin/testapp/tests/authentication_tests.rs index 8f66659..4e60c1d 100644 --- a/bin/testapp/tests/authentication_tests.rs +++ b/bin/testapp/tests/authentication_tests.rs @@ -2,6 +2,7 @@ use evolve_authentication::auth_interface::AuthenticationInterfaceRef; use evolve_authentication::ERR_NOT_EOA; use evolve_core::Message; use evolve_testapp::sim_testing::SimTestApp; +use evolve_tx_eth::{ERR_NONCE_TOO_HIGH, ERR_NONCE_TOO_LOW}; #[test] fn test_successful_transaction() { @@ -48,3 +49,62 @@ fn test_forged_sender_account_id_payload_rejected() { Ok(_) => panic!("expected forged sender payload to be rejected"), } } + +#[test] +fn test_replay_transaction_rejected_by_nonce() { + let mut app = SimTestApp::default(); + let accounts = app.accounts(); + + let raw_tx = app + .build_token_transfer_tx(accounts.alice, accounts.atom, accounts.bob, 200, 100_000) + .expect("build tx"); + + app.submit_raw_tx(&raw_tx).expect("submit tx"); + let initial_result = app.produce_block_from_mempool(1); + let initial_tx = initial_result.tx_results.first().expect("tx result"); + assert!(initial_tx.response.is_ok(), "{:?}", initial_tx.response); + + app.submit_raw_tx(&raw_tx).expect("resubmit replay tx"); + let replay_result = app.produce_block_from_mempool(1); + let replay_tx = replay_result.tx_results.first().expect("tx result"); + match &replay_tx.response { + Err(err) => assert_eq!(*err, ERR_NONCE_TOO_LOW), + Ok(resp) => panic!("expected nonce too low, got {:?}", resp), + } + + let next_raw_tx = app + .build_token_transfer_tx(accounts.alice, accounts.atom, accounts.bob, 25, 100_000) + .expect("build tx with next nonce"); + app.submit_raw_tx(&next_raw_tx).expect("submit tx"); + let next_result = app.produce_block_from_mempool(1); + let next_tx = next_result.tx_results.first().expect("tx result"); + assert!(next_tx.response.is_ok(), "{:?}", 
next_tx.response); +} + +#[test] +fn test_nonce_gap_rejected_by_canonical_authentication() { + let mut app = SimTestApp::default(); + let accounts = app.accounts(); + + let first_raw_tx = app + .build_token_transfer_tx(accounts.alice, accounts.atom, accounts.bob, 10, 100_000) + .expect("build first tx"); + let skipped_raw_tx = app + .build_token_transfer_tx(accounts.alice, accounts.atom, accounts.bob, 20, 100_000) + .expect("build second tx"); + + app.submit_raw_tx(&skipped_raw_tx) + .expect("submit higher nonce tx"); + let gap_result = app.produce_block_from_mempool(1); + let gap_tx = gap_result.tx_results.first().expect("tx result"); + match &gap_tx.response { + Err(err) => assert_eq!(*err, ERR_NONCE_TOO_HIGH), + Ok(resp) => panic!("expected nonce too high, got {:?}", resp), + } + + app.submit_raw_tx(&first_raw_tx) + .expect("submit now-valid lower nonce tx"); + let recovery_result = app.produce_block_from_mempool(1); + let recovery_tx = recovery_result.tx_results.first().expect("tx result"); + assert!(recovery_tx.response.is_ok(), "{:?}", recovery_tx.response); +} diff --git a/crates/app/node/src/lib.rs b/crates/app/node/src/lib.rs index bd7d211..35ec906 100644 --- a/crates/app/node/src/lib.rs +++ b/crates/app/node/src/lib.rs @@ -1252,7 +1252,7 @@ mod tests { use evolve_mempool::MempoolTx; use evolve_rpc_types::block::BlockTransactions; use evolve_rpc_types::SyncStatus; - use evolve_server::{Block, DevConsensus, StfExecutor}; + use evolve_server::{Block, DevConsensus, NoopChainIndex, StfExecutor}; use evolve_stf::execution_state::ExecutionState; use evolve_stf::results::{BlockResult, TxResult}; use evolve_stf_traits::{AccountsCodeStorage, Block as _}; @@ -1319,12 +1319,16 @@ mod tests { } fn make_signed_legacy_tx(chain_id: u64) -> Vec { + make_signed_legacy_tx_with_gas(chain_id, 21_000) + } + + fn make_signed_legacy_tx_with_gas(chain_id: u64, gas_limit: u64) -> Vec { let signing_key = SigningKey::random(&mut OsRng); let tx = TxLegacy { chain_id: Some(chain_id), 
nonce: 0, gas_price: 1, - gas_limit: 21_000, + gas_limit, to: alloy_primitives::TxKind::Call(Address::repeat_byte(0x11)), value: U256::from(1u64), input: Bytes::new(), @@ -1408,6 +1412,11 @@ mod tests { .expect("transaction should be indexed"); assert_eq!(tx.hash, tx_hash); assert_eq!(tx.block_number, Some(U64::from(1u64))); + assert_eq!(tx.nonce, U64::ZERO); + assert_eq!(tx.gas_price, Some(U256::from(1u64))); + assert_eq!(tx.tx_type, U64::ZERO); + assert_ne!(tx.r, U256::ZERO); + assert_ne!(tx.s, U256::ZERO); let receipt = provider .get_transaction_receipt(tx_hash) @@ -1417,6 +1426,8 @@ mod tests { assert_eq!(receipt.transaction_hash, tx_hash); assert_eq!(receipt.block_number, U64::from(1u64)); assert_eq!(receipt.status, U64::from(1u64)); + assert_eq!(receipt.effective_gas_price, U256::from(1u64)); + assert_eq!(receipt.tx_type, U64::ZERO); let block = provider .get_block_by_number(1, false) @@ -1429,6 +1440,75 @@ mod tests { } } + #[tokio::test] + async fn produce_block_from_mempool_keeps_over_budget_transactions_queued() { + let chain_index = Arc::new(PersistentChainIndex::in_memory().expect("in-memory index")); + let mempool = new_shared_mempool::(); + let provider = ChainStateProvider::with_mempool( + Arc::clone(&chain_index), + test_provider_config(), + Arc::new(NoopAccountCodes), + mempool.clone(), + ); + + let low_gas_dev: DevConsensus<_, _, _, _, NoopChainIndex> = DevConsensus::with_mempool( + MockStf, + MockStorage::new(), + MockCodes, + DevConfig { + gas_limit: 21_000, + ..DevConfig::manual() + }, + mempool.clone(), + ); + let high_gas_dev: DevConsensus<_, _, _, _, NoopChainIndex> = DevConsensus::with_mempool( + MockStf, + MockStorage::new(), + MockCodes, + DevConfig { + gas_limit: 30_000, + ..DevConfig::manual() + }, + mempool.clone(), + ); + + let executed_hash = provider + .send_raw_transaction(&make_signed_legacy_tx_with_gas(1, 21_000)) + .await + .expect("21k tx should be accepted into mempool"); + let deferred_hash = provider + 
.send_raw_transaction(&make_signed_legacy_tx_with_gas(1, 30_000)) + .await + .expect("30k tx should be accepted into mempool"); + + assert_eq!(mempool.read().await.len(), 2); + + let first_block = low_gas_dev + .produce_block_from_mempool(100) + .await + .expect("low-gas block production should succeed"); + assert_eq!(first_block.tx_count, 1); + assert_eq!(first_block.gas_used, 21_000); + + let pool = mempool.read().await; + assert_eq!(pool.len(), 1); + assert!(!pool.contains(&executed_hash.0)); + assert!(pool.contains(&deferred_hash.0)); + drop(pool); + + let second_block = high_gas_dev + .produce_block_from_mempool(100) + .await + .expect("higher-gas block production should execute the deferred tx"); + assert_eq!(second_block.tx_count, 1); + assert_eq!(second_block.gas_used, 30_000); + + let pool = mempool.read().await; + assert_eq!(pool.len(), 0); + assert!(!pool.contains(&executed_hash.0)); + assert!(!pool.contains(&deferred_hash.0)); + } + #[tokio::test] async fn provider_rejects_wrong_chain_id_without_mempool_insert() { let chain_index = Arc::new(PersistentChainIndex::in_memory().expect("in-memory index")); @@ -1509,6 +1589,11 @@ mod tests { assert_eq!(txs.len(), 1); assert_eq!(txs[0].hash, tx_hash); assert_eq!(txs[0].block_number, Some(U64::from(1u64))); + assert_eq!(txs[0].gas_price, Some(U256::from(1u64))); + assert_eq!(txs[0].nonce, U64::ZERO); + assert_eq!(txs[0].tx_type, U64::ZERO); + assert_ne!(txs[0].r, U256::ZERO); + assert_ne!(txs[0].s, U256::ZERO); } _ => panic!("expected full transactions in block"), } diff --git a/crates/app/server/src/dev.rs b/crates/app/server/src/dev.rs index 3490a3f..5f79c8a 100644 --- a/crates/app/server/src/dev.rs +++ b/crates/app/server/src/dev.rs @@ -384,7 +384,7 @@ where let height = self.state.height.fetch_add(1, Ordering::SeqCst); let parent_hash = *self.state.last_hash.read().await; let timestamp = current_timestamp(); - let tx_count = transactions.len(); + let proposed_tx_count = transactions.len(); // Build the 
block let block = BlockBuilder::::new() @@ -407,12 +407,13 @@ where // Calculate gas used and success/failure counts let gas_used: u64 = result.tx_results.iter().map(|r| r.gas_used).sum(); + let executed_tx_count = result.tx_results.len(); let successful_txs = result .tx_results .iter() .filter(|r| r.response.is_ok()) .count(); - let failed_txs = tx_count - successful_txs; + let failed_txs = executed_tx_count.saturating_sub(successful_txs); // Convert StateChange to Operation and commit via async storage let operations = state_changes_to_operations(changes); @@ -506,16 +507,18 @@ where } tracing::info!( - "Produced block {} with {} txs, {} gas used", + "Produced block {} with {} executed txs ({} proposed, {} skipped), {} gas used", height, - tx_count, + executed_tx_count, + proposed_tx_count, + result.txs_skipped, gas_used ); Ok(ProducedBlock { height, hash: block_hash, - tx_count, + tx_count: executed_tx_count, gas_used, successful_txs, failed_txs, @@ -594,7 +597,7 @@ where /// Produce a block with transactions from the mempool. /// /// Selects up to `max_txs` transactions from the mempool, - /// produces a block, and removes the included transactions from the mempool. + /// produces a block, and finalizes only the transactions that were executed. /// /// Returns an error if no mempool is configured. pub async fn produce_block_from_mempool( @@ -606,13 +609,18 @@ where .as_ref() .ok_or_else(|| ServerError::Execution("no mempool configured".to_string()))?; - // Select transactions from mempool + if max_txs == 0 { + return self.produce_block_with_txs(vec![]).await; + } + + // Build a gas-aware proposal so over-budget transactions remain queued. 
let selected = { let mut pool = mempool.write().await; - pool.select(max_txs) + pool.propose(self.config.gas_limit, max_txs).0 }; - // Get transaction hashes before converting (for removal after block production) + // Record the proposal before cloning the owned transactions so we can finalize + // executed transactions and requeue anything that was not committed. let tx_hashes: Vec<_> = selected.iter().map(|tx| tx.tx_id()).collect(); // Convert Arc to Tx @@ -621,13 +629,23 @@ where .map(|arc_tx| (*arc_tx).clone()) .collect(); - // Produce the block - let result = self.produce_block_with_txs(transactions).await?; + // Produce the block. On failure, requeue the entire proposal. + let result = match self.produce_block_with_txs(transactions).await { + Ok(result) => result, + Err(err) => { + if !tx_hashes.is_empty() { + let mut pool = mempool.write().await; + pool.finalize(&[]); + } + return Err(err); + } + }; - // Remove included transactions from mempool + // Confirm only the transactions STF actually executed and requeue the rest. if !tx_hashes.is_empty() { + let executed: Vec<_> = tx_hashes.iter().copied().take(result.tx_count).collect(); let mut pool = mempool.write().await; - pool.remove_many(&tx_hashes); + pool.finalize(&executed); } Ok(result) diff --git a/crates/app/tx/eth/src/mempool.rs b/crates/app/tx/eth/src/mempool.rs index d602fbc..77290b6 100644 --- a/crates/app/tx/eth/src/mempool.rs +++ b/crates/app/tx/eth/src/mempool.rs @@ -166,8 +166,9 @@ impl Transaction for TxContext { impl AuthenticationPayload for TxContext { fn authentication_payload(&self) -> SdkResult { - let sender: [u8; 20] = self.sender_address().into(); - Message::new(&sender) + // Pass the canonical transaction context through authentication so EOAs + // can verify both the recovered sender and the transaction nonce. 
+ Message::new(self) } } diff --git a/crates/rpc/chain-index/src/index.rs b/crates/rpc/chain-index/src/index.rs index 62a8e4e..5bf9a72 100644 --- a/crates/rpc/chain-index/src/index.rs +++ b/crates/rpc/chain-index/src/index.rs @@ -335,6 +335,7 @@ impl PersistentChainIndex { let contract_address_bytes: Option> = row.get(8)?; let status: i64 = row.get(9)?; let tx_type: i64 = row.get(10)?; + let effective_gas_price_bytes: Vec = row.get(11)?; let to = to_bytes .as_deref() @@ -354,6 +355,7 @@ impl PersistentChainIndex { to, cumulative_gas_used: cumulative_gas_used as u64, gas_used: gas_used as u64, + effective_gas_price: alloy_primitives::U256::from_be_slice(&effective_gas_price_bytes), contract_address, logs: vec![], // logs are stored separately status: status as u8, @@ -491,10 +493,14 @@ impl ChainIndex for PersistentChainIndex { let conn = self.read_conn()?; let result = conn.query_row( - "SELECT transaction_hash, transaction_index, block_hash, block_number, - from_addr, to_addr, cumulative_gas_used, gas_used, contract_address, - status, tx_type - FROM receipts WHERE transaction_hash = ?", + "SELECT receipts.transaction_hash, receipts.transaction_index, receipts.block_hash, + receipts.block_number, receipts.from_addr, receipts.to_addr, + receipts.cumulative_gas_used, receipts.gas_used, + receipts.contract_address, receipts.status, receipts.tx_type, + transactions.gas_price + FROM receipts + INNER JOIN transactions ON transactions.hash = receipts.transaction_hash + WHERE receipts.transaction_hash = ?", params![hash.as_slice()], Self::row_to_stored_receipt, ); @@ -837,6 +843,7 @@ mod tests { to: Some(Address::repeat_byte(0x02)), cumulative_gas_used: 21000 * (index as u64 + 1), gas_used: 21000, + effective_gas_price: U256::ZERO, contract_address: None, logs: vec![], status: if success { 1 } else { 0 }, diff --git a/crates/rpc/chain-index/src/integration.rs b/crates/rpc/chain-index/src/integration.rs index 6ce19ae..3e4a282 100644 --- 
a/crates/rpc/chain-index/src/integration.rs +++ b/crates/rpc/chain-index/src/integration.rs @@ -3,12 +3,15 @@ //! This module provides utilities to bridge the STF execution output with the //! chain indexer, converting blocks, transactions, and events into storable formats. -use alloy_primitives::{Address, Bytes, B256, U256}; +use std::any::Any; + +use alloy_primitives::{keccak256, Address, Bytes, B256, U256}; use evolve_core::events_api::Event; use evolve_core::AccountId; use evolve_rpc_types::account_id_to_address; use evolve_stf::results::{BlockResult, TxResult}; use evolve_stf_traits::{Block, Transaction}; +use evolve_tx_eth::{TxContext, TxEnvelope}; use sha2::{Digest, Sha256}; use crate::types::{StoredBlock, StoredLog, StoredReceipt, StoredTransaction}; @@ -79,6 +82,67 @@ impl BlockMetadata { } } +#[derive(Debug, Clone)] +struct EthereumIndexedTransaction { + value: U256, + gas_price: U256, + input: Bytes, + nonce: u64, + v: u64, + r: U256, + s: U256, + tx_type: u8, + chain_id: Option, +} + +fn legacy_signature_v(y_parity: bool, chain_id: Option) -> u64 { + match chain_id { + Some(chain_id) => 35u64 + .saturating_add(chain_id.saturating_mul(2)) + .saturating_add(u64::from(y_parity)), + None => 27 + u64::from(y_parity), + } +} + +fn ethereum_tx_fields(tx: &Tx) -> Option { + let tx = (tx as &dyn Any).downcast_ref::()?; + let gas_price = U256::from(tx.effective_gas_price()); + + Some(match tx.envelope() { + TxEnvelope::Legacy(legacy) => { + let signature = legacy.signature(); + let chain_id = legacy.tx().chain_id; + + EthereumIndexedTransaction { + value: legacy.tx().value, + gas_price, + input: Bytes::copy_from_slice(legacy.tx().input.as_ref()), + nonce: legacy.tx().nonce, + v: legacy_signature_v(signature.v(), chain_id), + r: signature.r(), + s: signature.s(), + tx_type: 0, + chain_id, + } + } + TxEnvelope::Eip1559(eip1559) => { + let signature = eip1559.signature(); + + EthereumIndexedTransaction { + value: eip1559.tx().value, + gas_price, + input: 
Bytes::copy_from_slice(eip1559.tx().input.as_ref()), + nonce: eip1559.tx().nonce, + v: u64::from(signature.v()), + r: signature.r(), + s: signature.s(), + tx_type: 2, + chain_id: Some(eip1559.tx().chain_id), + } + } + }) +} + /// Convert an STF Event to a StoredLog. /// /// Events in the Evolve system have: @@ -90,12 +154,7 @@ pub fn event_to_stored_log(event: &Event) -> StoredLog { let address = account_id_to_address(event.source); // Create topic[0] from event name hash - let name_hash = { - let mut hasher = Sha256::new(); - hasher.update(event.name.as_bytes()); - let result = hasher.finalize(); - B256::from_slice(&result) - }; + let name_hash = keccak256(event.name.as_bytes()); // Event contents become the data let data = Bytes::from(event.contents.as_vec().unwrap_or_default()); @@ -118,7 +177,7 @@ pub fn build_index_data( ) -> (StoredBlock, Vec, Vec) where B: Block, - Tx: Transaction, + Tx: Transaction + Any, { let block_number = block.context().height; let block_hash = metadata.hash; @@ -128,8 +187,8 @@ where let total_gas_used: u64 = result.tx_results.iter().map(|r| r.gas_used).sum(); // Build stored transactions and receipts - let mut stored_txs = Vec::with_capacity(txs.len()); - let mut stored_receipts = Vec::with_capacity(txs.len()); + let mut stored_txs = Vec::with_capacity(result.tx_results.len()); + let mut stored_receipts = Vec::with_capacity(result.tx_results.len()); let mut cumulative_gas = 0u64; for (idx, (tx, tx_result)) in txs.iter().zip(result.tx_results.iter()).enumerate() { @@ -175,7 +234,7 @@ where timestamp: metadata.timestamp, gas_used: total_gas_used, gas_limit: metadata.gas_limit, - transaction_count: txs.len() as u32, + transaction_count: stored_txs.len() as u32, miner: metadata.miner, extra_data: metadata.extra_data.clone(), }; @@ -184,7 +243,7 @@ where } /// Build a StoredTransaction from an STF Transaction. 
-fn build_stored_transaction( +fn build_stored_transaction( tx: &Tx, tx_hash: B256, block_number: u64, @@ -194,15 +253,23 @@ fn build_stored_transaction( ) -> StoredTransaction { let from = resolve_sender_address(tx); let to = resolve_recipient_address(tx); + let eth_fields = ethereum_tx_fields(tx); // Extract value from funds (sum of all fungible assets as a simple approach) - let value = tx - .funds() - .iter() - .fold(U256::ZERO, |acc, fa| acc + U256::from(fa.amount)); - - // Serialize the request as input data - let input = Bytes::from(borsh::to_vec(tx.request()).unwrap_or_default()); + let value = eth_fields + .as_ref() + .map(|fields| fields.value) + .unwrap_or_else(|| { + tx.funds() + .iter() + .fold(U256::ZERO, |acc, fa| acc + U256::from(fa.amount)) + }); + + // Preserve the original Ethereum calldata when available. + let input = eth_fields + .as_ref() + .map(|fields| fields.input.clone()) + .unwrap_or_else(|| Bytes::from(borsh::to_vec(tx.request()).unwrap_or_default())); StoredTransaction { hash: tx_hash, @@ -213,19 +280,34 @@ fn build_stored_transaction( to, value, gas: tx.gas_limit(), - gas_price: U256::ZERO, // TODO: Support gas pricing + gas_price: eth_fields + .as_ref() + .map(|fields| fields.gas_price) + .unwrap_or(U256::ZERO), input, - nonce: 0, // TODO: Track nonces if needed - v: 0, - r: U256::ZERO, - s: U256::ZERO, - tx_type: 0, // Legacy type - chain_id: Some(chain_id), + nonce: eth_fields.as_ref().map(|fields| fields.nonce).unwrap_or(0), + v: eth_fields.as_ref().map(|fields| fields.v).unwrap_or(0), + r: eth_fields + .as_ref() + .map(|fields| fields.r) + .unwrap_or(U256::ZERO), + s: eth_fields + .as_ref() + .map(|fields| fields.s) + .unwrap_or(U256::ZERO), + tx_type: eth_fields + .as_ref() + .map(|fields| fields.tx_type) + .unwrap_or(0), + chain_id: match eth_fields.as_ref() { + Some(fields) => fields.chain_id, + None => Some(chain_id), + }, } } /// Build a StoredReceipt from an STF TxResult. 
-fn build_stored_receipt( +fn build_stored_receipt( tx: &Tx, tx_result: &TxResult, tx_hash: B256, @@ -236,6 +318,7 @@ fn build_stored_receipt( ) -> StoredReceipt { let from = resolve_sender_address(tx); let to = resolve_recipient_address(tx); + let eth_fields = ethereum_tx_fields(tx); // Convert events to logs let logs: Vec = tx_result.events.iter().map(event_to_stored_log).collect(); @@ -252,10 +335,17 @@ fn build_stored_receipt( to, cumulative_gas_used, gas_used: tx_result.gas_used, + effective_gas_price: eth_fields + .as_ref() + .map(|fields| fields.gas_price) + .unwrap_or(U256::ZERO), contract_address: None, // TODO: Detect contract creation logs, status, - tx_type: 0, + tx_type: eth_fields + .as_ref() + .map(|fields| fields.tx_type) + .unwrap_or(0), } } @@ -317,7 +407,7 @@ pub fn index_block( ) -> crate::error::ChainIndexResult<()> where B: Block, - Tx: Transaction, + Tx: Transaction + Any, I: crate::index::ChainIndex, { let (stored_block, stored_txs, stored_receipts) = build_index_data(block, result, metadata); @@ -341,8 +431,8 @@ mod tests { // Address should be derived from AccountId assert_eq!(log.address, account_id_to_address(AccountId::from_u64(42))); - // Should have one topic (the name hash) - assert_eq!(log.topics.len(), 1); + // Topic[0] should match the Ethereum-compatible event-name hash. 
+ assert_eq!(log.topics, vec![keccak256(b"Transfer")]); // Data should match contents assert_eq!(log.data.as_ref(), &[1, 2, 3, 4]); } diff --git a/crates/rpc/chain-index/src/provider.rs b/crates/rpc/chain-index/src/provider.rs index 2bde5c5..a802ca9 100644 --- a/crates/rpc/chain-index/src/provider.rs +++ b/crates/rpc/chain-index/src/provider.rs @@ -849,6 +849,7 @@ mod tests { to: Some(Address::repeat_byte(0x42)), cumulative_gas_used: 21_000, gas_used: 21_000, + effective_gas_price: U256::from(1u64), contract_address: None, logs: vec![], status: 1, diff --git a/crates/rpc/chain-index/src/types.rs b/crates/rpc/chain-index/src/types.rs index 6e39092..e3a3b39 100644 --- a/crates/rpc/chain-index/src/types.rs +++ b/crates/rpc/chain-index/src/types.rs @@ -153,6 +153,8 @@ pub struct StoredReceipt { pub cumulative_gas_used: u64, /// Gas used by this transaction. pub gas_used: u64, + /// Effective gas price paid by this transaction. + pub effective_gas_price: U256, /// Contract address if this was a contract creation. pub contract_address: Option
, /// Logs emitted by this transaction. @@ -190,7 +192,7 @@ impl StoredReceipt { to: self.to, cumulative_gas_used: U64::from(self.cumulative_gas_used), gas_used: U64::from(self.gas_used), - effective_gas_price: U256::ZERO, + effective_gas_price: self.effective_gas_price, contract_address: self.contract_address, logs, logs_bloom: Bytes::new(), diff --git a/crates/rpc/types/src/log.rs b/crates/rpc/types/src/log.rs index 2de7758..2d7ed66 100644 --- a/crates/rpc/types/src/log.rs +++ b/crates/rpc/types/src/log.rs @@ -93,15 +93,10 @@ pub fn event_to_log( tx_index: u64, log_index: u64, ) -> Option { - use sha2::{Digest, Sha256}; - let address = crate::account_id_to_address(event.source); // Hash the event name to create the first topic (similar to Solidity event signature) - let mut hasher = Sha256::new(); - hasher.update(event.name.as_bytes()); - let name_hash = hasher.finalize(); - let topic0 = B256::from_slice(&name_hash); + let topic0 = alloy_primitives::keccak256(event.name.as_bytes()); // Event contents become the data let data_bytes = event.contents.as_bytes().ok()?; @@ -124,7 +119,6 @@ mod tests { use super::*; use evolve_core::{events_api::Event, AccountId, Message}; use serde_json::Value; - use sha2::{Digest, Sha256}; #[test] fn test_log_serialization() { @@ -166,9 +160,7 @@ mod tests { let log = event_to_log(&event, 7, block_hash, tx_hash, 3, 2).expect("conversion must work"); - let mut hasher = Sha256::new(); - hasher.update(b"Transfer"); - let expected_topic0 = B256::from_slice(&hasher.finalize()); + let expected_topic0 = alloy_primitives::keccak256(b"Transfer"); assert_eq!(log.address, crate::account_id_to_address(event.source)); assert_eq!(log.topics, vec![expected_topic0]); From 25651acf6bb8ff62d7e47f217ed90e59f66a37e1 Mon Sep 17 00:00:00 2001 From: tac0turtle Date: Fri, 6 Mar 2026 15:45:29 +0100 Subject: [PATCH 2/4] deduplicate --- bin/evd/Cargo.toml | 22 +- bin/evd/src/main.rs | 840 ++----------------------------- bin/testapp/src/lib.rs | 235 ++++++++- 
crates/app/node/src/config.rs | 22 + crates/app/node/src/lib.rs | 180 ++++++- crates/rpc/evnode/Cargo.toml | 9 + crates/rpc/evnode/src/lib.rs | 2 + crates/rpc/evnode/src/runner.rs | 543 ++++++++++++++++++++ crates/rpc/evnode/src/service.rs | 120 ++++- 9 files changed, 1122 insertions(+), 851 deletions(-) create mode 100644 crates/rpc/evnode/src/runner.rs diff --git a/bin/evd/Cargo.toml b/bin/evd/Cargo.toml index 00b44fd..8c1c1c4 100644 --- a/bin/evd/Cargo.toml +++ b/bin/evd/Cargo.toml @@ -12,35 +12,15 @@ name = "evd" path = "src/main.rs" [dependencies] -# Workspace dependencies -evolve_core.workspace = true -evolve_stf.workspace = true -evolve_stf_traits.workspace = true -evolve_mempool.workspace = true -evolve_server.workspace = true -evolve_storage.workspace = true evolve_node.workspace = true evolve_evnode = { workspace = true, features = ["testapp"] } +evolve_storage.workspace = true evolve_testapp.workspace = true -evolve_token.workspace = true -evolve_scheduler.workspace = true -evolve_testing.workspace = true -evolve_tx_eth.workspace = true -evolve_chain_index.workspace = true -evolve_eth_jsonrpc.workspace = true -evolve_rpc_types.workspace = true -# External dependencies -alloy-primitives.workspace = true commonware-runtime.workspace = true -# External dependencies -futures.workspace = true clap = { workspace = true } -tokio.workspace = true tracing.workspace = true -tracing-subscriber.workspace = true -borsh.workspace = true [lints] workspace = true diff --git a/bin/evd/src/main.rs b/bin/evd/src/main.rs index a5c6c3e..3fa8dd6 100644 --- a/bin/evd/src/main.rs +++ b/bin/evd/src/main.rs @@ -1,103 +1,19 @@ -//! Evolve Node Daemon (evd) -//! -//! A full node implementation that exposes the Evolve execution layer via gRPC -//! for external consensus integration, with JSON-RPC for queries and mempool -//! for transaction collection. -//! -//! ## Architecture -//! -//! ```text -//! ┌─────────────────┐ gRPC ┌──────────────────────────────────┐ -//! 
│ External │◄─────────────►│ evd │ -//! │ Consensus │ │ │ -//! │ (ev-node) │ │ ┌────────┐ ┌────────┐ │ -//! └─────────────────┘ │ │ STF │ │Mempool │ │ -//! │ └────────┘ └────────┘ │ -//! ┌──────────┐ JSON-RPC │ ┌────────┐ ┌────────┐ │ -//! │ Client │◄─────────────►│ │ RPC │ │ QMDB │ │ -//! └──────────┘ │ │ Server │ │Storage │ │ -//! │ └────────┘ └────────┘ │ -//! └──────────────────────────────────┘ -//! ``` -//! -//! ## Transaction Formats -//! -//! - **ETH**: Standard Ethereum RLP-encoded transactions (type 0x02) -//! - **Micro**: Minimal fixed-layout transactions (type 0x83) for high throughput -//! -//! ## Usage -//! -//! ```bash -//! # Start with default testapp genesis -//! evd run -//! -//! # Start with a custom genesis file -//! evd run --genesis-file path/to/genesis.json -//! -//! # Custom addresses -//! evd run --grpc-addr 0.0.0.0:50051 --rpc-addr 0.0.0.0:8545 -//! -//! # Initialize genesis only -//! evd init -//! ``` -//! -//! ## Custom Genesis JSON Format -//! -//! When `--genesis-file` is provided, accounts are pre-registered as ETH EOAs -//! with deterministic IDs derived from their addresses. -//! -//! ```json -//! { -//! "token": { -//! "name": "evolve", -//! "symbol": "ev", -//! "decimals": 6, -//! "icon_url": "https://lol.wtf", -//! "description": "The evolve coin" -//! }, -//! "minter_id": 100002, -//! "accounts": [ -//! { "eth_address": "0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266", "balance": 1000000000 } -//! ] -//! } -//! ``` +//! Evolve external-consensus node daemon entrypoint. 
-use std::sync::atomic::{AtomicU64, Ordering}; -use std::sync::Arc; -use std::time::Duration; - -use alloy_primitives::{keccak256, Address, B256, U256}; use clap::{Args, Parser, Subcommand}; -use commonware_runtime::tokio::{Config as TokioConfig, Runner}; -use commonware_runtime::{Runner as RunnerTrait, Spawner}; -use evolve_chain_index::{ - build_index_data, BlockMetadata, ChainIndex, ChainStateProvider, ChainStateProviderConfig, - PersistentChainIndex, StateQuerier, StorageStateQuerier, -}; -use evolve_core::{AccountId, ReadonlyKV}; -use evolve_eth_jsonrpc::{start_server_with_subscriptions, RpcServerConfig, SubscriptionManager}; -use evolve_evnode::{EvnodeServer, EvnodeServerConfig, ExecutorServiceConfig, OnBlockExecuted}; -use evolve_mempool::{new_shared_mempool, Mempool, SharedMempool}; +use commonware_runtime::tokio::Context as TokioContext; +use evolve_evnode::run_external_consensus_node_eth; use evolve_node::{ - init_tracing as init_node_tracing, resolve_node_config, resolve_node_config_init, - GenesisOutput, InitArgs, NodeConfig, RunArgs, -}; -use evolve_rpc_types::SyncStatus; -use evolve_scheduler::scheduler_account::SchedulerRef; -use evolve_server::{ - load_chain_state, save_chain_state, BlockBuilder, ChainState, CHAIN_STATE_KEY, + init_dev_node, init_tracing as init_node_tracing, resolve_node_config, + resolve_node_config_init, InitArgs, NodeConfig, RunArgs, }; -use evolve_stf_traits::{AccountsCodeStorage, StateChange}; -use evolve_storage::{Operation, QmdbStorage, Storage, StorageConfig}; +use evolve_storage::{QmdbStorage, StorageConfig}; use evolve_testapp::genesis_config::{load_genesis_config, EvdGenesisConfig, EvdGenesisResult}; use evolve_testapp::{ - build_mempool_stf, default_gas_config, do_eth_genesis_inner, install_account_codes, - PLACEHOLDER_ACCOUNT, + build_genesis_mempool_stf, build_mempool_stf_from_scheduler, build_testapp_codes, + run_evd_genesis_output, }; -use evolve_testing::server_mocks::AccountStorageMock; -use 
evolve_token::account::TokenRef; -use evolve_tx_eth::TxContext; -use evolve_tx_eth::{register_runtime_contract_account, resolve_or_create_eoa_account}; + #[derive(Parser)] #[command(name = "evd")] #[command(about = "Evolve node daemon with gRPC execution layer")] @@ -132,6 +48,9 @@ struct EvdInitCustom { genesis_file: Option, } +type NodeStorage = QmdbStorage; +type StorageError = Box; + fn main() { let cli = Cli::parse(); @@ -139,729 +58,56 @@ fn main() { Commands::Run(args) => { let config = resolve_node_config(&args.common, &args.native); init_node_tracing(&config.observability.log_level); - let genesis_config = match load_genesis_config(args.custom.genesis_file.as_deref()) { - Ok(genesis_config) => genesis_config, - Err(err) => { - tracing::error!("{err}"); - std::process::exit(2); - } - }; + let genesis_config = parse_genesis_config(args.custom.genesis_file.as_deref()); run_node(config, genesis_config); } Commands::Init(args) => { let config = resolve_node_config_init(&args.common); init_node_tracing(&config.observability.log_level); - let genesis_config = match load_genesis_config(args.custom.genesis_file.as_deref()) { - Ok(genesis_config) => genesis_config, - Err(err) => { - tracing::error!("{err}"); - std::process::exit(2); - } - }; + let genesis_config = parse_genesis_config(args.custom.genesis_file.as_deref()); init_genesis(&config.storage.path, genesis_config); } } } -type TokioContext = commonware_runtime::tokio::Context; -type NodeStorage = QmdbStorage; -type SharedChainIndex = Arc; -type RpcMempool = SharedMempool>; - -struct RpcRuntimeHandle { - stop_fn: Option>, -} - -impl RpcRuntimeHandle { - fn new(stop_fn: impl FnOnce() + Send + 'static) -> Self { - Self { - stop_fn: Some(Box::new(stop_fn)), - } - } - - fn stop(mut self) { - if let Some(stop_fn) = self.stop_fn.take() { - stop_fn(); - } - } -} - -async fn init_storage_and_genesis( - context: TokioContext, - storage_config: StorageConfig, - genesis_config: Option, -) -> (NodeStorage, 
EvdGenesisResult, u64) { - let storage = QmdbStorage::new(context, storage_config) - .await - .expect("failed to create storage"); - - let codes = build_codes(); - tracing::info!("Installed account codes: {:?}", codes.list_identifiers()); - - match load_chain_state::(&storage) { - Some(state) => { - tracing::info!("Resuming from existing state at height {}", state.height); - (storage, state.genesis_result, state.height) - } - None => { - tracing::info!("No existing state found, running genesis..."); - let output = run_genesis(&storage, &codes, genesis_config.as_ref()); - commit_genesis(&storage, output.changes, &output.genesis_result) - .await - .expect("genesis commit failed"); - tracing::info!("Genesis complete: {:?}", output.genesis_result); - (storage, output.genesis_result, 1) - } - } -} - -fn init_chain_index(config: &NodeConfig) -> Option { - if !config.rpc.enabled && !config.rpc.enable_block_indexing { - return None; - } - - let chain_index_db_path = - std::path::PathBuf::from(&config.storage.path).join("chain-index.sqlite"); - let index = Arc::new( - PersistentChainIndex::new(&chain_index_db_path) - .expect("failed to open chain index database"), - ); - if let Err(err) = index.initialize() { - tracing::warn!("Failed to initialize chain index: {:?}", err); - } - Some(index) -} - -async fn start_rpc_server( - config: &NodeConfig, - storage: NodeStorage, - mempool: RpcMempool, - chain_index: &Option, - token_account_id: AccountId, -) -> Option { - if !config.rpc.enabled { - return None; - } - - let subscriptions = Arc::new(SubscriptionManager::new()); - let codes_for_rpc = Arc::new(build_codes()); - let state_provider_config = ChainStateProviderConfig { - chain_id: config.chain.chain_id, - protocol_version: "0x1".to_string(), - gas_price: U256::ZERO, - sync_status: SyncStatus::NotSyncing(false), - }; - - let state_querier: Arc = - Arc::new(StorageStateQuerier::new(storage, token_account_id)); - let state_provider = ChainStateProvider::with_mempool( - 
Arc::clone(chain_index.as_ref().expect("chain index required for RPC")), - state_provider_config, - codes_for_rpc, - mempool, - ) - .with_state_querier(state_querier); - - let rpc_addr = config.parsed_rpc_addr(); - let server_config = RpcServerConfig { - http_addr: rpc_addr, - chain_id: config.chain.chain_id, - }; - - tracing::info!("Starting JSON-RPC server on {}", rpc_addr); - let handle = - start_server_with_subscriptions(server_config, state_provider, Arc::clone(&subscriptions)) - .await - .expect("failed to start RPC server"); - - Some(RpcRuntimeHandle::new(move || { - handle.stop().expect("failed to stop RPC server"); - })) -} - -fn build_on_block_executed( - storage: NodeStorage, - chain_index: Option, - initial_height: u64, - callback_chain_id: u64, - callback_max_gas: u64, - callback_indexing_enabled: bool, -) -> (OnBlockExecuted, Arc) { - let initial_parent_hash = resolve_initial_parent_hash(chain_index.as_ref(), initial_height); - let parent_hash = Arc::new(std::sync::RwLock::new(initial_parent_hash)); - let current_height = Arc::new(AtomicU64::new(initial_height)); - let parent_hash_for_callback = Arc::clone(&parent_hash); - let current_height_for_callback = Arc::clone(¤t_height); - - let on_block_executed: OnBlockExecuted = Arc::new(move |info| { - let operations = state_changes_to_operations(info.state_changes); - let commit_hash = futures::executor::block_on(async { - storage - .batch(operations) - .await - .expect("storage batch failed"); - storage.commit().await.expect("storage commit failed") - }); - let state_root = B256::from_slice(commit_hash.as_bytes()); - - let prev_parent = *parent_hash_for_callback.read().unwrap(); - let block_hash = compute_block_hash(info.height, info.timestamp, prev_parent); - let metadata = BlockMetadata::new( - block_hash, - prev_parent, - state_root, - info.timestamp, - callback_max_gas, - Address::ZERO, - callback_chain_id, - ); - - let block = BlockBuilder::::new() - .number(info.height) - .timestamp(info.timestamp) 
- .transactions(info.transactions) - .build(); - let (stored_block, stored_txs, stored_receipts) = - build_index_data(&block, &info.block_result, &metadata); - - if callback_indexing_enabled { - if let Some(ref index) = chain_index { - if let Err(err) = index.store_block(stored_block, stored_txs, stored_receipts) { - tracing::warn!("Failed to index block {}: {:?}", info.height, err); - } else { - tracing::debug!( - "Indexed block {} (hash={}, state_root={})", - info.height, - block_hash, - state_root - ); - } - } - } - - *parent_hash_for_callback.write().unwrap() = block_hash; - current_height_for_callback.store(info.height, Ordering::SeqCst); - }); - - (on_block_executed, current_height) -} - -fn resolve_initial_parent_hash( - chain_index: Option<&SharedChainIndex>, - initial_height: u64, -) -> B256 { - let Some(index) = chain_index else { - return B256::ZERO; - }; - - match index.get_block(initial_height) { - Ok(Some(block)) => { - tracing::info!("Seeding parent hash from indexed block {}", initial_height); - return block.hash; - } - Ok(None) => {} - Err(err) => { - tracing::warn!( - "Failed to read indexed block {} while seeding parent hash: {:?}", - initial_height, - err - ); - } - } - - let latest_indexed = match index.latest_block_number() { - Ok(latest) => latest, - Err(err) => { - tracing::warn!( - "Failed to read latest indexed block while seeding parent hash: {:?}", - err - ); - return B256::ZERO; - } - }; - - let Some(latest_height) = latest_indexed else { - return B256::ZERO; - }; - - if latest_height != initial_height { - tracing::warn!( - "Chain state height {} does not match indexed head {}; seeding parent hash from indexed head", - initial_height, - latest_height - ); - } - - match index.get_block(latest_height) { - Ok(Some(block)) => block.hash, - Ok(None) => { - tracing::warn!( - "Indexed head {} missing block payload while seeding parent hash", - latest_height - ); - B256::ZERO - } +fn parse_genesis_config(path: Option<&str>) -> Option { + match 
load_genesis_config(path) { + Ok(genesis_config) => genesis_config, Err(err) => { - tracing::warn!( - "Failed to read indexed head block {} while seeding parent hash: {:?}", - latest_height, - err - ); - B256::ZERO - } - } -} - -fn log_server_configuration(config: &NodeConfig, initial_height: u64) { - let grpc_addr = config.parsed_grpc_addr(); - tracing::info!("Starting gRPC server on {}", grpc_addr); - tracing::info!("Configuration:"); - tracing::info!(" - Chain ID: {}", config.chain.chain_id); - tracing::info!(" - gRPC compression: {}", config.grpc.enable_gzip); - tracing::info!(" - JSON-RPC: {}", config.rpc.enabled); - tracing::info!(" - Block indexing: {}", config.rpc.enable_block_indexing); - tracing::info!(" - Initial height: {}", initial_height); -} - -async fn run_server_with_shutdown( - serve_future: F, - context_for_shutdown: TokioContext, - shutdown_timeout_secs: u64, -) where - F: std::future::Future>, - E: std::fmt::Display, -{ - tokio::pin!(serve_future); - tokio::select! { - result = &mut serve_future => { - if let Err(err) = result { - tracing::error!("gRPC server error: {}", err); - } - } - _ = tokio::signal::ctrl_c() => { - tracing::info!("Received Ctrl+C, shutting down..."); - context_for_shutdown - .stop(0, Some(Duration::from_secs(shutdown_timeout_secs))) - .await - .expect("shutdown failed"); + tracing::error!("{err}"); + std::process::exit(2); } } } -async fn persist_chain_state( - storage: &NodeStorage, - current_height: &Arc, - genesis_result: EvdGenesisResult, -) { - let chain_state = ChainState { - height: current_height.load(Ordering::SeqCst), - genesis_result, - }; - if let Err(err) = save_chain_state(storage, &chain_state).await { - tracing::error!("Failed to save chain state: {}", err); - } -} - -fn stop_rpc_server(rpc_handle: Option) { - if let Some(handle) = rpc_handle { - tracing::info!("Stopping JSON-RPC server..."); - handle.stop(); - } -} - fn run_node(config: NodeConfig, genesis_config: Option) { - tracing::info!("=== Evolve 
Node Daemon (evd) ==="); - std::fs::create_dir_all(&config.storage.path).expect("failed to create data directory"); - - let storage_config = StorageConfig { - path: config.storage.path.clone().into(), - ..Default::default() - }; - let runtime_config = TokioConfig::default() - .with_storage_directory(&config.storage.path) - .with_worker_threads(4); - let runner = Runner::new(runtime_config); - - runner.start(move |context| async move { - let context_for_shutdown = context.clone(); - let (storage, genesis_result, initial_height) = - init_storage_and_genesis(context, storage_config, genesis_config).await; - - let stf = build_mempool_stf(default_gas_config(), genesis_result.scheduler); - let mempool: RpcMempool = new_shared_mempool(); - let chain_index = init_chain_index(&config); - let rpc_handle = start_rpc_server( - &config, - storage.clone(), - mempool.clone(), - &chain_index, - genesis_result.token, - ) - .await; - - let executor_config = ExecutorServiceConfig::default(); - let (on_block_executed, current_height) = build_on_block_executed( - storage.clone(), - chain_index, - initial_height, - config.chain.chain_id, - executor_config.max_gas, - config.rpc.enable_block_indexing, - ); - log_server_configuration(&config, initial_height); - - let grpc_config = EvnodeServerConfig { - addr: config.parsed_grpc_addr(), - enable_gzip: config.grpc.enable_gzip, - max_message_size: config.grpc_max_message_size_usize(), - executor_config, - }; - let server = - EvnodeServer::with_mempool(grpc_config, stf, storage.clone(), build_codes(), mempool) - .with_on_block_executed(on_block_executed); - - tracing::info!("Server ready. 
Press Ctrl+C to stop."); - run_server_with_shutdown( - server.serve(), - context_for_shutdown, - config.operations.shutdown_timeout_secs, - ) - .await; - - persist_chain_state(&storage, ¤t_height, genesis_result).await; - stop_rpc_server(rpc_handle); - tracing::info!("Shutdown complete."); - }); + run_external_consensus_node_eth( + config, + build_genesis_mempool_stf, + |genesis: &EvdGenesisResult| build_mempool_stf_from_scheduler(genesis.scheduler), + build_testapp_codes, + move |stf, codes, storage| { + run_evd_genesis_output(stf, codes, storage, genesis_config.as_ref()) + }, + build_storage, + ); } fn init_genesis(data_dir: &str, genesis_config: Option) { - tracing::info!("=== Evolve Node Daemon - Genesis Init ==="); - - std::fs::create_dir_all(data_dir).expect("failed to create data directory"); - - let storage_config = StorageConfig { - path: data_dir.into(), - ..Default::default() - }; - - let runtime_config = TokioConfig::default() - .with_storage_directory(data_dir) - .with_worker_threads(1); - - let runner = Runner::new(runtime_config); - - runner.start(move |context| async move { - let storage = QmdbStorage::new(context, storage_config) - .await - .expect("failed to create storage"); - - if load_chain_state::(&storage).is_some() { - tracing::error!("State already initialized; refusing to re-run genesis"); - return; - } - - let codes = build_codes(); - let output = run_genesis(&storage, &codes, genesis_config.as_ref()); - - commit_genesis(&storage, output.changes, &output.genesis_result) - .await - .expect("genesis commit failed"); - - tracing::info!("Genesis complete!"); - tracing::info!(" Token: {:?}", output.genesis_result.token); - tracing::info!(" Scheduler: {:?}", output.genesis_result.scheduler); - }); -} - -fn build_codes() -> AccountStorageMock { - let mut codes = AccountStorageMock::default(); - install_account_codes(&mut codes); - codes -} - -/// Run genesis using the default testapp genesis or a custom genesis config. 
-fn run_genesis( - storage: &S, - codes: &AccountStorageMock, - genesis_config: Option<&EvdGenesisConfig>, -) -> GenesisOutput { - match genesis_config { - Some(config) => run_custom_genesis(storage, codes, config), - None => run_default_genesis(storage, codes), - } -} - -/// Default genesis using ETH-address-derived AccountIds for EOA balances. -fn run_default_genesis( - storage: &S, - codes: &AccountStorageMock, -) -> GenesisOutput { - use evolve_core::BlockContext; - use std::str::FromStr; - - tracing::info!("Running default ETH-mapped genesis..."); - - let gas_config = default_gas_config(); - let stf = build_mempool_stf(gas_config, PLACEHOLDER_ACCOUNT); - let genesis_block = BlockContext::new(0, 0); - let alice_eth_address = std::env::var("GENESIS_ALICE_ETH_ADDRESS") - .ok() - .and_then(|s| Address::from_str(s.trim()).ok()) - .map(Into::into) - .unwrap_or([0xAA; 20]); - let bob_eth_address = std::env::var("GENESIS_BOB_ETH_ADDRESS") - .ok() - .and_then(|s| Address::from_str(s.trim()).ok()) - .map(Into::into) - .unwrap_or([0xBB; 20]); - - let (accounts, state) = stf - .system_exec(storage, codes, genesis_block, |env| { - do_eth_genesis_inner(alice_eth_address, bob_eth_address, env) - }) - .expect("genesis failed"); - - let changes = state.into_changes().expect("failed to get state changes"); - - let genesis_result = EvdGenesisResult { - token: accounts.evolve, - scheduler: accounts.scheduler, - }; - - GenesisOutput { - genesis_result, - changes, - } -} - -/// Custom genesis with ETH EOA accounts from a genesis JSON file. -/// -/// Funds balances at ETH-address-derived AccountIds. 
-fn run_custom_genesis( - storage: &S, - codes: &AccountStorageMock, - genesis_config: &EvdGenesisConfig, -) -> GenesisOutput { - use evolve_core::BlockContext; - - let funded_accounts: Vec<([u8; 20], u128)> = genesis_config - .accounts - .iter() - .filter(|acc| acc.balance > 0) - .map(|acc| { - let addr = acc - .parse_address() - .expect("invalid address in genesis config"); - (addr.into_array(), acc.balance) - }) - .collect(); - let minter = AccountId::from_u64(genesis_config.minter_id); - let metadata = genesis_config.token.to_metadata(); - - let gas_config = default_gas_config(); - let stf = build_mempool_stf(gas_config, PLACEHOLDER_ACCOUNT); - let genesis_block = BlockContext::new(0, 0); - - let (genesis_result, state) = stf - .system_exec(storage, codes, genesis_block, |env| { - let balances: Vec<(AccountId, u128)> = funded_accounts - .iter() - .map( - |(eth_addr, balance)| -> evolve_core::SdkResult<(AccountId, u128)> { - let addr = Address::from(*eth_addr); - Ok((resolve_or_create_eoa_account(addr, env)?, *balance)) - }, - ) - .collect::>>()?; - - let token = TokenRef::initialize(metadata.clone(), balances, Some(minter), env)?.0; - let _token_eth_addr = register_runtime_contract_account(token.0, env)?; - - let scheduler_acc = SchedulerRef::initialize(vec![], vec![], env)?.0; - let _scheduler_eth_addr = register_runtime_contract_account(scheduler_acc.0, env)?; - scheduler_acc.update_begin_blockers(vec![], env)?; - - Ok(EvdGenesisResult { - token: token.0, - scheduler: scheduler_acc.0, - }) - }) - .expect("genesis failed"); - - let changes = state.into_changes().expect("failed to get state changes"); - - GenesisOutput { - genesis_result, - changes, - } -} - -fn compute_block_hash(height: u64, timestamp: u64, parent_hash: B256) -> B256 { - let mut data = Vec::with_capacity(48); - data.extend_from_slice(&height.to_le_bytes()); - data.extend_from_slice(×tamp.to_le_bytes()); - data.extend_from_slice(parent_hash.as_slice()); - keccak256(&data) -} - -async fn 
commit_genesis( - storage: &S, - changes: Vec, - genesis_result: &EvdGenesisResult, -) -> Result<(), Box> { - let mut operations = state_changes_to_operations(changes); - - let chain_state = ChainState { - height: 1, - genesis_result: *genesis_result, - }; - operations.push(Operation::Set { - key: CHAIN_STATE_KEY.to_vec(), - value: borsh::to_vec(&chain_state).map_err(|e| format!("serialize: {e}"))?, - }); - - storage - .batch(operations) - .await - .map_err(|e| format!("batch failed: {:?}", e))?; - - storage - .commit() - .await - .map_err(|e| format!("commit failed: {:?}", e))?; - - Ok(()) -} - -fn state_changes_to_operations(changes: Vec) -> Vec { - changes - .into_iter() - .map(|change| match change { - StateChange::Set { key, value } => Operation::Set { key, value }, - StateChange::Remove { key } => Operation::Remove { key }, - }) - .collect() + init_dev_node( + data_dir, + build_genesis_mempool_stf, + build_testapp_codes, + move |stf, codes, storage| { + run_evd_genesis_output(stf, codes, storage, genesis_config.as_ref()) + }, + build_storage, + ); } -#[cfg(test)] -mod tests { - use super::*; - use evolve_core::encoding::Encodable; - use evolve_core::runtime_api::ACCOUNT_IDENTIFIER_PREFIX; - use evolve_core::Message; - use evolve_storage::MockStorage; - use std::collections::BTreeMap; - use std::sync::{Mutex, MutexGuard}; - - static ENV_VAR_LOCK: Mutex<()> = Mutex::new(()); - - struct EnvVarGuard { - entries: Vec<(&'static str, Option)>, - _guard: MutexGuard<'static, ()>, - } - - impl EnvVarGuard { - fn acquire() -> Self { - let guard = ENV_VAR_LOCK.lock().expect("env var lock poisoned"); - Self { - entries: Vec::new(), - _guard: guard, - } - } - - fn set(&mut self, key: &'static str, value: &str) { - let old = std::env::var(key).ok(); - std::env::set_var(key, value); - self.entries.push((key, old)); - } - } - - impl Drop for EnvVarGuard { - fn drop(&mut self) { - for (key, old) in self.entries.iter().rev() { - if let Some(value) = old { - 
std::env::set_var(key, value); - } else { - std::env::remove_var(key); - } - } - } - } - - fn apply_changes_to_map(changes: Vec) -> BTreeMap, Vec> { - let mut out = BTreeMap::new(); - for change in changes { - match change { - StateChange::Set { key, value } => { - out.insert(key, value); - } - StateChange::Remove { key } => { - out.remove(&key); - } - } - } - out - } - - fn read_token_balance( - state: &BTreeMap, Vec>, - token_account_id: AccountId, - account_id: AccountId, - ) -> u128 { - let mut key = token_account_id.as_bytes().to_vec(); - key.push(1u8); // Token::balances storage prefix - key.extend(account_id.encode().expect("encode account id")); - - match state.get(&key) { - Some(value) => Message::from_bytes(value.clone()) - .get::() - .expect("decode balance"), - None => 0, - } - } - - fn eoa_account_ids(state: &BTreeMap, Vec>) -> Vec { - state - .iter() - .filter_map(|(key, value)| { - if key.len() != 33 || key[0] != ACCOUNT_IDENTIFIER_PREFIX { - return None; - } - let code_id = Message::from_bytes(value.clone()).get::().ok()?; - if code_id != "EthEoaAccount" { - return None; - } - let account_bytes: [u8; 32] = key[1..33].try_into().ok()?; - Some(AccountId::from_bytes(account_bytes)) - }) - .collect() - } - - #[test] - fn default_genesis_funds_eth_mapped_sender_account() { - let mut env = EnvVarGuard::acquire(); - env.set( - "GENESIS_ALICE_ETH_ADDRESS", - "0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266", - ); - env.set( - "GENESIS_BOB_ETH_ADDRESS", - "0x70997970C51812dc3A010C7d01b50e0d17dc79C8", - ); - env.set("GENESIS_ALICE_TOKEN_BALANCE", "1234"); - env.set("GENESIS_BOB_TOKEN_BALANCE", "5678"); - - let storage = MockStorage::new(); - let codes = build_codes(); - let output = run_default_genesis(&storage, &codes); - let state = apply_changes_to_map(output.changes); - - let eoa_ids = eoa_account_ids(&state); - assert_eq!(eoa_ids.len(), 2); - assert!(eoa_ids.iter().any(|id| read_token_balance( - &state, - output.genesis_result.token, - *id - ) == 1234)); - } 
+async fn build_storage( + context: TokioContext, + config: StorageConfig, +) -> Result { + Ok(QmdbStorage::new(context, config).await?) } diff --git a/bin/testapp/src/lib.rs b/bin/testapp/src/lib.rs index b7b03c8..f4c9943 100644 --- a/bin/testapp/src/lib.rs +++ b/bin/testapp/src/lib.rs @@ -3,16 +3,19 @@ pub mod genesis_config; pub mod sim_testing; use crate::eth_eoa::eth_eoa_account::{EthEoaAccount, EthEoaAccountRef}; +use crate::genesis_config::{EvdGenesisConfig, EvdGenesisResult}; use evolve_authentication::AuthenticationTxValidator; use evolve_core::{AccountId, BlockContext, Environment, InvokeResponse, ReadonlyKV, SdkResult}; use evolve_fungible_asset::FungibleAssetMetadata; -use evolve_node::HasTokenAccountId; +use evolve_node::{GenesisOutput, HasTokenAccountId}; use evolve_scheduler::scheduler_account::{Scheduler, SchedulerRef}; use evolve_scheduler::server::{SchedulerBeginBlocker, SchedulerEndBlocker}; use evolve_server::Block; use evolve_stf::execution_state::ExecutionState; use evolve_stf::{Stf, StorageGasConfig}; use evolve_stf_traits::{AccountsCodeStorage, PostTxExecution, WritableAccountsCodeStorage}; +use evolve_storage::Storage; +use evolve_testing::server_mocks::AccountStorageMock; use evolve_token::account::{Token, TokenRef}; use evolve_tx_eth::TxContext; use evolve_tx_eth::{register_runtime_contract_account, resolve_or_create_eoa_account}; @@ -71,6 +74,23 @@ pub fn install_account_codes(codes: &mut impl WritableAccountsCodeStorage) { codes.add_code(EthEoaAccount::new()).unwrap(); } +/// Build the standard account-code storage used by the test app binaries. +pub fn build_testapp_codes() -> AccountStorageMock { + let mut codes = AccountStorageMock::default(); + install_account_codes(&mut codes); + codes +} + +/// Build the bootstrap STF used before the scheduler account exists. 
+pub fn build_genesis_mempool_stf() -> MempoolStf { + build_mempool_stf(default_gas_config(), PLACEHOLDER_ACCOUNT) +} + +/// Build the steady-state STF once the scheduler account is known. +pub fn build_mempool_stf_from_scheduler(scheduler: AccountId) -> MempoolStf { + build_mempool_stf(default_gas_config(), scheduler) +} + #[derive(Clone, Copy, Debug, borsh::BorshSerialize, borsh::BorshDeserialize)] pub struct GenesisAccounts { pub alice: AccountId, @@ -244,3 +264,216 @@ pub fn do_eth_genesis<'a, S: ReadonlyKV, A: AccountsCodeStorage>( Ok((state, accounts)) } + +/// Run the evd genesis flow, producing the persisted evd genesis result. +pub fn run_evd_genesis_output( + stf: &MempoolStf, + codes: &AccountStorageMock, + storage: &S, + genesis_config: Option<&EvdGenesisConfig>, +) -> Result, Box> { + match genesis_config { + Some(config) => run_custom_evd_genesis(stf, codes, storage, config), + None => run_default_evd_genesis(stf, codes, storage), + } +} + +fn run_default_evd_genesis( + stf: &MempoolStf, + codes: &AccountStorageMock, + storage: &S, +) -> Result, Box> { + tracing::info!("Running default ETH-mapped genesis..."); + + let alice_eth_address = + parse_genesis_address_env("GENESIS_ALICE_ETH_ADDRESS").unwrap_or([0xAA; 20]); + let bob_eth_address = + parse_genesis_address_env("GENESIS_BOB_ETH_ADDRESS").unwrap_or([0xBB; 20]); + let genesis_block = BlockContext::new(0, 0); + + let (accounts, state) = stf + .system_exec(storage, codes, genesis_block, |env| { + do_eth_genesis_inner(alice_eth_address, bob_eth_address, env) + }) + .map_err(|e| format!("genesis failed: {e:?}"))?; + let changes = state.into_changes().map_err(|e| format!("{e:?}"))?; + + Ok(GenesisOutput { + genesis_result: EvdGenesisResult { + token: accounts.evolve, + scheduler: accounts.scheduler, + }, + changes, + }) +} + +fn run_custom_evd_genesis( + stf: &MempoolStf, + codes: &AccountStorageMock, + storage: &S, + genesis_config: &EvdGenesisConfig, +) -> Result, Box> { + let funded_accounts = 
genesis_config.funded_accounts()?; + let minter = AccountId::from_u64(genesis_config.minter_id); + let metadata = genesis_config.token.to_metadata(); + let genesis_block = BlockContext::new(0, 0); + + let (genesis_result, state) = stf + .system_exec(storage, codes, genesis_block, |env| { + let balances: Vec<(AccountId, u128)> = funded_accounts + .iter() + .map( + |(eth_addr, balance)| -> evolve_core::SdkResult<(AccountId, u128)> { + let addr = alloy_primitives::Address::from(*eth_addr); + Ok((resolve_or_create_eoa_account(addr, env)?, *balance)) + }, + ) + .collect::>>()?; + + let token = TokenRef::initialize(metadata.clone(), balances, Some(minter), env)?.0; + let _token_eth_addr = register_runtime_contract_account(token.0, env)?; + + let scheduler_acc = SchedulerRef::initialize(vec![], vec![], env)?.0; + let _scheduler_eth_addr = register_runtime_contract_account(scheduler_acc.0, env)?; + scheduler_acc.update_begin_blockers(vec![], env)?; + + Ok(EvdGenesisResult { + token: token.0, + scheduler: scheduler_acc.0, + }) + }) + .map_err(|e| format!("genesis failed: {e:?}"))?; + let changes = state.into_changes().map_err(|e| format!("{e:?}"))?; + + Ok(GenesisOutput { + genesis_result, + changes, + }) +} + +#[cfg(test)] +mod tests { + use super::*; + use evolve_core::encoding::Encodable; + use evolve_core::runtime_api::ACCOUNT_IDENTIFIER_PREFIX; + use evolve_core::Message; + use evolve_storage::MockStorage; + use std::collections::BTreeMap; + use std::sync::{Mutex, MutexGuard}; + + static ENV_VAR_LOCK: Mutex<()> = Mutex::new(()); + + struct EnvVarGuard { + entries: Vec<(&'static str, Option)>, + _guard: MutexGuard<'static, ()>, + } + + impl EnvVarGuard { + fn acquire() -> Self { + let guard = ENV_VAR_LOCK.lock().expect("env var lock poisoned"); + Self { + entries: Vec::new(), + _guard: guard, + } + } + + fn set(&mut self, key: &'static str, value: &str) { + let old = std::env::var(key).ok(); + std::env::set_var(key, value); + self.entries.push((key, old)); + } + } + + 
impl Drop for EnvVarGuard { + fn drop(&mut self) { + for (key, old) in self.entries.iter().rev() { + if let Some(value) = old { + std::env::set_var(key, value); + } else { + std::env::remove_var(key); + } + } + } + } + + fn apply_changes_to_map( + changes: Vec, + ) -> BTreeMap, Vec> { + let mut out = BTreeMap::new(); + for change in changes { + match change { + evolve_stf_traits::StateChange::Set { key, value } => { + out.insert(key, value); + } + evolve_stf_traits::StateChange::Remove { key } => { + out.remove(&key); + } + } + } + out + } + + fn read_token_balance( + state: &BTreeMap, Vec>, + token_account_id: AccountId, + account_id: AccountId, + ) -> u128 { + let mut key = token_account_id.as_bytes().to_vec(); + key.push(1u8); + key.extend(account_id.encode().expect("encode account id")); + + match state.get(&key) { + Some(value) => Message::from_bytes(value.clone()) + .get::() + .expect("decode balance"), + None => 0, + } + } + + fn eoa_account_ids(state: &BTreeMap, Vec>) -> Vec { + state + .iter() + .filter_map(|(key, value)| { + if key.len() != 33 || key[0] != ACCOUNT_IDENTIFIER_PREFIX { + return None; + } + let code_id = Message::from_bytes(value.clone()).get::().ok()?; + if code_id != "EthEoaAccount" { + return None; + } + let account_bytes: [u8; 32] = key[1..33].try_into().ok()?; + Some(AccountId::from_bytes(account_bytes)) + }) + .collect() + } + + #[test] + fn default_evd_genesis_funds_eth_mapped_sender_account() { + let mut env = EnvVarGuard::acquire(); + env.set( + "GENESIS_ALICE_ETH_ADDRESS", + "0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266", + ); + env.set( + "GENESIS_BOB_ETH_ADDRESS", + "0x70997970C51812dc3A010C7d01b50e0d17dc79C8", + ); + env.set("GENESIS_ALICE_TOKEN_BALANCE", "1234"); + env.set("GENESIS_BOB_TOKEN_BALANCE", "5678"); + + let storage = MockStorage::new(); + let codes = build_testapp_codes(); + let stf = build_genesis_mempool_stf(); + let output = + run_evd_genesis_output(&stf, &codes, &storage, None).expect("genesis should succeed"); + 
let state = apply_changes_to_map(output.changes); + + let eoa_ids = eoa_account_ids(&state); + assert_eq!(eoa_ids.len(), 2); + assert_eq!( + read_token_balance(&state, output.genesis_result.token, eoa_ids[0]) + + read_token_balance(&state, output.genesis_result.token, eoa_ids[1]), + 1234 + 5678, + ); + } +} diff --git a/crates/app/node/src/config.rs b/crates/app/node/src/config.rs index 8361fa2..a8ab105 100644 --- a/crates/app/node/src/config.rs +++ b/crates/app/node/src/config.rs @@ -74,6 +74,9 @@ impl NodeConfig { enabled: self.rpc.enabled, enable_block_indexing: self.rpc.enable_block_indexing, grpc_addr: None, + grpc_enable_gzip: self.grpc.enable_gzip, + grpc_max_message_size: self.grpc_max_message_size_usize(), + shutdown_timeout_secs: self.operations.shutdown_timeout_secs, } } } @@ -257,6 +260,25 @@ mod tests { assert_eq!(config.chain.chain_id, 42); } + #[test] + fn to_rpc_config_preserves_grpc_and_shutdown_settings() { + let mut config = NodeConfig::default(); + config.chain.chain_id = 77; + config.rpc.enabled = false; + config.rpc.enable_block_indexing = false; + config.grpc.enable_gzip = false; + config.grpc.max_message_size = 8 * 1024 * 1024; + config.operations.shutdown_timeout_secs = 42; + + let rpc = config.to_rpc_config(); + assert_eq!(rpc.chain_id, 77); + assert!(!rpc.enabled); + assert!(!rpc.enable_block_indexing); + assert!(!rpc.grpc_enable_gzip); + assert_eq!(rpc.grpc_max_message_size, 8 * 1024 * 1024); + assert_eq!(rpc.shutdown_timeout_secs, 42); + } + #[test] fn validate_catches_invalid_socket_addr() { let mut config = NodeConfig::default(); diff --git a/crates/app/node/src/lib.rs b/crates/app/node/src/lib.rs index 35ec906..afb00c3 100644 --- a/crates/app/node/src/lib.rs +++ b/crates/app/node/src/lib.rs @@ -123,6 +123,12 @@ pub struct RpcConfig { /// Optional gRPC server address. When set, a gRPC server is started /// alongside JSON-RPC, sharing the same state provider and subscriptions. 
pub grpc_addr: Option, + /// Enable gzip compression for the gRPC server. + pub grpc_enable_gzip: bool, + /// Maximum gRPC request/response size in bytes. + pub grpc_max_message_size: usize, + /// Graceful shutdown timeout in seconds. + pub shutdown_timeout_secs: u64, } impl Default for RpcConfig { @@ -133,6 +139,9 @@ impl Default for RpcConfig { enabled: true, enable_block_indexing: true, grpc_addr: None, + grpc_enable_gzip: true, + grpc_max_message_size: 4 * 1024 * 1024, + shutdown_timeout_secs: 10, } } } @@ -169,6 +178,19 @@ impl RpcConfig { self.grpc_addr = Some(addr); self } + + /// Configure gRPC compression and message sizing. + pub fn with_grpc_settings(mut self, enable_gzip: bool, max_message_size: usize) -> Self { + self.grpc_enable_gzip = enable_gzip; + self.grpc_max_message_size = max_message_size; + self + } + + /// Set the graceful shutdown timeout in seconds. + pub fn with_shutdown_timeout_secs(mut self, shutdown_timeout_secs: u64) -> Self { + self.shutdown_timeout_secs = shutdown_timeout_secs; + self + } } /// Result of a genesis run, including the state changes to commit. 
@@ -230,6 +252,41 @@ async fn build_block_archive(context: TokioContext) -> OnBlockArchive { }) } +fn grpc_server_config(rpc_config: &RpcConfig, addr: SocketAddr) -> GrpcServerConfig { + GrpcServerConfig { + addr, + chain_id: rpc_config.chain_id, + enable_gzip: rpc_config.grpc_enable_gzip, + max_message_size: rpc_config.grpc_max_message_size, + } +} + +fn shutdown_timeout(rpc_config: &RpcConfig) -> Duration { + Duration::from_secs(rpc_config.shutdown_timeout_secs) +} + +#[derive(Clone, Copy)] +enum EthRunnerMode { + PersistentSidecars, + EphemeralSidecars, +} + +impl EthRunnerMode { + fn enable_block_archive(self) -> bool { + matches!(self, Self::PersistentSidecars) + } + + fn persistent_chain_index_path(self, data_dir: &Path) -> Option { + matches!(self, Self::PersistentSidecars).then(|| data_dir.join("chain-index.sqlite")) + } +} + +#[derive(Clone)] +struct EthRunnerConfig { + rpc: RpcConfig, + runner_mode: EthRunnerMode, +} + /// Run the dev node with default settings (RPC enabled). pub fn run_dev_node< Stf, @@ -465,11 +522,7 @@ pub fn run_dev_node_with_rpc< codes_for_rpc, ) .with_state_querier(state_querier); - let grpc_config = GrpcServerConfig { - addr: grpc_addr, - chain_id: rpc_config.chain_id, - ..Default::default() - }; + let grpc_config = grpc_server_config(&rpc_config, grpc_addr); tracing::info!("Starting gRPC server on {}", grpc_addr); let grpc_server = GrpcServer::with_subscription_manager( grpc_config, @@ -516,7 +569,7 @@ pub fn run_dev_node_with_rpc< _ = tokio::signal::ctrl_c() => { tracing::info!("Received Ctrl+C, initiating graceful shutdown..."); context_for_shutdown - .stop(0, Some(Duration::from_secs(10))) + .stop(0, Some(shutdown_timeout(&rpc_config))) .await .expect("shutdown failed"); } @@ -560,7 +613,7 @@ pub fn run_dev_node_with_rpc< _ = tokio::signal::ctrl_c() => { tracing::info!("Received Ctrl+C, initiating graceful shutdown..."); context_for_shutdown - .stop(0, Some(Duration::from_secs(10))) + .stop(0, 
Some(shutdown_timeout(&rpc_config))) .await .expect("shutdown failed"); } @@ -741,7 +794,7 @@ pub fn run_dev_node_with_rpc_and_mempool< _ = tokio::signal::ctrl_c() => { tracing::info!("Received Ctrl+C, initiating graceful shutdown..."); context_for_shutdown - .stop(0, Some(Duration::from_secs(10))) + .stop(0, Some(shutdown_timeout(&rpc_config))) .await .expect("shutdown failed"); } @@ -809,6 +862,62 @@ pub fn run_dev_node_with_rpc_and_mempool_eth< BuildStorage: Fn(RuntimeContext, StorageConfig) -> BuildStorageFut + Send + Sync + 'static, BuildStorageFut: Future>> + Send + 'static, +{ + run_dev_node_with_rpc_and_mempool_eth_impl( + data_dir, + build_genesis_stf, + build_stf, + build_codes, + run_genesis, + build_storage, + EthRunnerConfig { + rpc: rpc_config, + runner_mode: EthRunnerMode::PersistentSidecars, + }, + ) +} + +fn run_dev_node_with_rpc_and_mempool_eth_impl< + Stf, + Codes, + G, + S, + BuildGenesisStf, + BuildStf, + BuildCodes, + RunGenesis, + BuildStorage, + BuildStorageFut, +>( + data_dir: impl AsRef, + build_genesis_stf: BuildGenesisStf, + build_stf: BuildStf, + build_codes: BuildCodes, + run_genesis: RunGenesis, + build_storage: BuildStorage, + runner_config: EthRunnerConfig, +) where + Codes: AccountsCodeStorage + Send + Sync + 'static, + S: ReadonlyKV + Storage + Clone + Send + Sync + 'static, + Stf: StfExecutor + Send + Sync + 'static, + G: BorshSerialize + + BorshDeserialize + + Clone + + Debug + + HasTokenAccountId + + Send + + Sync + + 'static, + BuildGenesisStf: Fn() -> Stf + Send + Sync + 'static, + BuildStf: Fn(&G) -> Stf + Send + Sync + 'static, + BuildCodes: Fn() -> Codes + Clone + Send + Sync + 'static, + RunGenesis: Fn(&Stf, &Codes, &S) -> Result, Box> + + Send + + Sync + + 'static, + BuildStorage: Fn(RuntimeContext, StorageConfig) -> BuildStorageFut + Send + Sync + 'static, + BuildStorageFut: + Future>> + Send + 'static, { tracing::info!("=== Evolve Dev Node (ETH mempool) ==="); let data_dir = data_dir.as_ref(); @@ -818,7 +927,9 @@ pub 
fn run_dev_node_with_rpc_and_mempool_eth< path: data_dir.to_path_buf(), ..Default::default() }; - let chain_index_db_path = data_dir.join("chain-index.sqlite"); + let chain_index_db_path = runner_config + .runner_mode + .persistent_chain_index_path(data_dir); let runtime_config = TokioConfig::default() .with_storage_directory(data_dir) @@ -838,8 +949,9 @@ pub fn run_dev_node_with_rpc_and_mempool_eth< let build_codes = Arc::clone(&build_codes); let run_genesis = Arc::clone(&run_genesis); let build_storage = Arc::clone(&build_storage); - let rpc_config = rpc_config.clone(); + let rpc_config = runner_config.rpc.clone(); let chain_index_db_path = chain_index_db_path.clone(); + let runner_mode = runner_config.runner_mode; async move { let context_for_shutdown = context.clone(); @@ -884,13 +996,22 @@ pub fn run_dev_node_with_rpc_and_mempool_eth< let mempool: SharedMempool> = new_shared_mempool(); - // Build block archive callback (always on) - let archive_cb = build_block_archive(context_for_archive).await; + let archive_cb = if runner_mode.enable_block_archive() { + Some(build_block_archive(context_for_archive).await) + } else { + tracing::info!( + "Block archive disabled for mock-storage runner to keep sidecars ephemeral" + ); + None + }; let rpc_handle = if rpc_config.enabled { let chain_index = Arc::new( - PersistentChainIndex::new(&chain_index_db_path) - .expect("failed to open chain index database"), + match chain_index_db_path.as_ref() { + Some(path) => PersistentChainIndex::new(path), + None => PersistentChainIndex::in_memory(), + } + .expect("failed to open chain index database"), ); if let Err(e) = chain_index.initialize() { @@ -946,11 +1067,7 @@ pub fn run_dev_node_with_rpc_and_mempool_eth< mempool.clone(), ) .with_state_querier(state_querier); - let grpc_config = GrpcServerConfig { - addr: grpc_addr, - chain_id: rpc_config.chain_id, - ..Default::default() - }; + let grpc_config = grpc_server_config(&rpc_config, grpc_addr); tracing::info!("Starting gRPC server 
on {}", grpc_addr); let grpc_server = GrpcServer::with_subscription_manager( grpc_config, @@ -974,8 +1091,11 @@ pub fn run_dev_node_with_rpc_and_mempool_eth< subscriptions, mempool.clone(), ) - .with_indexing_enabled(rpc_config.enable_block_indexing) - .with_block_archive(archive_cb); + .with_indexing_enabled(rpc_config.enable_block_indexing); + let consensus = match archive_cb { + Some(archive_cb) => consensus.with_block_archive(archive_cb), + None => consensus, + }; let dev: Arc> = Arc::new(consensus); @@ -994,7 +1114,7 @@ pub fn run_dev_node_with_rpc_and_mempool_eth< _ = tokio::signal::ctrl_c() => { tracing::info!("Received Ctrl+C, initiating graceful shutdown..."); context_for_shutdown - .stop(0, Some(Duration::from_secs(10))) + .stop(0, Some(shutdown_timeout(&rpc_config))) .await .expect("shutdown failed"); } @@ -1015,8 +1135,11 @@ pub fn run_dev_node_with_rpc_and_mempool_eth< Some((handle, grpc_handle)) } else { - let consensus = DevConsensus::with_mempool(stf, storage, codes, dev_config, mempool) - .with_block_archive(archive_cb); + let consensus = DevConsensus::with_mempool(stf, storage, codes, dev_config, mempool); + let consensus = match archive_cb { + Some(archive_cb) => consensus.with_block_archive(archive_cb), + None => consensus, + }; let dev: Arc> = Arc::new(consensus); @@ -1035,7 +1158,7 @@ pub fn run_dev_node_with_rpc_and_mempool_eth< _ = tokio::signal::ctrl_c() => { tracing::info!("Received Ctrl+C, initiating graceful shutdown..."); context_for_shutdown - .stop(0, Some(Duration::from_secs(10))) + .stop(0, Some(shutdown_timeout(&rpc_config))) .await .expect("shutdown failed"); } @@ -1110,14 +1233,17 @@ pub fn run_dev_node_with_rpc_and_mempool_mock_storage< + Sync + 'static, { - run_dev_node_with_rpc_and_mempool_eth( + run_dev_node_with_rpc_and_mempool_eth_impl( data_dir, build_genesis_stf, build_stf, build_codes, run_genesis, |_context, _config| async { Ok(MockStorage::new()) }, - rpc_config, + EthRunnerConfig { + rpc: rpc_config, + runner_mode: 
EthRunnerMode::EphemeralSidecars, + }, ) } diff --git a/crates/rpc/evnode/Cargo.toml b/crates/rpc/evnode/Cargo.toml index 83dda82..f9b3027 100644 --- a/crates/rpc/evnode/Cargo.toml +++ b/crates/rpc/evnode/Cargo.toml @@ -19,6 +19,11 @@ evolve_stf_traits.workspace = true evolve_mempool.workspace = true evolve_server.workspace = true evolve_tx_eth.workspace = true +evolve_node.workspace = true +evolve_chain_index.workspace = true +evolve_eth_jsonrpc.workspace = true +evolve_rpc_types.workspace = true +evolve_storage.workspace = true # External dependencies tonic = { version = "0.12", features = ["gzip"] } @@ -28,6 +33,8 @@ tokio = { workspace = true, features = ["sync", "rt-multi-thread"] } async-trait.workspace = true tracing.workspace = true thiserror = "2" +commonware-runtime.workspace = true +borsh.workspace = true # For alloy types alloy-primitives.workspace = true @@ -42,6 +49,8 @@ tonic-build = "0.12" tokio = { workspace = true, features = ["macros", "rt-multi-thread"] } tracing-subscriber = "0.3" evolve_testapp.workspace = true +alloy-consensus = { workspace = true, features = ["k256"] } +k256 = { version = "0.13", features = ["ecdsa"] } [[example]] name = "run_server" diff --git a/crates/rpc/evnode/src/lib.rs b/crates/rpc/evnode/src/lib.rs index c6c12f2..d03818f 100644 --- a/crates/rpc/evnode/src/lib.rs +++ b/crates/rpc/evnode/src/lib.rs @@ -32,6 +32,7 @@ //! ``` pub mod error; +pub mod runner; pub mod server; pub mod service; @@ -50,6 +51,7 @@ pub mod proto { // Re-export key types pub use error::EvnodeError; +pub use runner::run_external_consensus_node_eth; pub use server::{ start_evnode_server, start_evnode_server_with_mempool, EvnodeServer, EvnodeServerConfig, }; diff --git a/crates/rpc/evnode/src/runner.rs b/crates/rpc/evnode/src/runner.rs new file mode 100644 index 0000000..22c774e --- /dev/null +++ b/crates/rpc/evnode/src/runner.rs @@ -0,0 +1,543 @@ +//! Shared external-consensus node runner. +//! +//! 
This module owns the orchestration around the EVNode gRPC server: +//! storage bootstrap, optional JSON-RPC startup, serialized commit/indexing, +//! and shutdown-time chain-state persistence. + +use std::fmt::Debug; +use std::future::Future; +use std::path::Path; +use std::sync::atomic::{AtomicU64, Ordering}; +use std::sync::{mpsc, Arc}; +use std::thread::JoinHandle; +use std::time::Duration; + +use alloy_primitives::{keccak256, Address, B256, U256}; +use borsh::{BorshDeserialize, BorshSerialize}; +use commonware_runtime::tokio::{Config as TokioConfig, Context as TokioContext, Runner}; +use commonware_runtime::{Runner as RunnerTrait, Spawner}; +use evolve_chain_index::{ + build_index_data, BlockMetadata, ChainIndex, ChainStateProvider, ChainStateProviderConfig, + PersistentChainIndex, StateQuerier, StorageStateQuerier, +}; +use evolve_core::{AccountId, ReadonlyKV}; +use evolve_eth_jsonrpc::{start_server_with_subscriptions, RpcServerConfig, SubscriptionManager}; +use evolve_mempool::{new_shared_mempool, Mempool, SharedMempool}; +use evolve_node::{GenesisOutput, HasTokenAccountId, NodeConfig}; +use evolve_rpc_types::SyncStatus; +use evolve_server::{load_chain_state, save_chain_state, BlockBuilder, ChainState, StfExecutor}; +use evolve_stf_traits::{AccountsCodeStorage, StateChange}; +use evolve_storage::{Operation, Storage, StorageConfig}; +use evolve_tx_eth::TxContext; + +use crate::{ + BlockExecutedInfo, EvnodeServer, EvnodeServerConfig, EvnodeStfExecutor, ExecutorServiceConfig, + OnBlockExecuted, +}; + +type SharedChainIndex = Arc; + +struct RpcRuntimeHandle { + stop_fn: Option>, +} + +impl RpcRuntimeHandle { + fn new(stop_fn: impl FnOnce() + Send + 'static) -> Self { + Self { + stop_fn: Some(Box::new(stop_fn)), + } + } + + fn stop(mut self) { + if let Some(stop_fn) = self.stop_fn.take() { + stop_fn(); + } + } +} + +struct ExternalConsensusSinkConfig { + initial_height: u64, + chain_id: u64, + max_gas: u64, + indexing_enabled: bool, +} + +struct 
ExternalConsensusCommitSink {
+    sender: Option<mpsc::SyncSender<BlockExecutedInfo>>,
+    worker: Option<JoinHandle<()>>,
+    current_height: Arc<AtomicU64>,
+}
+
+impl ExternalConsensusCommitSink {
+    fn spawn<S>(
+        storage: S,
+        chain_index: Option<SharedChainIndex>,
+        config: ExternalConsensusSinkConfig,
+    ) -> Self
+    where
+        S: Storage + Clone + Send + 'static,
+    {
+        const MAX_PENDING_BLOCKS: usize = 16;
+
+        let (sender, receiver) = mpsc::sync_channel::<BlockExecutedInfo>(MAX_PENDING_BLOCKS);
+        let current_height = Arc::new(AtomicU64::new(config.initial_height));
+        let current_height_for_worker = Arc::clone(&current_height);
+
+        let worker = std::thread::spawn(move || {
+            let mut parent_hash =
+                resolve_initial_parent_hash(chain_index.as_ref(), config.initial_height);
+            let runtime = tokio::runtime::Builder::new_current_thread()
+                .enable_all()
+                .build()
+                .expect("failed to build commit sink runtime");
+
+            while let Ok(info) = receiver.recv() {
+                let operations = state_changes_to_operations(info.state_changes);
+                let commit_hash = runtime.block_on(async {
+                    storage
+                        .batch(operations)
+                        .await
+                        .expect("storage batch failed");
+                    storage.commit().await.expect("storage commit failed")
+                });
+                let state_root = B256::from_slice(commit_hash.as_bytes());
+                let block_hash =
+                    compute_external_consensus_block_hash(info.height, info.timestamp, parent_hash);
+                let metadata = BlockMetadata::new(
+                    block_hash,
+                    parent_hash,
+                    state_root,
+                    info.timestamp,
+                    config.max_gas,
+                    Address::ZERO,
+                    config.chain_id,
+                );
+
+                let block = BlockBuilder::<TxContext>::new()
+                    .number(info.height)
+                    .timestamp(info.timestamp)
+                    .transactions(info.transactions)
+                    .build();
+                let (stored_block, stored_txs, stored_receipts) =
+                    build_index_data(&block, &info.block_result, &metadata);
+
+                if config.indexing_enabled {
+                    if let Some(ref index) = chain_index {
+                        if let Err(err) =
+                            index.store_block(stored_block, stored_txs, stored_receipts)
+                        {
+                            tracing::warn!("Failed to index block {}: {:?}", info.height, err);
+                        } else {
+                            tracing::debug!(
+                                "Indexed block {} (hash={}, state_root={})",
+                                info.height,
+
block_hash, + state_root + ); + } + } + } + + parent_hash = block_hash; + current_height_for_worker.store(info.height, Ordering::SeqCst); + } + }); + + Self { + sender: Some(sender), + worker: Some(worker), + current_height, + } + } + + fn callback(&self) -> OnBlockExecuted { + let sender = self + .sender + .as_ref() + .expect("external consensus sink sender missing") + .clone(); + + Arc::new(move |info| { + sender + .send(info) + .expect("external consensus commit sink stopped unexpectedly"); + }) + } + + fn current_height(&self) -> Arc { + Arc::clone(&self.current_height) + } + + fn finish(mut self) { + drop(self.sender.take()); + if let Some(worker) = self.worker.take() { + worker + .join() + .expect("external consensus commit sink panicked"); + } + } +} + +fn init_persistent_chain_index( + data_dir: &Path, + enable_chain_index: bool, +) -> Option { + if !enable_chain_index { + return None; + } + + let chain_index_db_path = data_dir.join("chain-index.sqlite"); + let index = Arc::new( + PersistentChainIndex::new(&chain_index_db_path) + .expect("failed to open chain index database"), + ); + if let Err(err) = index.initialize() { + tracing::warn!("Failed to initialize chain index: {:?}", err); + } + Some(index) +} + +fn compute_external_consensus_block_hash(height: u64, timestamp: u64, parent_hash: B256) -> B256 { + let mut data = Vec::with_capacity(48); + data.extend_from_slice(&height.to_le_bytes()); + data.extend_from_slice(×tamp.to_le_bytes()); + data.extend_from_slice(parent_hash.as_slice()); + keccak256(&data) +} + +fn resolve_initial_parent_hash( + chain_index: Option<&SharedChainIndex>, + initial_height: u64, +) -> B256 { + let Some(index) = chain_index else { + return B256::ZERO; + }; + + match index.get_block(initial_height) { + Ok(Some(block)) => { + tracing::info!("Seeding parent hash from indexed block {}", initial_height); + return block.hash; + } + Ok(None) => {} + Err(err) => { + tracing::warn!( + "Failed to read indexed block {} while seeding 
parent hash: {:?}", + initial_height, + err + ); + } + } + + let latest_indexed = match index.latest_block_number() { + Ok(latest) => latest, + Err(err) => { + tracing::warn!( + "Failed to read latest indexed block while seeding parent hash: {:?}", + err + ); + return B256::ZERO; + } + }; + + let Some(latest_height) = latest_indexed else { + return B256::ZERO; + }; + + if latest_height != initial_height { + tracing::warn!( + "Chain state height {} does not match indexed head {}; seeding parent hash from indexed head", + initial_height, + latest_height + ); + } + + match index.get_block(latest_height) { + Ok(Some(block)) => block.hash, + Ok(None) => { + tracing::warn!( + "Indexed head {} missing block payload while seeding parent hash", + latest_height + ); + B256::ZERO + } + Err(err) => { + tracing::warn!( + "Failed to read indexed head block {} while seeding parent hash: {:?}", + latest_height, + err + ); + B256::ZERO + } + } +} + +async fn run_server_with_shutdown( + serve_future: F, + context_for_shutdown: TokioContext, + shutdown_timeout_secs: u64, +) where + F: Future>, + E: std::fmt::Display, +{ + tokio::pin!(serve_future); + tokio::select! 
{
+        result = &mut serve_future => {
+            if let Err(err) = result {
+                tracing::error!("server error: {}", err);
+            }
+        }
+        _ = tokio::signal::ctrl_c() => {
+            tracing::info!("Received Ctrl+C, shutting down...");
+            context_for_shutdown
+                .stop(0, Some(Duration::from_secs(shutdown_timeout_secs)))
+                .await
+                .expect("shutdown failed");
+        }
+    }
+}
+
+async fn start_external_consensus_rpc_server<S, Codes, BuildCodes>(
+    config: &NodeConfig,
+    storage: S,
+    mempool: SharedMempool<Mempool<TxContext>>,
+    chain_index: &Option<SharedChainIndex>,
+    token_account_id: AccountId,
+    build_codes: &BuildCodes,
+) -> Option<RpcRuntimeHandle>
+where
+    S: ReadonlyKV + Clone + Send + Sync + 'static,
+    Codes: AccountsCodeStorage + Send + Sync + 'static,
+    BuildCodes: Fn() -> Codes + Clone + Send + Sync + 'static,
+{
+    if !config.rpc.enabled {
+        return None;
+    }
+
+    let chain_index = Arc::clone(chain_index.as_ref().expect("chain index required for RPC"));
+    let subscriptions = Arc::new(SubscriptionManager::new());
+    let codes_for_rpc = Arc::new(build_codes());
+    let state_provider_config = ChainStateProviderConfig {
+        chain_id: config.chain.chain_id,
+        protocol_version: "0x1".to_string(),
+        gas_price: U256::ZERO,
+        sync_status: SyncStatus::NotSyncing(false),
+    };
+    let state_querier: Arc<dyn StateQuerier> =
+        Arc::new(StorageStateQuerier::new(storage, token_account_id));
+    let state_provider = ChainStateProvider::with_mempool(
+        Arc::clone(&chain_index),
+        state_provider_config,
+        codes_for_rpc,
+        mempool,
+    )
+    .with_state_querier(state_querier);
+
+    let rpc_addr = config.parsed_rpc_addr();
+    let server_config = RpcServerConfig {
+        http_addr: rpc_addr,
+        chain_id: config.chain.chain_id,
+    };
+
+    tracing::info!("Starting JSON-RPC server on {}", rpc_addr);
+    let handle =
+        start_server_with_subscriptions(server_config, state_provider, Arc::clone(&subscriptions))
+            .await
+            .expect("failed to start RPC server");
+
+    Some(RpcRuntimeHandle::new(move || {
+        handle.stop().expect("failed to stop RPC server");
+    }))
+}
+
+fn state_changes_to_operations(changes: Vec<StateChange>) -> Vec<Operation> {
+    changes
+
.into_iter() + .map(|change| match change { + StateChange::Set { key, value } => Operation::Set { key, value }, + StateChange::Remove { key } => Operation::Remove { key }, + }) + .collect() +} + +/// Run an external-consensus execution node for ETH transactions. +/// +/// The caller supplies application-specific STF/genesis/storage builders while +/// this runner owns storage bootstrap, query-plane startup, EVNode serving, +/// serialized block commit/indexing, and shutdown persistence. +pub fn run_external_consensus_node_eth< + Stf, + Codes, + G, + S, + BuildGenesisStf, + BuildStf, + BuildCodes, + RunGenesis, + BuildStorage, + BuildStorageFut, +>( + config: NodeConfig, + build_genesis_stf: BuildGenesisStf, + build_stf: BuildStf, + build_codes: BuildCodes, + run_genesis: RunGenesis, + build_storage: BuildStorage, +) where + Codes: AccountsCodeStorage + Send + Sync + 'static, + S: ReadonlyKV + Storage + Clone + Send + Sync + 'static, + Stf: StfExecutor + EvnodeStfExecutor + Send + Sync + 'static, + G: BorshSerialize + + BorshDeserialize + + Clone + + Debug + + HasTokenAccountId + + Send + + Sync + + 'static, + BuildGenesisStf: Fn() -> Stf + Send + Sync + 'static, + BuildStf: Fn(&G) -> Stf + Send + Sync + 'static, + BuildCodes: Fn() -> Codes + Clone + Send + Sync + 'static, + RunGenesis: Fn(&Stf, &Codes, &S) -> Result, Box> + + Send + + Sync + + 'static, + BuildStorage: Fn(TokioContext, StorageConfig) -> BuildStorageFut + Send + Sync + 'static, + BuildStorageFut: + Future>> + Send + 'static, +{ + tracing::info!("=== Evolve External Consensus Node ==="); + std::fs::create_dir_all(&config.storage.path).expect("failed to create data directory"); + + let data_dir = Path::new(&config.storage.path); + let storage_config = StorageConfig { + path: data_dir.to_path_buf(), + ..Default::default() + }; + + let runtime_config = TokioConfig::default() + .with_storage_directory(data_dir) + .with_worker_threads(4); + let runner = Runner::new(runtime_config); + + let 
build_genesis_stf = Arc::new(build_genesis_stf); + let build_stf = Arc::new(build_stf); + let build_codes = Arc::new(build_codes); + let run_genesis = Arc::new(run_genesis); + let build_storage = Arc::new(build_storage); + + runner.start(move |context| { + let build_genesis_stf = Arc::clone(&build_genesis_stf); + let build_stf = Arc::clone(&build_stf); + let build_codes = Arc::clone(&build_codes); + let run_genesis = Arc::clone(&run_genesis); + let build_storage = Arc::clone(&build_storage); + let config = config.clone(); + + async move { + let context_for_shutdown = context.clone(); + let storage = (build_storage)(context, storage_config) + .await + .expect("failed to create storage"); + + let codes = build_codes(); + let (genesis_result, initial_height) = match load_chain_state::(&storage) { + Some(state) => { + tracing::info!("Resuming from existing state at height {}", state.height); + tracing::info!("Genesis state: {:?}", state.genesis_result); + (state.genesis_result, state.height) + } + None => { + tracing::info!("No existing state found, running genesis..."); + let bootstrap_stf = (build_genesis_stf)(); + let output = + (run_genesis)(&bootstrap_stf, &codes, &storage).expect("genesis failed"); + + let chain_state = ChainState { + height: 1, + genesis_result: output.genesis_result.clone(), + }; + let mut operations = state_changes_to_operations(output.changes); + operations.push(Operation::Set { + key: evolve_server::CHAIN_STATE_KEY.to_vec(), + value: borsh::to_vec(&chain_state) + .map_err(|e| format!("serialize chain state: {e}")) + .expect("serialize chain state"), + }); + + storage + .batch(operations) + .await + .expect("genesis batch failed"); + storage.commit().await.expect("genesis commit failed"); + + tracing::info!("Genesis complete. 
Result: {:?}", output.genesis_result); + (output.genesis_result, 1) + } + }; + + let stf = (build_stf)(&genesis_result); + let mempool: SharedMempool> = new_shared_mempool(); + let chain_index = init_persistent_chain_index( + Path::new(&config.storage.path), + config.rpc.enabled || config.rpc.enable_block_indexing, + ); + let rpc_handle = start_external_consensus_rpc_server( + &config, + storage.clone(), + mempool.clone(), + &chain_index, + genesis_result.token_account_id(), + build_codes.as_ref(), + ) + .await; + + let executor_config = ExecutorServiceConfig::default(); + let commit_sink = ExternalConsensusCommitSink::spawn( + storage.clone(), + chain_index, + ExternalConsensusSinkConfig { + initial_height, + chain_id: config.chain.chain_id, + max_gas: executor_config.max_gas, + indexing_enabled: config.rpc.enable_block_indexing, + }, + ); + let current_height = commit_sink.current_height(); + + let grpc_config = EvnodeServerConfig { + addr: config.parsed_grpc_addr(), + enable_gzip: config.grpc.enable_gzip, + max_message_size: config.grpc_max_message_size_usize(), + executor_config, + }; + let server = + EvnodeServer::with_mempool(grpc_config, stf, storage.clone(), codes, mempool) + .with_on_block_executed(commit_sink.callback()); + + tracing::info!("Server ready. 
Press Ctrl+C to stop."); + run_server_with_shutdown( + server.serve(), + context_for_shutdown, + config.operations.shutdown_timeout_secs, + ) + .await; + + commit_sink.finish(); + + let chain_state = ChainState { + height: current_height.load(Ordering::SeqCst), + genesis_result, + }; + if let Err(err) = save_chain_state(&storage, &chain_state).await { + tracing::error!("Failed to save chain state: {}", err); + } + + if let Some(handle) = rpc_handle { + tracing::info!("Stopping JSON-RPC server..."); + handle.stop(); + } + + tracing::info!("Shutdown complete."); + } + }); +} diff --git a/crates/rpc/evnode/src/service.rs b/crates/rpc/evnode/src/service.rs index c1f951f..23ae9ff 100644 --- a/crates/rpc/evnode/src/service.rs +++ b/crates/rpc/evnode/src/service.rs @@ -462,6 +462,7 @@ where let updated_state_root = compute_state_root(&changes); // Log execution results (before moving ownership) + let executed_tx_count = result.tx_results.len(); let successful = result .tx_results .iter() @@ -481,11 +482,15 @@ where // Finalize the block proposal: remove executed txs, return unexecuted in-flight txs. 
if let Some(ref mempool) = self.mempool { - let tx_hashes: Vec<[u8; 32]> = - block.transactions.iter().map(|tx| tx.hash().0).collect(); - if !tx_hashes.is_empty() { + let executed_hashes: Vec<[u8; 32]> = block + .transactions + .iter() + .take(executed_tx_count) + .map(|tx| tx.hash().0) + .collect(); + if !block.transactions.is_empty() { let mut pool = mempool.write().await; - pool.finalize(&tx_hashes); + pool.finalize(&executed_hashes); } } @@ -605,9 +610,12 @@ where #[cfg(test)] mod tests { use super::*; + use alloy_consensus::{SignableTransaction, TxEip1559}; + use alloy_primitives::{Address, Bytes, PrimitiveSignature, TxKind, U256}; use evolve_core::{InvokeResponse, Message}; use evolve_mempool::shared_mempool_from; use evolve_stf::results::TxResult; + use k256::ecdsa::{signature::hazmat::PrehashSigner, SigningKey}; use std::sync::atomic::AtomicUsize; use std::sync::Mutex; use tonic::Code; @@ -651,6 +659,7 @@ mod tests { emit_genesis_change: bool, run_genesis_calls: AtomicUsize, execute_changes: Vec, + executed_tx_count: Option, execute_block_calls: AtomicUsize, last_executed_block: Mutex>, } @@ -661,10 +670,16 @@ mod tests { emit_genesis_change, run_genesis_calls: AtomicUsize::new(0), execute_changes, + executed_tx_count: None, execute_block_calls: AtomicUsize::new(0), last_executed_block: Mutex::new(None), } } + + fn with_executed_tx_count(mut self, executed_tx_count: usize) -> Self { + self.executed_tx_count = Some(executed_tx_count); + self + } } impl EvnodeStfExecutor for MockStf { @@ -699,9 +714,14 @@ mod tests { } } + let executed_tx_count = self + .executed_tx_count + .unwrap_or(block.transactions.len()) + .min(block.transactions.len()); let tx_results: Vec = block .transactions .iter() + .take(executed_tx_count) .map(|tx| TxResult { events: vec![], gas_used: tx.envelope().gas_limit(), @@ -716,7 +736,7 @@ mod tests { tx_results, end_block_events: vec![], gas_used, - txs_skipped: 0, + txs_skipped: block.transactions.len() - executed_tx_count, }, 
exec_state, ) @@ -798,6 +818,36 @@ mod tests { )) } + fn sign_hash(signing_key: &SigningKey, hash: B256) -> PrimitiveSignature { + let (sig, recovery_id): (k256::ecdsa::Signature, k256::ecdsa::RecoveryId) = signing_key + .sign_prehash(hash.as_ref()) + .expect("signing should succeed"); + let r = U256::from_be_slice(sig.r().to_bytes().as_slice()); + let s = U256::from_be_slice(sig.s().to_bytes().as_slice()); + PrimitiveSignature::new(r, s, recovery_id.is_y_odd()) + } + + fn sample_eip1559_tx_bytes(nonce: u64) -> Vec { + let signing_key = + SigningKey::from_bytes((&[7u8; 32]).into()).expect("fixed signing key should be valid"); + let tx = TxEip1559 { + chain_id: 1, + nonce, + max_priority_fee_per_gas: 1, + max_fee_per_gas: 1, + gas_limit: 21_000, + to: TxKind::Call(Address::repeat_byte(0x22)), + value: U256::ZERO, + input: Bytes::new(), + access_list: Default::default(), + }; + let signature = sign_hash(&signing_key, tx.signature_hash()); + let signed = tx.into_signed(signature); + let mut encoded = vec![0x02]; + signed.rlp_encode(&mut encoded); + encoded + } + #[tokio::test] async fn init_chain_rejects_zero_initial_height() { let service = mk_service(false); @@ -1189,4 +1239,64 @@ mod tests { "executed tx should be finalized out of mempool" ); } + + #[tokio::test] + async fn execute_txs_finalizes_only_executed_prefix_from_external_batch() { + let tx1 = TxContext::decode(&sample_eip1559_tx_bytes(0)).expect("tx1 should decode"); + let tx2 = TxContext::decode(&sample_eip1559_tx_bytes(1)).expect("tx2 should decode"); + + let mut pool = Mempool::::new(); + pool.add(tx1).expect("tx1 should be added to mempool"); + pool.add(tx2).expect("tx2 should be added to mempool"); + let mempool = shared_mempool_from(pool); + + let service = ExecutorServiceImpl::with_mempool( + MockStf::new(false, Vec::new()).with_executed_tx_count(1), + MockStorage, + MockCodes, + ExecutorServiceConfig::default(), + mempool, + ); + init_test_chain(&service).await; + + let proposed = service + 
.get_txs(Request::new(GetTxsRequest {})) + .await + .expect("get_txs should succeed") + .into_inner() + .txs; + assert_eq!(proposed.len(), 2, "both txs should be proposed"); + let expected_remaining_hash = TxContext::decode(&proposed[1]) + .expect("second proposed tx should decode") + .hash(); + + service + .execute_txs(Request::new(ExecuteTxsRequest { + block_height: 2, + timestamp: None, + prev_state_root: vec![], + txs: proposed, + })) + .await + .expect("execute_txs should succeed"); + + let after_finalize = service + .get_txs(Request::new(GetTxsRequest {})) + .await + .expect("second get_txs should succeed") + .into_inner() + .txs; + assert_eq!( + after_finalize.len(), + 1, + "unexecuted tx should be returned to the mempool" + ); + let remaining_hash = TxContext::decode(&after_finalize[0]) + .expect("remaining tx should decode") + .hash(); + assert_eq!( + remaining_hash, expected_remaining_hash, + "only the unexecuted tail should be re-proposed" + ); + } } From 9499e1eff12c56f5dd6072fda5903ba405f141a9 Mon Sep 17 00:00:00 2001 From: tac0turtle Date: Fri, 6 Mar 2026 15:59:44 +0100 Subject: [PATCH 3/4] dry code --- bin/testapp/src/sim_testing.rs | 39 ++++----- crates/app/node/src/lib.rs | 13 +-- crates/app/server/src/dev.rs | 2 +- crates/app/server/src/lib.rs | 3 +- crates/rpc/chain-index/src/integration.rs | 97 ++++++++++------------- crates/rpc/evnode/src/runner.rs | 17 ++-- 6 files changed, 68 insertions(+), 103 deletions(-) diff --git a/bin/testapp/src/sim_testing.rs b/bin/testapp/src/sim_testing.rs index 1d182aa..7908dc3 100644 --- a/bin/testapp/src/sim_testing.rs +++ b/bin/testapp/src/sim_testing.rs @@ -481,6 +481,16 @@ impl SimTestApp { pool.finalize(executed_tx_hashes); } + /// Finalize the mempool after block production, keeping only unexecuted txs. 
+ fn finalize_proposed_batch(&mut self, tx_hashes: &[[u8; 32]], result: &BlockResult) { + let executed: Vec<_> = tx_hashes + .iter() + .copied() + .take(result.tx_results.len()) + .collect(); + self.finalize_mempool_batch(&executed); + } + fn produce_block_internal( &mut self, height: u64, @@ -499,12 +509,7 @@ impl SimTestApp { let (tx_hashes, transactions) = self.propose_mempool_batch(max_txs); let height = self.sim.time().block_height(); let result = self.produce_block_internal(height, transactions, true); - let executed: Vec<_> = tx_hashes - .iter() - .copied() - .take(result.tx_results.len()) - .collect(); - self.finalize_mempool_batch(&executed); + self.finalize_proposed_batch(&tx_hashes, &result); result } @@ -632,12 +637,7 @@ impl SimTestApp { } let (tx_hashes, transactions) = app.propose_mempool_batch(max_txs); let result = app.produce_block_internal(height, transactions, false); - let executed: Vec<_> = tx_hashes - .iter() - .copied() - .take(result.tx_results.len()) - .collect(); - app.finalize_mempool_batch(&executed); + app.finalize_proposed_batch(&tx_hashes, &result); result }, |app| app.sim.advance_block(), @@ -661,12 +661,7 @@ impl SimTestApp { } let (tx_hashes, transactions) = app.propose_mempool_batch(max_txs); let result = app.produce_block_internal(height, transactions, false); - let executed: Vec<_> = tx_hashes - .iter() - .copied() - .take(result.tx_results.len()) - .collect(); - app.finalize_mempool_batch(&executed); + app.finalize_proposed_batch(&tx_hashes, &result); result }, |app| app.sim.advance_block(), @@ -700,13 +695,7 @@ impl SimTestApp { let (tx_hashes, transactions) = app.propose_mempool_batch(max_txs); let block = Block::for_testing(height, transactions); let result = app.apply_block_with_trace(&block, &mut builder); - - let executed: Vec<_> = tx_hashes - .iter() - .copied() - .take(result.tx_results.len()) - .collect(); - app.finalize_mempool_batch(&executed); + app.finalize_proposed_batch(&tx_hashes, &result); result }, |app| 
app.sim.advance_block(), diff --git a/crates/app/node/src/lib.rs b/crates/app/node/src/lib.rs index afb00c3..d565d36 100644 --- a/crates/app/node/src/lib.rs +++ b/crates/app/node/src/lib.rs @@ -30,7 +30,8 @@ use evolve_grpc::{GrpcServer, GrpcServerConfig}; use evolve_mempool::{new_shared_mempool, Mempool, MempoolTx, SharedMempool}; use evolve_rpc_types::SyncStatus; use evolve_server::{ - load_chain_state, save_chain_state, ChainState, DevConfig, DevConsensus, CHAIN_STATE_KEY, + load_chain_state, save_chain_state, state_changes_to_operations, ChainState, DevConfig, + DevConsensus, CHAIN_STATE_KEY, }; use evolve_server::{OnBlockArchive, StfExecutor}; use evolve_stf_traits::{AccountsCodeStorage, StateChange, Transaction}; @@ -1358,16 +1359,6 @@ async fn commit_genesis( Ok(()) } -fn state_changes_to_operations(changes: Vec) -> Vec { - changes - .into_iter() - .map(|change| match change { - StateChange::Set { key, value } => Operation::Set { key, value }, - StateChange::Remove { key } => Operation::Remove { key }, - }) - .collect() -} - #[cfg(test)] mod tests { use super::*; diff --git a/crates/app/server/src/dev.rs b/crates/app/server/src/dev.rs index 5f79c8a..5b661af 100644 --- a/crates/app/server/src/dev.rs +++ b/crates/app/server/src/dev.rs @@ -855,7 +855,7 @@ fn compute_block_hash(height: u64, timestamp: u64, parent_hash: B256) -> B256 { keccak256(&data) } -fn state_changes_to_operations(changes: Vec) -> Vec { +pub fn state_changes_to_operations(changes: Vec) -> Vec { changes .into_iter() .map(|change| match change { diff --git a/crates/app/server/src/lib.rs b/crates/app/server/src/lib.rs index 3956747..f313725 100644 --- a/crates/app/server/src/lib.rs +++ b/crates/app/server/src/lib.rs @@ -29,7 +29,8 @@ mod persistence; pub use block::{ArchivedBlock, Block, BlockBuilder, BlockHeader}; pub use dev::{ - DevConfig, DevConsensus, NoopChainIndex, OnBlockArchive, ProducedBlock, StfExecutor, + state_changes_to_operations, DevConfig, DevConsensus, NoopChainIndex, 
OnBlockArchive, + ProducedBlock, StfExecutor, }; pub use error::ServerError; pub use evolve_mempool::{ diff --git a/crates/rpc/chain-index/src/integration.rs b/crates/rpc/chain-index/src/integration.rs index 3e4a282..df84cb1 100644 --- a/crates/rpc/chain-index/src/integration.rs +++ b/crates/rpc/chain-index/src/integration.rs @@ -194,8 +194,8 @@ where for (idx, (tx, tx_result)) in txs.iter().zip(result.tx_results.iter()).enumerate() { let tx_hash = B256::from(tx.compute_identifier()); cumulative_gas += tx_result.gas_used; + let eth_fields = ethereum_tx_fields(tx); - // Build stored transaction let stored_tx = build_stored_transaction( tx, tx_hash, @@ -203,10 +203,10 @@ where block_hash, idx as u32, metadata.chain_id, + ð_fields, ); stored_txs.push(stored_tx); - // Build stored receipt let stored_receipt = build_stored_receipt( tx, tx_result, @@ -215,6 +215,7 @@ where block_hash, idx as u32, cumulative_gas, + ð_fields, ); stored_receipts.push(stored_receipt); } @@ -243,33 +244,44 @@ where } /// Build a StoredTransaction from an STF Transaction. -fn build_stored_transaction( +fn build_stored_transaction( tx: &Tx, tx_hash: B256, block_number: u64, block_hash: B256, transaction_index: u32, chain_id: u64, + eth_fields: &Option, ) -> StoredTransaction { let from = resolve_sender_address(tx); let to = resolve_recipient_address(tx); - let eth_fields = ethereum_tx_fields(tx); - - // Extract value from funds (sum of all fungible assets as a simple approach) - let value = eth_fields - .as_ref() - .map(|fields| fields.value) - .unwrap_or_else(|| { - tx.funds() - .iter() - .fold(U256::ZERO, |acc, fa| acc + U256::from(fa.amount)) - }); - - // Preserve the original Ethereum calldata when available. 
- let input = eth_fields - .as_ref() - .map(|fields| fields.input.clone()) - .unwrap_or_else(|| Bytes::from(borsh::to_vec(tx.request()).unwrap_or_default())); + + if let Some(f) = eth_fields { + return StoredTransaction { + hash: tx_hash, + block_number, + block_hash, + transaction_index, + from, + to, + value: f.value, + gas: tx.gas_limit(), + gas_price: f.gas_price, + input: f.input.clone(), + nonce: f.nonce, + v: f.v, + r: f.r, + s: f.s, + tx_type: f.tx_type, + chain_id: f.chain_id, + }; + } + + let value = tx + .funds() + .iter() + .fold(U256::ZERO, |acc, fa| acc + U256::from(fa.amount)); + let input = Bytes::from(borsh::to_vec(tx.request()).unwrap_or_default()); StoredTransaction { hash: tx_hash, @@ -280,34 +292,20 @@ fn build_stored_transaction( to, value, gas: tx.gas_limit(), - gas_price: eth_fields - .as_ref() - .map(|fields| fields.gas_price) - .unwrap_or(U256::ZERO), + gas_price: U256::ZERO, input, - nonce: eth_fields.as_ref().map(|fields| fields.nonce).unwrap_or(0), - v: eth_fields.as_ref().map(|fields| fields.v).unwrap_or(0), - r: eth_fields - .as_ref() - .map(|fields| fields.r) - .unwrap_or(U256::ZERO), - s: eth_fields - .as_ref() - .map(|fields| fields.s) - .unwrap_or(U256::ZERO), - tx_type: eth_fields - .as_ref() - .map(|fields| fields.tx_type) - .unwrap_or(0), - chain_id: match eth_fields.as_ref() { - Some(fields) => fields.chain_id, - None => Some(chain_id), - }, + nonce: 0, + v: 0, + r: U256::ZERO, + s: U256::ZERO, + tx_type: 0, + chain_id: Some(chain_id), } } /// Build a StoredReceipt from an STF TxResult. 
-fn build_stored_receipt( +#[allow(clippy::too_many_arguments)] +fn build_stored_receipt( tx: &Tx, tx_result: &TxResult, tx_hash: B256, @@ -315,15 +313,11 @@ fn build_stored_receipt( block_hash: B256, transaction_index: u32, cumulative_gas_used: u64, + eth_fields: &Option, ) -> StoredReceipt { let from = resolve_sender_address(tx); let to = resolve_recipient_address(tx); - let eth_fields = ethereum_tx_fields(tx); - - // Convert events to logs let logs: Vec = tx_result.events.iter().map(event_to_stored_log).collect(); - - // Determine status (1 = success, 0 = failure) let status = if tx_result.response.is_ok() { 1 } else { 0 }; StoredReceipt { @@ -337,15 +331,12 @@ fn build_stored_receipt( gas_used: tx_result.gas_used, effective_gas_price: eth_fields .as_ref() - .map(|fields| fields.gas_price) + .map(|f| f.gas_price) .unwrap_or(U256::ZERO), contract_address: None, // TODO: Detect contract creation logs, status, - tx_type: eth_fields - .as_ref() - .map(|fields| fields.tx_type) - .unwrap_or(0), + tx_type: eth_fields.as_ref().map(|f| f.tx_type).unwrap_or(0), } } diff --git a/crates/rpc/evnode/src/runner.rs b/crates/rpc/evnode/src/runner.rs index 22c774e..c24101e 100644 --- a/crates/rpc/evnode/src/runner.rs +++ b/crates/rpc/evnode/src/runner.rs @@ -25,8 +25,11 @@ use evolve_eth_jsonrpc::{start_server_with_subscriptions, RpcServerConfig, Subsc use evolve_mempool::{new_shared_mempool, Mempool, SharedMempool}; use evolve_node::{GenesisOutput, HasTokenAccountId, NodeConfig}; use evolve_rpc_types::SyncStatus; -use evolve_server::{load_chain_state, save_chain_state, BlockBuilder, ChainState, StfExecutor}; -use evolve_stf_traits::{AccountsCodeStorage, StateChange}; +use evolve_server::{ + load_chain_state, save_chain_state, state_changes_to_operations, BlockBuilder, ChainState, + StfExecutor, +}; +use evolve_stf_traits::AccountsCodeStorage; use evolve_storage::{Operation, Storage, StorageConfig}; use evolve_tx_eth::TxContext; @@ -349,16 +352,6 @@ where })) } -fn 
state_changes_to_operations(changes: Vec) -> Vec { - changes - .into_iter() - .map(|change| match change { - StateChange::Set { key, value } => Operation::Set { key, value }, - StateChange::Remove { key } => Operation::Remove { key }, - }) - .collect() -} - /// Run an external-consensus execution node for ETH transactions. /// /// The caller supplies application-specific STF/genesis/storage builders while From a8c230572a887836f2855282efc10f03842feb10 Mon Sep 17 00:00:00 2001 From: tac0turtle Date: Fri, 6 Mar 2026 16:23:03 +0100 Subject: [PATCH 4/4] dedup --- bin/testapp/Cargo.toml | 2 +- bin/testapp/src/lib.rs | 2 +- bin/testapp/src/main.rs | 5 +++-- bin/testapp/src/sim_testing.rs | 20 ++++++-------------- bin/testapp/tests/mempool_e2e.rs | 13 +++---------- bin/txload/Cargo.toml | 2 +- bin/txload/src/main.rs | 14 +++----------- crates/app/node/Cargo.toml | 1 + crates/app/node/src/lib.rs | 18 ++++++------------ crates/app/server/src/dev.rs | 2 +- crates/app/server/src/lib.rs | 4 ++-- crates/app/tx/eth/Cargo.toml | 4 ++++ crates/app/tx/eth/src/eoa_registry.rs | 3 ++- crates/app/tx/eth/src/ethereum/recovery.rs | 9 ++------- crates/app/tx/eth/src/lib.rs | 7 ++++++- crates/app/tx/eth/src/mempool.rs | 12 +++--------- crates/app/tx/eth/src/test_utils.rs | 14 ++++++++++++++ crates/app/tx/eth/tests/integration_tests.rs | 13 +++---------- crates/app/tx/eth/tests/proptest_tests.rs | 13 +++---------- crates/rpc/chain-index/src/lib.rs | 4 +++- crates/rpc/chain-index/src/provider.rs | 3 +++ crates/rpc/evnode/Cargo.toml | 1 + crates/rpc/evnode/src/runner.rs | 19 +++++-------------- crates/rpc/evnode/src/service.rs | 13 +++---------- 24 files changed, 80 insertions(+), 118 deletions(-) create mode 100644 crates/app/tx/eth/src/test_utils.rs diff --git a/bin/testapp/Cargo.toml b/bin/testapp/Cargo.toml index 50c97f8..d31bf2f 100644 --- a/bin/testapp/Cargo.toml +++ b/bin/testapp/Cargo.toml @@ -22,7 +22,7 @@ evolve_server.workspace = true evolve_storage.workspace = true 
evolve_node.workspace = true evolve_mempool.workspace = true -evolve_tx_eth.workspace = true +evolve_tx_eth = { workspace = true, features = ["testing"] } evolve_grpc.workspace = true evolve_chain_index.workspace = true evolve_eth_jsonrpc.workspace = true diff --git a/bin/testapp/src/lib.rs b/bin/testapp/src/lib.rs index f4c9943..3fdf229 100644 --- a/bin/testapp/src/lib.rs +++ b/bin/testapp/src/lib.rs @@ -438,7 +438,7 @@ mod tests { return None; } let code_id = Message::from_bytes(value.clone()).get::().ok()?; - if code_id != "EthEoaAccount" { + if code_id != evolve_tx_eth::ETH_EOA_CODE_ID { return None; } let account_bytes: [u8; 32] = key[1..33].try_into().ok()?; diff --git a/bin/testapp/src/main.rs b/bin/testapp/src/main.rs index 925dc2e..1ff196d 100644 --- a/bin/testapp/src/main.rs +++ b/bin/testapp/src/main.rs @@ -290,6 +290,7 @@ mod tests { use evolve_core::Message; use evolve_storage::MockStorage; use evolve_testapp::genesis_config::{AccountConfig, TokenConfig}; + use evolve_tx_eth::ETH_EOA_CODE_ID; use std::collections::BTreeMap; fn apply_changes_to_map( @@ -375,7 +376,7 @@ mod tests { return None; } let code_id = Message::from_bytes((*value).clone()).get::().ok()?; - if code_id != "EthEoaAccount" { + if code_id != ETH_EOA_CODE_ID { return None; } let id_bytes: [u8; 32] = key[1..33].try_into().ok()?; @@ -391,6 +392,6 @@ mod tests { assert_ne!(output.genesis_result.alice, output.genesis_result.atom); assert_ne!(output.genesis_result.bob, output.genesis_result.atom); - assert_eq!(count_registered_code_id(&state, "EthEoaAccount"), 1); + assert_eq!(count_registered_code_id(&state, ETH_EOA_CODE_ID), 1); } } diff --git a/bin/testapp/src/sim_testing.rs b/bin/testapp/src/sim_testing.rs index 7908dc3..b65ba00 100644 --- a/bin/testapp/src/sim_testing.rs +++ b/bin/testapp/src/sim_testing.rs @@ -3,7 +3,7 @@ use crate::{ PLACEHOLDER_ACCOUNT, }; use alloy_consensus::{SignableTransaction, TxEip1559}; -use alloy_primitives::{Bytes, PrimitiveSignature, TxKind, U256}; +use 
alloy_primitives::{Bytes, TxKind, U256}; use evolve_core::{ encoding::Decodable, AccountId, Environment, FungibleAsset, InvokableMessage, ReadonlyKV, SdkResult, @@ -23,9 +23,10 @@ use evolve_testing::server_mocks::AccountStorageMock; use evolve_token::account::TokenRef; use evolve_tx_eth::{ derive_eth_eoa_account_id, derive_runtime_contract_address, register_runtime_contract_account, + sign_hash, ETH_EOA_CODE_ID, }; use evolve_tx_eth::{EthGateway, TxContext}; -use k256::ecdsa::{signature::hazmat::PrehashSigner, SigningKey, VerifyingKey}; +use k256::ecdsa::{SigningKey, VerifyingKey}; use std::collections::BTreeMap; use tiny_keccak::{Hasher, Keccak}; @@ -66,15 +67,6 @@ fn get_address(signing_key: &SigningKey) -> alloy_primitives::Address { alloy_primitives::Address::from_slice(&hash[12..]) } -fn sign_hash(signing_key: &SigningKey, hash: alloy_primitives::B256) -> PrimitiveSignature { - let (sig, recovery_id): (k256::ecdsa::Signature, k256::ecdsa::RecoveryId) = - signing_key.sign_prehash(hash.as_ref()).unwrap(); - let r = U256::from_be_slice(sig.r().to_bytes().as_slice()); - let s = U256::from_be_slice(sig.s().to_bytes().as_slice()); - let v = recovery_id.is_y_odd(); - PrimitiveSignature::new(r, s, v) -} - fn create_signed_tx( signing_key: &SigningKey, chain_id: u64, @@ -337,9 +329,9 @@ impl SimTestApp { let alice_id = derive_eth_eoa_account_id(alice_address); let bob_id = derive_eth_eoa_account_id(bob_address); - register_account_code_identifier(&mut sim, alice_id, "EthEoaAccount") + register_account_code_identifier(&mut sim, alice_id, ETH_EOA_CODE_ID) .expect("register alice code"); - register_account_code_identifier(&mut sim, bob_id, "EthEoaAccount") + register_account_code_identifier(&mut sim, bob_id, ETH_EOA_CODE_ID) .expect("register bob code"); init_eth_eoa_storage(&mut sim, alice_id, alice_address.into()).expect("init alice eoa"); init_eth_eoa_storage(&mut sim, bob_id, bob_address.into()).expect("init bob eoa"); @@ -736,7 +728,7 @@ impl SimTestApp { /// Create 
an EOA with a specific Ethereum address. pub fn create_eoa_with_address(&mut self, eth_address: [u8; 20]) -> AccountId { let account_id = derive_eth_eoa_account_id(alloy_primitives::Address::from(eth_address)); - register_account_code_identifier(&mut self.sim, account_id, "EthEoaAccount") + register_account_code_identifier(&mut self.sim, account_id, ETH_EOA_CODE_ID) .expect("register eoa code"); init_eth_eoa_storage(&mut self.sim, account_id, eth_address).expect("init eoa storage"); account_id diff --git a/bin/testapp/tests/mempool_e2e.rs b/bin/testapp/tests/mempool_e2e.rs index 4d1d37e..99458a5 100644 --- a/bin/testapp/tests/mempool_e2e.rs +++ b/bin/testapp/tests/mempool_e2e.rs @@ -8,7 +8,7 @@ //! 5. Token balances are updated correctly use alloy_consensus::{SignableTransaction, TxEip1559}; -use alloy_primitives::{Address, Bytes, PrimitiveSignature, TxKind, B256, U256}; +use alloy_primitives::{Address, Bytes, TxKind, B256, U256}; use async_trait::async_trait; use evolve_core::{AccountId, ErrorCode, ReadonlyKV}; use evolve_node::{build_dev_node_with_mempool, DevNodeMempoolHandles}; @@ -21,7 +21,7 @@ use evolve_testapp::{ }; use evolve_testing::server_mocks::AccountStorageMock; use evolve_tx_eth::{derive_runtime_contract_address, EthGateway, TxContext}; -use k256::ecdsa::{signature::hazmat::PrehashSigner, SigningKey, VerifyingKey}; +use k256::ecdsa::{SigningKey, VerifyingKey}; use std::collections::BTreeMap; use std::sync::RwLock; use tiny_keccak::{Hasher, Keccak}; @@ -168,14 +168,7 @@ fn compute_selector(fn_name: &str) -> [u8; 4] { [hash[0], hash[1], hash[2], hash[3]] } -/// Sign a transaction hash and create an alloy signature. 
-fn sign_hash(signing_key: &SigningKey, hash: alloy_primitives::B256) -> PrimitiveSignature { - let (sig, recovery_id) = signing_key.sign_prehash(hash.as_ref()).unwrap(); - let r = U256::from_be_slice(&sig.r().to_bytes()); - let s = U256::from_be_slice(&sig.s().to_bytes()); - let v = recovery_id.is_y_odd(); - PrimitiveSignature::new(r, s, v) -} +use evolve_tx_eth::sign_hash; /// Get Ethereum address from signing key. fn get_address(signing_key: &SigningKey) -> alloy_primitives::Address { diff --git a/bin/txload/Cargo.toml b/bin/txload/Cargo.toml index 69c7f22..c90412c 100644 --- a/bin/txload/Cargo.toml +++ b/bin/txload/Cargo.toml @@ -13,7 +13,7 @@ path = "src/main.rs" [dependencies] evolve_core.workspace = true -evolve_tx_eth.workspace = true +evolve_tx_eth = { workspace = true, features = ["testing"] } alloy-primitives.workspace = true alloy-consensus = { workspace = true, features = ["k256"] } diff --git a/bin/txload/src/main.rs b/bin/txload/src/main.rs index 89d10b8..096e82f 100644 --- a/bin/txload/src/main.rs +++ b/bin/txload/src/main.rs @@ -5,11 +5,11 @@ use std::sync::Arc; use std::time::Duration; use alloy_consensus::{SignableTransaction, TxEip1559}; -use alloy_primitives::{keccak256, Address, Bytes, PrimitiveSignature, TxKind, B256, U256}; +use alloy_primitives::{keccak256, Address, Bytes, TxKind, U256}; use clap::Parser; use evolve_core::AccountId; use evolve_tx_eth::derive_eth_eoa_account_id; -use k256::ecdsa::{signature::hazmat::PrehashSigner, SigningKey, VerifyingKey}; +use k256::ecdsa::{SigningKey, VerifyingKey}; use rand::RngCore; use serde_json::{json, Value}; use tokio::time::Instant; @@ -265,15 +265,7 @@ fn wallet_address(signing_key: &SigningKey) -> Address { Address::from_slice(&hash.as_slice()[12..]) } -fn sign_hash(signing_key: &SigningKey, hash: B256) -> PrimitiveSignature { - let (sig, recovery_id): (k256::ecdsa::Signature, k256::ecdsa::RecoveryId) = signing_key - .sign_prehash(hash.as_ref()) - .expect("signing failed"); - let r = 
U256::from_be_slice(sig.r().to_bytes().as_slice()); - let s = U256::from_be_slice(sig.s().to_bytes().as_slice()); - let v = recovery_id.is_y_odd(); - PrimitiveSignature::new(r, s, v) -} +use evolve_tx_eth::sign_hash; fn create_signed_tx( signing_key: &SigningKey, diff --git a/crates/app/node/Cargo.toml b/crates/app/node/Cargo.toml index c43d213..afc3f8b 100644 --- a/crates/app/node/Cargo.toml +++ b/crates/app/node/Cargo.toml @@ -39,5 +39,6 @@ workspace = true tempfile = "3.8" alloy-consensus = { workspace = true } evolve_stf = { workspace = true } +evolve_tx_eth = { workspace = true, features = ["testing"] } k256 = { version = "0.13", features = ["ecdsa"] } rand = "0.8" diff --git a/crates/app/node/src/lib.rs b/crates/app/node/src/lib.rs index d565d36..945ad99 100644 --- a/crates/app/node/src/lib.rs +++ b/crates/app/node/src/lib.rs @@ -21,7 +21,7 @@ use commonware_runtime::tokio::{Config as TokioConfig, Context as TokioContext, use commonware_runtime::{Runner as RunnerTrait, Spawner}; use evolve_chain_index::{ ChainStateProvider, ChainStateProviderConfig, PersistentChainIndex, StateQuerier, - StorageStateQuerier, + StorageStateQuerier, DEFAULT_PROTOCOL_VERSION, }; use evolve_core::encoding::Encodable; use evolve_core::{AccountId, ReadonlyKV}; @@ -486,7 +486,7 @@ pub fn run_dev_node_with_rpc< let codes_for_rpc = Arc::new(build_codes()); let state_provider_config = ChainStateProviderConfig { chain_id: rpc_config.chain_id, - protocol_version: "0x1".to_string(), + protocol_version: DEFAULT_PROTOCOL_VERSION.to_string(), gas_price: U256::ZERO, sync_status: SyncStatus::NotSyncing(false), }; @@ -1024,7 +1024,7 @@ fn run_dev_node_with_rpc_and_mempool_eth_impl< let codes_for_rpc = Arc::new(build_codes()); let state_provider_config = ChainStateProviderConfig { chain_id: rpc_config.chain_id, - protocol_version: "0x1".to_string(), + protocol_version: DEFAULT_PROTOCOL_VERSION.to_string(), gas_price: U256::ZERO, sync_status: SyncStatus::NotSyncing(false), }; @@ -1363,7 +1363,7 @@ 
async fn commit_genesis( mod tests { use super::*; use alloy_consensus::{SignableTransaction, TxLegacy}; - use alloy_primitives::{Address, Bytes, PrimitiveSignature, U256, U64}; + use alloy_primitives::{Address, Bytes, U256, U64}; use evolve_chain_index::NoopAccountCodes; use evolve_eth_jsonrpc::StateProvider; use evolve_mempool::MempoolTx; @@ -1373,7 +1373,7 @@ mod tests { use evolve_stf::execution_state::ExecutionState; use evolve_stf::results::{BlockResult, TxResult}; use evolve_stf_traits::{AccountsCodeStorage, Block as _}; - use k256::ecdsa::{signature::hazmat::PrehashSigner, SigningKey}; + use k256::ecdsa::SigningKey; use rand::rngs::OsRng; use tokio::time::{sleep, timeout, Duration}; @@ -1427,13 +1427,7 @@ mod tests { } } - fn sign_hash(signing_key: &SigningKey, hash: alloy_primitives::B256) -> PrimitiveSignature { - let (sig, recovery_id) = signing_key.sign_prehash(hash.as_ref()).unwrap(); - let r = U256::from_be_slice(&sig.r().to_bytes()); - let s = U256::from_be_slice(&sig.s().to_bytes()); - let v = recovery_id.is_y_odd(); - PrimitiveSignature::new(r, s, v) - } + use evolve_tx_eth::sign_hash; fn make_signed_legacy_tx(chain_id: u64) -> Vec { make_signed_legacy_tx_with_gas(chain_id, 21_000) diff --git a/crates/app/server/src/dev.rs b/crates/app/server/src/dev.rs index 5b661af..0fe7670 100644 --- a/crates/app/server/src/dev.rs +++ b/crates/app/server/src/dev.rs @@ -844,7 +844,7 @@ fn current_timestamp() -> u64 { /// /// In production, this would be a proper Merkle root or similar. /// For dev mode, we use a simple hash of height + timestamp + parent. 
-fn compute_block_hash(height: u64, timestamp: u64, parent_hash: B256) -> B256 { +pub fn compute_block_hash(height: u64, timestamp: u64, parent_hash: B256) -> B256 { use alloy_primitives::keccak256; let mut data = Vec::with_capacity(48); diff --git a/crates/app/server/src/lib.rs b/crates/app/server/src/lib.rs index f313725..551b6cd 100644 --- a/crates/app/server/src/lib.rs +++ b/crates/app/server/src/lib.rs @@ -29,8 +29,8 @@ mod persistence; pub use block::{ArchivedBlock, Block, BlockBuilder, BlockHeader}; pub use dev::{ - state_changes_to_operations, DevConfig, DevConsensus, NoopChainIndex, OnBlockArchive, - ProducedBlock, StfExecutor, + compute_block_hash, state_changes_to_operations, DevConfig, DevConsensus, NoopChainIndex, + OnBlockArchive, ProducedBlock, StfExecutor, }; pub use error::ServerError; pub use evolve_mempool::{ diff --git a/crates/app/tx/eth/Cargo.toml b/crates/app/tx/eth/Cargo.toml index 4a3927a..b142890 100644 --- a/crates/app/tx/eth/Cargo.toml +++ b/crates/app/tx/eth/Cargo.toml @@ -10,6 +10,7 @@ description = "Ethereum-compatible transaction types for Evolve" [features] default = [] error-decode = ["linkme", "evolve_core/error-decode"] +testing = ["dep:k256"] [dependencies] evolve_core = { workspace = true } @@ -34,6 +35,9 @@ rayon = "1.10" sha2 = { workspace = true } sha3 = { workspace = true } +# Testing utilities (optional) +k256 = { version = "0.13", features = ["ecdsa"], optional = true } + [dev-dependencies] # For testing with real signatures k256 = { version = "0.13", features = ["ecdsa", "arithmetic"] } diff --git a/crates/app/tx/eth/src/eoa_registry.rs b/crates/app/tx/eth/src/eoa_registry.rs index 0115cad..29c3913 100644 --- a/crates/app/tx/eth/src/eoa_registry.rs +++ b/crates/app/tx/eth/src/eoa_registry.rs @@ -13,7 +13,8 @@ const EOA_ADDR_TO_ID_PREFIX: &[u8] = b"registry/eoa/eth/a2i/"; const EOA_ID_TO_ADDR_PREFIX: &[u8] = b"registry/eoa/eth/i2a/"; const CONTRACT_ADDR_TO_ID_PREFIX: &[u8] = b"registry/contract/runtime/a2i/"; const 
CONTRACT_ID_TO_ADDR_PREFIX: &[u8] = b"registry/contract/runtime/i2a/"; -const ETH_EOA_CODE_ID: &str = "EthEoaAccount"; +/// Code identifier for Ethereum externally owned accounts. +pub const ETH_EOA_CODE_ID: &str = "EthEoaAccount"; fn addr_to_id_key(address: Address) -> Vec { let mut key = Vec::with_capacity(EOA_ADDR_TO_ID_PREFIX.len() + 20); diff --git a/crates/app/tx/eth/src/ethereum/recovery.rs b/crates/app/tx/eth/src/ethereum/recovery.rs index 339110f..35a3f59 100644 --- a/crates/app/tx/eth/src/ethereum/recovery.rs +++ b/crates/app/tx/eth/src/ethereum/recovery.rs @@ -39,15 +39,10 @@ fn secp() -> &'static Secp256k1 { mod tests { use super::*; use alloy_primitives::U256; - use k256::ecdsa::{signature::hazmat::PrehashSigner, SigningKey, VerifyingKey}; + use k256::ecdsa::{SigningKey, VerifyingKey}; use rand::rngs::OsRng; - fn sign_hash(signing_key: &SigningKey, hash: B256) -> alloy_primitives::PrimitiveSignature { - let (sig, recovery_id) = signing_key.sign_prehash(hash.as_ref()).expect("sign"); - let r = U256::from_be_slice(&sig.r().to_bytes()); - let s = U256::from_be_slice(&sig.s().to_bytes()); - alloy_primitives::PrimitiveSignature::new(r, s, recovery_id.is_y_odd()) - } + use crate::sign_hash; fn get_address(signing_key: &SigningKey) -> Address { let verifying_key = VerifyingKey::from(signing_key); diff --git a/crates/app/tx/eth/src/lib.rs b/crates/app/tx/eth/src/lib.rs index af88ccc..6c995e5 100644 --- a/crates/app/tx/eth/src/lib.rs +++ b/crates/app/tx/eth/src/lib.rs @@ -43,6 +43,11 @@ pub mod mempool; pub mod traits; pub mod verifier; +#[cfg(any(test, feature = "testing"))] +mod test_utils; +#[cfg(any(test, feature = "testing"))] +pub use test_utils::sign_hash; + // Re-export main types pub use decoder::TypedTxDecoder; pub use envelope::{tx_type, TxEnvelope}; @@ -50,7 +55,7 @@ pub use eoa_registry::{ lookup_account_id_in_env, lookup_account_id_in_storage, lookup_address_in_env, lookup_address_in_storage, lookup_contract_account_id_in_env, 
lookup_contract_account_id_in_storage, register_runtime_contract_account, - resolve_or_create_eoa_account, + resolve_or_create_eoa_account, ETH_EOA_CODE_ID, }; pub use error::*; pub use ethereum::{SignedEip1559Tx, SignedLegacyTx}; diff --git a/crates/app/tx/eth/src/mempool.rs b/crates/app/tx/eth/src/mempool.rs index 77290b6..43ff056 100644 --- a/crates/app/tx/eth/src/mempool.rs +++ b/crates/app/tx/eth/src/mempool.rs @@ -212,7 +212,7 @@ mod tests { use crate::eoa_registry::{lookup_account_id_in_env, register_runtime_contract_account}; use crate::traits::{derive_eth_eoa_account_id, derive_runtime_contract_account_id}; use alloy_consensus::{SignableTransaction, TxLegacy}; - use alloy_primitives::{Bytes, PrimitiveSignature, TxKind, U256}; + use alloy_primitives::{Bytes, TxKind, U256}; use evolve_core::runtime_api::{ RegisterAccountAtIdRequest, RegisterAccountAtIdResponse, ACCOUNT_IDENTIFIER_PREFIX, RUNTIME_ACCOUNT_ID, @@ -225,7 +225,7 @@ mod tests { BlockContext, EnvironmentQuery, FungibleAsset, InvokableMessage, InvokeResponse, ERR_UNKNOWN_FUNCTION, }; - use k256::ecdsa::{signature::hazmat::PrehashSigner, SigningKey}; + use k256::ecdsa::SigningKey; use rand::rngs::OsRng; use std::collections::BTreeMap; @@ -322,13 +322,7 @@ mod tests { } } - fn sign_hash(signing_key: &SigningKey, hash: B256) -> PrimitiveSignature { - let (sig, recovery_id) = signing_key.sign_prehash(hash.as_ref()).unwrap(); - let r = U256::from_be_slice(&sig.r().to_bytes()); - let s = U256::from_be_slice(&sig.s().to_bytes()); - let v = recovery_id.is_y_odd(); - PrimitiveSignature::new(r, s, v) - } + use crate::sign_hash; fn build_tx_context(to: Address) -> TxContext { let signing_key = SigningKey::random(&mut OsRng); diff --git a/crates/app/tx/eth/src/test_utils.rs b/crates/app/tx/eth/src/test_utils.rs new file mode 100644 index 0000000..674d266 --- /dev/null +++ b/crates/app/tx/eth/src/test_utils.rs @@ -0,0 +1,14 @@ +//! Shared test utilities for Ethereum transaction signing. 
+ +use alloy_primitives::{PrimitiveSignature, B256, U256}; +use k256::ecdsa::{signature::hazmat::PrehashSigner, SigningKey}; + +/// Sign a hash using a k256 signing key and return an alloy PrimitiveSignature. +pub fn sign_hash(signing_key: &SigningKey, hash: B256) -> PrimitiveSignature { + let (sig, recovery_id): (k256::ecdsa::Signature, k256::ecdsa::RecoveryId) = signing_key + .sign_prehash(hash.as_ref()) + .expect("signing should succeed"); + let r = U256::from_be_slice(sig.r().to_bytes().as_slice()); + let s = U256::from_be_slice(sig.s().to_bytes().as_slice()); + PrimitiveSignature::new(r, s, recovery_id.is_y_odd()) +} diff --git a/crates/app/tx/eth/tests/integration_tests.rs b/crates/app/tx/eth/tests/integration_tests.rs index c390e61..6489d34 100644 --- a/crates/app/tx/eth/tests/integration_tests.rs +++ b/crates/app/tx/eth/tests/integration_tests.rs @@ -3,22 +3,15 @@ //! These tests use real Ethereum transaction data to verify correct behavior. use alloy_consensus::{SignableTransaction, TxEip1559, TxLegacy}; -use alloy_primitives::{Address, Bytes, PrimitiveSignature, B256, U256}; +use alloy_primitives::{Address, Bytes, B256, U256}; use evolve_stf_traits::TxDecoder; use evolve_tx_eth::{ tx_type, EcdsaVerifier, SignatureVerifierRegistry, TxEnvelope, TypedTransaction, TypedTxDecoder, }; -use k256::ecdsa::{signature::hazmat::PrehashSigner, SigningKey, VerifyingKey}; +use k256::ecdsa::{SigningKey, VerifyingKey}; use rand::rngs::OsRng; -/// Helper to sign a transaction hash and create an alloy signature -fn sign_hash(signing_key: &SigningKey, hash: B256) -> PrimitiveSignature { - let (sig, recovery_id) = signing_key.sign_prehash(hash.as_ref()).unwrap(); - let r = U256::from_be_slice(&sig.r().to_bytes()); - let s = U256::from_be_slice(&sig.s().to_bytes()); - let v = recovery_id.is_y_odd(); - PrimitiveSignature::new(r, s, v) -} +use evolve_tx_eth::sign_hash; /// Get address from signing key (Ethereum address derivation) fn get_address(signing_key: &SigningKey) -> 
Address { diff --git a/crates/app/tx/eth/tests/proptest_tests.rs b/crates/app/tx/eth/tests/proptest_tests.rs index 8487234..21b4712 100644 --- a/crates/app/tx/eth/tests/proptest_tests.rs +++ b/crates/app/tx/eth/tests/proptest_tests.rs @@ -1,20 +1,13 @@ //! Property-based tests for transaction encoding/decoding roundtrips. use alloy_consensus::{SignableTransaction, TxEip1559, TxLegacy}; -use alloy_primitives::{Address, Bytes, PrimitiveSignature, B256, U256}; +use alloy_primitives::{Address, Bytes, U256}; use evolve_tx_eth::{tx_type, TxEnvelope, TypedTransaction}; -use k256::ecdsa::{signature::hazmat::PrehashSigner, SigningKey, VerifyingKey}; +use k256::ecdsa::{SigningKey, VerifyingKey}; use proptest::prelude::*; use rand::rngs::OsRng; -/// Helper to sign a transaction hash and create an alloy signature -fn sign_hash(signing_key: &SigningKey, hash: B256) -> PrimitiveSignature { - let (sig, recovery_id) = signing_key.sign_prehash(hash.as_ref()).unwrap(); - let r = U256::from_be_slice(&sig.r().to_bytes()); - let s = U256::from_be_slice(&sig.s().to_bytes()); - let v = recovery_id.is_y_odd(); - PrimitiveSignature::new(r, s, v) -} +use evolve_tx_eth::sign_hash; /// Get address from signing key fn get_address(signing_key: &SigningKey) -> Address { diff --git a/crates/rpc/chain-index/src/lib.rs b/crates/rpc/chain-index/src/lib.rs index 789ba89..c49effd 100644 --- a/crates/rpc/chain-index/src/lib.rs +++ b/crates/rpc/chain-index/src/lib.rs @@ -41,6 +41,8 @@ pub use cache::ChainCache; pub use error::{ChainIndexError, ChainIndexResult}; pub use index::{ChainIndex, PersistentChainIndex}; pub use integration::{build_index_data, event_to_stored_log, index_block, BlockMetadata}; -pub use provider::{ChainStateProvider, ChainStateProviderConfig, NoopAccountCodes}; +pub use provider::{ + ChainStateProvider, ChainStateProviderConfig, NoopAccountCodes, DEFAULT_PROTOCOL_VERSION, +}; pub use querier::{StateQuerier, StorageStateQuerier}; pub use types::*; diff --git 
a/crates/rpc/chain-index/src/provider.rs b/crates/rpc/chain-index/src/provider.rs index a802ca9..3966d06 100644 --- a/crates/rpc/chain-index/src/provider.rs +++ b/crates/rpc/chain-index/src/provider.rs @@ -102,6 +102,9 @@ impl IngressVerifier { } } +/// Default protocol version reported by the RPC server. +pub const DEFAULT_PROTOCOL_VERSION: &str = "0x1"; + /// State provider configuration. #[derive(Debug, Clone)] pub struct ChainStateProviderConfig { diff --git a/crates/rpc/evnode/Cargo.toml b/crates/rpc/evnode/Cargo.toml index f9b3027..7b0ecd9 100644 --- a/crates/rpc/evnode/Cargo.toml +++ b/crates/rpc/evnode/Cargo.toml @@ -49,6 +49,7 @@ tonic-build = "0.12" tokio = { workspace = true, features = ["macros", "rt-multi-thread"] } tracing-subscriber = "0.3" evolve_testapp.workspace = true +evolve_tx_eth = { workspace = true, features = ["testing"] } alloy-consensus = { workspace = true, features = ["k256"] } k256 = { version = "0.13", features = ["ecdsa"] } diff --git a/crates/rpc/evnode/src/runner.rs b/crates/rpc/evnode/src/runner.rs index c24101e..7c80427 100644 --- a/crates/rpc/evnode/src/runner.rs +++ b/crates/rpc/evnode/src/runner.rs @@ -12,7 +12,7 @@ use std::sync::{mpsc, Arc}; use std::thread::JoinHandle; use std::time::Duration; -use alloy_primitives::{keccak256, Address, B256, U256}; +use alloy_primitives::{Address, B256, U256}; use borsh::{BorshDeserialize, BorshSerialize}; use commonware_runtime::tokio::{Config as TokioConfig, Context as TokioContext, Runner}; use commonware_runtime::{Runner as RunnerTrait, Spawner}; @@ -26,8 +26,8 @@ use evolve_mempool::{new_shared_mempool, Mempool, SharedMempool}; use evolve_node::{GenesisOutput, HasTokenAccountId, NodeConfig}; use evolve_rpc_types::SyncStatus; use evolve_server::{ - load_chain_state, save_chain_state, state_changes_to_operations, BlockBuilder, ChainState, - StfExecutor, + compute_block_hash, load_chain_state, save_chain_state, state_changes_to_operations, + BlockBuilder, ChainState, StfExecutor, }; use 
evolve_stf_traits::AccountsCodeStorage; use evolve_storage::{Operation, Storage, StorageConfig}; @@ -104,8 +104,7 @@ impl ExternalConsensusCommitSink { storage.commit().await.expect("storage commit failed") }); let state_root = B256::from_slice(commit_hash.as_bytes()); - let block_hash = - compute_external_consensus_block_hash(info.height, info.timestamp, parent_hash); + let block_hash = compute_block_hash(info.height, info.timestamp, parent_hash); let metadata = BlockMetadata::new( block_hash, parent_hash, @@ -200,14 +199,6 @@ fn init_persistent_chain_index( Some(index) } -fn compute_external_consensus_block_hash(height: u64, timestamp: u64, parent_hash: B256) -> B256 { - let mut data = Vec::with_capacity(48); - data.extend_from_slice(&height.to_le_bytes()); - data.extend_from_slice(×tamp.to_le_bytes()); - data.extend_from_slice(parent_hash.as_slice()); - keccak256(&data) -} - fn resolve_initial_parent_hash( chain_index: Option<&SharedChainIndex>, initial_height: u64, @@ -321,7 +312,7 @@ where let codes_for_rpc = Arc::new(build_codes()); let state_provider_config = ChainStateProviderConfig { chain_id: config.chain.chain_id, - protocol_version: "0x1".to_string(), + protocol_version: evolve_chain_index::DEFAULT_PROTOCOL_VERSION.to_string(), gas_price: U256::ZERO, sync_status: SyncStatus::NotSyncing(false), }; diff --git a/crates/rpc/evnode/src/service.rs b/crates/rpc/evnode/src/service.rs index 23ae9ff..6f2eb67 100644 --- a/crates/rpc/evnode/src/service.rs +++ b/crates/rpc/evnode/src/service.rs @@ -611,11 +611,11 @@ where mod tests { use super::*; use alloy_consensus::{SignableTransaction, TxEip1559}; - use alloy_primitives::{Address, Bytes, PrimitiveSignature, TxKind, U256}; + use alloy_primitives::{Address, Bytes, TxKind, U256}; use evolve_core::{InvokeResponse, Message}; use evolve_mempool::shared_mempool_from; use evolve_stf::results::TxResult; - use k256::ecdsa::{signature::hazmat::PrehashSigner, SigningKey}; + use k256::ecdsa::SigningKey; use 
std::sync::atomic::AtomicUsize; use std::sync::Mutex; use tonic::Code; @@ -818,14 +818,7 @@ mod tests { )) } - fn sign_hash(signing_key: &SigningKey, hash: B256) -> PrimitiveSignature { - let (sig, recovery_id): (k256::ecdsa::Signature, k256::ecdsa::RecoveryId) = signing_key - .sign_prehash(hash.as_ref()) - .expect("signing should succeed"); - let r = U256::from_be_slice(sig.r().to_bytes().as_slice()); - let s = U256::from_be_slice(sig.s().to_bytes().as_slice()); - PrimitiveSignature::new(r, s, recovery_id.is_y_odd()) - } + use evolve_tx_eth::sign_hash; fn sample_eip1559_tx_bytes(nonce: u64) -> Vec { let signing_key =