diff --git a/Makefile b/Makefile index afde54dbc..8e67004fe 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ ################## update dependencies #################### -ETHEREUM_SUBMODULE_COMMIT_OR_TAG := morph-v2.1.2 -ETHEREUM_TARGET_VERSION := morph-v2.1.2 -TENDERMINT_TARGET_VERSION := v0.3.3 +ETHEREUM_SUBMODULE_COMMIT_OR_TAG := test_3_13 +ETHEREUM_TARGET_VERSION := v1.10.14-0.20260303114154-29281e501802 +TENDERMINT_TARGET_VERSION := v0.3.4-0.20260226093240-9be76fe518c2 ETHEREUM_MODULE_NAME := github.com/morph-l2/go-ethereum diff --git a/bindings/bindings/l1sequencer.go b/bindings/bindings/l1sequencer.go new file mode 100644 index 000000000..80110f035 --- /dev/null +++ b/bindings/bindings/l1sequencer.go @@ -0,0 +1,820 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package bindings + +import ( + "errors" + "math/big" + "strings" + + ethereum "github.com/morph-l2/go-ethereum" + "github.com/morph-l2/go-ethereum/accounts/abi" + "github.com/morph-l2/go-ethereum/accounts/abi/bind" + "github.com/morph-l2/go-ethereum/common" + "github.com/morph-l2/go-ethereum/core/types" + "github.com/morph-l2/go-ethereum/event" +) + +// Reference imports to suppress errors if they are not otherwise used. +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +// L1SequencerMetaData contains all meta data concerning the L1Sequencer contract. 
+var L1SequencerMetaData = &bind.MetaData{ + ABI: "[{\"type\":\"function\",\"name\":\"getSequencer\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"address\",\"internalType\":\"address\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"initialize\",\"inputs\":[{\"name\":\"_owner\",\"type\":\"address\",\"internalType\":\"address\"}],\"outputs\":[],\"stateMutability\":\"nonpayable\"},{\"type\":\"function\",\"name\":\"owner\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"address\",\"internalType\":\"address\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"renounceOwnership\",\"inputs\":[],\"outputs\":[],\"stateMutability\":\"nonpayable\"},{\"type\":\"function\",\"name\":\"sequencer\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"address\",\"internalType\":\"address\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"transferOwnership\",\"inputs\":[{\"name\":\"newOwner\",\"type\":\"address\",\"internalType\":\"address\"}],\"outputs\":[],\"stateMutability\":\"nonpayable\"},{\"type\":\"function\",\"name\":\"updateSequencer\",\"inputs\":[{\"name\":\"newSequencer\",\"type\":\"address\",\"internalType\":\"address\"}],\"outputs\":[],\"stateMutability\":\"nonpayable\"},{\"type\":\"event\",\"name\":\"Initialized\",\"inputs\":[{\"name\":\"version\",\"type\":\"uint8\",\"indexed\":false,\"internalType\":\"uint8\"}],\"anonymous\":false},{\"type\":\"event\",\"name\":\"OwnershipTransferred\",\"inputs\":[{\"name\":\"previousOwner\",\"type\":\"address\",\"indexed\":true,\"internalType\":\"address\"},{\"name\":\"newOwner\",\"type\":\"address\",\"indexed\":true,\"internalType\":\"address\"}],\"anonymous\":false},{\"type\":\"event\",\"name\":\"SequencerUpdated\",\"inputs\":[{\"name\":\"oldSequencer\",\"type\":\"address\",\"indexed\":true,\"internalType\":\"address\"},{\"name\":\"newSequencer\",\"type\":\"address\",\"indexed\":true,\"internalType\":\"address\"}],\"anonymous\":false}]", + Bin: 
"0x608060405234801561000f575f80fd5b5061081a8061001d5f395ff3fe608060405234801561000f575f80fd5b506004361061007a575f3560e01c8063715018a611610058578063715018a6146100f65780638da5cb5b146100fe578063c4d66de81461011c578063f2fde38b1461012f575f80fd5b806343ae20a31461007e5780634d96a90a146100935780635c1bba38146100d6575b5f80fd5b61009161008c3660046107d3565b610142565b005b60655473ffffffffffffffffffffffffffffffffffffffff165b60405173ffffffffffffffffffffffffffffffffffffffff909116815260200160405180910390f35b6065546100ad9073ffffffffffffffffffffffffffffffffffffffff1681565b6100916102c7565b60335473ffffffffffffffffffffffffffffffffffffffff166100ad565b61009161012a3660046107d3565b6102da565b61009161013d3660046107d3565b6104ed565b61014a6105a4565b73ffffffffffffffffffffffffffffffffffffffff81166101cc576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601160248201527f696e76616c69642073657175656e63657200000000000000000000000000000060448201526064015b60405180910390fd5b60655473ffffffffffffffffffffffffffffffffffffffff90811690821603610251576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152600e60248201527f73616d652073657175656e63657200000000000000000000000000000000000060448201526064016101c3565b6065805473ffffffffffffffffffffffffffffffffffffffff8381167fffffffffffffffffffffffff0000000000000000000000000000000000000000831681179093556040519116919082907fcd58b762453bd126b48db83f2cecd464f5281dd7e5e6824b528c09d0482984d6905f90a35050565b6102cf6105a4565b6102d85f610625565b565b5f54610100900460ff16158080156102f857505f54600160ff909116105b806103115750303b15801561031157505f5460ff166001145b61039d576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602e60248201527f496e697469616c697a61626c653a20636f6e747261637420697320616c72656160448201527f647920696e697469616c697a656400000000000000000000000000000000000060648201526084016101c3565b5f80547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00166001179055801
56103f9575f80547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ff166101001790555b73ffffffffffffffffffffffffffffffffffffffff8216610476576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152600d60248201527f696e76616c6964206f776e65720000000000000000000000000000000000000060448201526064016101c3565b61047e61069b565b61048782610625565b80156104e9575f80547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ff169055604051600181527f7f26b83ff96e1f2b6a682f133852f6798a09c465da95921460cefb38474024989060200160405180910390a15b5050565b6104f56105a4565b73ffffffffffffffffffffffffffffffffffffffff8116610598576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602660248201527f4f776e61626c653a206e6577206f776e657220697320746865207a65726f206160448201527f646472657373000000000000000000000000000000000000000000000000000060648201526084016101c3565b6105a181610625565b50565b60335473ffffffffffffffffffffffffffffffffffffffff1633146102d8576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820181905260248201527f4f776e61626c653a2063616c6c6572206973206e6f7420746865206f776e657260448201526064016101c3565b6033805473ffffffffffffffffffffffffffffffffffffffff8381167fffffffffffffffffffffffff0000000000000000000000000000000000000000831681179093556040519116919082907f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0905f90a35050565b5f54610100900460ff16610731576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602b60248201527f496e697469616c697a61626c653a20636f6e7472616374206973206e6f74206960448201527f6e697469616c697a696e6700000000000000000000000000000000000000000060648201526084016101c3565b6102d85f54610100900460ff166107ca576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602b60248201527f496e697469616c697a61626c653a20636f6e7472616374206973206e6f74206960448201527f6e697469616c697a696e6700000000000000000
000000000000000000000000060648201526084016101c3565b6102d833610625565b5f602082840312156107e3575f80fd5b813573ffffffffffffffffffffffffffffffffffffffff81168114610806575f80fd5b939250505056fea164736f6c6343000818000a", +} + +// L1SequencerABI is the input ABI used to generate the binding from. +// Deprecated: Use L1SequencerMetaData.ABI instead. +var L1SequencerABI = L1SequencerMetaData.ABI + +// L1SequencerBin is the compiled bytecode used for deploying new contracts. +// Deprecated: Use L1SequencerMetaData.Bin instead. +var L1SequencerBin = L1SequencerMetaData.Bin + +// DeployL1Sequencer deploys a new Ethereum contract, binding an instance of L1Sequencer to it. +func DeployL1Sequencer(auth *bind.TransactOpts, backend bind.ContractBackend) (common.Address, *types.Transaction, *L1Sequencer, error) { + parsed, err := L1SequencerMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(L1SequencerBin), backend) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &L1Sequencer{L1SequencerCaller: L1SequencerCaller{contract: contract}, L1SequencerTransactor: L1SequencerTransactor{contract: contract}, L1SequencerFilterer: L1SequencerFilterer{contract: contract}}, nil +} + +// L1Sequencer is an auto generated Go binding around an Ethereum contract. +type L1Sequencer struct { + L1SequencerCaller // Read-only binding to the contract + L1SequencerTransactor // Write-only binding to the contract + L1SequencerFilterer // Log filterer for contract events +} + +// L1SequencerCaller is an auto generated read-only Go binding around an Ethereum contract. 
+type L1SequencerCaller struct { + contract *bind.BoundContract // Generic contract wrapper for the low level calls +} + +// L1SequencerTransactor is an auto generated write-only Go binding around an Ethereum contract. +type L1SequencerTransactor struct { + contract *bind.BoundContract // Generic contract wrapper for the low level calls +} + +// L1SequencerFilterer is an auto generated log filtering Go binding around an Ethereum contract events. +type L1SequencerFilterer struct { + contract *bind.BoundContract // Generic contract wrapper for the low level calls +} + +// L1SequencerSession is an auto generated Go binding around an Ethereum contract, +// with pre-set call and transact options. +type L1SequencerSession struct { + Contract *L1Sequencer // Generic contract binding to set the session for + CallOpts bind.CallOpts // Call options to use throughout this session + TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session +} + +// L1SequencerCallerSession is an auto generated read-only Go binding around an Ethereum contract, +// with pre-set call options. +type L1SequencerCallerSession struct { + Contract *L1SequencerCaller // Generic contract caller binding to set the session for + CallOpts bind.CallOpts // Call options to use throughout this session +} + +// L1SequencerTransactorSession is an auto generated write-only Go binding around an Ethereum contract, +// with pre-set transact options. +type L1SequencerTransactorSession struct { + Contract *L1SequencerTransactor // Generic contract transactor binding to set the session for + TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session +} + +// L1SequencerRaw is an auto generated low-level Go binding around an Ethereum contract. 
+type L1SequencerRaw struct { + Contract *L1Sequencer // Generic contract binding to access the raw methods on +} + +// L1SequencerCallerRaw is an auto generated low-level read-only Go binding around an Ethereum contract. +type L1SequencerCallerRaw struct { + Contract *L1SequencerCaller // Generic read-only contract binding to access the raw methods on +} + +// L1SequencerTransactorRaw is an auto generated low-level write-only Go binding around an Ethereum contract. +type L1SequencerTransactorRaw struct { + Contract *L1SequencerTransactor // Generic write-only contract binding to access the raw methods on +} + +// NewL1Sequencer creates a new instance of L1Sequencer, bound to a specific deployed contract. +func NewL1Sequencer(address common.Address, backend bind.ContractBackend) (*L1Sequencer, error) { + contract, err := bindL1Sequencer(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &L1Sequencer{L1SequencerCaller: L1SequencerCaller{contract: contract}, L1SequencerTransactor: L1SequencerTransactor{contract: contract}, L1SequencerFilterer: L1SequencerFilterer{contract: contract}}, nil +} + +// NewL1SequencerCaller creates a new read-only instance of L1Sequencer, bound to a specific deployed contract. +func NewL1SequencerCaller(address common.Address, caller bind.ContractCaller) (*L1SequencerCaller, error) { + contract, err := bindL1Sequencer(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &L1SequencerCaller{contract: contract}, nil +} + +// NewL1SequencerTransactor creates a new write-only instance of L1Sequencer, bound to a specific deployed contract. 
+func NewL1SequencerTransactor(address common.Address, transactor bind.ContractTransactor) (*L1SequencerTransactor, error) { + contract, err := bindL1Sequencer(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &L1SequencerTransactor{contract: contract}, nil +} + +// NewL1SequencerFilterer creates a new log filterer instance of L1Sequencer, bound to a specific deployed contract. +func NewL1SequencerFilterer(address common.Address, filterer bind.ContractFilterer) (*L1SequencerFilterer, error) { + contract, err := bindL1Sequencer(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &L1SequencerFilterer{contract: contract}, nil +} + +// bindL1Sequencer binds a generic wrapper to an already deployed contract. +func bindL1Sequencer(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := L1SequencerMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +// Call invokes the (constant) contract method with params as input values and +// sets the output to result. The result type might be a single field for simple +// returns, a slice of interfaces for anonymous returns and a struct for named +// returns. +func (_L1Sequencer *L1SequencerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _L1Sequencer.Contract.L1SequencerCaller.contract.Call(opts, result, method, params...) +} + +// Transfer initiates a plain transaction to move funds to the contract, calling +// its default method if one is available. +func (_L1Sequencer *L1SequencerRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _L1Sequencer.Contract.L1SequencerTransactor.contract.Transfer(opts) +} + +// Transact invokes the (paid) contract method with params as input values. 
+func (_L1Sequencer *L1SequencerRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _L1Sequencer.Contract.L1SequencerTransactor.contract.Transact(opts, method, params...) +} + +// Call invokes the (constant) contract method with params as input values and +// sets the output to result. The result type might be a single field for simple +// returns, a slice of interfaces for anonymous returns and a struct for named +// returns. +func (_L1Sequencer *L1SequencerCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _L1Sequencer.Contract.contract.Call(opts, result, method, params...) +} + +// Transfer initiates a plain transaction to move funds to the contract, calling +// its default method if one is available. +func (_L1Sequencer *L1SequencerTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _L1Sequencer.Contract.contract.Transfer(opts) +} + +// Transact invokes the (paid) contract method with params as input values. +func (_L1Sequencer *L1SequencerTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _L1Sequencer.Contract.contract.Transact(opts, method, params...) +} + +// GetSequencer is a free data retrieval call binding the contract method 0x4d96a90a. +// +// Solidity: function getSequencer() view returns(address) +func (_L1Sequencer *L1SequencerCaller) GetSequencer(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _L1Sequencer.contract.Call(opts, &out, "getSequencer") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +// GetSequencer is a free data retrieval call binding the contract method 0x4d96a90a. 
+// +// Solidity: function getSequencer() view returns(address) +func (_L1Sequencer *L1SequencerSession) GetSequencer() (common.Address, error) { + return _L1Sequencer.Contract.GetSequencer(&_L1Sequencer.CallOpts) +} + +// GetSequencer is a free data retrieval call binding the contract method 0x4d96a90a. +// +// Solidity: function getSequencer() view returns(address) +func (_L1Sequencer *L1SequencerCallerSession) GetSequencer() (common.Address, error) { + return _L1Sequencer.Contract.GetSequencer(&_L1Sequencer.CallOpts) +} + +// Owner is a free data retrieval call binding the contract method 0x8da5cb5b. +// +// Solidity: function owner() view returns(address) +func (_L1Sequencer *L1SequencerCaller) Owner(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _L1Sequencer.contract.Call(opts, &out, "owner") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +// Owner is a free data retrieval call binding the contract method 0x8da5cb5b. +// +// Solidity: function owner() view returns(address) +func (_L1Sequencer *L1SequencerSession) Owner() (common.Address, error) { + return _L1Sequencer.Contract.Owner(&_L1Sequencer.CallOpts) +} + +// Owner is a free data retrieval call binding the contract method 0x8da5cb5b. +// +// Solidity: function owner() view returns(address) +func (_L1Sequencer *L1SequencerCallerSession) Owner() (common.Address, error) { + return _L1Sequencer.Contract.Owner(&_L1Sequencer.CallOpts) +} + +// Sequencer is a free data retrieval call binding the contract method 0x5c1bba38. 
+// +// Solidity: function sequencer() view returns(address) +func (_L1Sequencer *L1SequencerCaller) Sequencer(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _L1Sequencer.contract.Call(opts, &out, "sequencer") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +// Sequencer is a free data retrieval call binding the contract method 0x5c1bba38. +// +// Solidity: function sequencer() view returns(address) +func (_L1Sequencer *L1SequencerSession) Sequencer() (common.Address, error) { + return _L1Sequencer.Contract.Sequencer(&_L1Sequencer.CallOpts) +} + +// Sequencer is a free data retrieval call binding the contract method 0x5c1bba38. +// +// Solidity: function sequencer() view returns(address) +func (_L1Sequencer *L1SequencerCallerSession) Sequencer() (common.Address, error) { + return _L1Sequencer.Contract.Sequencer(&_L1Sequencer.CallOpts) +} + +// Initialize is a paid mutator transaction binding the contract method 0xc4d66de8. +// +// Solidity: function initialize(address _owner) returns() +func (_L1Sequencer *L1SequencerTransactor) Initialize(opts *bind.TransactOpts, _owner common.Address) (*types.Transaction, error) { + return _L1Sequencer.contract.Transact(opts, "initialize", _owner) +} + +// Initialize is a paid mutator transaction binding the contract method 0xc4d66de8. +// +// Solidity: function initialize(address _owner) returns() +func (_L1Sequencer *L1SequencerSession) Initialize(_owner common.Address) (*types.Transaction, error) { + return _L1Sequencer.Contract.Initialize(&_L1Sequencer.TransactOpts, _owner) +} + +// Initialize is a paid mutator transaction binding the contract method 0xc4d66de8. 
+// +// Solidity: function initialize(address _owner) returns() +func (_L1Sequencer *L1SequencerTransactorSession) Initialize(_owner common.Address) (*types.Transaction, error) { + return _L1Sequencer.Contract.Initialize(&_L1Sequencer.TransactOpts, _owner) +} + +// RenounceOwnership is a paid mutator transaction binding the contract method 0x715018a6. +// +// Solidity: function renounceOwnership() returns() +func (_L1Sequencer *L1SequencerTransactor) RenounceOwnership(opts *bind.TransactOpts) (*types.Transaction, error) { + return _L1Sequencer.contract.Transact(opts, "renounceOwnership") +} + +// RenounceOwnership is a paid mutator transaction binding the contract method 0x715018a6. +// +// Solidity: function renounceOwnership() returns() +func (_L1Sequencer *L1SequencerSession) RenounceOwnership() (*types.Transaction, error) { + return _L1Sequencer.Contract.RenounceOwnership(&_L1Sequencer.TransactOpts) +} + +// RenounceOwnership is a paid mutator transaction binding the contract method 0x715018a6. +// +// Solidity: function renounceOwnership() returns() +func (_L1Sequencer *L1SequencerTransactorSession) RenounceOwnership() (*types.Transaction, error) { + return _L1Sequencer.Contract.RenounceOwnership(&_L1Sequencer.TransactOpts) +} + +// TransferOwnership is a paid mutator transaction binding the contract method 0xf2fde38b. +// +// Solidity: function transferOwnership(address newOwner) returns() +func (_L1Sequencer *L1SequencerTransactor) TransferOwnership(opts *bind.TransactOpts, newOwner common.Address) (*types.Transaction, error) { + return _L1Sequencer.contract.Transact(opts, "transferOwnership", newOwner) +} + +// TransferOwnership is a paid mutator transaction binding the contract method 0xf2fde38b. 
+// +// Solidity: function transferOwnership(address newOwner) returns() +func (_L1Sequencer *L1SequencerSession) TransferOwnership(newOwner common.Address) (*types.Transaction, error) { + return _L1Sequencer.Contract.TransferOwnership(&_L1Sequencer.TransactOpts, newOwner) +} + +// TransferOwnership is a paid mutator transaction binding the contract method 0xf2fde38b. +// +// Solidity: function transferOwnership(address newOwner) returns() +func (_L1Sequencer *L1SequencerTransactorSession) TransferOwnership(newOwner common.Address) (*types.Transaction, error) { + return _L1Sequencer.Contract.TransferOwnership(&_L1Sequencer.TransactOpts, newOwner) +} + +// UpdateSequencer is a paid mutator transaction binding the contract method 0x43ae20a3. +// +// Solidity: function updateSequencer(address newSequencer) returns() +func (_L1Sequencer *L1SequencerTransactor) UpdateSequencer(opts *bind.TransactOpts, newSequencer common.Address) (*types.Transaction, error) { + return _L1Sequencer.contract.Transact(opts, "updateSequencer", newSequencer) +} + +// UpdateSequencer is a paid mutator transaction binding the contract method 0x43ae20a3. +// +// Solidity: function updateSequencer(address newSequencer) returns() +func (_L1Sequencer *L1SequencerSession) UpdateSequencer(newSequencer common.Address) (*types.Transaction, error) { + return _L1Sequencer.Contract.UpdateSequencer(&_L1Sequencer.TransactOpts, newSequencer) +} + +// UpdateSequencer is a paid mutator transaction binding the contract method 0x43ae20a3. 
+// +// Solidity: function updateSequencer(address newSequencer) returns() +func (_L1Sequencer *L1SequencerTransactorSession) UpdateSequencer(newSequencer common.Address) (*types.Transaction, error) { + return _L1Sequencer.Contract.UpdateSequencer(&_L1Sequencer.TransactOpts, newSequencer) +} + +// L1SequencerInitializedIterator is returned from FilterInitialized and is used to iterate over the raw logs and unpacked data for Initialized events raised by the L1Sequencer contract. +type L1SequencerInitializedIterator struct { + Event *L1SequencerInitialized // Event containing the contract specifics and raw log + + contract *bind.BoundContract // Generic contract to use for unpacking event data + event string // Event name to use for unpacking event data + + logs chan types.Log // Log channel receiving the found contract events + sub ethereum.Subscription // Subscription for errors, completion and termination + done bool // Whether the subscription completed delivering logs + fail error // Occurred error to stop iteration +} + +// Next advances the iterator to the subsequent event, returning whether there +// are any more events found. In case of a retrieval or parsing error, false is +// returned and Error() can be queried for the exact failure. 
+func (it *L1SequencerInitializedIterator) Next() bool { + // If the iterator failed, stop iterating + if it.fail != nil { + return false + } + // If the iterator completed, deliver directly whatever's available + if it.done { + select { + case log := <-it.logs: + it.Event = new(L1SequencerInitialized) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + // Iterator still in progress, wait for either a data or an error event + select { + case log := <-it.logs: + it.Event = new(L1SequencerInitialized) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +// Error returns any retrieval or parsing error occurred during filtering. +func (it *L1SequencerInitializedIterator) Error() error { + return it.fail +} + +// Close terminates the iteration process, releasing any pending underlying +// resources. +func (it *L1SequencerInitializedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +// L1SequencerInitialized represents a Initialized event raised by the L1Sequencer contract. +type L1SequencerInitialized struct { + Version uint8 + Raw types.Log // Blockchain specific contextual infos +} + +// FilterInitialized is a free log retrieval operation binding the contract event 0x7f26b83ff96e1f2b6a682f133852f6798a09c465da95921460cefb3847402498. 
+// +// Solidity: event Initialized(uint8 version) +func (_L1Sequencer *L1SequencerFilterer) FilterInitialized(opts *bind.FilterOpts) (*L1SequencerInitializedIterator, error) { + + logs, sub, err := _L1Sequencer.contract.FilterLogs(opts, "Initialized") + if err != nil { + return nil, err + } + return &L1SequencerInitializedIterator{contract: _L1Sequencer.contract, event: "Initialized", logs: logs, sub: sub}, nil +} + +// WatchInitialized is a free log subscription operation binding the contract event 0x7f26b83ff96e1f2b6a682f133852f6798a09c465da95921460cefb3847402498. +// +// Solidity: event Initialized(uint8 version) +func (_L1Sequencer *L1SequencerFilterer) WatchInitialized(opts *bind.WatchOpts, sink chan<- *L1SequencerInitialized) (event.Subscription, error) { + + logs, sub, err := _L1Sequencer.contract.WatchLogs(opts, "Initialized") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + // New log arrived, parse the event and forward to the user + event := new(L1SequencerInitialized) + if err := _L1Sequencer.contract.UnpackLog(event, "Initialized", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +// ParseInitialized is a log parse operation binding the contract event 0x7f26b83ff96e1f2b6a682f133852f6798a09c465da95921460cefb3847402498. 
+// +// Solidity: event Initialized(uint8 version) +func (_L1Sequencer *L1SequencerFilterer) ParseInitialized(log types.Log) (*L1SequencerInitialized, error) { + event := new(L1SequencerInitialized) + if err := _L1Sequencer.contract.UnpackLog(event, "Initialized", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +// L1SequencerOwnershipTransferredIterator is returned from FilterOwnershipTransferred and is used to iterate over the raw logs and unpacked data for OwnershipTransferred events raised by the L1Sequencer contract. +type L1SequencerOwnershipTransferredIterator struct { + Event *L1SequencerOwnershipTransferred // Event containing the contract specifics and raw log + + contract *bind.BoundContract // Generic contract to use for unpacking event data + event string // Event name to use for unpacking event data + + logs chan types.Log // Log channel receiving the found contract events + sub ethereum.Subscription // Subscription for errors, completion and termination + done bool // Whether the subscription completed delivering logs + fail error // Occurred error to stop iteration +} + +// Next advances the iterator to the subsequent event, returning whether there +// are any more events found. In case of a retrieval or parsing error, false is +// returned and Error() can be queried for the exact failure. 
+func (it *L1SequencerOwnershipTransferredIterator) Next() bool { + // If the iterator failed, stop iterating + if it.fail != nil { + return false + } + // If the iterator completed, deliver directly whatever's available + if it.done { + select { + case log := <-it.logs: + it.Event = new(L1SequencerOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + // Iterator still in progress, wait for either a data or an error event + select { + case log := <-it.logs: + it.Event = new(L1SequencerOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +// Error returns any retrieval or parsing error occurred during filtering. +func (it *L1SequencerOwnershipTransferredIterator) Error() error { + return it.fail +} + +// Close terminates the iteration process, releasing any pending underlying +// resources. +func (it *L1SequencerOwnershipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +// L1SequencerOwnershipTransferred represents a OwnershipTransferred event raised by the L1Sequencer contract. +type L1SequencerOwnershipTransferred struct { + PreviousOwner common.Address + NewOwner common.Address + Raw types.Log // Blockchain specific contextual infos +} + +// FilterOwnershipTransferred is a free log retrieval operation binding the contract event 0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0. 
+// +// Solidity: event OwnershipTransferred(address indexed previousOwner, address indexed newOwner) +func (_L1Sequencer *L1SequencerFilterer) FilterOwnershipTransferred(opts *bind.FilterOpts, previousOwner []common.Address, newOwner []common.Address) (*L1SequencerOwnershipTransferredIterator, error) { + + var previousOwnerRule []interface{} + for _, previousOwnerItem := range previousOwner { + previousOwnerRule = append(previousOwnerRule, previousOwnerItem) + } + var newOwnerRule []interface{} + for _, newOwnerItem := range newOwner { + newOwnerRule = append(newOwnerRule, newOwnerItem) + } + + logs, sub, err := _L1Sequencer.contract.FilterLogs(opts, "OwnershipTransferred", previousOwnerRule, newOwnerRule) + if err != nil { + return nil, err + } + return &L1SequencerOwnershipTransferredIterator{contract: _L1Sequencer.contract, event: "OwnershipTransferred", logs: logs, sub: sub}, nil +} + +// WatchOwnershipTransferred is a free log subscription operation binding the contract event 0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0. 
+// +// Solidity: event OwnershipTransferred(address indexed previousOwner, address indexed newOwner) +func (_L1Sequencer *L1SequencerFilterer) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *L1SequencerOwnershipTransferred, previousOwner []common.Address, newOwner []common.Address) (event.Subscription, error) { + + var previousOwnerRule []interface{} + for _, previousOwnerItem := range previousOwner { + previousOwnerRule = append(previousOwnerRule, previousOwnerItem) + } + var newOwnerRule []interface{} + for _, newOwnerItem := range newOwner { + newOwnerRule = append(newOwnerRule, newOwnerItem) + } + + logs, sub, err := _L1Sequencer.contract.WatchLogs(opts, "OwnershipTransferred", previousOwnerRule, newOwnerRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + // New log arrived, parse the event and forward to the user + event := new(L1SequencerOwnershipTransferred) + if err := _L1Sequencer.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +// ParseOwnershipTransferred is a log parse operation binding the contract event 0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0. 
+// +// Solidity: event OwnershipTransferred(address indexed previousOwner, address indexed newOwner) +func (_L1Sequencer *L1SequencerFilterer) ParseOwnershipTransferred(log types.Log) (*L1SequencerOwnershipTransferred, error) { + event := new(L1SequencerOwnershipTransferred) + if err := _L1Sequencer.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +// L1SequencerSequencerUpdatedIterator is returned from FilterSequencerUpdated and is used to iterate over the raw logs and unpacked data for SequencerUpdated events raised by the L1Sequencer contract. +type L1SequencerSequencerUpdatedIterator struct { + Event *L1SequencerSequencerUpdated // Event containing the contract specifics and raw log + + contract *bind.BoundContract // Generic contract to use for unpacking event data + event string // Event name to use for unpacking event data + + logs chan types.Log // Log channel receiving the found contract events + sub ethereum.Subscription // Subscription for errors, completion and termination + done bool // Whether the subscription completed delivering logs + fail error // Occurred error to stop iteration +} + +// Next advances the iterator to the subsequent event, returning whether there +// are any more events found. In case of a retrieval or parsing error, false is +// returned and Error() can be queried for the exact failure. 
+func (it *L1SequencerSequencerUpdatedIterator) Next() bool { + // If the iterator failed, stop iterating + if it.fail != nil { + return false + } + // If the iterator completed, deliver directly whatever's available + if it.done { + select { + case log := <-it.logs: + it.Event = new(L1SequencerSequencerUpdated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + // Iterator still in progress, wait for either a data or an error event + select { + case log := <-it.logs: + it.Event = new(L1SequencerSequencerUpdated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +// Error returns any retrieval or parsing error occurred during filtering. +func (it *L1SequencerSequencerUpdatedIterator) Error() error { + return it.fail +} + +// Close terminates the iteration process, releasing any pending underlying +// resources. +func (it *L1SequencerSequencerUpdatedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +// L1SequencerSequencerUpdated represents a SequencerUpdated event raised by the L1Sequencer contract. +type L1SequencerSequencerUpdated struct { + OldSequencer common.Address + NewSequencer common.Address + Raw types.Log // Blockchain specific contextual infos +} + +// FilterSequencerUpdated is a free log retrieval operation binding the contract event 0xcd58b762453bd126b48db83f2cecd464f5281dd7e5e6824b528c09d0482984d6. 
+// +// Solidity: event SequencerUpdated(address indexed oldSequencer, address indexed newSequencer) +func (_L1Sequencer *L1SequencerFilterer) FilterSequencerUpdated(opts *bind.FilterOpts, oldSequencer []common.Address, newSequencer []common.Address) (*L1SequencerSequencerUpdatedIterator, error) { + + var oldSequencerRule []interface{} + for _, oldSequencerItem := range oldSequencer { + oldSequencerRule = append(oldSequencerRule, oldSequencerItem) + } + var newSequencerRule []interface{} + for _, newSequencerItem := range newSequencer { + newSequencerRule = append(newSequencerRule, newSequencerItem) + } + + logs, sub, err := _L1Sequencer.contract.FilterLogs(opts, "SequencerUpdated", oldSequencerRule, newSequencerRule) + if err != nil { + return nil, err + } + return &L1SequencerSequencerUpdatedIterator{contract: _L1Sequencer.contract, event: "SequencerUpdated", logs: logs, sub: sub}, nil +} + +// WatchSequencerUpdated is a free log subscription operation binding the contract event 0xcd58b762453bd126b48db83f2cecd464f5281dd7e5e6824b528c09d0482984d6. 
+// +// Solidity: event SequencerUpdated(address indexed oldSequencer, address indexed newSequencer) +func (_L1Sequencer *L1SequencerFilterer) WatchSequencerUpdated(opts *bind.WatchOpts, sink chan<- *L1SequencerSequencerUpdated, oldSequencer []common.Address, newSequencer []common.Address) (event.Subscription, error) { + + var oldSequencerRule []interface{} + for _, oldSequencerItem := range oldSequencer { + oldSequencerRule = append(oldSequencerRule, oldSequencerItem) + } + var newSequencerRule []interface{} + for _, newSequencerItem := range newSequencer { + newSequencerRule = append(newSequencerRule, newSequencerItem) + } + + logs, sub, err := _L1Sequencer.contract.WatchLogs(opts, "SequencerUpdated", oldSequencerRule, newSequencerRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + // New log arrived, parse the event and forward to the user + event := new(L1SequencerSequencerUpdated) + if err := _L1Sequencer.contract.UnpackLog(event, "SequencerUpdated", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +// ParseSequencerUpdated is a log parse operation binding the contract event 0xcd58b762453bd126b48db83f2cecd464f5281dd7e5e6824b528c09d0482984d6. 
+// +// Solidity: event SequencerUpdated(address indexed oldSequencer, address indexed newSequencer) +func (_L1Sequencer *L1SequencerFilterer) ParseSequencerUpdated(log types.Log) (*L1SequencerSequencerUpdated, error) { + event := new(L1SequencerSequencerUpdated) + if err := _L1Sequencer.contract.UnpackLog(event, "SequencerUpdated", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} diff --git a/bindings/go.mod b/bindings/go.mod index a509930b7..2a2e01ad8 100644 --- a/bindings/go.mod +++ b/bindings/go.mod @@ -2,9 +2,9 @@ module morph-l2/bindings go 1.24.0 -replace github.com/tendermint/tendermint => github.com/morph-l2/tendermint v0.3.3 +replace github.com/tendermint/tendermint => github.com/morph-l2/tendermint v0.3.4-0.20260313040448-999449fd4d23 -require github.com/morph-l2/go-ethereum v1.10.14-0.20260211074551-4f0f6e6bd141 +require github.com/morph-l2/go-ethereum v1.10.14-0.20260312125309-280bfb9cfd1d require ( github.com/VictoriaMetrics/fastcache v1.12.2 // indirect diff --git a/bindings/go.sum b/bindings/go.sum index bb71a7769..012505bda 100644 --- a/bindings/go.sum +++ b/bindings/go.sum @@ -111,8 +111,8 @@ github.com/mmcloughlin/addchain v0.4.0/go.mod h1:A86O+tHqZLMNO4w6ZZ4FlVQEadcoqky github.com/mmcloughlin/profile v0.1.1/go.mod h1:IhHD7q1ooxgwTgjxQYkACGA77oFTDdFVejUS1/tS/qU= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/morph-l2/go-ethereum v1.10.14-0.20260211074551-4f0f6e6bd141 h1:A8eygErKU6WKMipGWIemzwLeYkIGLd9yb/Ry3x+J9PQ= -github.com/morph-l2/go-ethereum v1.10.14-0.20260211074551-4f0f6e6bd141/go.mod h1:nkVzHjQWCOjvukQW8ittlwX+Xz9gmVHrP7mUi7zoHTs= +github.com/morph-l2/go-ethereum v1.10.14-0.20260312125309-280bfb9cfd1d h1:Qy3ytYw/PGnrPDAWen1MsMUhUXclk1F2Q36A07+bBv4= +github.com/morph-l2/go-ethereum v1.10.14-0.20260312125309-280bfb9cfd1d/go.mod 
h1:nkVzHjQWCOjvukQW8ittlwX+Xz9gmVHrP7mUi7zoHTs= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= diff --git a/contracts/contracts/l1/L1Sequencer.sol b/contracts/contracts/l1/L1Sequencer.sol new file mode 100644 index 000000000..3a46768bd --- /dev/null +++ b/contracts/contracts/l1/L1Sequencer.sol @@ -0,0 +1,58 @@ +// SPDX-License-Identifier: MIT +pragma solidity =0.8.24; + +import {OwnableUpgradeable} from "@openzeppelin/contracts-upgradeable/access/OwnableUpgradeable.sol"; + +/// @title L1Sequencer +/// @notice L1 contract for managing the sequencer address. +/// The sequencer address can be updated by the owner (multisig recommended). +contract L1Sequencer is OwnableUpgradeable { + // ============ Storage ============ + + /// @notice Current sequencer address + address public sequencer; + + // ============ Events ============ + + /// @notice Emitted when sequencer is updated + event SequencerUpdated(address indexed oldSequencer, address indexed newSequencer); + + // ============ Initializer ============ + + /// @notice Initialize the contract + /// @param _owner Contract owner (multisig recommended) + /// @param _initialSequencer Initial sequencer address (can be address(0) to set later) + function initialize(address _owner, address _initialSequencer) external initializer { + require(_owner != address(0), "invalid owner"); + + __Ownable_init(); + _transferOwnership(_owner); + + // Set initial sequencer if provided + if (_initialSequencer != address(0)) { + sequencer = _initialSequencer; + emit SequencerUpdated(address(0), _initialSequencer); + } + } + + // ============ Admin Functions ============ + + /// @notice Update sequencer address (takes effect immediately) + /// @param newSequencer New sequencer address + function 
updateSequencer(address newSequencer) external onlyOwner { + require(newSequencer != address(0), "invalid sequencer"); + require(newSequencer != sequencer, "same sequencer"); + + address oldSequencer = sequencer; + sequencer = newSequencer; + + emit SequencerUpdated(oldSequencer, newSequencer); + } + + // ============ View Functions ============ + + /// @notice Get current sequencer address + function getSequencer() external view returns (address) { + return sequencer; + } +} diff --git a/contracts/deploy/013-DeployProxys.ts b/contracts/deploy/013-DeployProxys.ts index 24545613a..6b3a4a5ce 100644 --- a/contracts/deploy/013-DeployProxys.ts +++ b/contracts/deploy/013-DeployProxys.ts @@ -48,6 +48,7 @@ export const deployContractProxies = async ( const RollupProxyStorageName = ProxyStorageName.RollupProxyStorageName const L1StakingProxyStorageName = ProxyStorageName.L1StakingProxyStorageName + const L1SequencerProxyStorageName = ProxyStorageName.L1SequencerProxyStorageName const L1GatewayRouterProxyStorageName = ProxyStorageName.L1GatewayRouterProxyStorageName const L1ETHGatewayProxyStorageName = ProxyStorageName.L1ETHGatewayProxyStorageName @@ -112,6 +113,13 @@ export const deployContractProxies = async ( return err } + // ************************ sequencer contracts deploy ************************ + // L1SequencerProxy deploy + err = await deployContractProxyByStorageName(hre, path, deployer, L1SequencerProxyStorageName) + if (err != "") { + return err + } + // ************************ rollup contracts deploy ************************ // RollupProxy deploy err = await deployContractProxyByStorageName(hre, path, deployer, RollupProxyStorageName) @@ -274,6 +282,7 @@ export const deployContractProxiesConcurrently = async ( ProxyStorageName.L1CrossDomainMessengerProxyStorageName, ProxyStorageName.L1MessageQueueWithGasPriceOracleProxyStorageName, ProxyStorageName.L1StakingProxyStorageName, + ProxyStorageName.L1SequencerProxyStorageName, 
ProxyStorageName.RollupProxyStorageName, ProxyStorageName.L1GatewayRouterProxyStorageName, ProxyStorageName.L1ETHGatewayProxyStorageName, diff --git a/contracts/deploy/014-DeployImpls.ts b/contracts/deploy/014-DeployImpls.ts index ed0653706..b32613018 100644 --- a/contracts/deploy/014-DeployImpls.ts +++ b/contracts/deploy/014-DeployImpls.ts @@ -122,6 +122,11 @@ export const deployContractImplsConcurrently = async ( deployPromises.push(deployContract(L1StakingFactoryName, StakingImplStorageName, [L1CrossDomainMessengerProxyAddress])) + // L1Sequencer deploy (no constructor args) + const L1SequencerFactoryName = ContractFactoryName.L1Sequencer + const L1SequencerImplStorageName = ImplStorageName.L1SequencerStorageName + deployPromises.push(deployContract(L1SequencerFactoryName, L1SequencerImplStorageName)) + const results = await Promise.all(deployPromises) for (const result of results) { @@ -382,6 +387,21 @@ export const deployContractImpls = async ( return err } + // ************************ sequencer contracts deploy ************************ + // L1Sequencer deploy + const L1SequencerFactoryName = ContractFactoryName.L1Sequencer + const L1SequencerImplStorageName = ImplStorageName.L1SequencerStorageName + Factory = await hre.ethers.getContractFactory(L1SequencerFactoryName) + contract = await Factory.deploy() + await contract.deployed() + console.log("%s=%s ; TX_HASH: %s", L1SequencerImplStorageName, contract.address.toLocaleLowerCase(), contract.deployTransaction.hash) + blockNumber = await hre.ethers.provider.getBlockNumber() + console.log("BLOCK_NUMBER: %s", blockNumber) + err = await storage(path, L1SequencerImplStorageName, contract.address.toLocaleLowerCase(), blockNumber || 0) + if (err != '') { + return err + } + // return return '' } diff --git a/contracts/deploy/019-AdminTransfer.ts b/contracts/deploy/019-AdminTransfer.ts index 566396b90..a0cd1b58a 100644 --- a/contracts/deploy/019-AdminTransfer.ts +++ b/contracts/deploy/019-AdminTransfer.ts @@ -109,6 
+109,7 @@ export const AdminTransferConcurrently = async ( ProxyStorageName.L1CrossDomainMessengerProxyStorageName, ProxyStorageName.L1MessageQueueWithGasPriceOracleProxyStorageName, ProxyStorageName.L1StakingProxyStorageName, + ProxyStorageName.L1SequencerProxyStorageName, // Added L1Sequencer ProxyStorageName.RollupProxyStorageName, ProxyStorageName.L1GatewayRouterProxyStorageName, ProxyStorageName.L1ETHGatewayProxyStorageName, @@ -159,6 +160,7 @@ export const AdminTransfer = async ( const RollupProxyStorageName = ProxyStorageName.RollupProxyStorageName const L1StakingProxyStorageName = ProxyStorageName.L1StakingProxyStorageName + const L1SequencerProxyStorageName = ProxyStorageName.L1SequencerProxyStorageName const L1GatewayRouterProxyStorageName = ProxyStorageName.L1GatewayRouterProxyStorageName const L1ETHGatewayProxyStorageName = ProxyStorageName.L1ETHGatewayProxyStorageName @@ -192,6 +194,13 @@ export const AdminTransfer = async ( return err } + // ************************ sequencer contracts admin change ************************ + // L1SequencerProxy admin change + err = await AdminTransferByProxyStorageName(hre, path, deployer, L1SequencerProxyStorageName) + if (err != '') { + return err + } + // ************************ rollup contracts admin change ************************ // RollupProxy admin change err = await AdminTransferByProxyStorageName(hre, path, deployer, RollupProxyStorageName) diff --git a/contracts/deploy/020-ContractInit.ts b/contracts/deploy/020-ContractInit.ts index a0e17dc19..96bf3637b 100644 --- a/contracts/deploy/020-ContractInit.ts +++ b/contracts/deploy/020-ContractInit.ts @@ -57,18 +57,32 @@ export const ContractInit = async ( // submitter and challenger const submitter: string = config.rollupProposer const challenger: string = config.rollupChallenger + const rollupDelayPeriod: number = config.rollupDelayPeriod + if (!ethers.utils.isAddress(submitter) || !ethers.utils.isAddress(challenger) ) { console.error('please check your 
address') return '' } + if (rollupDelayPeriod==0){ + console.error('rollupDelayPeriod cannot set zero') + return '' + } let res = await Rollup.importGenesisBatch(batchHeader) let rec = await res.wait() console.log(`importGenesisBatch(%s) ${rec.status == 1 ? "success" : "failed"}`, batchHeader) res = await Rollup.addChallenger(challenger) rec = await res.wait() console.log(`addChallenger(%s) ${rec.status == 1 ? "success" : "failed"}`, challenger) + + res =await Rollup.initialize2("0x0000000000000000000000000000000000000000000000000000000000000001") + rec = await res.wait() + console.log(`initialize2(%s) ${rec.status == 1 ? "success" : "failed"}`) + + res = await Rollup.initialize3(rollupDelayPeriod) + rec = await res.wait() + console.log(`initialize3(%s) ${rec.status == 1 ? "success" : "failed"}`) } // ------------------ staking init ----------------- diff --git a/contracts/deploy/022-SequencerInit.ts b/contracts/deploy/022-SequencerInit.ts new file mode 100644 index 000000000..e8de25116 --- /dev/null +++ b/contracts/deploy/022-SequencerInit.ts @@ -0,0 +1,89 @@ +import "@nomiclabs/hardhat-web3"; +import "@nomiclabs/hardhat-ethers"; +import "@nomiclabs/hardhat-waffle"; + +import { + HardhatRuntimeEnvironment +} from 'hardhat/types'; +import { assertContractVariable, getContractAddressByName, awaitCondition } from "../src/deploy-utils"; +import { ethers } from 'ethers' + +import { + ImplStorageName, + ProxyStorageName, + ContractFactoryName, +} from "../src/types" + +export const SequencerInit = async ( + hre: HardhatRuntimeEnvironment, + path: string, + deployer: any, + configTmp: any +): Promise => { + // L1Sequencer addresses + const L1SequencerProxyAddress = getContractAddressByName(path, ProxyStorageName.L1SequencerProxyStorageName) + const L1SequencerImplAddress = getContractAddressByName(path, ImplStorageName.L1SequencerStorageName) + const L1SequencerFactory = await hre.ethers.getContractFactory(ContractFactoryName.L1Sequencer) + + const IL1SequencerProxy = 
await hre.ethers.getContractAt(ContractFactoryName.DefaultProxyInterface, L1SequencerProxyAddress, deployer) + + if ( + (await IL1SequencerProxy.implementation()).toLocaleLowerCase() !== L1SequencerImplAddress.toLocaleLowerCase() + ) { + console.log('Upgrading the L1Sequencer proxy...') + + // Owner is the deployer (will be transferred to multisig in production) + const owner = await deployer.getAddress() + + // Get initial sequencer address from config (first sequencer address) + // Note: l2SequencerAddresses is defined in contracts/src/deploy-config/l1.ts + const initialSequencer = (configTmp.l2SequencerAddresses && configTmp.l2SequencerAddresses.length > 0) + ? configTmp.l2SequencerAddresses[0] + : ethers.constants.AddressZero + + console.log('Initial sequencer address:', initialSequencer) + + // Upgrade and initialize the proxy with owner and initial sequencer + // Note: We set sequencer in initialize() to avoid TransparentUpgradeableProxy admin restriction + await IL1SequencerProxy.upgradeToAndCall( + L1SequencerImplAddress, + L1SequencerFactory.interface.encodeFunctionData('initialize', [owner, initialSequencer]) + ) + + await awaitCondition( + async () => { + return ( + (await IL1SequencerProxy.implementation()).toLocaleLowerCase() === L1SequencerImplAddress.toLocaleLowerCase() + ) + }, + 3000, + 1000 + ) + + const contractTmp = new ethers.Contract( + L1SequencerProxyAddress, + L1SequencerFactory.interface, + deployer, + ) + + await assertContractVariable( + contractTmp, + 'owner', + owner, + ) + + if (initialSequencer !== ethers.constants.AddressZero) { + await assertContractVariable( + contractTmp, + 'sequencer', + initialSequencer, + ) + console.log('L1SequencerProxy upgrade success, initial sequencer set:', initialSequencer) + } else { + console.log('L1SequencerProxy upgrade success (no initial sequencer set)') + } + } + return '' +} + +export default SequencerInit diff --git a/contracts/deploy/index.ts b/contracts/deploy/index.ts index 
16f69f20b..a30ab90fb 100644 --- a/contracts/deploy/index.ts +++ b/contracts/deploy/index.ts @@ -10,6 +10,7 @@ import StakingInit from './018-StakingInit' import {AdminTransfer,AdminTransferByProxyStorageName, AdminTransferConcurrently} from './019-AdminTransfer' import ContractInit from './020-ContractInit' import StakingRegister from './021-StakingRegister' +import SequencerInit from './022-SequencerInit' export { @@ -28,5 +29,6 @@ export { AdminTransferByProxyStorageName, AdminTransferConcurrently, ContractInit, - StakingRegister + StakingRegister, + SequencerInit } \ No newline at end of file diff --git a/contracts/go.mod b/contracts/go.mod index 514d63ee1..1756505b0 100644 --- a/contracts/go.mod +++ b/contracts/go.mod @@ -2,11 +2,11 @@ module morph-l2/contract go 1.24.0 -replace github.com/tendermint/tendermint => github.com/morph-l2/tendermint v0.3.3 +replace github.com/tendermint/tendermint => github.com/morph-l2/tendermint v0.3.4-0.20260313040448-999449fd4d23 require ( github.com/iden3/go-iden3-crypto v0.0.16 - github.com/morph-l2/go-ethereum v1.10.14-0.20260211074551-4f0f6e6bd141 + github.com/morph-l2/go-ethereum v1.10.14-0.20260312125309-280bfb9cfd1d github.com/stretchr/testify v1.10.0 ) diff --git a/contracts/go.sum b/contracts/go.sum index d0f44f830..b5618e721 100644 --- a/contracts/go.sum +++ b/contracts/go.sum @@ -138,8 +138,8 @@ github.com/mmcloughlin/addchain v0.4.0/go.mod h1:A86O+tHqZLMNO4w6ZZ4FlVQEadcoqky github.com/mmcloughlin/profile v0.1.1/go.mod h1:IhHD7q1ooxgwTgjxQYkACGA77oFTDdFVejUS1/tS/qU= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/morph-l2/go-ethereum v1.10.14-0.20260211074551-4f0f6e6bd141 h1:A8eygErKU6WKMipGWIemzwLeYkIGLd9yb/Ry3x+J9PQ= -github.com/morph-l2/go-ethereum v1.10.14-0.20260211074551-4f0f6e6bd141/go.mod h1:nkVzHjQWCOjvukQW8ittlwX+Xz9gmVHrP7mUi7zoHTs= 
+github.com/morph-l2/go-ethereum v1.10.14-0.20260312125309-280bfb9cfd1d h1:Qy3ytYw/PGnrPDAWen1MsMUhUXclk1F2Q36A07+bBv4= +github.com/morph-l2/go-ethereum v1.10.14-0.20260312125309-280bfb9cfd1d/go.mod h1:nkVzHjQWCOjvukQW8ittlwX+Xz9gmVHrP7mUi7zoHTs= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= diff --git a/contracts/src/deploy-config/l1.ts b/contracts/src/deploy-config/l1.ts index 42cf90bbe..9bd23177a 100644 --- a/contracts/src/deploy-config/l1.ts +++ b/contracts/src/deploy-config/l1.ts @@ -23,11 +23,13 @@ const config = { finalizationPeriodSeconds: 10, rollupProofWindow: 86400, proofRewardPercent: 70, + rollupDelayPeriod: 86400, + // challenge config rollupProposer: '0x70997970C51812dc3A010C7d01b50e0d17dc79C8', rollupChallenger: '0x15d34AAf54267DB7D7c367839AAf71A00a2C6A65', // genesis config - batchHeader: '0x0000000000000000000000000000000000000000000000000043a758882ae97327ffcc63373e26fcd144a5a738eac834c167175d69713780c0010657f37554c781402a22917dee2f75def7ab966d7b770905398eba3c444014000000000000000000000000000000000000000000000000000000000000000020cd420e20d610897b8f2c5ac5259ab8b57cce1074212cec2815b2b73ff93d9f000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000', + batchHeader: 
'0x00000000000000000000000000000000000000000000000000886e14341b355178d11a2c9f985f60a1a195973078b688a11aeaebb0c95db595010657f37554c781402a22917dee2f75def7ab966d7b770905398eba3c44401400000000000000000000000000000000000000000000000000000000000000002d20dde82426d971e398b3cba11ebb60d0d740b799f85e2f95fd12a1faad8e2f000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000', // staking config // staking Cross-Chain config diff --git a/contracts/src/types.ts b/contracts/src/types.ts index ea2577c89..7e518addd 100644 --- a/contracts/src/types.ts +++ b/contracts/src/types.ts @@ -18,6 +18,8 @@ const ContractFactoryName = { MultipleVersionRollupVerifier: 'MultipleVersionRollupVerifier', // staking L1Staking: 'L1Staking', + // L1 sequencer + L1Sequencer: 'L1Sequencer', // gateway L1GatewayRouter: 'L1GatewayRouter', L1StandardERC20Gateway: 'L1StandardERC20Gateway', @@ -40,6 +42,8 @@ const ProxyStorageName = { RollupProxyStorageName: 'Proxy__Rollup', // staking L1StakingProxyStorageName: 'Proxy__L1Staking', + // L1 sequencer + L1SequencerProxyStorageName: 'Proxy__L1Sequencer', // gateway L1GatewayRouterProxyStorageName: 'Proxy__L1GatewayRouter', L1StandardERC20GatewayProxyStorageName: 'Proxy__L1StandardERC20Gateway', @@ -71,6 +75,8 @@ const ImplStorageName = { MultipleVersionRollupVerifierStorageName: 'Impl__MultipleVersionRollupVerifier', // staking L1StakingStorageName: 'Impl__L1Staking', + // L1 sequencer + L1SequencerStorageName: 'Impl__L1Sequencer', // gateway L1GatewayRouterStorageName: 'Impl__L1GatewayRouter', L1StandardERC20GatewayStorageName: 'Impl__L1StandardERC20Gateway', diff --git a/contracts/tasks/deploy.ts b/contracts/tasks/deploy.ts index 9ee6739a9..a7ea6bb6a 100644 --- a/contracts/tasks/deploy.ts +++ b/contracts/tasks/deploy.ts @@ -21,6 +21,7 @@ import { AdminTransferConcurrently, ContractInit, StakingRegister, + SequencerInit, } 
from '../deploy/index' import { ethers } from "ethers"; @@ -120,6 +121,12 @@ task("initialize") console.log('Staking init failed, err: ', err) return } + console.log('\n---------------------------------- Sequencer init ----------------------------------') + err = await SequencerInit(hre, storagePath, deployer, config) + if (err != '') { + console.log('Sequencer init failed, err: ', err) + return + } console.log('\n---------------------------------- Admin Transfer ----------------------------------') if (concurrent === 'true') { err = await AdminTransferConcurrently(hre, storagePath, deployer, config) diff --git a/go-ethereum b/go-ethereum index 4f0f6e6bd..e6c501a8d 160000 --- a/go-ethereum +++ b/go-ethereum @@ -1 +1 @@ -Subproject commit 4f0f6e6bd14178a19a6ef6e57bdcb48eb6612bf4 +Subproject commit e6c501a8d68020dade42a78da87ec2516de77a0e diff --git a/go.work.sum b/go.work.sum index 8c91e41ad..bb8413f95 100644 --- a/go.work.sum +++ b/go.work.sum @@ -1015,6 +1015,12 @@ github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7P github.com/morph-l2/go-ethereum v1.10.14-0.20251125061742-69718a9dcab9/go.mod h1:tiFPeidxjoCmLj18ne9H3KQdIGTCvRC30qlef06Fd9M= github.com/morph-l2/go-ethereum v1.10.14-0.20260206063816-522b70a5f16f h1:e8gfduHc4AKlR0fD6J3HXveP2Gp4PMvN2UfA9CYEvEc= github.com/morph-l2/go-ethereum v1.10.14-0.20260206063816-522b70a5f16f/go.mod h1:tiFPeidxjoCmLj18ne9H3KQdIGTCvRC30qlef06Fd9M= +github.com/morph-l2/go-ethereum v1.10.14-0.20260227074910-324c53b65341 h1:kupvcg2mxi6WpWPMrGNRGHfpXhkz7IiORwE3kSExwDE= +github.com/morph-l2/go-ethereum v1.10.14-0.20260227074910-324c53b65341/go.mod h1:nkVzHjQWCOjvukQW8ittlwX+Xz9gmVHrP7mUi7zoHTs= +github.com/morph-l2/go-ethereum v1.10.14-0.20260303114154-29281e501802 h1:9gu7AklnN0a0+Fshc/lBvi/2OeatXaN38yqsJryvMRA= +github.com/morph-l2/go-ethereum v1.10.14-0.20260303114154-29281e501802/go.mod h1:nkVzHjQWCOjvukQW8ittlwX+Xz9gmVHrP7mUi7zoHTs= +github.com/morph-l2/tendermint v0.3.3-0.20260226075902-3692a2a2889c 
h1:CzaQ/rK3nrqylN8JVr2htAsnu2xlg4u99SjzudzxrpM= +github.com/morph-l2/tendermint v0.3.3-0.20260226075902-3692a2a2889c/go.mod h1:TtCzp9l6Z6yDUiwv3TbqKqw8Q8RKp3fSz5+adO1/Y8w= github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae h1:VeRdUYdCw49yizlSbMEn2SZ+gT+3IUKx8BqxyQdz+BY= github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= diff --git a/node/blocktag/config.go b/node/blocktag/config.go index 6c392ce0f..43c282800 100644 --- a/node/blocktag/config.go +++ b/node/blocktag/config.go @@ -20,7 +20,6 @@ const ( // Config holds the configuration for BlockTagService type Config struct { - L1Addr string RollupAddress common.Address SafeConfirmations uint64 PollInterval time.Duration @@ -36,8 +35,6 @@ func DefaultConfig() *Config { // SetCliContext sets the configuration from CLI context func (c *Config) SetCliContext(ctx *cli.Context) error { - c.L1Addr = ctx.GlobalString(flags.L1NodeAddr.Name) - // Determine RollupAddress: use explicit flag, or mainnet default, or error if ctx.GlobalBool(flags.MainnetFlag.Name) { c.RollupAddress = node.MainnetRollupContractAddress diff --git a/node/blocktag/service.go b/node/blocktag/service.go index 86f9b8d70..45f7ecda3 100644 --- a/node/blocktag/service.go +++ b/node/blocktag/service.go @@ -63,22 +63,18 @@ type BlockTagService struct { // NewBlockTagService creates a new BlockTagService func NewBlockTagService( ctx context.Context, + l1Client *ethclient.Client, l2Client *types.RetryableClient, config *Config, logger tmlog.Logger, ) (*BlockTagService, error) { - if config.L1Addr == "" { - return nil, fmt.Errorf("L1 RPC address is required") + if l1Client == nil { + return nil, fmt.Errorf("L1 client is required") } if config.RollupAddress == (common.Address{}) { return nil, fmt.Errorf("Rollup contract address is required") } - l1Client, err := 
ethclient.Dial(config.L1Addr) - if err != nil { - return nil, fmt.Errorf("failed to connect to L1: %w", err) - } - rollup, err := bindings.NewRollup(config.RollupAddress, l1Client) if err != nil { return nil, fmt.Errorf("failed to create rollup binding: %w", err) @@ -122,7 +118,6 @@ func (s *BlockTagService) Stop() { s.logger.Info("Stopping BlockTagService") s.cancel() <-s.stop - s.l1Client.Close() s.logger.Info("BlockTagService stopped") } diff --git a/node/cmd/node/main.go b/node/cmd/node/main.go index 2a71f2a28..b056588c3 100644 --- a/node/cmd/node/main.go +++ b/node/cmd/node/main.go @@ -6,11 +6,17 @@ import ( "os" "os/signal" "path/filepath" + "strings" "syscall" + "time" + "github.com/morph-l2/go-ethereum/common" + "github.com/morph-l2/go-ethereum/crypto" "github.com/morph-l2/go-ethereum/ethclient" + tmlog "github.com/tendermint/tendermint/libs/log" tmnode "github.com/tendermint/tendermint/node" "github.com/tendermint/tendermint/privval" + "github.com/tendermint/tendermint/upgrade" "github.com/urfave/cli" "morph-l2/bindings/bindings" @@ -20,6 +26,7 @@ import ( "morph-l2/node/db" "morph-l2/node/derivation" "morph-l2/node/flags" + "morph-l2/node/l1sequencer" "morph-l2/node/sequencer" "morph-l2/node/sequencer/mock" "morph-l2/node/sync" @@ -60,12 +67,20 @@ func L2NodeMain(ctx *cli.Context) error { tmNode *tmnode.Node dvNode *derivation.Derivation blockTagSvc *blocktag.BlockTagService + tracker *l1sequencer.L1Tracker + verifier *l1sequencer.SequencerVerifier + signer l1sequencer.Signer nodeConfig = node.DefaultConfig() ) isMockSequencer := ctx.GlobalBool(flags.MockEnabled.Name) isValidator := ctx.GlobalBool(flags.ValidatorEnable.Name) + // Apply consensus switch height if explicitly set via flag + if ctx.GlobalIsSet(flags.ConsensusSwitchHeight.Name) { + upgrade.SetUpgradeBlockHeight(ctx.GlobalInt64(flags.ConsensusSwitchHeight.Name)) + } + if err = nodeConfig.SetCliContext(ctx); err != nil { return err } @@ -118,14 +133,27 @@ func L2NodeMain(ctx *cli.Context) error 
{ dvNode.Start() nodeConfig.Logger.Info("derivation node starting") } else { - // launch tendermint node + // ========== Create Syncer and L1 Sequencer Components ========== + syncer, err = node.NewSyncer(ctx, home, nodeConfig) + if err != nil { + return fmt.Errorf("failed to create syncer: %w", err) + } + + tracker, verifier, signer, err = initL1SequencerComponents(ctx, syncer.L1Client(), nodeConfig.Logger) + if err != nil { + return fmt.Errorf("failed to init L1 sequencer components: %w", err) + } + + // ========== Launch Tendermint Node ========== tmCfg, err := sequencer.LoadTmConfig(ctx, home) if err != nil { return err } tmVal := privval.LoadOrGenFilePV(tmCfg.PrivValidatorKeyFile(), tmCfg.PrivValidatorStateFile()) pubKey, _ := tmVal.GetPubKey() - newSyncerFunc := func() (*sync.Syncer, error) { return node.NewSyncer(ctx, home, nodeConfig) } + + // Create executor with syncer + newSyncerFunc := func() (*sync.Syncer, error) { return syncer, nil } // Reuse existing syncer executor, err = node.NewExecutor(newSyncerFunc, nodeConfig, pubKey) if err != nil { return err @@ -137,25 +165,19 @@ func L2NodeMain(ctx *cli.Context) error { } go ms.Start() } else { - if tmNode, err = sequencer.SetupNode(tmCfg, tmVal, executor, nodeConfig.Logger); err != nil { - return fmt.Errorf("failed to setup consensus node, error: %v", err) + tmNode, err = sequencer.SetupNode(tmCfg, tmVal, executor, nodeConfig.Logger, verifier, signer) + if err != nil { + return fmt.Errorf("failed to setup consensus node: %v", err) } if err = tmNode.Start(); err != nil { return fmt.Errorf("failed to start consensus node, error: %v", err) } } - // Start BlockTagService for sequencer mode - blockTagConfig := blocktag.DefaultConfig() - if err := blockTagConfig.SetCliContext(ctx); err != nil { - return fmt.Errorf("blocktag config set cli context error: %w", err) - } - blockTagSvc, err = blocktag.NewBlockTagService(context.Background(), executor.L2Client(), blockTagConfig, nodeConfig.Logger) + // ========== 
Initialize BlockTagService ========== + blockTagSvc, err = initBlockTagService(ctx, syncer.L1Client(), executor, nodeConfig.Logger) if err != nil { - return fmt.Errorf("failed to create BlockTagService: %w", err) - } - if err := blockTagSvc.Start(); err != nil { - return fmt.Errorf("failed to start BlockTagService: %w", err) + return fmt.Errorf("failed to init BlockTagService: %w", err) } } @@ -186,10 +208,102 @@ func L2NodeMain(ctx *cli.Context) error { if blockTagSvc != nil { blockTagSvc.Stop() } + if tracker != nil { + tracker.Stop() + } return nil } +// initL1SequencerComponents initializes all L1 sequencer related components: +// - L1Tracker: monitors L1 sync status +// - SequencerCache: caches L1 sequencer address (nil if contract not configured) +// - Signer: signs blocks (nil if private key not configured) +func initL1SequencerComponents( + ctx *cli.Context, + l1Client *ethclient.Client, + logger tmlog.Logger, +) (*l1sequencer.L1Tracker, *l1sequencer.SequencerVerifier, l1sequencer.Signer, error) { + if l1Client == nil { + return nil, nil, nil, fmt.Errorf("L1 client is required, check l1.rpc configuration") + } + + // Get config from flags + lagThreshold := ctx.GlobalDuration(flags.L1SyncLagThreshold.Name) + if lagThreshold == 0 { + lagThreshold = 5 * time.Minute // default + } + contractAddr := common.HexToAddress(ctx.GlobalString(flags.L1SequencerContractAddr.Name)) + seqPrivKeyHex := ctx.GlobalString(flags.SequencerPrivateKey.Name) + + // Initialize L1 Tracker + tracker := l1sequencer.NewL1Tracker(context.Background(), l1Client, lagThreshold, logger) + if err := tracker.Start(); err != nil { + return nil, nil, nil, fmt.Errorf("failed to start L1 tracker: %w", err) + } + logger.Info("L1 Tracker started", "lagThreshold", lagThreshold) + + // Initialize Sequencer Verifier (optional) + var verifier *l1sequencer.SequencerVerifier + if contractAddr != (common.Address{}) { + caller, err := bindings.NewL1SequencerCaller(contractAddr, l1Client) + if err != nil { + 
tracker.Stop() + return nil, nil, nil, fmt.Errorf("failed to create L1Sequencer caller: %w", err) + } + verifier = l1sequencer.NewSequencerVerifier(caller, logger) + logger.Info("Sequencer verifier initialized", "contract", contractAddr.Hex()) + } else { + logger.Info("L1 Sequencer contract not configured, verifier disabled") + } + + // Initialize Signer (optional) + var signer l1sequencer.Signer + if seqPrivKeyHex != "" { + seqPrivKeyHex = strings.TrimPrefix(seqPrivKeyHex, "0x") + privKey, err := crypto.HexToECDSA(seqPrivKeyHex) + if err != nil { + tracker.Stop() + return nil, nil, nil, fmt.Errorf("invalid sequencer private key: %w", err) + } + signer, err = l1sequencer.NewLocalSigner(privKey, verifier, logger) + if err != nil { + tracker.Stop() + return nil, nil, nil, err + } + logger.Info("Sequencer signer initialized", "address", signer.Address().Hex()) + } else { + logger.Info("Sequencer private key not configured, signer disabled") + } + + return tracker, verifier, signer, nil +} + +// initBlockTagService initializes the block tag service +func initBlockTagService( + ctx *cli.Context, + l1Client *ethclient.Client, + executor *node.Executor, + logger tmlog.Logger, +) (*blocktag.BlockTagService, error) { + config := blocktag.DefaultConfig() + if err := config.SetCliContext(ctx); err != nil { + return nil, err + } + + svc, err := blocktag.NewBlockTagService(context.Background(), l1Client, executor.L2Client(), config, logger) + if err != nil { + return nil, err + } + + if err := svc.Start(); err != nil { + return nil, err + } + + logger.Info("BlockTagService started") + return svc, nil +} + func homeDir(ctx *cli.Context) (string, error) { home := ctx.GlobalString(flags.Home.Name) if home == "" { diff --git a/node/core/batch.go b/node/core/batch.go index 9c851956e..167c3c2e5 100644 --- a/node/core/batch.go +++ b/node/core/batch.go @@ -178,14 +178,14 @@ func (e *Executor) CalculateCapWithProposalBlock(currentBlockBytes []byte, curre return false, err } - // MPT 
fork: force batch points on the 1st and 2nd post-fork blocks, so the 1st post-fork block + // Jade fork: force batch points on the 1st and 2nd post-fork blocks, so the 1st post-fork block // becomes a single-block batch: [H1, H2). - force, err := e.forceBatchPointForMPTFork(height, block.Timestamp, block.StateRoot, block.Hash) + force, err := e.forceBatchPointForJadeFork(height, block.Timestamp, block.StateRoot, block.Hash) if err != nil { return false, err } if force { - e.logger.Info("MPT fork: force batch point", "height", height, "timestamp", block.Timestamp) + e.logger.Info("Jade fork: force batch point", "height", height, "timestamp", block.Timestamp) return true, nil } @@ -198,27 +198,27 @@ func (e *Executor) CalculateCapWithProposalBlock(currentBlockBytes []byte, curre return exceeded, err } -// forceBatchPointForMPTFork forces batch points at the 1st and 2nd block after the MPT fork time. +// forceBatchPointForJadeFork forces batch points at the 1st and 2nd block after the Jade fork time. // // Design goals: // - Minimal change: only affects batch-point decision logic. // - Stability: CalculateCapWithProposalBlock can be called multiple times at the same height; return must be consistent. // - Performance: after handling (or skipping beyond) the fork boundary, no more HeaderByNumber calls are made. -func (e *Executor) forceBatchPointForMPTFork(height uint64, blockTime uint64, stateRoot common.Hash, blockHash common.Hash) (bool, error) { +func (e *Executor) forceBatchPointForJadeFork(height uint64, blockTime uint64, stateRoot common.Hash, blockHash common.Hash) (bool, error) { // If we already decided to force at this height, keep returning true without extra RPCs. - if e.mptForkForceHeight == height && height != 0 { + if e.jadeForkForceHeight == height && height != 0 { return true, nil } // If fork boundary is already handled and this isn't a forced height, fast exit. 
- if e.mptForkStage >= 2 { + if e.jadeForkStage >= 2 { return false, nil } // Ensure we have fork time cached (0 means disabled). - if e.mptForkTime == 0 { - e.mptForkTime = e.l2Client.MPTForkTime() + if e.jadeForkTime == 0 { + e.jadeForkTime = e.l2Client.JadeForkTime() } - forkTime := e.mptForkTime + forkTime := e.jadeForkTime if forkTime == 0 || blockTime < forkTime { return false, nil } @@ -234,24 +234,24 @@ func (e *Executor) forceBatchPointForMPTFork(height uint64, blockTime uint64, st if parent.Time < forkTime { // Log H1 (the 1st post-fork block) state root // This stateRoot is intended to be used as the Rollup contract "genesis state root" - // when we reset/re-initialize the genesis state root during the MPT upgrade. + // when we reset/re-initialize the genesis state root during the Jade upgrade. e.logger.Info( - "MPT_FORK_H1_GENESIS_STATE_ROOT", + "JADE_FORK_H1_GENESIS_STATE_ROOT", "height", height, "timestamp", blockTime, "forkTime", forkTime, "stateRoot", stateRoot.Hex(), "blockHash", blockHash.Hex(), ) - e.mptForkStage = 1 - e.mptForkForceHeight = height + e.jadeForkStage = 1 + e.jadeForkForceHeight = height return true, nil } // If parent is already post-fork, we may be at the 2nd post-fork block (H2) or later. if height < 2 { // We cannot be H2; mark done to avoid future calls. - e.mptForkStage = 2 + e.jadeForkStage = 2 return false, nil } @@ -261,13 +261,13 @@ func (e *Executor) forceBatchPointForMPTFork(height uint64, blockTime uint64, st } if grandParent.Time < forkTime { // This is H2 (2nd post-fork block). - e.mptForkStage = 2 - e.mptForkForceHeight = height + e.jadeForkStage = 2 + e.jadeForkForceHeight = height return true, nil } // Beyond H2: nothing to do (can't retroactively fix). Mark done for performance. 
- e.mptForkStage = 2 + e.jadeForkStage = 2 return false, nil } diff --git a/node/core/executor.go b/node/core/executor.go index 52d30125d..08f20bf17 100644 --- a/node/core/executor.go +++ b/node/core/executor.go @@ -56,12 +56,12 @@ type Executor struct { rollupABI *abi.ABI batchingCache *BatchingCache - // MPT fork handling: force batch points at the 1st and 2nd block after fork. + // Jade fork handling: force batch points at the 1st and 2nd block after fork. // This state machine exists to avoid repeated HeaderByNumber calls after the fork is handled, // while keeping results stable if CalculateCapWithProposalBlock is called multiple times at the same height. - mptForkTime uint64 // cached from geth eth_config.morph.mptForkTime (0 means disabled/unknown) - mptForkStage uint8 // 0: not handled, 1: forced H1, 2: done (forced H2 or skipped beyond H2) - mptForkForceHeight uint64 // if equals current height, must return true (stability across multiple calls) + jadeForkTime uint64 // cached from geth eth_config.morph.jadeForkTime (0 means disabled/unknown) + jadeForkStage uint8 // 0: not handled, 1: forced H1, 2: done (forced H2 or skipped beyond H2) + jadeForkForceHeight uint64 // if equals current height, must return true (stability across multiple calls) logger tmlog.Logger metrics *Metrics @@ -155,7 +155,7 @@ func NewExecutor(newSyncFunc NewSyncerFunc, config *Config, tmPubKey crypto.PubK batchingCache: NewBatchingCache(), UpgradeBatchTime: config.UpgradeBatchTime, blsKeyCheckForkHeight: config.BlsKeyCheckForkHeight, - mptForkTime: l2Client.MPTForkTime(), + jadeForkTime: l2Client.JadeForkTime(), logger: logger, metrics: PrometheusMetrics("morphnode"), } @@ -482,3 +482,202 @@ func (e *Executor) getParamsAndValsAtHeight(height int64) (*tmproto.BatchParams, func (e *Executor) L2Client() *types.RetryableClient { return e.l2Client } + +// ============================================================================ +// L2NodeV2 interface implementation for sequencer mode 
+// ============================================================================ + +// RequestBlockDataV2 requests block data based on parent hash. +// This differs from RequestBlockData which uses height. +// Using parent hash allows for proper fork chain handling in the future. +func (e *Executor) RequestBlockDataV2(parentHashBytes []byte) (*l2node.BlockV2, bool, error) { + if e.l1MsgReader == nil { + return nil, false, fmt.Errorf("RequestBlockDataV2 is not allowed to be called") + } + parentHash := common.BytesToHash(parentHashBytes) + + // Read L1 messages + fromIndex := e.nextL1MsgIndex + l1Messages := e.l1MsgReader.ReadL1MessagesInRange(fromIndex, fromIndex+e.maxL1MsgNumPerBlock-1) + transactions := make(eth.Transactions, len(l1Messages)) + + collectedL1Msgs := false + if len(l1Messages) > 0 { + queueIndex := fromIndex + for i, l1Message := range l1Messages { + transaction := eth.NewTx(&l1Message.L1MessageTx) + transactions[i] = transaction + if queueIndex != l1Message.QueueIndex { + e.logger.Error("unexpected l1message queue index", "expected", queueIndex, "actual", l1Message.QueueIndex) + return nil, false, types.ErrInvalidL1MessageOrder + } + queueIndex++ + } + collectedL1Msgs = true + } + + // Call geth to assemble block based on parent hash + l2Block, err := e.l2Client.AssembleL2BlockV2(context.Background(), parentHash, transactions) + if err != nil { + e.logger.Error("failed to assemble block v2", "parentHash", parentHash.Hex(), "error", err) + return nil, false, err + } + + e.logger.Info("AssembleL2BlockV2 success ", + "number", l2Block.Number, + "hash", l2Block.Hash.Hex(), + "parentHash", l2Block.ParentHash.Hex(), + "tx length", len(l2Block.Transactions), + "collectedL1Msgs", collectedL1Msgs, + ) + + return executableL2DataToBlockV2(l2Block), collectedL1Msgs, nil +} + +// ApplyBlockV2 applies a block to the L2 execution layer. +// This is used in sequencer mode after block validation. 
+func (e *Executor) ApplyBlockV2(block *l2node.BlockV2) error { + // Convert BlockV2 to ExecutableL2Data for geth + execBlock := blockV2ToExecutableL2Data(block) + + // Check if block is already applied + height, err := e.l2Client.BlockNumber(context.Background()) + if err != nil { + return err + } + + if execBlock.Number <= height { + e.logger.Info("ignore it, the block was already applied", "block number", execBlock.Number) + return nil + } + + // We only accept continuous blocks + if execBlock.Number > height+1 { + return types.ErrWrongBlockNumber + } + + // Apply the block (no batch hash in sequencer mode for now) + err = e.l2Client.NewL2Block(context.Background(), execBlock, nil) + if err != nil { + e.logger.Error("failed to apply block v2", "error", err) + return err + } + + // Update L1 message index + e.updateNextL1MessageIndex(execBlock) + + e.metrics.Height.Set(float64(execBlock.Number)) + e.logger.Info("ApplyBlockV2 success", "number", execBlock.Number, "hash", execBlock.Hash.Hex()) + + return nil +} + +// GetBlockByNumber retrieves a block by its number from the L2 execution layer. +// Uses standard eth_getBlockByNumber JSON-RPC. +func (e *Executor) GetBlockByNumber(height uint64) (*l2node.BlockV2, error) { + block, err := e.l2Client.BlockByNumber(context.Background(), big.NewInt(int64(height))) + if err != nil { + e.logger.Error("failed to get block by number", "height", height, "error", err) + return nil, err + } + return ethBlockToBlockV2(block) +} + +// GetLatestBlockV2 returns the latest block from the L2 execution layer. +// Uses standard eth_getBlockByNumber JSON-RPC with nil (latest). 
+func (e *Executor) GetLatestBlockV2() (*l2node.BlockV2, error) { + block, err := e.l2Client.BlockByNumber(context.Background(), nil) + if err != nil { + e.logger.Error("failed to get latest block", "error", err) + return nil, err + } + return ethBlockToBlockV2(block) +} + +// ==================== Type Conversion Functions ==================== + +// ethBlockToBlockV2 converts eth.Block to BlockV2 +func ethBlockToBlockV2(block *eth.Block) (*l2node.BlockV2, error) { + if block == nil { + return nil, fmt.Errorf("block is nil") + } + header := block.Header() + + // Encode transactions using MarshalBinary (handles EIP-2718 typed transactions correctly) + // Initialize as empty slice (not nil) to ensure JSON serialization produces [] instead of null + txs := make([][]byte, 0, len(block.Transactions())) + for _, tx := range block.Transactions() { + bz, err := tx.MarshalBinary() + if err != nil { + return nil, fmt.Errorf("failed to marshal tx, error: %v", err) + } + txs = append(txs, bz) + } + + return &l2node.BlockV2{ + ParentHash: header.ParentHash, + Miner: header.Coinbase, + Number: header.Number.Uint64(), + GasLimit: header.GasLimit, + BaseFee: header.BaseFee, + Timestamp: header.Time, + Transactions: txs, + StateRoot: header.Root, + GasUsed: header.GasUsed, + ReceiptRoot: header.ReceiptHash, + LogsBloom: header.Bloom.Bytes(), + NextL1MessageIndex: header.NextL1MsgIndex, + Hash: block.Hash(), + }, nil +} + +// blockV2ToExecutableL2Data converts BlockV2 to ExecutableL2Data +func blockV2ToExecutableL2Data(block *l2node.BlockV2) *catalyst.ExecutableL2Data { + if block == nil { + return nil + } + // Ensure Transactions is not nil (JSON requires [] not null) + txs := block.Transactions + if txs == nil { + txs = make([][]byte, 0) + } + return &catalyst.ExecutableL2Data{ + ParentHash: block.ParentHash, + Miner: block.Miner, + Number: block.Number, + GasLimit: block.GasLimit, + BaseFee: block.BaseFee, + Timestamp: block.Timestamp, + Transactions: txs, + StateRoot: 
block.StateRoot, + GasUsed: block.GasUsed, + ReceiptRoot: block.ReceiptRoot, + LogsBloom: block.LogsBloom, + WithdrawTrieRoot: block.WithdrawTrieRoot, + NextL1MessageIndex: block.NextL1MessageIndex, + Hash: block.Hash, + } +} + +// executableL2DataToBlockV2 converts ExecutableL2Data to BlockV2 +func executableL2DataToBlockV2(data *catalyst.ExecutableL2Data) *l2node.BlockV2 { + if data == nil { + return nil + } + return &l2node.BlockV2{ + ParentHash: data.ParentHash, + Miner: data.Miner, + Number: data.Number, + GasLimit: data.GasLimit, + BaseFee: data.BaseFee, + Timestamp: data.Timestamp, + Transactions: data.Transactions, + StateRoot: data.StateRoot, + GasUsed: data.GasUsed, + ReceiptRoot: data.ReceiptRoot, + LogsBloom: data.LogsBloom, + WithdrawTrieRoot: data.WithdrawTrieRoot, + NextL1MessageIndex: data.NextL1MessageIndex, + Hash: data.Hash, + } +} diff --git a/node/flags/flags.go b/node/flags/flags.go index 2c00f4a87..6d37943fe 100644 --- a/node/flags/flags.go +++ b/node/flags/flags.go @@ -1,6 +1,10 @@ package flags -import "github.com/urfave/cli" +import ( + "time" + + "github.com/urfave/cli" +) const envVarPrefix = "MORPH_NODE_" @@ -240,6 +244,27 @@ var ( Value: 10, } + // L1 Sequencer options + L1SequencerContractAddr = cli.StringFlag{ + Name: "l1.sequencerContract", + Usage: "L1 Sequencer contract address for signature verification", + EnvVar: prefixEnvVar("L1_SEQUENCER_CONTRACT"), + } + + L1SyncLagThreshold = cli.DurationFlag{ + Name: "l1.syncLagThreshold", + Usage: "L1 sync lag threshold for warning logs", + EnvVar: prefixEnvVar("L1_SYNC_LAG_THRESHOLD"), + Value: 5 * time.Minute, + } + + // Sequencer private key for block signing (hex encoded, without 0x prefix) + SequencerPrivateKey = cli.StringFlag{ + Name: "sequencer.privateKey", + Usage: "Sequencer private key for block signing (hex encoded)", + EnvVar: prefixEnvVar("SEQUENCER_PRIVATE_KEY"), + } + // Batch rules UpgradeBatchTime = cli.Uint64Flag{ Name: "upgrade.batchTime", @@ -251,6 +276,14 @@ var ( 
Usage: "Morph mainnet", } + // for test + ConsensusSwitchHeight = cli.Int64Flag{ + Name: "consensus.switchHeight", + Usage: "Block height at which the consensus switches to sequencer mode. Default -1 means upgrade disabled.", + EnvVar: prefixEnvVar("CONSENSUS_SWITCH_HEIGHT"), + Value: -1, + } + DerivationConfirmations = cli.Int64Flag{ Name: "derivation.confirmations", Usage: "The number of confirmations needed on L1 for finalization. If not set, the default value is l1.confirmations", @@ -369,6 +402,14 @@ var Flags = []cli.Flag{ // blocktag options BlockTagSafeConfirmations, + // L1 Sequencer options + L1SequencerContractAddr, + L1SyncLagThreshold, + SequencerPrivateKey, + + // consensus + ConsensusSwitchHeight, + // batch rules UpgradeBatchTime, MainnetFlag, diff --git a/node/go.mod b/node/go.mod index d8c7bff2e..c5385a70a 100644 --- a/node/go.mod +++ b/node/go.mod @@ -2,7 +2,7 @@ module morph-l2/node go 1.24.0 -replace github.com/tendermint/tendermint => github.com/morph-l2/tendermint v0.3.3 +replace github.com/tendermint/tendermint => github.com/morph-l2/tendermint v0.3.4-0.20260313040448-999449fd4d23 require ( github.com/cenkalti/backoff/v4 v4.1.3 @@ -11,7 +11,7 @@ require ( github.com/hashicorp/golang-lru v1.0.2 github.com/holiman/uint256 v1.2.4 github.com/klauspost/compress v1.17.9 - github.com/morph-l2/go-ethereum v1.10.14-0.20260211074551-4f0f6e6bd141 + github.com/morph-l2/go-ethereum v1.10.14-0.20260312125309-280bfb9cfd1d github.com/prometheus/client_golang v1.17.0 github.com/spf13/viper v1.13.0 github.com/stretchr/testify v1.10.0 diff --git a/node/go.sum b/node/go.sum index dda7b959d..1939f5df4 100644 --- a/node/go.sum +++ b/node/go.sum @@ -361,10 +361,10 @@ github.com/mmcloughlin/addchain v0.4.0/go.mod h1:A86O+tHqZLMNO4w6ZZ4FlVQEadcoqky github.com/mmcloughlin/profile v0.1.1/go.mod h1:IhHD7q1ooxgwTgjxQYkACGA77oFTDdFVejUS1/tS/qU= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= 
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/morph-l2/go-ethereum v1.10.14-0.20260211074551-4f0f6e6bd141 h1:A8eygErKU6WKMipGWIemzwLeYkIGLd9yb/Ry3x+J9PQ= -github.com/morph-l2/go-ethereum v1.10.14-0.20260211074551-4f0f6e6bd141/go.mod h1:nkVzHjQWCOjvukQW8ittlwX+Xz9gmVHrP7mUi7zoHTs= -github.com/morph-l2/tendermint v0.3.3 h1:zsmzVJfKp+NuCr45ZUUY2ZJjnHAVLzwJLID6GxBR4i4= -github.com/morph-l2/tendermint v0.3.3/go.mod h1:TtCzp9l6Z6yDUiwv3TbqKqw8Q8RKp3fSz5+adO1/Y8w= +github.com/morph-l2/go-ethereum v1.10.14-0.20260312125309-280bfb9cfd1d h1:Qy3ytYw/PGnrPDAWen1MsMUhUXclk1F2Q36A07+bBv4= +github.com/morph-l2/go-ethereum v1.10.14-0.20260312125309-280bfb9cfd1d/go.mod h1:nkVzHjQWCOjvukQW8ittlwX+Xz9gmVHrP7mUi7zoHTs= +github.com/morph-l2/tendermint v0.3.4-0.20260313040448-999449fd4d23 h1:YrxdcmetkysTg+WiHroyDgVKfuXNonp4HkTBAVkOY6w= +github.com/morph-l2/tendermint v0.3.4-0.20260313040448-999449fd4d23/go.mod h1:TtCzp9l6Z6yDUiwv3TbqKqw8Q8RKp3fSz5+adO1/Y8w= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= diff --git a/node/l1sequencer/signer.go b/node/l1sequencer/signer.go new file mode 100644 index 000000000..f03901ae3 --- /dev/null +++ b/node/l1sequencer/signer.go @@ -0,0 +1,71 @@ +package l1sequencer + +import ( + "context" + "crypto/ecdsa" + "fmt" + + "github.com/morph-l2/go-ethereum/common" + "github.com/morph-l2/go-ethereum/crypto" + tmlog "github.com/tendermint/tendermint/libs/log" +) + +// Signer manages sequencer identity and signing capabilities. +// It abstracts the private key management, allowing for local key storage +// or remote signing services (e.g., HSM, KMS) in the future. 
+type Signer interface { + // Sign signs data with the sequencer's private key + Sign(data []byte) ([]byte, error) + + // Address returns the sequencer's address + Address() common.Address + + // IsActiveSequencer checks if this signer is the current L1 sequencer + IsActiveSequencer(ctx context.Context) (bool, error) +} + +// LocalSigner implements Signer with a local private key +type LocalSigner struct { + privKey *ecdsa.PrivateKey + address common.Address + verifier *SequencerVerifier + logger tmlog.Logger +} + +// NewLocalSigner creates a new LocalSigner with a local private key +func NewLocalSigner(privKey *ecdsa.PrivateKey, verifier *SequencerVerifier, logger tmlog.Logger) (*LocalSigner, error) { + if privKey == nil { + return nil, fmt.Errorf("private key is required") + } + + address := crypto.PubkeyToAddress(privKey.PublicKey) + + return &LocalSigner{ + privKey: privKey, + address: address, + verifier: verifier, + logger: logger.With("module", "signer"), + }, nil +} + +// Sign signs data with the sequencer's private key +func (s *LocalSigner) Sign(data []byte) ([]byte, error) { + signature, err := crypto.Sign(data, s.privKey) + if err != nil { + return nil, fmt.Errorf("failed to sign: %w", err) + } + return signature, nil +} + +// Address returns the sequencer's address +func (s *LocalSigner) Address() common.Address { + return s.address +} + +// IsActiveSequencer checks if this signer is the current L1 sequencer +func (s *LocalSigner) IsActiveSequencer(ctx context.Context) (bool, error) { + if s.verifier == nil { + return false, fmt.Errorf("sequencer verifier not set") + } + return s.verifier.IsSequencer(ctx, s.address) +} diff --git a/node/l1sequencer/tracker.go b/node/l1sequencer/tracker.go new file mode 100644 index 000000000..d8ea3b8c0 --- /dev/null +++ b/node/l1sequencer/tracker.go @@ -0,0 +1,86 @@ +package l1sequencer + +import ( + "context" + "time" + + "github.com/morph-l2/go-ethereum/ethclient" + tmlog "github.com/tendermint/tendermint/libs/log" 
+) + +// L1Tracker monitors L1 RPC sync status and logs warnings if behind. +// It runs as an independent service. +type L1Tracker struct { + ctx context.Context + cancel context.CancelFunc + l1Client *ethclient.Client + lagThreshold time.Duration + logger tmlog.Logger + stop chan struct{} +} + +// NewL1Tracker creates a new L1Tracker +func NewL1Tracker( + ctx context.Context, + l1Client *ethclient.Client, + lagThreshold time.Duration, + logger tmlog.Logger, +) *L1Tracker { + ctx, cancel := context.WithCancel(ctx) + return &L1Tracker{ + ctx: ctx, + cancel: cancel, + l1Client: l1Client, + lagThreshold: lagThreshold, + logger: logger.With("module", "l1tracker"), + stop: make(chan struct{}), + } +} + +// Start starts the L1Tracker +func (t *L1Tracker) Start() error { + t.logger.Info("Starting L1Tracker", "lagThreshold", t.lagThreshold) + go t.loop() + return nil +} + +// Stop stops the L1Tracker +func (t *L1Tracker) Stop() { + t.logger.Info("Stopping L1Tracker") + t.cancel() + <-t.stop +} + +func (t *L1Tracker) loop() { + defer close(t.stop) + + ticker := time.NewTicker(1 * time.Minute) + defer ticker.Stop() + + for { + select { + case <-t.ctx.Done(): + return + case <-ticker.C: + t.checkL1SyncLag() + } + } +} + +func (t *L1Tracker) checkL1SyncLag() { + header, err := t.l1Client.HeaderByNumber(t.ctx, nil) + if err != nil { + t.logger.Error("Failed to get L1 header", "error", err) + return + } + + blockTime := time.Unix(int64(header.Time), 0) + lag := time.Since(blockTime) + if lag > t.lagThreshold { + t.logger.Error("L1 RPC is behind", + "latestBlock", header.Number.Uint64(), + "blockTime", blockTime.Format(time.RFC3339), + "lag", lag.Round(time.Second), + ) + } +} diff --git a/node/l1sequencer/verifier.go b/node/l1sequencer/verifier.go new file mode 100644 index 000000000..1cbf8517a --- /dev/null +++ b/node/l1sequencer/verifier.go @@ -0,0 +1,97 @@ +package l1sequencer + +import ( + "context" + "fmt" + "sync" + "time" + + 
"github.com/morph-l2/go-ethereum/accounts/abi/bind" + "github.com/morph-l2/go-ethereum/common" + tmlog "github.com/tendermint/tendermint/libs/log" + + "morph-l2/bindings/bindings" +) + +const ( + // CacheTTL is the time-to-live for the sequencer verifier cache + //CacheTTL = 30 * time.Minute + CacheTTL = 10 * time.Second +) + +// SequencerVerifier verifies L1 sequencer status with caching. +// It provides IsSequencer() for checking if an address is the current sequencer. +type SequencerVerifier struct { + mutex sync.Mutex + sequencer common.Address + cacheExpiry time.Time + + caller *bindings.L1SequencerCaller + logger tmlog.Logger +} + +// NewSequencerVerifier creates a new SequencerVerifier +func NewSequencerVerifier(caller *bindings.L1SequencerCaller, logger tmlog.Logger) *SequencerVerifier { + return &SequencerVerifier{ + caller: caller, + logger: logger.With("module", "l1sequencer_verifier"), + } +} + +// flushCache refreshes the cache (caller must hold the lock) +func (c *SequencerVerifier) flushCache(ctx context.Context) error { + newSeq, err := c.caller.GetSequencer(&bind.CallOpts{Context: ctx}) + if err != nil { + return fmt.Errorf("failed to get sequencer from L1: %w", err) + } + + if c.sequencer != newSeq { + c.logger.Info("Sequencer address updated", + "old", c.sequencer.Hex(), + "new", newSeq.Hex()) + } + + c.sequencer = newSeq + c.cacheExpiry = time.Now().Add(CacheTTL) + return nil +} + +// IsSequencer checks if the given address is the current sequencer. +// It uses lazy loading: refreshes cache if expired, and retries on miss. 
+func (c *SequencerVerifier) IsSequencer(ctx context.Context, addr common.Address) (bool, error) { + c.mutex.Lock() + defer c.mutex.Unlock() + + // Cache expired, refresh + if time.Now().After(c.cacheExpiry) { + if err := c.flushCache(ctx); err != nil { + return false, err + } + } + + // Cache hit + if c.sequencer == addr { + return true, nil + } + + // Cache miss - maybe sequencer just updated, force refresh once + if err := c.flushCache(ctx); err != nil { + return false, err + } + + return c.sequencer == addr, nil +} + +// GetSequencer returns the cached sequencer address (refreshes if expired) +func (c *SequencerVerifier) GetSequencer(ctx context.Context) (common.Address, error) { + c.mutex.Lock() + defer c.mutex.Unlock() + + if time.Now().After(c.cacheExpiry) { + if err := c.flushCache(ctx); err != nil { + return common.Address{}, err + } + } + + return c.sequencer, nil +} diff --git a/node/sequencer/tm_node.go b/node/sequencer/tm_node.go index 6f6bf994c..b9cbffe38 100644 --- a/node/sequencer/tm_node.go +++ b/node/sequencer/tm_node.go @@ -14,11 +14,13 @@ import ( tmnode "github.com/tendermint/tendermint/node" "github.com/tendermint/tendermint/p2p" "github.com/tendermint/tendermint/proxy" + tmsequencer "github.com/tendermint/tendermint/sequencer" "github.com/tendermint/tendermint/types" "github.com/urfave/cli" node "morph-l2/node/core" "morph-l2/node/flags" + "morph-l2/node/l1sequencer" nodetypes "morph-l2/node/types" ) @@ -51,7 +53,17 @@ func LoadTmConfig(ctx *cli.Context, home string) (*config.Config, error) { return tmCfg, nil } -func SetupNode(tmCfg *config.Config, privValidator types.PrivValidator, executor *node.Executor, logger tmlog.Logger) (*tmnode.Node, error) { +// SetupNode creates a tendermint node with the given configuration. 
+// verifier: L1 sequencer verifier for signature verification (optional, can be nil) +// signer: sequencer signer for block signing (optional, can be nil) +func SetupNode( + tmCfg *config.Config, + privValidator types.PrivValidator, + executor *node.Executor, + logger tmlog.Logger, + verifier *l1sequencer.SequencerVerifier, + signer l1sequencer.Signer, +) (*tmnode.Node, error) { nodeLogger := logger.With("module", "main") nodeKey, err := p2p.LoadOrGenNodeKey(tmCfg.NodeKeyFile()) @@ -67,7 +79,12 @@ func SetupNode(tmCfg *config.Config, privValidator types.PrivValidator, executor return nil, fmt.Errorf("failed to load bls priv key") } - //var app types.Application + // Build verifier (SequencerVerifier implements tmsequencer.SequencerVerifier interface) + var tmVerifier tmsequencer.SequencerVerifier + if verifier != nil { + tmVerifier = verifier + } + n, err := tmnode.NewNode( tmCfg, executor, @@ -79,6 +96,8 @@ func SetupNode(tmCfg *config.Config, privValidator types.PrivValidator, executor tmnode.DefaultDBProvider, tmnode.DefaultMetricsProvider(tmCfg.Instrumentation), nodeLogger, + tmVerifier, + signer, ) return n, err } diff --git a/node/sync/bridge_client.go b/node/sync/bridge_client.go index bab51c45e..e8a73abf3 100644 --- a/node/sync/bridge_client.go +++ b/node/sync/bridge_client.go @@ -24,6 +24,11 @@ type BridgeClient struct { logger tmlog.Logger } +// L1Client returns the underlying L1 client (for sharing with other services) +func (c *BridgeClient) L1Client() *ethclient.Client { + return c.l1Client +} + func NewBridgeClient(l1Client *ethclient.Client, l1MessageQueueAddress common.Address, confirmations rpc.BlockNumber, logger tmlog.Logger) (*BridgeClient, error) { logger = logger.With("module", "bridge") filter, err := bindings.NewL1MessageQueueWithGasPriceOracleFilterer(l1MessageQueueAddress, l1Client) diff --git a/node/sync/syncer.go b/node/sync/syncer.go index 9c109ac06..c9948983a 100644 --- a/node/sync/syncer.go +++ b/node/sync/syncer.go @@ -196,3 +196,11 
@@ func (s *Syncer) ReadL1MessagesInRange(start, end uint64) []types.L1Message { func (s *Syncer) LatestSynced() uint64 { return s.latestSynced } + +// L1Client returns the underlying L1 client (for sharing with other services) +func (s *Syncer) L1Client() *ethclient.Client { + if s.bridgeClient != nil { + return s.bridgeClient.L1Client() + } + return nil +} diff --git a/node/types/blob.go b/node/types/blob.go index d4aea96fb..49ac5dc64 100644 --- a/node/types/blob.go +++ b/node/types/blob.go @@ -153,11 +153,11 @@ func DecodeTxsFromBytes(txsBytes []byte) (eth.Transactions, error) { return nil, err } innerTx = new(eth.SetCodeTx) - case eth.AltFeeTxType: + case eth.MorphTxType: if err := binary.Read(reader, binary.BigEndian, &firstByte); err != nil { return nil, err } - innerTx = new(eth.AltFeeTx) + innerTx = new(eth.MorphTx) default: if firstByte <= 0xf7 { // legacy tx first byte must be greater than 0xf7(247) return nil, fmt.Errorf("not supported tx type: %d", firstByte) diff --git a/node/types/retryable_client.go b/node/types/retryable_client.go index 899c3be12..177af13d8 100644 --- a/node/types/retryable_client.go +++ b/node/types/retryable_client.go @@ -54,8 +54,8 @@ type forkConfig struct { // morphExtension contains Morph-specific configuration fields type morphExtension struct { - UseZktrie bool `json:"useZktrie"` - MPTForkTime *uint64 `json:"mptForkTime,omitempty"` + UseZktrie bool `json:"useZktrie"` + JadeForkTime *uint64 `json:"jadeForkTime,omitempty"` } // GethConfig holds the configuration fetched from geth via eth_config API @@ -105,28 +105,28 @@ func FetchGethConfig(rpcURL string, logger tmlog.Logger) (*GethConfig, error) { logger.Info("Fetched useZktrie from geth", "useZktrie", config.UseZktrie) } - // Try to get mptForkTime from current config - if resp.Current != nil && resp.Current.Morph != nil && resp.Current.Morph.MPTForkTime != nil { - config.SwitchTime = *resp.Current.Morph.MPTForkTime - logger.Info("Fetched MPT fork time from geth", 
"mptForkTime", config.SwitchTime, "source", "current") + // Try to get jadeForkTime from current config + if resp.Current != nil && resp.Current.Morph != nil && resp.Current.Morph.JadeForkTime != nil { + config.SwitchTime = *resp.Current.Morph.JadeForkTime + logger.Info("Fetched Jade fork time from geth", "jadeForkTime", config.SwitchTime, "source", "current") return config, nil } // Fallback to next config - if resp.Next != nil && resp.Next.Morph != nil && resp.Next.Morph.MPTForkTime != nil { - config.SwitchTime = *resp.Next.Morph.MPTForkTime - logger.Info("Fetched MPT fork time from geth", "mptForkTime", config.SwitchTime, "source", "next") + if resp.Next != nil && resp.Next.Morph != nil && resp.Next.Morph.JadeForkTime != nil { + config.SwitchTime = *resp.Next.Morph.JadeForkTime + logger.Info("Fetched Jade fork time from geth", "jadeForkTime", config.SwitchTime, "source", "next") return config, nil } // Fallback to last config - if resp.Last != nil && resp.Last.Morph != nil && resp.Last.Morph.MPTForkTime != nil { - config.SwitchTime = *resp.Last.Morph.MPTForkTime - logger.Info("Fetched MPT fork time from geth", "mptForkTime", config.SwitchTime, "source", "last") + if resp.Last != nil && resp.Last.Morph != nil && resp.Last.Morph.JadeForkTime != nil { + config.SwitchTime = *resp.Last.Morph.JadeForkTime + logger.Info("Fetched Jade fork time from geth", "jadeForkTime", config.SwitchTime, "source", "last") return config, nil } - logger.Info("MPT fork time not configured in geth, switch disabled") + logger.Info("Jade fork time not configured in geth, switch disabled") return config, nil } @@ -141,9 +141,9 @@ type RetryableClient struct { logger tmlog.Logger } -// MPTForkTime returns the configured MPT fork/switch timestamp fetched from geth (eth_config). +// JadeForkTime returns the configured Jade fork/switch timestamp fetched from geth (eth_config). // Note: this is a local value stored in the client; it does not perform any RPC. 
-func (rc *RetryableClient) MPTForkTime() uint64 { +func (rc *RetryableClient) JadeForkTime() uint64 { return rc.switchTime } @@ -428,7 +428,25 @@ func (rc *RetryableClient) HeaderByNumber(ctx context.Context, blockNumber *big. if retryErr := backoff.Retry(func() error { resp, respErr := rc.eClient().HeaderByNumber(ctx, blockNumber) if respErr != nil { - rc.logger.Info("failed to call BlockNumber", "error", respErr) + rc.logger.Info("failed to call HeaderByNumber", "error", respErr) + if retryableError(respErr) { + return respErr + } + err = respErr + } + ret = resp + return nil + }, rc.b); retryErr != nil { + return nil, retryErr + } + return +} + +func (rc *RetryableClient) BlockByNumber(ctx context.Context, blockNumber *big.Int) (ret *eth.Block, err error) { + if retryErr := backoff.Retry(func() error { + resp, respErr := rc.ethClient.BlockByNumber(ctx, blockNumber) + if respErr != nil { + rc.logger.Info("failed to call BlockByNumber", "error", respErr) if retryableError(respErr) { return respErr } @@ -506,3 +524,27 @@ func retryableError(err error) bool { // strings.Contains(err.Error(), Timeout) return !strings.Contains(err.Error(), DiscontinuousBlockError) } + +// ============================================================================ +// L2NodeV2 methods for sequencer mode +// ============================================================================ + +// AssembleL2BlockV2 assembles a L2 block based on parent hash. 
+func (rc *RetryableClient) AssembleL2BlockV2(ctx context.Context, parentHash common.Hash, transactions eth.Transactions) (ret *catalyst.ExecutableL2Data, err error) { + timestamp := uint64(time.Now().Unix()) + if retryErr := backoff.Retry(func() error { + resp, respErr := rc.authClient.AssembleL2BlockV2(ctx, parentHash, &timestamp, transactions) + if respErr != nil { + rc.logger.Info("failed to AssembleL2BlockV2", "error", respErr) + if retryableError(respErr) { + return respErr + } + err = respErr + } + ret = resp + return nil + }, rc.b); retryErr != nil { + return nil, retryErr + } + return +} diff --git a/ops/devnet-morph/devnet/__init__.py b/ops/devnet-morph/devnet/__init__.py index 4ec81c897..385a7a2a3 100644 --- a/ops/devnet-morph/devnet/__init__.py +++ b/ops/devnet-morph/devnet/__init__.py @@ -248,6 +248,7 @@ def devnet_deploy(paths, args): env_data['MORPH_ROLLUP'] = addresses['Proxy__Rollup'] env_data['RUST_LOG'] = rust_log_level env_data['Proxy__L1Staking'] = addresses['Proxy__L1Staking'] + env_data['L1_SEQUENCER_CONTRACT'] = addresses.get('Proxy__L1Sequencer', '') envfile.seek(0) for key, value in env_data.items(): envfile.write(f'{key}={value}\n') diff --git a/ops/docker-sequencer-test/Dockerfile.l2-geth-test b/ops/docker-sequencer-test/Dockerfile.l2-geth-test new file mode 100644 index 000000000..1c053f44b --- /dev/null +++ b/ops/docker-sequencer-test/Dockerfile.l2-geth-test @@ -0,0 +1,26 @@ +# Build Geth for Sequencer Test +# Build context should be bitget/ (parent of morph) +FROM ghcr.io/morph-l2/go-ubuntu-builder:go-1.24-ubuntu AS builder + +# Copy local go-ethereum (not submodule) +COPY ./go-ethereum /go-ethereum +WORKDIR /go-ethereum + +# Build geth +RUN go run build/ci.go install ./cmd/geth + +# Runtime stage +FROM ghcr.io/morph-l2/go-ubuntu-builder:go-1.24-ubuntu + +RUN apt-get -qq update && apt-get -qq install -y --no-install-recommends \ + ca-certificates bash curl \ + && rm -rf /var/lib/apt/lists/* + +COPY --from=builder
/go-ethereum/build/bin/geth /usr/local/bin/ +COPY ./morph/ops/docker-sequencer-test/entrypoint-l2.sh /entrypoint.sh + +VOLUME ["/db"] + +ENTRYPOINT ["/bin/bash", "/entrypoint.sh"] + +EXPOSE 8545 8546 8551 30303 30303/udp diff --git a/ops/docker-sequencer-test/Dockerfile.l2-node-test b/ops/docker-sequencer-test/Dockerfile.l2-node-test new file mode 100644 index 000000000..1ece1eb81 --- /dev/null +++ b/ops/docker-sequencer-test/Dockerfile.l2-node-test @@ -0,0 +1,47 @@ +# Build Stage +FROM ghcr.io/morph-l2/go-ubuntu-builder:go-1.24-ubuntu AS builder + +# First: Copy only go.mod/go.sum files to cache dependencies +# Order matters for cache efficiency + +# Copy go-ethereum dependency files +COPY ./go-ethereum/go.mod ./go-ethereum/go.sum /bitget/go-ethereum/ + +# Copy tendermint dependency files +COPY ./tendermint/go.mod ./tendermint/go.sum /bitget/tendermint/ + +# Copy morph go.work and all module dependency files +COPY ./morph/go.work ./morph/go.work.sum /bitget/morph/ +COPY ./morph/node/go.mod ./morph/node/go.sum /bitget/morph/node/ +COPY ./morph/bindings/go.mod ./morph/bindings/go.sum /bitget/morph/bindings/ +COPY ./morph/contracts/go.mod ./morph/contracts/go.sum /bitget/morph/contracts/ +COPY ./morph/oracle/go.mod ./morph/oracle/go.sum /bitget/morph/oracle/ +COPY ./morph/tx-submitter/go.mod ./morph/tx-submitter/go.sum /bitget/morph/tx-submitter/ +COPY ./morph/ops/l2-genesis/go.mod ./morph/ops/l2-genesis/go.sum /bitget/morph/ops/l2-genesis/ +COPY ./morph/ops/tools/go.mod ./morph/ops/tools/go.sum /bitget/morph/ops/tools/ +COPY ./morph/token-price-oracle/go.mod ./morph/token-price-oracle/go.sum /bitget/morph/token-price-oracle/ + +# Download dependencies (this layer is cached if go.mod/go.sum don't change) +WORKDIR /bitget/morph/node +RUN go mod download -x + +# Now copy all source code +COPY ./go-ethereum /bitget/go-ethereum +COPY ./tendermint /bitget/tendermint +COPY ./morph /bitget/morph + +# Build (no need to download again, just compile) +WORKDIR 
/bitget/morph/node +RUN make build + +# Final Stage +FROM ghcr.io/morph-l2/go-ubuntu-builder:go-1.24-ubuntu + +RUN apt-get -qq update \ + && apt-get -qq install -y --no-install-recommends ca-certificates \ + && rm -rf /var/lib/apt/lists/* + +COPY --from=builder /bitget/morph/node/build/bin/tendermint /usr/local/bin/ +COPY --from=builder /bitget/morph/node/build/bin/morphnode /usr/local/bin/ + +CMD ["morphnode", "--home", "/data"] diff --git a/ops/docker-sequencer-test/README.md b/ops/docker-sequencer-test/README.md new file mode 100644 index 000000000..156f2d64a --- /dev/null +++ b/ops/docker-sequencer-test/README.md @@ -0,0 +1 @@ +This directory is intended for Docker testing purposes only and may be removed in the future. \ No newline at end of file diff --git a/ops/docker-sequencer-test/docker-compose.override.yml b/ops/docker-sequencer-test/docker-compose.override.yml new file mode 100644 index 000000000..9cc69cae8 --- /dev/null +++ b/ops/docker-sequencer-test/docker-compose.override.yml @@ -0,0 +1,65 @@ +# Override file to use test images +# Copy this to ops/docker/docker-compose.override.yml before running test +version: '3.8' + +services: + morph-geth-0: + image: morph-geth-test:latest + build: + context: ../.. + dockerfile: ops/docker-sequencer-test/Dockerfile.l2-geth-test + + morph-geth-1: + image: morph-geth-test:latest + + morph-geth-2: + image: morph-geth-test:latest + + morph-geth-3: + image: morph-geth-test:latest + + node-0: + image: morph-node-test:latest + build: + context: ../.. 
+ dockerfile: ops/docker-sequencer-test/Dockerfile.l2-node-test + environment: + - MORPH_NODE_SEQUENCER_PRIVATE_KEY=0xd99870855d97327d20c666abc78588f1449b1fac76ed0c86c1afb9ce2db85f32 + - MORPH_NODE_L1_SEQUENCER_CONTRACT=${L1_SEQUENCER_CONTRACT} + - MORPH_NODE_ROLLUP_ADDRESS=${MORPH_ROLLUP:-0x6900000000000000000000000000000000000010} + - MORPH_NODE_CONSENSUS_SWITCH_HEIGHT=${CONSENSUS_SWITCH_HEIGHT:-10} + + + node-1: + image: morph-node-test:latest + environment: + - MORPH_NODE_SEQUENCER_PRIVATE_KEY=0x0890c388c3bf5e04fee1d8f3c117e5f44f435ced7baf7bfd66c10e1f3a3f4b10 + - MORPH_NODE_L1_SEQUENCER_CONTRACT=${L1_SEQUENCER_CONTRACT} + - MORPH_NODE_ROLLUP_ADDRESS=${MORPH_ROLLUP:-0x6900000000000000000000000000000000000010} + - MORPH_NODE_CONSENSUS_SWITCH_HEIGHT=${CONSENSUS_SWITCH_HEIGHT:-10} + + + node-2: + image: morph-node-test:latest + environment: + - MORPH_NODE_L1_SEQUENCER_CONTRACT=${L1_SEQUENCER_CONTRACT} + - MORPH_NODE_ROLLUP_ADDRESS=${MORPH_ROLLUP:-0x6900000000000000000000000000000000000010} + - MORPH_NODE_CONSENSUS_SWITCH_HEIGHT=${CONSENSUS_SWITCH_HEIGHT:-10} + + + node-3: + image: morph-node-test:latest + environment: + - MORPH_NODE_L1_SEQUENCER_CONTRACT=${L1_SEQUENCER_CONTRACT} + - MORPH_NODE_ROLLUP_ADDRESS=${MORPH_ROLLUP:-0x6900000000000000000000000000000000000010} + - MORPH_NODE_CONSENSUS_SWITCH_HEIGHT=${CONSENSUS_SWITCH_HEIGHT:-10} + + + sentry-geth-0: + image: morph-geth-test:latest + + sentry-node-0: + image: morph-node-test:latest + environment: + - MORPH_NODE_CONSENSUS_SWITCH_HEIGHT=${CONSENSUS_SWITCH_HEIGHT:-10} + diff --git a/ops/docker-sequencer-test/entrypoint-l2.sh b/ops/docker-sequencer-test/entrypoint-l2.sh new file mode 100644 index 000000000..04dfed476 --- /dev/null +++ b/ops/docker-sequencer-test/entrypoint-l2.sh @@ -0,0 +1,42 @@ +#!/bin/bash +set -e + +GETH_DATA_DIR=${GETH_DATA_DIR:-/db} +GENESIS_FILE_PATH=${GENESIS_FILE_PATH:-/genesis.json} +JWT_SECRET_PATH=${JWT_SECRET_PATH:-/jwt-secret.txt} + +# Initialize geth if not already done +if [ ! 
-d "$GETH_DATA_DIR/geth/chaindata" ]; then + echo "Initializing geth with genesis file..." + geth init --datadir "$GETH_DATA_DIR" "$GENESIS_FILE_PATH" +fi + +echo "Starting geth..." +exec geth \ + --datadir "$GETH_DATA_DIR" \ + --http \ + --http.addr "0.0.0.0" \ + --http.port 8545 \ + --http.api "eth,net,web3,debug,txpool,engine" \ + --http.corsdomain "*" \ + --http.vhosts "*" \ + --ws \ + --ws.addr "0.0.0.0" \ + --ws.port 8546 \ + --ws.api "eth,net,web3,debug,txpool,engine" \ + --ws.origins "*" \ + --authrpc.addr "0.0.0.0" \ + --authrpc.port 8551 \ + --authrpc.vhosts "*" \ + --authrpc.jwtsecret "$JWT_SECRET_PATH" \ + --networkid 53077 \ + --nodiscover \ + --syncmode full \ + --gcmode archive \ + --metrics \ + --metrics.addr "0.0.0.0" \ + --pprof \ + --pprof.addr "0.0.0.0" \ + --verbosity 3 \ + "$@" + diff --git a/ops/docker-sequencer-test/run-test.sh b/ops/docker-sequencer-test/run-test.sh new file mode 100755 index 000000000..81361fefa --- /dev/null +++ b/ops/docker-sequencer-test/run-test.sh @@ -0,0 +1,549 @@ +#!/bin/bash +# Sequencer Upgrade Test Runner +# Reuses devnet-morph logic but with test-specific docker images + +set -e + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +MORPH_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)" +BITGET_ROOT="$(cd "$MORPH_ROOT/.." 
&& pwd)" +OPS_DIR="$MORPH_ROOT/ops" +DOCKER_DIR="$OPS_DIR/docker" +DEVNET_DIR="$OPS_DIR/devnet-morph" + +# Colors for output +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +BLUE='\033[0;34m' +NC='\033[0m' + +log_info() { echo -e "${BLUE}[INFO]${NC} $1"; } +log_success() { echo -e "${GREEN}[SUCCESS]${NC} $1"; } +log_warn() { echo -e "${YELLOW}[WARN]${NC} $1"; } +log_error() { echo -e "${RED}[ERROR]${NC} $1"; } + +# Configuration +UPGRADE_HEIGHT=${UPGRADE_HEIGHT:-10} +L2_RPC="http://127.0.0.1:8545" +L2_RPC_NODE1="http://127.0.0.1:8645" + +# ========== Helper Functions ========== + +wait_for_rpc() { + local rpc_url="$1" + local max_retries=${2:-60} + local retry=0 + + log_info "Waiting for RPC at $rpc_url..." + while [ $retry -lt $max_retries ]; do + if curl -s -X POST -H "Content-Type: application/json" \ + --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}' \ + "$rpc_url" 2>/dev/null | grep -q "result"; then + log_success "RPC is ready!" + return 0 + fi + retry=$((retry + 1)) + sleep 2 + done + log_error "Timeout waiting for RPC" + return 1 +} + +get_block_number() { + local rpc_url="${1:-$L2_RPC}" + local result + result=$(curl -s -X POST -H "Content-Type: application/json" \ + --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}' \ + "$rpc_url" 2>/dev/null) + echo "$result" | grep -o '"result":"[^"]*"' | cut -d'"' -f4 | xargs printf "%d" 2>/dev/null || echo "0" +} + +wait_for_block() { + local target_height=$1 + local rpc_url="${2:-$L2_RPC}" + + log_info "Waiting for block $target_height..." 
+ while true; do + local current=$(get_block_number "$rpc_url") + if [ "$current" -ge "$target_height" ]; then + log_success "Reached block $current" + return 0 + fi + echo -ne "\r Current block: $current / $target_height" + sleep 2 + done +} + +# ========== Setup Functions ========== + +# Export consensus switch height as environment variable for Docker containers +# The morphnode binary reads MORPH_NODE_CONSENSUS_SWITCH_HEIGHT at runtime +set_upgrade_height() { + local height=$1 + log_info "Setting consensus switch height to $height (via CONSENSUS_SWITCH_HEIGHT env)..." + export CONSENSUS_SWITCH_HEIGHT="$height" + log_success "CONSENSUS_SWITCH_HEIGHT=$height (will be passed to containers)" +} + +# Build test images (with -test suffix) +# Uses bitget/ as build context to access local go-ethereum and tendermint +build_test_images() { + log_info "Building test Docker images..." + log_info "Using build context: $BITGET_ROOT" + + # Build go-ubuntu-builder if needed + cd "$MORPH_ROOT" + make go-ubuntu-builder + + # Build from bitget/ directory to access all repos + cd "$BITGET_ROOT" + + # # Copy go module cache to avoid network downloads + # if [ -d "$HOME/go/pkg/mod" ]; then + # log_info "Copying go module cache to build context..." + # rm -rf .gomodcache + # cp -r "$HOME/go/pkg/mod" .gomodcache + # else + # log_warn "Go module cache not found at $HOME/go/pkg/mod" + # log_warn "Build may fail due to network issues" + # fi + + # Build test geth image + log_info "Building morph-geth-test (using local go-ethereum)..." + docker build -t morph-geth-test:latest \ + -f morph/ops/docker-sequencer-test/Dockerfile.l2-geth-test . + + # Build test node image + log_info "Building morph-node-test (using local go-ethereum + tendermint)..." + docker build -t morph-node-test:latest \ + -f morph/ops/docker-sequencer-test/Dockerfile.l2-node-test . + + # # Cleanup go module cache copy + # rm -rf .gomodcache + + log_success "Test images built!" 
+} + +# Run full devnet setup (reusing existing logic, but skip L2 startup) +setup_devnet() { + log_info "Running devnet setup..." + cd "$MORPH_ROOT" + + # Note: upgrade height should already be set before build_test_images + + # Step 1: Start L1 and setup tendermint nodes + # Note: main.py calls setup_devnet_nodes() before devnet.main() + log_info "Step 1: Starting L1 and setting up tendermint nodes..." + python3 "$DEVNET_DIR/main.py" --polyrepo-dir="$MORPH_ROOT" --only-l1 + + # Step 2: Deploy contracts and generate L2 genesis (without starting L2) + log_info "Step 2: Deploying contracts and generating L2 genesis..." + python3 -c " +import sys +import os +import time +import re +import fileinput +sys.path.insert(0, '$DEVNET_DIR') +import devnet +from devnet import run_command, read_json, write_json, test_port, log + +pjoin = os.path.join +polyrepo_dir = '$MORPH_ROOT' +L2_dir = pjoin(polyrepo_dir, 'ops', 'l2-genesis') +devnet_dir = pjoin(polyrepo_dir, 'ops', 'l2-genesis', '.devnet') +ops_dir = pjoin(polyrepo_dir, 'ops', 'docker') +contracts_dir = pjoin(polyrepo_dir, 'contracts') + +os.makedirs(devnet_dir, exist_ok=True) + +# Generate network config +devnet_cfg_orig = pjoin(L2_dir, 'deploy-config', 'devnet-deploy-config.json') +deploy_config = read_json(devnet_cfg_orig) +deploy_config['l1GenesisBlockTimestamp'] = '0x{:x}'.format(int(time.time())) +deploy_config['l1StartingBlockTag'] = 'earliest' +temp_deploy_config = pjoin(devnet_dir, 'deploy-config.json') +write_json(temp_deploy_config, deploy_config) + +# Deploy L1 contracts +deployment_dir = pjoin(devnet_dir, 'devnetL1.json') +run_command(['rm', '-f', deployment_dir], env={}, cwd=contracts_dir) +log.info('Deploying L1 Proxy contracts...') +run_command(['yarn', 'build'], env={}, cwd=contracts_dir) +run_command(['npx', 'hardhat', 'deploy', '--network', 'l1', '--storagepath', deployment_dir, '--concurrent', 'true'], env={}, cwd=contracts_dir) + +# Generate L2 genesis +log.info('Generating L2 genesis and rollup 
configs...') +run_command([ + 'env', 'CGO_ENABLED=1', 'CGO_LDFLAGS=\"-ldl\"', + 'go', 'run', 'cmd/main.go', 'genesis', 'l2', + '--l1-rpc', 'http://localhost:9545', + '--deploy-config', temp_deploy_config, + '--deployment-dir', deployment_dir, + '--outfile.l2', pjoin(devnet_dir, 'genesis-l2.json'), + '--outfile.genbatchheader', pjoin(devnet_dir, 'genesis-batch-header.json'), + '--outfile.rollup', pjoin(devnet_dir, 'rollup.json') +], cwd=L2_dir) + +# Initialize contracts +log.info('Deploying L1 Impl contracts and initialize...') +rollup_cfg = read_json(pjoin(devnet_dir, 'rollup.json')) +genesis_batch_header = rollup_cfg['genesis_batch_header'] +contracts_config = pjoin(contracts_dir, 'src', 'deploy-config', 'l1.ts') +pattern3 = re.compile(\"batchHeader: '.*'\") +for line in fileinput.input(contracts_config, inplace=True): + modified_line = re.sub(pattern3, f\"batchHeader: '{genesis_batch_header}'\", line) + print(modified_line, end='') +run_command(['npx', 'hardhat', 'initialize', '--network', 'l1', '--storagepath', deployment_dir, '--concurrent', 'true'], env={}, cwd=contracts_dir) + +# Staking +log.info('Staking sequencers...') +addresses = {} +deployment = read_json(deployment_dir) +for d in deployment: + addresses[d['name']] = d['address'] +for i in range(4): + run_command(['cast', 'send', addresses['Proxy__L1Staking'], + 'register(bytes32,bytes memory)', + deploy_config['l2StakingTmKeys'][i], + deploy_config['l2StakingBlsKeys'][i], + '--rpc-url', 'http://127.0.0.1:9545', + '--value', '1ether', + '--private-key', deploy_config['l2StakingPks'][i] + ]) + +# Update .env file +log.info('Updating .env file...') +env_file = pjoin(ops_dir, '.env') +env_data = {} +with open(env_file, 'r+') as envfile: + env_content = envfile.readlines() + for line in env_content: + line = line.strip() + if line and not line.startswith('#'): + parts = line.split('=', 1) + if len(parts) == 2: + env_data[parts[0].strip()] = parts[1].strip() + env_data['L1_CROSS_DOMAIN_MESSENGER'] = 
addresses['Proxy__L1CrossDomainMessenger'] + env_data['MORPH_PORTAL'] = addresses['Proxy__L1MessageQueueWithGasPriceOracle'] + env_data['MORPH_ROLLUP'] = addresses['Proxy__Rollup'] + env_data['MORPH_L1STAKING'] = addresses['Proxy__L1Staking'] + env_data['L1_SEQUENCER_CONTRACT'] = addresses.get('Proxy__L1Sequencer', '') + envfile.seek(0) + for key, value in env_data.items(): + envfile.write(f'{key}={value}\n') + envfile.truncate() + +log.info('Contract deployment and genesis generation complete!') +log.info('Skipping L2 startup - will be done with test images.') +" + + log_success "Devnet setup complete (L2 not started yet)" +} + +# Docker compose command with override file +# Note: -f must explicitly include override file when using non-default compose file name +COMPOSE_CMD="docker compose -f docker-compose-4nodes.yml -f docker-compose.override.yml" +COMPOSE_CMD_NO_OVERRIDE="docker compose -f docker-compose-4nodes.yml" + +# Copy override file to use test images +setup_override() { + log_info "Setting up docker-compose override for test images..." + cp "$SCRIPT_DIR/docker-compose.override.yml" "$DOCKER_DIR/docker-compose.override.yml" + log_success "Override file copied to $DOCKER_DIR/" +} + +# Remove override file +remove_override() { + rm -f "$DOCKER_DIR/docker-compose.override.yml" +} + +# Start L2 with test images +start_l2_test() { + log_info "Starting L2 with test images..." 
+ cd "$DOCKER_DIR" + + # Setup override file + setup_override + + # Read the .env file to get contract addresses + source .env 2>/dev/null || true + + # Set sequencer private key + export SEQUENCER_PRIVATE_KEY="0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80" + + # Stop any existing L2 containers + $COMPOSE_CMD stop \ + morph-geth-0 morph-geth-1 morph-geth-2 morph-geth-3 \ + node-0 node-1 node-2 node-3 2>/dev/null || true + + # Note: Test images should already be built by build_test_images() + # Uncomment below if you need to rebuild during start + # log_info "Building L2 containers with test images..." + # $COMPOSE_CMD build morph-geth-0 node-0 + + # Start L2 geth nodes + log_info "Starting L2 geth nodes..." + $COMPOSE_CMD up -d morph-geth-0 morph-geth-1 morph-geth-2 morph-geth-3 + + sleep 5 + + # Start L2 tendermint nodes + log_info "Starting L2 tendermint nodes..." + $COMPOSE_CMD up -d node-0 node-1 node-2 node-3 + + wait_for_rpc "$L2_RPC" + log_success "L2 is running with test images!" +} + +# ========== Test Functions ========== + +test_pbft_mode() { + log_info "========== Phase 1: Testing PBFT Mode ==========" + + local start_block=$(get_block_number) + log_info "Starting block: $start_block" + + # Wait for some blocks + local target=$((start_block + 10)) + wait_for_block $target + + # Verify nodes in sync + local block0=$(get_block_number "$L2_RPC") + local block1=$(get_block_number "$L2_RPC_NODE1") + + local diff=$((block0 - block1)) + if [ ${diff#-} -le 2 ]; then + log_success "Nodes in sync (node0: $block0, node1: $block1)" + else + log_error "Nodes out of sync!" + return 1 + fi +} + +test_upgrade() { + log_info "========== Phase 2: Waiting for Upgrade ==========" + log_info "Upgrade height: $UPGRADE_HEIGHT" + + wait_for_block $UPGRADE_HEIGHT + sleep 10 + + # Verify network continues + local post_upgrade=$(get_block_number) + wait_for_block $((post_upgrade + 5)) + + log_success "Upgrade completed! Network continues producing blocks." 
+} + +test_sequencer_mode() { + log_info "========== Phase 3: Testing Sequencer Mode ==========" + + local start_block=$(get_block_number) + wait_for_block $((start_block + 20)) + + local block0=$(get_block_number "$L2_RPC") + local block1=$(get_block_number "$L2_RPC_NODE1") + + local diff=$((block0 - block1)) + if [ ${diff#-} -le 2 ]; then + log_success "Nodes in sync after upgrade (node0: $block0, node1: $block1)" + else + log_error "Nodes out of sync after upgrade!" + return 1 + fi +} + +test_fullnode_sync() { + log_info "========== Phase 4: Testing Fullnode Sync ==========" + + local current_height=$(get_block_number) + log_info "Current height: $current_height" + + cd "$DOCKER_DIR" + + # Start sentry node (fullnode) + log_info "Starting fullnode (sentry-node-0)..." + $COMPOSE_CMD up -d sentry-geth-0 sentry-node-0 + + sleep 10 + wait_for_rpc "http://127.0.0.1:8945" + + # Wait for sync + local target_sync=$((current_height - 5)) + local max_wait=300 + local waited=0 + + while [ $waited -lt $max_wait ]; do + local fn_block=$(get_block_number "http://127.0.0.1:8945") + if [ "$fn_block" -ge "$target_sync" ]; then + log_success "Fullnode synced to block $fn_block" + return 0 + fi + echo -ne "\r Fullnode: $fn_block / $target_sync" + sleep 5 + waited=$((waited + 5)) + done + + log_error "Fullnode sync timeout" + return 1 +} + +# ========== Transaction Generator ========== + +start_tx_generator() { + log_info "Starting transaction generator..." + + # Simple tx generator using cast + ( + while true; do + RANDOM_ADDR="0x$(openssl rand -hex 20)" + cast send --private-key 0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80 \ + --rpc-url "$L2_RPC" \ + --value 1wei \ + "$RANDOM_ADDR" 2>/dev/null || true + sleep ${TX_INTERVAL:-5} + done + ) & + TX_GEN_PID=$! 
+ log_info "TX generator started (PID: $TX_GEN_PID)" +} + +stop_tx_generator() { + if [ -n "$TX_GEN_PID" ]; then + kill $TX_GEN_PID 2>/dev/null || true + log_info "TX generator stopped" + fi +} + +# ========== Cleanup ========== + +cleanup() { + log_info "Cleaning up..." + stop_tx_generator + cd "$DOCKER_DIR" + $COMPOSE_CMD_NO_OVERRIDE down -v 2>/dev/null || true + remove_override +} + +# ========== Main Commands ========== + +run_full_test() { + log_info "==========================================" + log_info " Sequencer Upgrade Test" + log_info " Upgrade Height: $UPGRADE_HEIGHT" + log_info "==========================================" + + trap cleanup EXIT + + # Set upgrade height BEFORE building (so it's compiled into the binary) + set_upgrade_height "$UPGRADE_HEIGHT" + + # Build test images (now with correct upgrade height) + build_test_images + + # Setup devnet (L1 + contracts + L2 genesis) + setup_devnet + + # Start L2 with test images + start_l2_test + + # Start tx generator + start_tx_generator + + # Run tests + test_pbft_mode + test_upgrade + test_sequencer_mode + test_fullnode_sync + + stop_tx_generator + + log_success "==========================================" + log_success " ALL TESTS PASSED!" 
+ log_success "==========================================" +} + +show_status() { + echo "Node 1: Block $(get_block_number http://127.0.0.1:8645)" + echo "Node 2: Block $(get_block_number http://127.0.0.1:8745)" + echo "Node 3: Block $(get_block_number http://127.0.0.1:8845)" + echo "Node 0 (seq-0): Block $(get_block_number http://127.0.0.1:8545)" + echo "Sentry: Block $(get_block_number http://127.0.0.1:8945 2>/dev/null || echo 'N/A')" +} + +show_logs() { + cd "$DOCKER_DIR" + $COMPOSE_CMD_NO_OVERRIDE logs -f "$@" +} + +# ========== Command Parsing ========== + +case "${1:-}" in + build) + build_test_images + ;; + setup) + setup_devnet + ;; + start) + start_l2_test + ;; + stop) + cd "$DOCKER_DIR" + $COMPOSE_CMD_NO_OVERRIDE down + ;; + clean) + cleanup + # Also clean L2 genesis + rm -rf "$OPS_DIR/l2-genesis/.devnet" + rm -rf "$DOCKER_DIR/.devnet" + ;; + logs) + shift + show_logs "$@" + ;; + test) + run_full_test + ;; + tx) + start_tx_generator + wait + ;; + status) + show_status + ;; + upgrade-height) + set_upgrade_height "${2:-50}" + ;; + *) + echo "Sequencer Upgrade Test Runner" + echo "" + echo "Usage: $0 {build|setup|start|stop|clean|logs|test|tx|status|upgrade-height}" + echo "" + echo "Commands:" + echo " build - Build test Docker images (morph-geth-test, morph-node-test)" + echo " setup - Run full devnet setup (L1 + contracts + L2 genesis)" + echo " start - Start L2 nodes with test images" + echo " stop - Stop all containers" + echo " clean - Stop and remove all containers and data" + echo " logs [service] - Show container logs" + echo " test - Run full upgrade test" + echo " tx - Start transaction generator" + echo " status - Show current block numbers" + echo " upgrade-height N - Set upgrade height to N" + echo "" + echo "Environment Variables:" + echo " UPGRADE_HEIGHT - Block height for consensus switch (default: 10)" + echo " TX_INTERVAL - Seconds between txs (default: 5)" + echo "" + echo "Test Flow:" + echo " 1. build - Build test images" + echo " 2. 
setup - Deploy L1, contracts, generate L2 genesis" + echo " 3. start - Start L2 with test images" + echo " 4. test - Run PBFT -> Upgrade -> Sequencer -> Fullnode tests" + echo "" + echo "Quick Start:" + echo " UPGRADE_HEIGHT=10 $0 test" + ;; +esac diff --git a/ops/docker-sequencer-test/scripts/tx-generator.sh b/ops/docker-sequencer-test/scripts/tx-generator.sh new file mode 100644 index 000000000..2311a64d5 --- /dev/null +++ b/ops/docker-sequencer-test/scripts/tx-generator.sh @@ -0,0 +1,74 @@ +#!/bin/sh +# Transaction Generator for Sequencer Test +# Sends random transactions to keep the network active + +set -e + +L2_RPC="${L2_RPC:-http://morph-geth-0:8545}" +INTERVAL="${TX_INTERVAL:-5}" # seconds between txs +PRIVATE_KEY="${PRIVATE_KEY:-0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80}" + +# Wait for L2 to be ready +echo "Waiting for L2 RPC to be ready..." +while true; do + if curl -s -X POST -H "Content-Type: application/json" \ + --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}' \ + "$L2_RPC" | grep -q "result"; then + echo "L2 RPC is ready!" + break + fi + echo "Waiting..." + sleep 2 +done + +# Get initial nonce +get_nonce() { + curl -s -X POST -H "Content-Type: application/json" \ + --data "{\"jsonrpc\":\"2.0\",\"method\":\"eth_getTransactionCount\",\"params\":[\"0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266\",\"latest\"],\"id\":1}" \ + "$L2_RPC" | grep -o '"result":"[^"]*"' | cut -d'"' -f4 +} + +# Get current block number +get_block_number() { + curl -s -X POST -H "Content-Type: application/json" \ + --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}' \ + "$L2_RPC" | grep -o '"result":"[^"]*"' | cut -d'"' -f4 +} + +echo "Starting transaction generator..." 
+echo "L2 RPC: $L2_RPC" +echo "Interval: ${INTERVAL}s" + +NONCE_HEX=$(get_nonce) +NONCE=$((NONCE_HEX)) +TX_COUNT=0 + +while true; do + BLOCK=$(get_block_number) + BLOCK_DEC=$((BLOCK)) + + # Generate random recipient address + RANDOM_SUFFIX=$(od -An -N4 -tx1 /dev/urandom | tr -d ' ') + TO_ADDR="0x000000000000000000000000000000${RANDOM_SUFFIX}" + + # Create and send transaction + NONCE_HEX=$(printf "0x%x" $NONCE) + TX_DATA="{\"jsonrpc\":\"2.0\",\"method\":\"eth_sendTransaction\",\"params\":[{\"from\":\"0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266\",\"to\":\"${TO_ADDR}\",\"value\":\"0x1\",\"nonce\":\"${NONCE_HEX}\"}],\"id\":1}" + + RESULT=$(curl -s -X POST -H "Content-Type: application/json" --data "$TX_DATA" "$L2_RPC") + + if echo "$RESULT" | grep -q "result"; then + TX_HASH=$(echo "$RESULT" | grep -o '"result":"[^"]*"' | cut -d'"' -f4) + echo "[Block $BLOCK_DEC] TX #$TX_COUNT sent: $TX_HASH" + NONCE=$((NONCE + 1)) + TX_COUNT=$((TX_COUNT + 1)) + else + echo "[Block $BLOCK_DEC] TX failed: $RESULT" + # Refresh nonce in case of error + NONCE_HEX=$(get_nonce) + NONCE=$((NONCE_HEX)) + fi + + sleep "$INTERVAL" +done + diff --git a/ops/l2-genesis/go.mod b/ops/l2-genesis/go.mod index e464485c4..ac98fcf32 100644 --- a/ops/l2-genesis/go.mod +++ b/ops/l2-genesis/go.mod @@ -2,11 +2,11 @@ module morph-l2/morph-deployer go 1.24.0 -replace github.com/tendermint/tendermint => github.com/morph-l2/tendermint v0.3.3 +replace github.com/tendermint/tendermint => github.com/morph-l2/tendermint v0.3.4-0.20260313040448-999449fd4d23 require ( github.com/holiman/uint256 v1.2.4 - github.com/morph-l2/go-ethereum v1.10.14-0.20260211074551-4f0f6e6bd141 + github.com/morph-l2/go-ethereum v1.10.14-0.20260312125309-280bfb9cfd1d github.com/stretchr/testify v1.10.0 github.com/urfave/cli v1.22.17 ) diff --git a/ops/l2-genesis/go.sum b/ops/l2-genesis/go.sum index 6907ac512..8e8733c37 100644 --- a/ops/l2-genesis/go.sum +++ b/ops/l2-genesis/go.sum @@ -141,8 +141,8 @@ github.com/mmcloughlin/addchain 
v0.4.0/go.mod h1:A86O+tHqZLMNO4w6ZZ4FlVQEadcoqky github.com/mmcloughlin/profile v0.1.1/go.mod h1:IhHD7q1ooxgwTgjxQYkACGA77oFTDdFVejUS1/tS/qU= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/morph-l2/go-ethereum v1.10.14-0.20260211074551-4f0f6e6bd141 h1:A8eygErKU6WKMipGWIemzwLeYkIGLd9yb/Ry3x+J9PQ= -github.com/morph-l2/go-ethereum v1.10.14-0.20260211074551-4f0f6e6bd141/go.mod h1:nkVzHjQWCOjvukQW8ittlwX+Xz9gmVHrP7mUi7zoHTs= +github.com/morph-l2/go-ethereum v1.10.14-0.20260312125309-280bfb9cfd1d h1:Qy3ytYw/PGnrPDAWen1MsMUhUXclk1F2Q36A07+bBv4= +github.com/morph-l2/go-ethereum v1.10.14-0.20260312125309-280bfb9cfd1d/go.mod h1:nkVzHjQWCOjvukQW8ittlwX+Xz9gmVHrP7mUi7zoHTs= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= diff --git a/ops/tools/go.mod b/ops/tools/go.mod index 78ad604a6..48630f30f 100644 --- a/ops/tools/go.mod +++ b/ops/tools/go.mod @@ -2,10 +2,10 @@ module morph-l2/tools go 1.24.0 -replace github.com/tendermint/tendermint => github.com/morph-l2/tendermint v0.3.3 +replace github.com/tendermint/tendermint => github.com/morph-l2/tendermint v0.3.4-0.20260313040448-999449fd4d23 require ( - github.com/morph-l2/go-ethereum v1.10.14-0.20260211074551-4f0f6e6bd141 + github.com/morph-l2/go-ethereum v1.10.14-0.20260312125309-280bfb9cfd1d github.com/tendermint/tendermint v0.35.9 ) diff --git a/ops/tools/go.sum b/ops/tools/go.sum index 6be6c6bf0..567e33c84 100644 --- a/ops/tools/go.sum +++ b/ops/tools/go.sum @@ -163,10 +163,10 @@ github.com/mmcloughlin/addchain v0.4.0/go.mod h1:A86O+tHqZLMNO4w6ZZ4FlVQEadcoqky github.com/mmcloughlin/profile v0.1.1/go.mod 
h1:IhHD7q1ooxgwTgjxQYkACGA77oFTDdFVejUS1/tS/qU= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/morph-l2/go-ethereum v1.10.14-0.20260211074551-4f0f6e6bd141 h1:A8eygErKU6WKMipGWIemzwLeYkIGLd9yb/Ry3x+J9PQ= -github.com/morph-l2/go-ethereum v1.10.14-0.20260211074551-4f0f6e6bd141/go.mod h1:nkVzHjQWCOjvukQW8ittlwX+Xz9gmVHrP7mUi7zoHTs= -github.com/morph-l2/tendermint v0.3.3 h1:zsmzVJfKp+NuCr45ZUUY2ZJjnHAVLzwJLID6GxBR4i4= -github.com/morph-l2/tendermint v0.3.3/go.mod h1:TtCzp9l6Z6yDUiwv3TbqKqw8Q8RKp3fSz5+adO1/Y8w= +github.com/morph-l2/go-ethereum v1.10.14-0.20260312125309-280bfb9cfd1d h1:Qy3ytYw/PGnrPDAWen1MsMUhUXclk1F2Q36A07+bBv4= +github.com/morph-l2/go-ethereum v1.10.14-0.20260312125309-280bfb9cfd1d/go.mod h1:nkVzHjQWCOjvukQW8ittlwX+Xz9gmVHrP7mUi7zoHTs= +github.com/morph-l2/tendermint v0.3.4-0.20260313040448-999449fd4d23 h1:YrxdcmetkysTg+WiHroyDgVKfuXNonp4HkTBAVkOY6w= +github.com/morph-l2/tendermint v0.3.4-0.20260313040448-999449fd4d23/go.mod h1:TtCzp9l6Z6yDUiwv3TbqKqw8Q8RKp3fSz5+adO1/Y8w= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= diff --git a/oracle/go.mod b/oracle/go.mod index d82290072..d0aa65c11 100644 --- a/oracle/go.mod +++ b/oracle/go.mod @@ -2,12 +2,12 @@ module morph-l2/oracle go 1.24.0 -replace github.com/tendermint/tendermint => github.com/morph-l2/tendermint v0.3.3 +replace github.com/tendermint/tendermint => github.com/morph-l2/tendermint v0.3.4-0.20260313040448-999449fd4d23 require ( github.com/go-kit/kit v0.12.0 github.com/morph-l2/externalsign v0.3.1 - github.com/morph-l2/go-ethereum v1.10.14-0.20260211074551-4f0f6e6bd141 + 
github.com/morph-l2/go-ethereum v1.10.14-0.20260312125309-280bfb9cfd1d github.com/prometheus/client_golang v1.17.0 github.com/stretchr/testify v1.10.0 github.com/tendermint/tendermint v0.35.9 diff --git a/oracle/go.sum b/oracle/go.sum index da0d78add..2f75fe3d9 100644 --- a/oracle/go.sum +++ b/oracle/go.sum @@ -174,10 +174,10 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJ github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/morph-l2/externalsign v0.3.1 h1:UYFDZFB0L85A4rDvuwLNBiGEi0kSmg9AZ2v8Q5O4dQo= github.com/morph-l2/externalsign v0.3.1/go.mod h1:b6NJ4GUiiG/gcSJsp3p8ExsIs4ZdphlrVALASnVoGJE= -github.com/morph-l2/go-ethereum v1.10.14-0.20260211074551-4f0f6e6bd141 h1:A8eygErKU6WKMipGWIemzwLeYkIGLd9yb/Ry3x+J9PQ= -github.com/morph-l2/go-ethereum v1.10.14-0.20260211074551-4f0f6e6bd141/go.mod h1:nkVzHjQWCOjvukQW8ittlwX+Xz9gmVHrP7mUi7zoHTs= -github.com/morph-l2/tendermint v0.3.3 h1:zsmzVJfKp+NuCr45ZUUY2ZJjnHAVLzwJLID6GxBR4i4= -github.com/morph-l2/tendermint v0.3.3/go.mod h1:TtCzp9l6Z6yDUiwv3TbqKqw8Q8RKp3fSz5+adO1/Y8w= +github.com/morph-l2/go-ethereum v1.10.14-0.20260312125309-280bfb9cfd1d h1:Qy3ytYw/PGnrPDAWen1MsMUhUXclk1F2Q36A07+bBv4= +github.com/morph-l2/go-ethereum v1.10.14-0.20260312125309-280bfb9cfd1d/go.mod h1:nkVzHjQWCOjvukQW8ittlwX+Xz9gmVHrP7mUi7zoHTs= +github.com/morph-l2/tendermint v0.3.4-0.20260313040448-999449fd4d23 h1:YrxdcmetkysTg+WiHroyDgVKfuXNonp4HkTBAVkOY6w= +github.com/morph-l2/tendermint v0.3.4-0.20260313040448-999449fd4d23/go.mod h1:TtCzp9l6Z6yDUiwv3TbqKqw8Q8RKp3fSz5+adO1/Y8w= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= diff --git a/token-price-oracle/go.mod b/token-price-oracle/go.mod index 02292b9e8..f8ccf0a86 100644 --- 
a/token-price-oracle/go.mod +++ b/token-price-oracle/go.mod @@ -9,7 +9,7 @@ replace ( require ( github.com/morph-l2/externalsign v0.3.1 - github.com/morph-l2/go-ethereum v1.10.14-0.20260211074551-4f0f6e6bd141 + github.com/morph-l2/go-ethereum v1.10.14-0.20260312125309-280bfb9cfd1d github.com/prometheus/client_golang v1.17.0 github.com/sirupsen/logrus v1.9.3 github.com/urfave/cli v1.22.17 diff --git a/token-price-oracle/go.sum b/token-price-oracle/go.sum index a0b161877..a08366f8b 100644 --- a/token-price-oracle/go.sum +++ b/token-price-oracle/go.sum @@ -147,8 +147,8 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJ github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/morph-l2/externalsign v0.3.1 h1:UYFDZFB0L85A4rDvuwLNBiGEi0kSmg9AZ2v8Q5O4dQo= github.com/morph-l2/externalsign v0.3.1/go.mod h1:b6NJ4GUiiG/gcSJsp3p8ExsIs4ZdphlrVALASnVoGJE= -github.com/morph-l2/go-ethereum v1.10.14-0.20260211074551-4f0f6e6bd141 h1:A8eygErKU6WKMipGWIemzwLeYkIGLd9yb/Ry3x+J9PQ= -github.com/morph-l2/go-ethereum v1.10.14-0.20260211074551-4f0f6e6bd141/go.mod h1:nkVzHjQWCOjvukQW8ittlwX+Xz9gmVHrP7mUi7zoHTs= +github.com/morph-l2/go-ethereum v1.10.14-0.20260312125309-280bfb9cfd1d h1:Qy3ytYw/PGnrPDAWen1MsMUhUXclk1F2Q36A07+bBv4= +github.com/morph-l2/go-ethereum v1.10.14-0.20260312125309-280bfb9cfd1d/go.mod h1:nkVzHjQWCOjvukQW8ittlwX+Xz9gmVHrP7mUi7zoHTs= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= diff --git a/tx-submitter/.gitignore b/tx-submitter/.gitignore index 835c4a0e4..fde59c30d 100644 --- a/tx-submitter/.gitignore +++ b/tx-submitter/.gitignore @@ -29,6 +29,7 @@ tx-submitter **/tx-submitter build/ *debug_bin* +submitter-leveldb # Config and Environment files .env* diff --git 
a/tx-submitter/Makefile b/tx-submitter/Makefile index ab7a3c1c7..85439967c 100644 --- a/tx-submitter/Makefile +++ b/tx-submitter/Makefile @@ -23,7 +23,7 @@ clean: rm tx-submitter test: - go test -v -race -coverprofile=coverage.txt -covermode=atomic -p 1 ./... + go test -v -race -coverprofile=coverage.txt -covermode=atomic -p 1 $$(go list ./... | grep -v '/batch') lint: GOBIN=$(PWD)/build/bin go run ../build/lint.go diff --git a/tx-submitter/batch/batch_cache.go b/tx-submitter/batch/batch_cache.go new file mode 100644 index 000000000..cef6fa008 --- /dev/null +++ b/tx-submitter/batch/batch_cache.go @@ -0,0 +1,1227 @@ +package batch + +import ( + "bytes" + "context" + "encoding/binary" + "encoding/hex" + "errors" + "fmt" + "math/big" + "sync" + + "morph-l2/tx-submitter/db" + "morph-l2/tx-submitter/iface" + "morph-l2/tx-submitter/types" + + "github.com/morph-l2/go-ethereum/accounts/abi/bind" + "github.com/morph-l2/go-ethereum/common" + "github.com/morph-l2/go-ethereum/common/hexutil" + ethtypes "github.com/morph-l2/go-ethereum/core/types" + "github.com/morph-l2/go-ethereum/crypto" + "github.com/morph-l2/go-ethereum/eth" + "github.com/morph-l2/go-ethereum/log" +) + +// BatchCache is a structure for caching and building batch data +// Stores all batch information starting from 0, and has the functionality to pack batches +type BatchCache struct { + mu sync.RWMutex + ctx context.Context + initDone bool + + batchStorage *BatchStorage + + // key: batchIndex, value: RPCRollupBatch + sealedBatches map[uint64]*eth.RPCRollupBatch + sealedBatchHeaders map[uint64]*BatchHeaderBytes + + // Currently accumulating batch data (referencing node's BatchingCache) + // Parent batch information + parentBatchHeader *BatchHeaderBytes + prevStateRoot common.Hash + + // Accumulated batch data + batchData *BatchData + totalL1MessagePopped uint64 + postStateRoot common.Hash + withdrawRoot common.Hash + lastPackedBlockHeight uint64 + + // Currently processing block data (referencing node's 
BatchingCache) + // This data will not be appended to batch until block is confirmed + currentBlockContext []byte + currentTxsPayload []byte + currentL1TxsHashes []common.Hash + totalL1MessagePoppedAfterCurBlock uint64 + currentStateRoot common.Hash + currentWithdrawRoot common.Hash + currentBlockNumber uint64 + currentBlockHash common.Hash + + // Function to determine if batch is upgraded + isBatchUpgraded func(uint64) bool + + // Clients and contracts + l1Client iface.Client + l2Clients iface.L2Clients + rollupContract iface.IRollup + l2Caller *types.L2Caller + + // config + batchTimeOut uint64 + blockInterval uint64 +} + +// NewBatchCache creates and initializes a new BatchCache instance +func NewBatchCache( + isBatchUpgraded func(uint64) bool, + l1Client iface.Client, + l2Clients []iface.L2Client, + rollupContract iface.IRollup, + l2Caller *types.L2Caller, + ldb *db.Db, +) *BatchCache { + if isBatchUpgraded == nil { + // Default implementation: always returns true (use V1 version) + isBatchUpgraded = func(uint64) bool { return true } + } + ctx := context.Background() + ifL2Clients := iface.L2Clients{Clients: l2Clients} + _, err := ifL2Clients.BlockNumber(ctx) + if err != nil { + log.Error("Error getting block number", "err", err) + } + return &BatchCache{ + ctx: ctx, + initDone: false, + sealedBatches: make(map[uint64]*eth.RPCRollupBatch), + sealedBatchHeaders: make(map[uint64]*BatchHeaderBytes), + parentBatchHeader: nil, + prevStateRoot: common.Hash{}, + batchData: NewBatchData(), + totalL1MessagePopped: 0, + postStateRoot: common.Hash{}, + withdrawRoot: common.Hash{}, + lastPackedBlockHeight: 0, + currentBlockContext: nil, + currentTxsPayload: nil, + currentL1TxsHashes: nil, + totalL1MessagePoppedAfterCurBlock: 0, + currentStateRoot: common.Hash{}, + currentWithdrawRoot: common.Hash{}, + currentBlockNumber: 0, + currentBlockHash: common.Hash{}, + isBatchUpgraded: isBatchUpgraded, + l1Client: l1Client, + l2Clients: iface.L2Clients{Clients: l2Clients}, + 
rollupContract: rollupContract, + l2Caller: l2Caller, + batchStorage: NewBatchStorage(ldb), + } +} + +func (bc *BatchCache) Init() error { + err := bc.updateBatchConfigFromGov() + if err != nil { + return err + } + ci, fi, err := bc.getBatchStatusFromContract() + if err != nil { + return fmt.Errorf("get batch status from rollup failed err: %w", err) + } + headerBytes, err := bc.getLastFinalizeBatchHeaderFromRollupByIndex(fi.Uint64()) + if err != nil { + return fmt.Errorf("get last finalize batch header err: %w", err) + } + + // Initialize BatchCache parent batch information + // prevStateRoot should be the parent batch's postStateRoot + bc.parentBatchHeader = headerBytes + bc.prevStateRoot, err = headerBytes.PostStateRoot() + if err != nil { + return fmt.Errorf("get post state root err: %w", err) + } + bc.lastPackedBlockHeight, err = headerBytes.LastBlockNumber() + if err != nil { + store, err := bc.rollupContract.BatchDataStore(nil, fi) + if err != nil { + return err + } + bc.lastPackedBlockHeight = store.BlockNumber.Uint64() + } + bc.currentBlockNumber = bc.lastPackedBlockHeight + bc.totalL1MessagePopped, err = headerBytes.TotalL1MessagePopped() + if err != nil { + return fmt.Errorf("get total l1 message popped err: %w", err) + } + log.Info("Start assemble batch", "start batch", fi.Uint64(), "end batch", ci.Uint64()) + return nil +} + +func (bc *BatchCache) InitFromRollupByRange() error { + if bc.initDone { + return nil + } + err := bc.Init() + if err != nil { + return err + } + err = bc.assembleUnFinalizeBatchHeaderFromL2Blocks() + if err != nil { + return err + } + bc.initDone = true + log.Info("Initialized batch cache success") + return nil +} + +func (bc *BatchCache) InitAndSyncFromDatabase() error { + if bc.initDone { + return nil + } + err := bc.updateBatchConfigFromGov() + if err != nil { + return err + } + ci, fi, err := bc.getBatchStatusFromContract() + if err != nil { + return fmt.Errorf("get batch status from rollup failed err: %w", err) + } + + 
batches, headers, indices, err := bc.batchStorage.LoadAllSealedBatchesAndHeader() + if err != nil { + log.Error("Failed to load sealed batch headers from storage", "error", err) + return bc.DeleteBatchStorageAndInitFromRollup() + } + + if len(batches) == 0 { + return bc.InitAndSyncFromRollup() + } + maxIndex := indices[0] + for _, idx := range indices { + if idx > maxIndex { + maxIndex = idx + } + } + // check batch hash with the batch that already rollup by submitter + for i := fi.Uint64(); i <= ci.Uint64(); i++ { + batchHash, err := bc.rollupContract.CommittedBatches(nil, new(big.Int).SetUint64(i)) + if err != nil { + return err + } + batchStorage, exist := batches[i] + if !exist || !bytes.Equal(batchHash[:], batchStorage.Hash.Bytes()) { + // batch not contiguous or batch is invalid + return bc.DeleteBatchStorageAndInitFromRollup() + } + } + + latestHeaderBytes := headers[maxIndex] + prevStateRoot, err := latestHeaderBytes.PostStateRoot() + if err != nil { + log.Error("Get post state root failed", "err", err) + return bc.DeleteBatchStorageAndInitFromRollup() + } + totalL1MessagePopped, err := latestHeaderBytes.TotalL1MessagePopped() + if err != nil { + log.Error("Get total l1 message popped failed", "err", err) + return bc.DeleteBatchStorageAndInitFromRollup() + } + lastPackedBlockHeight, err := latestHeaderBytes.LastBlockNumber() + if err != nil { + // maybe the latest header is version 0 which do not have blockNum + latestBatchIndex, err := latestHeaderBytes.BatchIndex() + if err != nil { + return fmt.Errorf("get batch index from parent header failed err: %w", err) + } + // check batch index range + if latestBatchIndex < fi.Uint64() || latestBatchIndex > ci.Uint64() { + // missing batch data, sync from another side + log.Error("Batch index is out of range", + "latestBatchIndex", latestBatchIndex, + "commitIndex", ci.Uint64(), "finalizeIndex", fi.Uint64()) + return bc.DeleteBatchStorageAndInitFromRollup() + } + store, err := bc.rollupContract.BatchDataStore(nil, 
new(big.Int).SetUint64(latestBatchIndex)) + if err != nil { + log.Error("Failed to load latest batch index from rollup", + "error", err, + "batchIndex", latestBatchIndex) + return bc.DeleteBatchStorageAndInitFromRollup() + } + lastPackedBlockHeight = store.BlockNumber.Uint64() + } + bc.lastPackedBlockHeight = lastPackedBlockHeight + bc.sealedBatches = batches + bc.sealedBatchHeaders = headers + bc.parentBatchHeader = latestHeaderBytes + bc.currentBlockNumber = bc.lastPackedBlockHeight + bc.prevStateRoot = prevStateRoot + bc.totalL1MessagePopped = totalL1MessagePopped + + bc.initDone = true + log.Info("Sync sealed batch from database success", "count", len(batches)) + return nil +} + +func (bc *BatchCache) InitAndSyncFromRollup() error { + if bc.initDone { + return nil + } + err := bc.Init() + if err != nil { + return err + } + ci, fi, err := bc.getBatchStatusFromContract() + if err != nil { + return fmt.Errorf("get batch status from rollup failed err: %w", err) + } + log.Info("Start assemble batch", + "startBatch", fi.Uint64()+1, + "endBatch", ci.Uint64(), + "startNum", bc.lastPackedBlockHeight, + "prevStateRoot", bc.prevStateRoot.String(), + ) + for i := fi.Uint64() + 1; i <= ci.Uint64(); i++ { + batchIndex := new(big.Int).SetUint64(i) + startNum, endNum, err := bc.getBatchBlockRange(batchIndex) + if err != nil { + return fmt.Errorf("get batch block range err: %w,start %v, end %v", err, startNum, endNum) + } + log.Info("assemble batch block range", "startNum", startNum, "endNum", endNum) + batchHeaderBytes, err := bc.assembleBatchHeaderFromL2Blocks(startNum, endNum) + if err != nil { + return err + } + batchHash, err := batchHeaderBytes.Hash() + if err != nil { + return fmt.Errorf("get batch hash err: %w", err) + } + correct, err := bc.checkBatchHashCorrect(batchIndex, batchHash) + if err != nil { + return fmt.Errorf("check batch hash failed, err: %w, batchIndex %v, batchHash %v", err, batchIndex, batchHash.String()) + } + if !correct { + return fmt.Errorf("batch 
hash check failed: batch index %d is incorrect", i) + } + log.Info("Assemble batch success", "batch index", i, "last batch index", ci.Uint64()) + } + bc.initDone = true + log.Info("Initialized batch cache success") + return nil +} + +func (bc *BatchCache) LatestBatchIndex() (uint64, error) { + return bc.parentBatchHeader.BatchIndex() +} + +func (bc *BatchCache) updateBatchConfigFromGov() error { + interval, err := bc.l2Caller.BatchBlockInterval(nil) + if err != nil { + return err + } + timeout, err := bc.l2Caller.BatchTimeout(nil) + if err != nil { + return err + } + bc.batchTimeOut = timeout.Uint64() + bc.blockInterval = interval.Uint64() + log.Info("Update batch config success", "interval", interval.Uint64(), "timeout", timeout.Uint64()) + return nil +} + +func (bc *BatchCache) checkBatchHashCorrect(batchIndex *big.Int, batchHash common.Hash) (bool, error) { + commitBatchHash, err := bc.rollupContract.CommittedBatches(nil, batchIndex) + if err != nil { + return false, err + } + if !bytes.Equal(commitBatchHash[:], batchHash.Bytes()) { + log.Error("check commit batch hash failed", + "index", batchIndex.String(), + "committed", hex.EncodeToString(commitBatchHash[:]), + "generate", batchHash.String()) + return false, nil + } + return true, nil +} + +func (bc *BatchCache) getBatchStatusFromContract() (*big.Int, *big.Int, error) { + latestCommitBatchIndex, err := bc.rollupContract.LastCommittedBatchIndex(nil) + if err != nil { + return nil, nil, err + } + lastFinalizedBatchIndex, err := bc.rollupContract.LastFinalizedBatchIndex(nil) + if err != nil { + return nil, nil, err + } + return latestCommitBatchIndex, lastFinalizedBatchIndex, nil +} + +func (bc *BatchCache) getBatchBlockRange(batchIndex *big.Int) (uint64, uint64, error) { + preIndex := new(big.Int).Sub(batchIndex, big.NewInt(1)) + preBatchStorage, err := bc.rollupContract.BatchDataStore(nil, preIndex) + if err != nil { + return 0, 0, err + } + batchStorage, err := bc.rollupContract.BatchDataStore(nil, 
batchIndex) + if err != nil { + return 0, 0, err + } + return preBatchStorage.BlockNumber.Uint64() + 1, batchStorage.BlockNumber.Uint64(), nil +} + +func (bc *BatchCache) getUnFinalizeBlockRange() (uint64, uint64, *big.Int, error) { + ci, fi, err := bc.getBatchStatusFromContract() + if err != nil { + return 0, 0, nil, err + } + finalizeBatchStorage, err := bc.rollupContract.BatchDataStore(nil, fi) + if err != nil { + return 0, 0, nil, err + } + startNum := finalizeBatchStorage.BlockNumber.Uint64() + 1 + endNum, err := bc.l2Clients.BlockNumber(context.Background()) + if err != nil { + return 0, 0, nil, err + } + return startNum, endNum, ci, nil +} + +// IsEmpty checks if current batch data is empty +func (bc *BatchCache) IsEmpty() bool { + bc.mu.RLock() + defer bc.mu.RUnlock() + return bc.batchData == nil || bc.batchData.IsEmpty() +} + +// IsCurrentEmpty checks if current block data is empty +func (bc *BatchCache) IsCurrentEmpty() bool { + bc.mu.RLock() + defer bc.mu.RUnlock() + return len(bc.currentBlockContext) == 0 +} + +// ClearCurrent clears current block data +// Note: lock must be held before calling this method +func (bc *BatchCache) ClearCurrent() { + bc.currentTxsPayload = nil + bc.currentL1TxsHashes = nil + bc.currentBlockContext = nil + bc.totalL1MessagePoppedAfterCurBlock = 0 + bc.currentStateRoot = common.Hash{} + bc.currentWithdrawRoot = common.Hash{} +} + +// GetSealedBatch gets sealed batch information +func (bc *BatchCache) GetSealedBatch(batchIndex uint64) (*eth.RPCRollupBatch, bool) { + bc.mu.RLock() + defer bc.mu.RUnlock() + batch, ok := bc.sealedBatches[batchIndex] + return batch, ok +} + +// GetSealedBatchHeader gets sealed batch header information +func (bc *BatchCache) GetSealedBatchHeader(batchIndex uint64) (*BatchHeaderBytes, bool) { + bc.mu.RLock() + defer bc.mu.RUnlock() + header, ok := bc.sealedBatchHeaders[batchIndex] + if !ok { + loadedHeader, err := bc.batchStorage.LoadSealedBatchHeader(batchIndex) + if err != nil { + return nil, 
false + } + return loadedHeader, true + } + return header, ok +} + +// GetLatestSealedBatchIndex gets the latest sealed batch index +func (bc *BatchCache) GetLatestSealedBatchIndex() uint64 { + bc.mu.RLock() + defer bc.mu.RUnlock() + + var maxIndex uint64 = 0 + for index := range bc.sealedBatches { + if index > maxIndex { + maxIndex = index + } + } + return maxIndex +} + +// CalculateCapWithProposalBlock calculates batch capacity after including the specified block +func (bc *BatchCache) CalculateCapWithProposalBlock(blockNumber uint64, withdrawRoot common.Hash) (bool, error) { + if len(bc.l2Clients.Clients) == 0 { + return false, fmt.Errorf("l2 client is nil") + } + + // Fetch complete block from L2 client (including transactions) + block, err := bc.l2Clients.BlockByNumber(context.Background(), big.NewInt(int64(blockNumber))) + if err != nil { + return false, fmt.Errorf("failed to fetch block %d: %w", blockNumber, err) + } + + if block == nil { + return false, fmt.Errorf("block is nil for block %d", blockNumber) + } + + header := block.Header() + + // Verify block number matches + if header.Number.Uint64() != blockNumber { + return false, fmt.Errorf("block number mismatch: expected %d, got %d", blockNumber, header.Number.Uint64()) + } + + bc.mu.Lock() + defer bc.mu.Unlock() + // Verify block number continuity + if blockNumber <= bc.lastPackedBlockHeight { + if blockNumber != 0 || bc.lastPackedBlockHeight != 0 { + return false, fmt.Errorf("wrong block number: lastPackedBlockHeight=%d, proposed=%d", bc.lastPackedBlockHeight, blockNumber) + } + } + if blockNumber > bc.lastPackedBlockHeight+1 { + // Some blocks were skipped, need to clear cache + return false, fmt.Errorf("discontinuous block number: lastPackedBlockHeight=%d, proposed=%d", bc.lastPackedBlockHeight, blockNumber) + } + + // Ensure BatchData is initialized + if bc.batchData == nil { + bc.batchData = NewBatchData() + } + + // Parse transactions, distinguish L1 and L2 transactions + txsPayload, l1TxHashes, 
newTotalL1MessagePopped, l2TxNum, err := parsingTxs(block.Transactions(), bc.totalL1MessagePopped) + if err != nil { + return false, fmt.Errorf("failed to parse transactions: %w", err) + } + + l1TxNum := int(newTotalL1MessagePopped - bc.totalL1MessagePopped) + txsNum := l2TxNum + l1TxNum + + // Build BlockContext (60 bytes) + blockContext := buildBlockContext(header, txsNum, l1TxNum) + + // Store to current, do not immediately append to batch + bc.currentBlockContext = blockContext + bc.currentTxsPayload = txsPayload + bc.currentL1TxsHashes = l1TxHashes + bc.totalL1MessagePoppedAfterCurBlock = newTotalL1MessagePopped + bc.currentStateRoot = header.Root + bc.currentBlockNumber = blockNumber + bc.currentBlockHash = block.Hash() + bc.currentWithdrawRoot = withdrawRoot + + // Check capacity: if compressed size would exceed limit after adding current block + var exceeded bool + if bc.isBatchUpgraded(header.Time) { + exceeded, err = bc.batchData.WillExceedCompressedSizeLimit(blockContext, txsPayload) + } else { + exceeded, err = bc.batchData.EstimateCompressedSizeWithNewPayload(txsPayload) + } + if err != nil { + return false, fmt.Errorf("failed to estimate compressed size: %w", err) + } + + return exceeded, nil +} + +// PackCurrentBlock packs current block data into batch +// References node's PackCurrentBlock +// Parameters: +// - blockNumber: block number to pack (for verification) +// +// Returns: +// - error: returns error if packing fails +// +// Note: This method should be called after block is confirmed, appending data from currentBlockContext to batch +func (bc *BatchCache) PackCurrentBlock(blockNumber uint64) error { + bc.mu.Lock() + defer bc.mu.Unlock() + + // If the current block is empty, return directly + if len(bc.currentBlockContext) == 0 { + return nil // nothing to pack + } + + // Verify block number matches + if bc.currentBlockNumber != blockNumber { + return fmt.Errorf("block number mismatch: expected %d, got %d", blockNumber, bc.currentBlockNumber) + 
} + + // Ensure BatchData is initialized + if bc.batchData == nil { + bc.batchData = NewBatchData() + } + + // Append current block data to batch + bc.batchData.Append(bc.currentBlockContext, bc.currentTxsPayload, bc.currentL1TxsHashes) + + // Update accumulated state + bc.totalL1MessagePopped = bc.totalL1MessagePoppedAfterCurBlock + bc.withdrawRoot = bc.currentWithdrawRoot + bc.postStateRoot = bc.currentStateRoot + bc.lastPackedBlockHeight = blockNumber + + // Clear current block data + bc.ClearCurrent() + + return nil +} + +// FetchAndCacheHeader fetches complete block from L2 client for specified block number, parses transactions and stores to current +// Note: This method has been replaced by CalculateCapWithProposalBlock and PackCurrentBlock +// Kept for backward compatibility, but recommend using new methods +func (bc *BatchCache) FetchAndCacheHeader(blockNumber uint64, withdrawRoot common.Hash) (*ethtypes.Header, error) { + // Use new method + _, err := bc.CalculateCapWithProposalBlock(blockNumber, withdrawRoot) + if err != nil { + return nil, err + } + + // Pack immediately (backward compatible behavior) + if err := bc.PackCurrentBlock(blockNumber); err != nil { + return nil, err + } + + bc.mu.RLock() + defer bc.mu.RUnlock() + + // Return header (need to re-fetch because current has been cleared) + block, err := bc.l2Clients.BlockByNumber(context.Background(), big.NewInt(int64(blockNumber))) + if err != nil { + return nil, err + } + return block.Header(), nil +} + +// SealBatch seals the currently accumulated batch, generates batch header and stores to sealedBatches +// Parameters: +// - sequencerSetVerifyHash: sequencer set verification hash (obtained from L1 contract) +// - blockTimestamp: current block timestamp (used to determine batch version) +// +// Returns: +// - batchIndex: sealed batch index +// - batchHash: batch hash +// - reachedExpectedSize: whether the sealed data size reaches expected value (compressed payload size close to or reaches 
MaxBlobBytesSize) +// - error: returns error if sealing fails +// +// Note: Sealed batch will be stored in BatchCache's sealedBatches, not sent anywhere +func (bc *BatchCache) SealBatch(sequencerSets []byte, blockTimestamp uint64) (uint64, BatchHeaderBytes, bool, error) { + bc.mu.Lock() + defer bc.mu.Unlock() + + // Ensure batch data is not empty + if bc.batchData == nil || bc.batchData.IsEmpty() { + return 0, BatchHeaderBytes{}, false, errors.New("failed to seal batch: batch cache is empty") + } + + // Compress data and calculate dataHash + compressedPayload, batchDataHash, err := bc.handleBatchSealing(blockTimestamp) + if err != nil { + return 0, BatchHeaderBytes{}, false, fmt.Errorf("failed to handle batch sealing: %w", err) + } + + // Check if sealed data size reaches expected value + // Expected value: compressed payload size close to or reaches MaxBlobBytesSize + // Use 90% as threshold, i.e., if compressed size >= MaxBlobBytesSize * 0.9, consider it reached expected + threshold := float64(MaxBlobBytesSize) * 0.9 + expectedSizeThreshold := uint64(threshold) + reachedExpectedSize := uint64(len(compressedPayload)) >= expectedSizeThreshold + + // Generate blob sidecar + sidecar, err := MakeBlobTxSidecar(compressedPayload) + if err != nil { + return 0, BatchHeaderBytes{}, false, fmt.Errorf("failed to create blob sidecar: %w", err) + } + + // Create batch header + batchHeader := bc.createBatchHeader(batchDataHash, sidecar, crypto.Keccak256Hash(sequencerSets), blockTimestamp) + + // Calculate batch hash + batchHash, err := batchHeader.Hash() + if err != nil { + return 0, BatchHeaderBytes{}, false, fmt.Errorf("failed to hash batch header: %w", err) + } + + // Get batch index + batchIndex, err := batchHeader.BatchIndex() + if err != nil { + return 0, BatchHeaderBytes{}, false, fmt.Errorf("failed to get batch index: %w", err) + } + + // Build parent batch header bytes + var parentBatchHeaderBytes hexutil.Bytes + if bc.parentBatchHeader != nil { + 
parentBatchHeaderBytes = hexutil.Bytes(*bc.parentBatchHeader) + } + + // Get the version from batch header + version, err := batchHeader.Version() + if err != nil { + return 0, BatchHeaderBytes{}, false, fmt.Errorf("failed to get batch version: %w", err) + } + + // Build block contexts from batch data (encode block contexts) + blockContextsData, err := bc.batchData.Encode() + if err != nil { + return 0, BatchHeaderBytes{}, false, fmt.Errorf("failed to encode batch data: %w", err) + } + blockContexts := hexutil.Bytes(blockContextsData) + + // Convert sequencerSetVerifyHash to bytes + currentSequencerSetBytes := hexutil.Bytes(sequencerSets) + + // Get L1 message count from batch data + numL1Messages := bc.batchData.l1TxNum + + // Store sealed batch information as RPCRollupBatch + sealedBatch := ð.RPCRollupBatch{ + Version: uint(version), + Hash: batchHash, + ParentBatchHeader: parentBatchHeaderBytes, + BlockContexts: blockContexts, + CurrentSequencerSetBytes: currentSequencerSetBytes, + PrevStateRoot: bc.prevStateRoot, + PostStateRoot: bc.postStateRoot, + WithdrawRoot: bc.withdrawRoot, + LastBlockNumber: bc.lastPackedBlockHeight, + NumL1Messages: numL1Messages, + Sidecar: *sidecar, + Signatures: []eth.RPCBatchSignature{}, + CollectedL1Fee: nil, + } + bc.sealedBatches[batchIndex] = sealedBatch + // Store batch header copy + batchHeaderCopy := make(BatchHeaderBytes, len(batchHeader)) + copy(batchHeaderCopy, batchHeader) + bc.sealedBatchHeaders[batchIndex] = &batchHeaderCopy + + err = bc.batchStorage.StoreSealedBatch(batchIndex, sealedBatch) + if err != nil { + log.Error("failed to store sealed batch", "err", err) + } + err = bc.batchStorage.StoreSealedBatchHeader(batchIndex, &batchHeaderCopy) + if err != nil { + log.Error("failed to store sealed batch header", "err", err) + } + // Update parent batch information for next batch + bc.parentBatchHeader = &batchHeaderCopy + bc.prevStateRoot = bc.postStateRoot + + // Save block count before resetting batch data for logging 
+ blockCount := bc.batchData.BlockNum() + bc.logSealedBatch(batchHeader, batchHash, blockCount) + + // Reset currently accumulated batch data + bc.batchData = NewBatchData() + + return batchIndex, batchHeader, reachedExpectedSize, nil +} + +// handleBatchSealing determines which version to use for compression and calculates data hash +func (bc *BatchCache) handleBatchSealing(blockTimestamp uint64) ([]byte, common.Hash, error) { + var ( + compressedPayload []byte + batchDataHash common.Hash + err error + ) + + // Check if upgraded version should be used + if bc.isBatchUpgraded(blockTimestamp) { + compressedPayload, err = CompressBatchBytes(bc.batchData.TxsPayloadV2()) + if err != nil { + return nil, common.Hash{}, fmt.Errorf("failed to compress upgraded payload: %w", err) + } + + if len(compressedPayload) <= MaxBlobBytesSize { + batchDataHash, err = bc.batchData.DataHashV2() + if err != nil { + return nil, common.Hash{}, fmt.Errorf("failed to calculate upgraded data hash: %w", err) + } + return compressedPayload, batchDataHash, nil + } + } + + // Fall back to the old version + compressedPayload, err = CompressBatchBytes(bc.batchData.TxsPayload()) + if err != nil { + return nil, common.Hash{}, fmt.Errorf("failed to compress payload: %w", err) + } + batchDataHash = bc.batchData.DataHash() + + return compressedPayload, batchDataHash, nil +} + +// createBatchHeader creates BatchHeader +func (bc *BatchCache) createBatchHeader(dataHash common.Hash, sidecar *ethtypes.BlobTxSidecar, sequencerSetVerifyHash common.Hash, blockTimestamp uint64) BatchHeaderBytes { + blobHashes := []common.Hash{EmptyVersionedHash} + if sidecar != nil && len(sidecar.Blobs) > 0 { + blobHashes = sidecar.BlobHashes() + } + + var parentBatchHeaderTotalL1 uint64 + var parentBatchIndex uint64 + var parentBatchHash common.Hash + + if bc.parentBatchHeader != nil { + parentBatchHeaderTotalL1, _ = bc.parentBatchHeader.TotalL1MessagePopped() + parentBatchIndex, _ = bc.parentBatchHeader.BatchIndex() + 
parentBatchHash, _ = bc.parentBatchHeader.Hash() + } + + l1MessagePopped := bc.totalL1MessagePopped - parentBatchHeaderTotalL1 + + batchHeaderV0 := BatchHeaderV0{ + BatchIndex: parentBatchIndex + 1, + L1MessagePopped: l1MessagePopped, + TotalL1MessagePopped: bc.totalL1MessagePopped, + DataHash: dataHash, + BlobVersionedHash: blobHashes[0], + PrevStateRoot: bc.prevStateRoot, + PostStateRoot: bc.postStateRoot, + WithdrawalRoot: bc.withdrawRoot, + SequencerSetVerifyHash: sequencerSetVerifyHash, + ParentBatchHash: parentBatchHash, + } + + if bc.isBatchUpgraded(blockTimestamp) { + batchHeaderV1 := BatchHeaderV1{ + BatchHeaderV0: batchHeaderV0, + LastBlockNumber: bc.lastPackedBlockHeight, + } + return batchHeaderV1.Bytes() + } + + return batchHeaderV0.Bytes() +} + +// parsingTxs parses transactions, distinguishes L1 and L2 transactions +func parsingTxs(transactions []*ethtypes.Transaction, totalL1MessagePoppedBefore uint64) ( + txsPayload []byte, + l1TxHashes []common.Hash, + totalL1MessagePopped uint64, + l2TxNum int, + err error, +) { + nextIndex := totalL1MessagePoppedBefore + + for i, tx := range transactions { + if isL1MessageTxType(tx) { + l1TxHashes = append(l1TxHashes, tx.Hash()) + currentIndex := tx.L1MessageQueueIndex() + + if currentIndex != nextIndex { + return nil, nil, 0, 0, fmt.Errorf( + "unexpected batch payload, expected queue index: %d, got: %d. transaction hash: %v", + nextIndex, currentIndex, tx.Hash(), + ) + } + + nextIndex = currentIndex + 1 + continue + } + + l2TxNum++ + txBytes, err := tx.MarshalBinary() + if err != nil { + return nil, nil, 0, 0, fmt.Errorf("failed to marshal transaction %d: %w", i, err) + } + txsPayload = append(txsPayload, txBytes...) 
+ } + + totalL1MessagePopped = nextIndex + return +} + +// isL1MessageTxType checks if transaction is L1 message transaction type +func isL1MessageTxType(tx *ethtypes.Transaction) bool { + return tx.Type() == ethtypes.L1MessageTxType +} + +// buildBlockContext builds BlockContext from block header (60 bytes) +// Format: Number(8) || Timestamp(8) || BaseFee(32) || GasLimit(8) || numTxs(2) || numL1Messages(2) +func buildBlockContext(header *ethtypes.Header, txsNum, l1MsgNum int) []byte { + blsBytes := make([]byte, 60) + + // Number (8 bytes) + binary.BigEndian.PutUint64(blsBytes[:8], header.Number.Uint64()) + + // Timestamp (8 bytes) + binary.BigEndian.PutUint64(blsBytes[8:16], header.Time) + + // BaseFee (32 bytes) + if header.BaseFee != nil { + copy(blsBytes[16:48], header.BaseFee.FillBytes(make([]byte, 32))) + } else { + copy(blsBytes[16:48], make([]byte, 32)) + } + + // GasLimit (8 bytes) + binary.BigEndian.PutUint64(blsBytes[48:56], header.GasLimit) + + // numTxs (2 bytes) + binary.BigEndian.PutUint16(blsBytes[56:58], uint16(txsNum)) + + // numL1Messages (2 bytes) + binary.BigEndian.PutUint16(blsBytes[58:60], uint16(l1MsgNum)) + + return blsBytes +} + +func (bc *BatchCache) assembleBatchHeaderFromL2Blocks( + startBlockNum, endBlockNum uint64, +) (*BatchHeaderBytes, error) { + ctx := context.Background() + callOpts := &bind.CallOpts{ + Context: ctx, + } + // Fetch blocks from L2 client in the specified range and accumulate to batch + for blockNum := startBlockNum; blockNum <= endBlockNum; blockNum++ { + callOpts.BlockNumber = new(big.Int).SetUint64(blockNum) + root, err := bc.l2Caller.GetTreeRoot(callOpts) + if err != nil { + return nil, fmt.Errorf("failed to get withdraw root at block %d: %w", blockNum, err) + } + + // Check capacity and store to current + _, err = bc.CalculateCapWithProposalBlock(blockNum, root) + if err != nil { + return nil, fmt.Errorf("failed to calculate cap with block %d: %w", blockNum, err) + } + + // Pack current block (confirm and 
append to batch) + if err = bc.PackCurrentBlock(blockNum); err != nil { + return nil, fmt.Errorf("failed to pack block %d: %w", blockNum, err) + } + } + + sequencerSet, _, err := bc.l2Caller.GetSequencerSetBytes(callOpts) + if err != nil { + return nil, fmt.Errorf("failed to get sequencer set verify hash at block %d: %w", callOpts.BlockNumber.Uint64(), err) + } + // Get the last block's timestamp for packing + lastBlock, err := bc.l2Clients.BlockByNumber(ctx, big.NewInt(int64(endBlockNum))) + if err != nil { + return nil, fmt.Errorf("failed to get last block %d: %w", endBlockNum, err) + } + blockTimestamp := lastBlock.Time() + + // Seal batch and generate batchHeader + batchIndex, batchHeader, reachedExpectedSize, err := bc.SealBatch(sequencerSet, blockTimestamp) + if err != nil { + return nil, fmt.Errorf("failed to seal batch: %w", err) + } + + batchHeaderHash, err := batchHeader.Hash() + if err != nil { + return nil, fmt.Errorf("failed to hash batch header: %w", err) + } + log.Info("seal batch success", "batchIndex", batchIndex, "batchHash", batchHeaderHash.String(), "reachedExpectedSize", reachedExpectedSize) + return &batchHeader, nil +} + +func (bc *BatchCache) assembleUnFinalizeBatchHeaderFromL2Blocks() error { + ctx := context.Background() + callOpts := &bind.CallOpts{ + Context: ctx, + } + startBlockNum, endBlockNum, ci, err := bc.getUnFinalizeBlockRange() + if err != nil { + return err + } + + // Get start block once to avoid repeated queries + startBlock, err := bc.l2Clients.BlockByNumber(ctx, big.NewInt(int64(startBlockNum))) + if err != nil { + return fmt.Errorf("failed to get start block %d: %w", startBlockNum, err) + } + startBlockTime := startBlock.Time() + + // Fetch blocks from L2 client in the specified range and accumulate to batch + for blockNum := startBlockNum; blockNum <= endBlockNum; blockNum++ { + callOpts.BlockNumber = new(big.Int).SetUint64(blockNum) + root, err := bc.l2Caller.GetTreeRoot(callOpts) + if err != nil { + return 
fmt.Errorf("failed to get withdraw root at block %d: %w", blockNum, err) + } + + // Check capacity and store to current + exceeded, err := bc.CalculateCapWithProposalBlock(blockNum, root) + if err != nil { + return fmt.Errorf("failed to calculate cap with block %d: %w", blockNum, err) + } + + // Get the current block to check timeout after packing + nowBlock, err := bc.l2Clients.BlockByNumber(ctx, big.NewInt(int64(blockNum))) + if err != nil { + return fmt.Errorf("failed to get block %d: %w", blockNum, err) + } + nowBlockTime := nowBlock.Time() + + // Check timeout: if elapsed time >= batchTimeOut, must seal batch immediately + // This ensures batch is sealed before exceeding the maximum timeout configured in gov contract + timeout := false + if bc.batchTimeOut > 0 { + elapsedTime := nowBlockTime - startBlockTime + if elapsedTime >= bc.batchTimeOut { + timeout = true + log.Info("Batch timeout reached, must seal batch", "startBlock", startBlockNum, "currentBlock", blockNum, + "elapsedTime", elapsedTime, "batchTimeOut", bc.batchTimeOut) + } + } + + // Check if we need to seal batch due to capacity, block interval, or timeout + // check ensures batch is sealed before exceeding the maximum timeout + if exceeded || (bc.blockInterval > 0 && (blockNum-startBlockNum+1) == bc.blockInterval) || timeout { + log.Info("block exceeds limit", "start", startBlockNum, "to", blockNum-1, "exceeded", exceeded, "timeout", timeout) + batchHash, reachedExpectedSize, batchIndex, err := bc.SealBatchAndCheck(callOpts, ci) + if err != nil { + return err + } + batch, ok := bc.GetSealedBatch(batchIndex) + if !ok { + return fmt.Errorf("batch %d not found in cache", batchIndex) + } + startBlockNum = batch.LastBlockNumber + 1 + startBlock, err = bc.l2Clients.BlockByNumber(ctx, big.NewInt(int64(startBlockNum))) + if err != nil { + return fmt.Errorf("failed to get start block %d: %w", startBlockNum, err) + } + startBlockTime = startBlock.Time() + index, err := bc.parentBatchHeader.BatchIndex() + if 
err != nil { + return err + } + log.Info("seal batch success", "batchIndex", index, "batchHash", batchHash.String(), "reachedExpectedSize", reachedExpectedSize) + } + + // Pack current block (confirm and append to batch) + if err = bc.PackCurrentBlock(blockNum); err != nil { + return fmt.Errorf("failed to pack block %d: %w", blockNum, err) + } + } + return nil +} + +func (bc *BatchCache) SealBatchAndCheck(callOpts *bind.CallOpts, ci *big.Int) (common.Hash, bool, uint64, error) { + sequencerSetBytes, _, err := bc.l2Caller.GetSequencerSetBytes(callOpts) + if err != nil { + return common.Hash{}, false, 0, err + } + lastBlock, err := bc.l2Clients.BlockByNumber(context.Background(), big.NewInt(int64(bc.lastPackedBlockHeight))) + if err != nil { + return common.Hash{}, false, 0, fmt.Errorf("failed to get last block %d: %w", bc.lastPackedBlockHeight, err) + } + blockTimestamp := lastBlock.Time() + // Seal batch and generate batchHeader + batchIndex, batchHeaderBytes, reachedExpectedSize, err := bc.SealBatch(sequencerSetBytes, blockTimestamp) + if err != nil { + return common.Hash{}, false, 0, fmt.Errorf("failed to seal batch: %w", err) + } + sealedBatch, found := bc.GetSealedBatch(batchIndex) + if !found { + return common.Hash{}, false, 0, fmt.Errorf("sealed batch not found for index %d", batchIndex) + } + if batchIndex <= ci.Uint64() { + // batch already committed, check batch hash + correct, err := bc.checkBatchHashCorrect(new(big.Int).SetUint64(batchIndex), sealedBatch.Hash) + if err != nil { + return common.Hash{}, false, 0, err + } + if !correct { + log.Error("batch hash does not match sealed batch", "batchIndex", batchIndex, "sealedBatchHash", sealedBatch.Hash.String()) + return common.Hash{}, false, 0, fmt.Errorf("batch hash does not match sealed batch") + } + } + batchHash, err := batchHeaderBytes.Hash() + if err != nil { + return common.Hash{}, false, 0, err + } + return batchHash, reachedExpectedSize, batchIndex, nil +} + +// Get gets sealed batch information by 
batch index.
// Get returns the sealed batch for batchIndex, falling back to persistent
// storage when the batch is not present in the in-memory cache.
// It returns an error (not a boolean) when the batch cannot be loaded.
func (bc *BatchCache) Get(batchIndex uint64) (*eth.RPCRollupBatch, error) {
	bc.mu.RLock()
	defer bc.mu.RUnlock()
	batch, ok := bc.sealedBatches[batchIndex]
	var err error
	if !ok {
		// Cache miss: load from storage. NOTE(review): the loaded batch is
		// not written back into the cache here — only a read lock is held,
		// so inserting would race with other readers/writers.
		batch, err = bc.batchStorage.LoadSealedBatch(batchIndex)
		if err != nil {
			return nil, err
		}
	}
	return batch, nil
}

// Delete removes a sealed batch (and its header) from the in-memory cache by
// batch index, then deletes it from persistent storage.
// It returns an error only when the storage deletion fails; a missing
// in-memory entry is not an error.
func (bc *BatchCache) Delete(batchIndex uint64) error {
	bc.mu.Lock()
	defer bc.mu.Unlock()
	_, exists := bc.sealedBatches[batchIndex]
	if exists {
		delete(bc.sealedBatches, batchIndex)
	}
	_, headerExists := bc.sealedBatchHeaders[batchIndex]
	if headerExists {
		delete(bc.sealedBatchHeaders, batchIndex)
	}
	err := bc.batchStorage.DeleteSealedBatch(batchIndex)
	if err != nil {
		return err
	}
	return nil
}

// logSealedBatch logs the details of the sealed batch for debugging purposes.
+func (bc *BatchCache) logSealedBatch(batchHeader BatchHeaderBytes, batchHash common.Hash, blockCount uint16) { + log.Info("Sealed batch header", "batchHash", batchHash.Hex()) + batchIndex, _ := batchHeader.BatchIndex() + l1MessagePopped, _ := batchHeader.L1MessagePopped() + totalL1MessagePopped, _ := batchHeader.TotalL1MessagePopped() + dataHash, _ := batchHeader.DataHash() + parentBatchHash, _ := batchHeader.ParentBatchHash() + log.Info(fmt.Sprintf("===batchIndex: %d \n===L1MessagePopped: %d \n===TotalL1MessagePopped: %d \n===dataHash: %x \n===blockCount: %d \n===ParentBatchHash: %x \n", + batchIndex, + l1MessagePopped, + totalL1MessagePopped, + dataHash, + blockCount, + parentBatchHash)) +} + +func (bc *BatchCache) AssembleCurrentBatchHeader() error { + if !bc.initDone { + return errors.New("batch has not been initialized, should wait") + } + callOpts := &bind.CallOpts{ + Context: bc.ctx, + } + endBlockNum, err := bc.l2Clients.BlockNumber(bc.ctx) + if err != nil { + return err + } + if endBlockNum < bc.currentBlockNumber { + return fmt.Errorf("has reorg, should check block status current %v, now %v", bc.currentBlockNumber, endBlockNum) + } + startBlockNum := uint64(0) + if bc.parentBatchHeader == nil { + return fmt.Errorf("parent batch header is nil, cannot assemble batch") + } + version, _ := bc.parentBatchHeader.Version() + if version < 1 { + parentIndex, err := bc.parentBatchHeader.BatchIndex() + if err != nil { + log.Error("failed to get block index", "err", err) + return err + } + store, err := bc.rollupContract.BatchDataStore(nil, new(big.Int).SetUint64(parentIndex)) + if err != nil { + log.Error("failed to get batch store", "err", err) + return err + } + startBlockNum = store.BlockNumber.Uint64() + } else { + startBlockNum, err = bc.parentBatchHeader.LastBlockNumber() + if err != nil { + log.Error("failed to get block number", "err", err) + return err + } + } + currentBlockNum := bc.currentBlockNumber + if currentBlockNum < startBlockNum { + 
log.Error("invalid block number", "currentBlockNum", currentBlockNum, "startBlockNum", startBlockNum) + return fmt.Errorf("invalid block number") + } + startBlockNum++ + // Get start block once to avoid repeated queries + startBlock, err := bc.l2Clients.BlockByNumber(bc.ctx, big.NewInt(int64(startBlockNum))) + if err != nil { + return fmt.Errorf("failed to get start block %d: %w", startBlockNum, err) + } + startBlockTime := startBlock.Time() + + // Fetch blocks from L2 client in the specified range and accumulate to batch + for blockNum := currentBlockNum + 1; blockNum <= endBlockNum; blockNum++ { + callOpts.BlockNumber = new(big.Int).SetUint64(blockNum) + root, err := bc.l2Caller.GetTreeRoot(callOpts) + if err != nil { + return fmt.Errorf("failed to get withdraw root at block %d: %w", blockNum, err) + } + + // Check capacity and store to current + exceeded, err := bc.CalculateCapWithProposalBlock(blockNum, root) + if err != nil { + return fmt.Errorf("failed to calculate cap with block %d: %w", blockNum, err) + } + + // Get the current block to check timeout after packing + nowBlock, err := bc.l2Clients.BlockByNumber(bc.ctx, big.NewInt(int64(blockNum))) + if err != nil { + return fmt.Errorf("failed to get block %d: %w", blockNum, err) + } + nowBlockTime := nowBlock.Time() + + // Check timeout: if elapsed time >= batchTimeOut, must seal batch immediately + // This ensures batch is sealed before exceeding the maximum timeout configured in gov contract + timeout := false + if bc.batchTimeOut > 0 { + elapsedTime := nowBlockTime - startBlockTime + if elapsedTime >= bc.batchTimeOut { + timeout = true + log.Info("Batch timeout reached, must seal batch", "startBlock", startBlockNum, "currentBlock", blockNum, + "elapsedTime", elapsedTime, "batchTimeOut", bc.batchTimeOut) + } + } + + // Check if we need to seal batch due to capacity, block interval, or timeout + // check ensures batch is sealed before exceeding the maximum timeout + if exceeded || (bc.blockInterval > 0 && 
(blockNum-startBlockNum+1) == bc.blockInterval) || timeout { + log.Info("block exceeds limit", "start", startBlockNum, "to", blockNum, "exceeded", exceeded, "timeout", timeout) + sequencerSetBytes, _, err := bc.l2Caller.GetSequencerSetBytes(callOpts) + if err != nil { + return fmt.Errorf("failed to get sequencer set verify hash at block %d: %w", callOpts.BlockNumber.Uint64(), err) + } + lastBlock, err := bc.l2Clients.BlockByNumber(context.Background(), big.NewInt(int64(bc.lastPackedBlockHeight))) + if err != nil { + return fmt.Errorf("failed to get last block %d: %w", bc.lastPackedBlockHeight, err) + } + blockTimestamp := lastBlock.Time() + batchIndex, _, _, err := bc.SealBatch(sequencerSetBytes, blockTimestamp) + if err != nil { + return fmt.Errorf("failed to seal batch: %w", err) + } + batch, ok := bc.GetSealedBatch(batchIndex) + if !ok { + return fmt.Errorf("batch %d not found in cache", batchIndex) + } + startBlockNum = batch.LastBlockNumber + 1 + startBlock, err = bc.l2Clients.BlockByNumber(bc.ctx, big.NewInt(int64(startBlockNum))) + if err != nil { + return fmt.Errorf("failed to get start block %d: %w", startBlockNum, err) + } + startBlockTime = startBlock.Time() + } + + // Pack current block (confirm and append to batch) + if err = bc.PackCurrentBlock(blockNum); err != nil { + return fmt.Errorf("failed to pack block %d: %w", blockNum, err) + } + } + return nil +} + +func (bc *BatchCache) DeleteBatchStorageAndInitFromRollup() error { + // should delete invalid batch data and batch header bytes + err := bc.batchStorage.DeleteAllSealedBatches() + if err != nil { + return err + } + // batch not contiguous or batch is invalid + return bc.InitAndSyncFromRollup() +} diff --git a/tx-submitter/batch/batch_cache_test.go b/tx-submitter/batch/batch_cache_test.go new file mode 100644 index 000000000..a15977295 --- /dev/null +++ b/tx-submitter/batch/batch_cache_test.go @@ -0,0 +1,94 @@ +package batch + +import ( + "os" + "os/signal" + "path/filepath" + "sync" + "testing" 
+ "time" + + "morph-l2/bindings/bindings" + "morph-l2/tx-submitter/db" + "morph-l2/tx-submitter/iface" + "morph-l2/tx-submitter/types" + "morph-l2/tx-submitter/utils" + + "github.com/morph-l2/go-ethereum/log" + "github.com/stretchr/testify/require" +) + +func init() { + var err error + rollupContract, err = bindings.NewRollup(rollupAddr, l1Client) + if err != nil { + panic(err) + } + l2Caller, err = types.NewL2Caller([]iface.L2Client{l2Client}) + if err != nil { + panic(err) + } +} + +// setupTestDB creates a temporary database for testing +func setupTestDB(t *testing.T) *db.Db { + testDir := filepath.Join(t.TempDir(), "testleveldb") + os.RemoveAll(testDir) + t.Cleanup(func() { + os.RemoveAll(testDir) + }) + + testDB, err := db.New(testDir) + require.NoError(t, err) + return testDB +} + +func TestBatchCacheInitServer(t *testing.T) { + testDB := setupTestDB(t) + cache := NewBatchCache(nil, l1Client, []iface.L2Client{l2Client}, rollupContract, l2Caller, testDB) + + var batchCacheSyncMu sync.Mutex + + go func() { + batchCacheSyncMu.Lock() + defer batchCacheSyncMu.Unlock() + for { + if err := cache.InitAndSyncFromDatabase(); err != nil { + log.Error("init and sync from database failed, wait for the next try", "error", err) + time.Sleep(5 * time.Second) + continue + } + break + } + }() + + go utils.Loop(cache.ctx, 5*time.Second, func() { + batchCacheSyncMu.Lock() + defer batchCacheSyncMu.Unlock() + err := cache.AssembleCurrentBatchHeader() + if err != nil { + log.Error("Assemble current batch failed, wait for the next try", "error", err) + } + }) + + // Catch CTRL-C to ensure a graceful shutdown. + interrupt := make(chan os.Signal, 1) + signal.Notify(interrupt, os.Interrupt) + + // Wait until the interrupt signal is received from an OS signal. 
+ <-interrupt +} + +func TestBatchCacheInit(t *testing.T) { + testDB := setupTestDB(t) + cache := NewBatchCache(nil, l1Client, []iface.L2Client{l2Client}, rollupContract, l2Caller, testDB) + err := cache.InitAndSyncFromRollup() + require.NoError(t, err) +} + +func TestBatchCacheInitByBlockRange(t *testing.T) { + testDB := setupTestDB(t) + cache := NewBatchCache(nil, l1Client, []iface.L2Client{l2Client}, rollupContract, l2Caller, testDB) + err := cache.InitFromRollupByRange() + require.NoError(t, err) +} diff --git a/tx-submitter/batch/batch_data.go b/tx-submitter/batch/batch_data.go new file mode 100644 index 000000000..b28e2c65f --- /dev/null +++ b/tx-submitter/batch/batch_data.go @@ -0,0 +1,164 @@ +package batch + +import ( + "encoding/binary" + "fmt" + + "morph-l2/node/zstd" + "morph-l2/tx-submitter/types" + + "github.com/morph-l2/go-ethereum/common" + "github.com/morph-l2/go-ethereum/crypto" +) + +var ( + EmptyVersionedHash = common.HexToHash("0x010657f37554c781402a22917dee2f75def7ab966d7b770905398eba3c444014") +) + +type BatchData struct { + blockContexts []byte + l1TxHashes []byte + l1TxNum uint16 + blockNum uint16 + txsPayload []byte + + hash *common.Hash +} + +func NewBatchData() *BatchData { + return &BatchData{ + blockContexts: make([]byte, 0), + l1TxHashes: make([]byte, 0), + txsPayload: make([]byte, 0), + } +} + +func (cks *BatchData) Append(blockContext, txsPayload []byte, l1TxHashes []common.Hash) { + if cks == nil { + return + } + cks.blockContexts = append(cks.blockContexts, blockContext...) + cks.txsPayload = append(cks.txsPayload, txsPayload...) + cks.blockNum++ + for _, txHash := range l1TxHashes { + cks.l1TxHashes = append(cks.l1TxHashes, txHash.Bytes()...) + } + cks.l1TxNum += uint16(len(l1TxHashes)) +} + +// Encode encodes the data into bytes +// Below is the encoding, total 60*n+1+m bytes. 
// Field           Bytes       Type            Index       Comments
// numBlocks       2           uint16          0           The number of blocks in this chunk
// block[0]        60          BlockContext    1           The first block in this chunk
// ......
// block[i]        60          BlockContext    60*i+1      The (i+1)'th block in this chunk
// ......
// block[n-1]      60          BlockContext    60*n-59     The last block in this chunk
func (cks *BatchData) Encode() ([]byte, error) {
	// A nil or empty batch encodes to an empty (non-nil) byte slice.
	if cks == nil || cks.blockNum == 0 {
		return []byte{}, nil
	}

	// 2-byte big-endian block count followed by the raw 60-byte block contexts.
	data := make([]byte, 2)
	binary.BigEndian.PutUint16(data, cks.blockNum)
	data = append(data, cks.blockContexts...)
	return data, nil
}

// IsEmpty reports whether no block contexts have been accumulated yet.
// A nil receiver is treated as empty.
func (cks *BatchData) IsEmpty() bool {
	return cks == nil || len(cks.blockContexts) == 0
}

// DataHash returns the Keccak-256 hash over the first 58 bytes of each block
// context followed by the concatenated L1 tx hashes.
func (cks *BatchData) DataHash() common.Hash {
	// NOTE(review): cks.hash is checked here but never assigned anywhere in
	// the visible code, so this cache is effectively inert — every call
	// recomputes the hash. Confirm whether caching was intended.
	if cks.hash != nil {
		return *cks.hash
	}

	var bz []byte
	for i := 0; i < int(cks.blockNum); i++ {
		// Only bytes [0,58) of each 60-byte context are hashed — presumably
		// excluding the trailing 2-byte field; verify against the contract's
		// hashing scheme.
		bz = append(bz, cks.blockContexts[i*60:i*60+58]...)
	}
	bz = append(bz, cks.l1TxHashes...)
	return crypto.Keccak256Hash(bz)
}

// DataHashV2 computes the Keccak-256 hash of the batch data, incorporating
// the last block height, L1 transaction count, and L1 transaction hashes.
+func (cks *BatchData) DataHashV2() (common.Hash, error) { + // Validate blockContexts length + if len(cks.blockContexts) < 60 { + return common.Hash{}, fmt.Errorf("blockContexts too short, length: %d", len(cks.blockContexts)) + } + + // Extract the last 60 bytes + lastBlockContext := cks.blockContexts[len(cks.blockContexts)-60:] + + // Parse block height + height, err := types.HeightFromBlockContextBytes(lastBlockContext) + if err != nil { + return common.Hash{}, fmt.Errorf("failed to parse blockContext: context length=%d, lastBlockContext=%x, err=%w", + len(cks.blockContexts), lastBlockContext, err) + } + + // Compute the hash + return cks.calculateHash(height), nil +} + +func (cks *BatchData) calculateHash(height uint64) common.Hash { + // Preallocate memory for efficiency + hashData := make([]byte, 8+2+len(cks.l1TxHashes)) // 8 bytes for height, 2 bytes for l1TxNum + copy(hashData[:8], types.Uint64ToBigEndianBytes(height)) + copy(hashData[8:10], types.Uint16ToBigEndianBytes(cks.l1TxNum)) + copy(hashData[10:], cks.l1TxHashes) + + return crypto.Keccak256Hash(hashData) +} + +func (cks *BatchData) TxsPayload() []byte { + return cks.txsPayload +} + +// TxsPayloadV2 returns the bytes combining the block contexts with the tx payload +func (cks *BatchData) TxsPayloadV2() []byte { + return append(cks.blockContexts, cks.txsPayload...) +} + +func (cks *BatchData) BlockNum() uint16 { return cks.blockNum } + +func (cks *BatchData) EstimateCompressedSizeWithNewPayload(txPayload []byte) (bool, error) { + blobBytes := append(cks.txsPayload, txPayload...) 
+ if len(blobBytes) <= MaxBlobBytesSize { + return false, nil + } + compressed, err := zstd.CompressBatchBytes(blobBytes) + if err != nil { + return false, err + } + return len(compressed) > MaxBlobBytesSize, nil +} + +func (cks *BatchData) combinePayloads(newBlockContext, newTxPayload []byte) []byte { + totalLength := len(cks.blockContexts) + len(newBlockContext) + len(cks.txsPayload) + len(newTxPayload) + combined := make([]byte, totalLength) + copy(combined, cks.blockContexts) + copy(combined[len(cks.blockContexts):], newBlockContext) + copy(combined[len(cks.blockContexts)+len(newBlockContext):], cks.txsPayload) + copy(combined[len(cks.blockContexts)+len(newBlockContext)+len(cks.txsPayload):], newTxPayload) + return combined +} + +// WillExceedCompressedSizeLimit checks if the size of the combined block contexts +// and transaction payloads (after compression) exceeds the maximum allowed size. +func (cks *BatchData) WillExceedCompressedSizeLimit(newBlockContext, newTxPayload []byte) (bool, error) { + // Combine the existing and new block contexts and transaction payloads + combinedBytes := cks.combinePayloads(newBlockContext, newTxPayload) + if len(combinedBytes) <= MaxBlobBytesSize { + return false, nil + } + compressed, err := zstd.CompressBatchBytes(combinedBytes) + if err != nil { + return false, fmt.Errorf("compression failed: %w", err) + } + return len(compressed) > MaxBlobBytesSize, nil +} diff --git a/tx-submitter/batch/batch_header.go b/tx-submitter/batch/batch_header.go new file mode 100644 index 000000000..81d38c691 --- /dev/null +++ b/tx-submitter/batch/batch_header.go @@ -0,0 +1,215 @@ +package batch + +import ( + "encoding/binary" + "errors" + + "github.com/morph-l2/go-ethereum/common" + "github.com/morph-l2/go-ethereum/common/hexutil" + "github.com/morph-l2/go-ethereum/crypto" +) + +type ( + BatchHeaderBytes []byte +) + +const ( + expectedLengthV0 = 249 + expectedLengthV1 = 257 + + BatchHeaderVersion0 = 0 + BatchHeaderVersion1 = 1 +) + +var ( + 
ErrInvalidBatchHeaderLength = errors.New("invalid BatchHeaderBytes length") + ErrInvalidBatchHeaderVersion = errors.New("invalid BatchHeaderBytes version") + ErrEmptyBatchHeaderBytes = errors.New("empty BatchHeaderBytes") + ErrNotFoundInBatchHeader = errors.New("not found in BatchHeaderBytes") +) + +func (b BatchHeaderBytes) validate() error { + version, err := b.Version() + if err != nil { + return err + } + switch version { + case BatchHeaderVersion0: + if len(b) != expectedLengthV0 { + return ErrInvalidBatchHeaderLength + } + case BatchHeaderVersion1: + if len(b) != expectedLengthV1 { + return ErrInvalidBatchHeaderLength + } + default: + return ErrInvalidBatchHeaderVersion + } + return nil +} + +func (b BatchHeaderBytes) Bytes() []byte { + return b[:] +} + +func (b BatchHeaderBytes) Hash() (common.Hash, error) { + if err := b.validate(); err != nil { + return common.Hash{}, err + } + return crypto.Keccak256Hash(b), nil +} + +func (b BatchHeaderBytes) Version() (uint8, error) { + if len(b) == 0 { + return 0, ErrEmptyBatchHeaderBytes + } + return b[0], nil +} + +func (b BatchHeaderBytes) BatchIndex() (uint64, error) { + if err := b.validate(); err != nil { + return 0, err + } + return binary.BigEndian.Uint64(b[1:9]), nil +} + +func (b BatchHeaderBytes) L1MessagePopped() (uint64, error) { + if err := b.validate(); err != nil { + return 0, err + } + return binary.BigEndian.Uint64(b[9:17]), nil +} + +func (b BatchHeaderBytes) TotalL1MessagePopped() (uint64, error) { + if err := b.validate(); err != nil { + return 0, err + } + return binary.BigEndian.Uint64(b[17:25]), nil +} + +func (b BatchHeaderBytes) DataHash() (common.Hash, error) { + if err := b.validate(); err != nil { + return common.Hash{}, err + } + return common.BytesToHash(b[25:57]), nil +} + +func (b BatchHeaderBytes) BlobVersionedHash() (common.Hash, error) { + if err := b.validate(); err != nil { + return common.Hash{}, err + } + return common.BytesToHash(b[57:89]), nil +} + +func (b BatchHeaderBytes) 
PrevStateRoot() (common.Hash, error) { + if err := b.validate(); err != nil { + return common.Hash{}, err + } + return common.BytesToHash(b[89:121]), nil +} + +func (b BatchHeaderBytes) PostStateRoot() (common.Hash, error) { + if err := b.validate(); err != nil { + return common.Hash{}, err + } + return common.BytesToHash(b[121:153]), nil +} + +func (b BatchHeaderBytes) WithdrawalRoot() (common.Hash, error) { + if err := b.validate(); err != nil { + return common.Hash{}, err + } + return common.BytesToHash(b[153:185]), nil +} + +func (b BatchHeaderBytes) SequencerSetVerifyHash() (common.Hash, error) { + if err := b.validate(); err != nil { + return common.Hash{}, err + } + return common.BytesToHash(b[185:217]), nil +} + +func (b BatchHeaderBytes) ParentBatchHash() (common.Hash, error) { + if err := b.validate(); err != nil { + return common.Hash{}, err + } + return common.BytesToHash(b[217:249]), nil +} + +func (b BatchHeaderBytes) LastBlockNumber() (uint64, error) { + if err := b.validate(); err != nil { + return 0, err + } + version, _ := b.Version() + if version < 1 { + return 0, errors.New("LastBlockNumber is not available in version 0") + } + return binary.BigEndian.Uint64(b[249:257]), nil +} + +// structed batch header for version 0 +type BatchHeaderV0 struct { + BatchIndex uint64 + L1MessagePopped uint64 + TotalL1MessagePopped uint64 + DataHash common.Hash + BlobVersionedHash common.Hash + PrevStateRoot common.Hash + PostStateRoot common.Hash + WithdrawalRoot common.Hash + SequencerSetVerifyHash common.Hash + ParentBatchHash common.Hash + + //cache + EncodedBytes hexutil.Bytes +} + +func (b BatchHeaderV0) Bytes() BatchHeaderBytes { + if len(b.EncodedBytes) > 0 { + return BatchHeaderBytes(b.EncodedBytes) + } + batchBytes := make([]byte, expectedLengthV0) + batchBytes[0] = BatchHeaderVersion0 + binary.BigEndian.PutUint64(batchBytes[1:], b.BatchIndex) + binary.BigEndian.PutUint64(batchBytes[9:], b.L1MessagePopped) + binary.BigEndian.PutUint64(batchBytes[17:], 
b.TotalL1MessagePopped) + copy(batchBytes[25:], b.DataHash[:]) + copy(batchBytes[57:], b.BlobVersionedHash[:]) + copy(batchBytes[89:], b.PrevStateRoot[:]) + copy(batchBytes[121:], b.PostStateRoot[:]) + copy(batchBytes[153:], b.WithdrawalRoot[:]) + copy(batchBytes[185:], b.SequencerSetVerifyHash[:]) + copy(batchBytes[217:], b.ParentBatchHash[:]) + b.EncodedBytes = batchBytes + return batchBytes +} + +type BatchHeaderV1 struct { + BatchHeaderV0 + LastBlockNumber uint64 + + //cache + EncodedBytes hexutil.Bytes +} + +func (b BatchHeaderV1) Bytes() BatchHeaderBytes { + if len(b.EncodedBytes) > 0 { + return BatchHeaderBytes(b.EncodedBytes) + } + batchBytes := make([]byte, expectedLengthV1) + batchBytes[0] = BatchHeaderVersion1 + binary.BigEndian.PutUint64(batchBytes[1:], b.BatchIndex) + binary.BigEndian.PutUint64(batchBytes[9:], b.L1MessagePopped) + binary.BigEndian.PutUint64(batchBytes[17:], b.TotalL1MessagePopped) + copy(batchBytes[25:], b.DataHash[:]) + copy(batchBytes[57:], b.BlobVersionedHash[:]) + copy(batchBytes[89:], b.PrevStateRoot[:]) + copy(batchBytes[121:], b.PostStateRoot[:]) + copy(batchBytes[153:], b.WithdrawalRoot[:]) + copy(batchBytes[185:], b.SequencerSetVerifyHash[:]) + copy(batchBytes[217:], b.ParentBatchHash[:]) + binary.BigEndian.PutUint64(batchBytes[249:], b.LastBlockNumber) + + b.EncodedBytes = batchBytes + return batchBytes +} diff --git a/tx-submitter/batch/batch_query.go b/tx-submitter/batch/batch_query.go new file mode 100644 index 000000000..21d463294 --- /dev/null +++ b/tx-submitter/batch/batch_query.go @@ -0,0 +1,350 @@ +package batch + +import ( + "bytes" + "context" + "errors" + "fmt" + "math/big" + + "morph-l2/bindings/bindings" + + "github.com/morph-l2/go-ethereum/accounts/abi" + "github.com/morph-l2/go-ethereum/accounts/abi/bind" +) + +// getLastFinalizeBatchHeaderFromRollupByIndex gets the batch header with the specified index from the rollup contract's FinalizeBatch event +// The finalizeBatch function only receives one parameter: 
batchHeader bytes, so it can be parsed directly from the transaction +// Query is limited to 10000 block heights, starting from the latest height and querying backwards until data is found +func (bc *BatchCache) getLastFinalizeBatchHeaderFromRollupByIndex(index uint64) (*BatchHeaderBytes, error) { + // Get the current latest block height + latestBlock, err := bc.l1Client.BlockNumber(context.Background()) + if err != nil { + return nil, fmt.Errorf("failed to get latest block number: %w", err) + } + + const blockRange = uint64(10000) // Query 10000 blocks each time + var endBlock uint64 = latestBlock + var startBlock uint64 + + // Start from the latest height, query backwards 10000 blocks each time until data is found + for endBlock > 0 { + // Calculate the start block for this query + if endBlock >= blockRange { + startBlock = endBlock - blockRange + 1 + } else { + startBlock = 0 + } + + // Set query options + filterOpts := &bind.FilterOpts{ + Start: startBlock, + End: &endBlock, + } + + // Query the FinalizeBatch event with the corresponding index from the rollup contract + finalizeEventIter, err := bc.rollupContract.FilterFinalizeBatch(filterOpts, []*big.Int{new(big.Int).SetUint64(index)}, nil) + if err != nil { + // If query fails, continue querying backwards + if startBlock == 0 { + break // Already queried to block 0, exit loop + } + endBlock = startBlock - 1 + continue + } + defer func() { _ = finalizeEventIter.Close() }() + // Iterate through query results + for finalizeEventIter.Next() { + event := finalizeEventIter.Event + // Get transaction hash from event + txHash := event.Raw.TxHash + + // Get transaction details + tx, _, err := bc.l1Client.TransactionByHash(context.Background(), txHash) + if err != nil { + continue // If getting transaction fails, try next event + } + + // Parse finalizeBatch transaction data to get batchHeader + batchHeader, err := parseFinalizeBatchTxData(tx.Data()) + if err != nil { + continue // If parsing fails, try next event + } 
+ + // Verify if batch index matches + batchIndex, err := batchHeader.BatchIndex() + if err != nil { + continue + } + if batchIndex == index { + return &batchHeader, nil + } + } + + // Continue querying backwards + if endBlock < blockRange { + break // Already queried to block 0, exit loop + } + endBlock = startBlock - 1 + } + + return nil, fmt.Errorf("failed to find last finalized batch header for batchIndex %d", index) +} + +// parseFinalizeBatchTxData parses the finalizeBatch or importGenesisBatch transaction's input data to get BatchHeaderBytes +// Both finalizeBatch(bytes calldata _batchHeader) and importGenesisBatch(bytes calldata _batchHeader) receive one parameter: batchHeader bytes +// Both methods emit FinalizeBatch event, so we need to support parsing both +func parseFinalizeBatchTxData(txData []byte) (BatchHeaderBytes, error) { + // Get rollup ABI + rollupAbi, err := bindings.RollupMetaData.GetAbi() + if err != nil { + return nil, err + } + + // Check if the first 4 bytes of transaction data match the method ID + if len(txData) < 4 { + return nil, errors.New("transaction data too short") + } + + methodID := txData[:4] + + // Try to get finalizeBatch method + finalizeBatchMethod, ok := rollupAbi.Methods["finalizeBatch"] + if !ok { + return nil, errors.New("finalizeBatch method not found in ABI") + } + + var method abi.Method + var methodName string + + // Check if method ID matches finalizeBatch + if bytes.Equal(methodID, finalizeBatchMethod.ID) { + method = finalizeBatchMethod + methodName = "finalizeBatch" + } else { + // Try importGenesisBatch method + importGenesisBatchMethod, ok := rollupAbi.Methods["importGenesisBatch"] + if !ok { + return nil, errors.New("importGenesisBatch method not found in ABI") + } + if bytes.Equal(methodID, importGenesisBatchMethod.ID) { + method = importGenesisBatchMethod + methodName = "importGenesisBatch" + } else { + return nil, fmt.Errorf("transaction is not a finalizeBatch or importGenesisBatch call, methodID: %x", 
methodID) + } + } + + // Parse parameters (only one parameter: batchHeader bytes) + args, err := method.Inputs.Unpack(txData[4:]) + if err != nil { + return nil, fmt.Errorf("failed to unpack %s transaction parameters: %w", methodName, err) + } + + if len(args) == 0 { + return nil, fmt.Errorf("no arguments found in %s transaction", methodName) + } + + // The first parameter is batchHeader bytes + batchHeaderBytes, ok := args[0].([]byte) + if !ok { + return nil, fmt.Errorf("failed to cast batchHeader to []byte in %s transaction", methodName) + } + + return BatchHeaderBytes(batchHeaderBytes), nil +} + +// batchDataInputStruct represents the parsed batch data input structure from ABI +type batchDataInputStruct struct { + Version uint8 `json:"version"` + ParentBatchHeader []uint8 `json:"parentBatchHeader"` + LastBlockNumber uint64 `json:"lastBlockNumber"` + NumL1Messages uint16 `json:"numL1Messages"` + PrevStateRoot [32]uint8 `json:"prevStateRoot"` + PostStateRoot [32]uint8 `json:"postStateRoot"` + WithdrawalRoot [32]uint8 `json:"withdrawalRoot"` +} + +// batchSignatureInputStruct represents the parsed batch signature input structure from ABI +type batchSignatureInputStruct struct { + SignedSequencersBitmap *big.Int `json:"signedSequencersBitmap"` + SequencerSets []uint8 `json:"sequencerSets"` + Signature []uint8 `json:"signature"` +} + +// convertBatchDataInput converts the parsed struct to bindings.IRollupBatchDataInput +func convertBatchDataInput(s batchDataInputStruct) *bindings.IRollupBatchDataInput { + // Convert []uint8 to []byte + parentBatchHeader := make([]byte, len(s.ParentBatchHeader)) + copy(parentBatchHeader, s.ParentBatchHeader) + + return &bindings.IRollupBatchDataInput{ + Version: s.Version, + ParentBatchHeader: parentBatchHeader, + LastBlockNumber: s.LastBlockNumber, + NumL1Messages: s.NumL1Messages, + PrevStateRoot: s.PrevStateRoot, + PostStateRoot: s.PostStateRoot, + WithdrawalRoot: s.WithdrawalRoot, + } +} + +// convertBatchSignatureInput converts 
the parsed struct to bindings.IRollupBatchSignatureInput +func convertBatchSignatureInput(s batchSignatureInputStruct) *bindings.IRollupBatchSignatureInput { + // Convert []uint8 to []byte + sequencerSets := make([]byte, len(s.SequencerSets)) + copy(sequencerSets, s.SequencerSets) + signature := make([]byte, len(s.Signature)) + copy(signature, s.Signature) + + return &bindings.IRollupBatchSignatureInput{ + SignedSequencersBitmap: s.SignedSequencersBitmap, + SequencerSets: sequencerSets, + Signature: signature, + } +} + +// parseBatchDataInputFromArgs safely parses BatchDataInput from ABI unpacked arguments +func parseBatchDataInputFromArgs(args []interface{}) (batchDataInputStruct, error) { + if len(args) < 1 { + return batchDataInputStruct{}, errors.New("insufficient arguments for batch data input") + } + + // Use comma-ok assertion for safe type checking + rawStruct, ok := args[0].(struct { + Version uint8 `json:"version"` + ParentBatchHeader []uint8 `json:"parentBatchHeader"` + LastBlockNumber uint64 `json:"lastBlockNumber"` + NumL1Messages uint16 `json:"numL1Messages"` + PrevStateRoot [32]uint8 `json:"prevStateRoot"` + PostStateRoot [32]uint8 `json:"postStateRoot"` + WithdrawalRoot [32]uint8 `json:"withdrawalRoot"` + }) + if !ok { + return batchDataInputStruct{}, errors.New("failed to cast batch data input to expected struct type") + } + + return batchDataInputStruct{ + Version: rawStruct.Version, + ParentBatchHeader: rawStruct.ParentBatchHeader, + LastBlockNumber: rawStruct.LastBlockNumber, + NumL1Messages: rawStruct.NumL1Messages, + PrevStateRoot: rawStruct.PrevStateRoot, + PostStateRoot: rawStruct.PostStateRoot, + WithdrawalRoot: rawStruct.WithdrawalRoot, + }, nil +} + +// parseBatchSignatureInputFromArgs safely parses BatchSignatureInput from ABI unpacked arguments +func parseBatchSignatureInputFromArgs(args []interface{}) (batchSignatureInputStruct, error) { + if len(args) < 2 { + return batchSignatureInputStruct{}, errors.New("insufficient arguments for 
batch signature input") + } + + // Use comma-ok assertion for safe type checking + rawStruct, ok := args[1].(struct { + SignedSequencersBitmap *big.Int `json:"signedSequencersBitmap"` + SequencerSets []uint8 `json:"sequencerSets"` + Signature []uint8 `json:"signature"` + }) + if !ok { + return batchSignatureInputStruct{}, errors.New("failed to cast batch signature input to expected struct type") + } + + return batchSignatureInputStruct{ + SignedSequencersBitmap: rawStruct.SignedSequencersBitmap, + SequencerSets: rawStruct.SequencerSets, + Signature: rawStruct.Signature, + }, nil +} + +// parseCommitBatchTxData parses the commitBatch transaction's input data to get BatchDataInput and BatchSignatureInput +func parseCommitBatchTxData(txData []byte) (*bindings.IRollupBatchDataInput, *bindings.IRollupBatchSignatureInput, error) { + // Get rollup ABI + rollupAbi, err := bindings.RollupMetaData.GetAbi() + if err != nil { + return nil, nil, err + } + + // Check if method ID is commitBatch + commitBatchMethod, ok := rollupAbi.Methods["commitBatch"] + if !ok { + return nil, nil, errors.New("commitBatch method not found in ABI") + } + + // Check if the first 4 bytes of transaction data match the method ID + if len(txData) < 4 { + return nil, nil, errors.New("transaction data too short") + } + + methodID := txData[:4] + if !bytes.Equal(methodID, commitBatchMethod.ID) { + // Try commitBatchWithProof + commitBatchWithProofMethod, ok := rollupAbi.Methods["commitBatchWithProof"] + if !ok { + return nil, nil, errors.New("commitBatchWithProof method not found in ABI") + } + if bytes.Equal(methodID, commitBatchWithProofMethod.ID) { + // Use commitBatchWithProof method to parse + return parseCommitBatchWithProofTxData(txData, rollupAbi) + } + return nil, nil, errors.New("transaction is not a commit batch or commitBatchWithProof") + } + + // Parse parameters + args, err := commitBatchMethod.Inputs.Unpack(txData[4:]) + if err != nil { + return nil, nil, err + } + + // Parse 
BatchDataInput using shared helper + batchDataInputRaw, err := parseBatchDataInputFromArgs(args) + if err != nil { + return nil, nil, fmt.Errorf("failed to parse batch data input: %w", err) + } + batchDataInput := convertBatchDataInput(batchDataInputRaw) + + // Parse BatchSignatureInput using shared helper + batchSignatureInputRaw, err := parseBatchSignatureInputFromArgs(args) + if err != nil { + return nil, nil, fmt.Errorf("failed to parse batch signature input: %w", err) + } + batchSignatureInput := convertBatchSignatureInput(batchSignatureInputRaw) + + return batchDataInput, batchSignatureInput, nil +} + +// parseCommitBatchWithProofTxData parses the commitBatchWithProof transaction's input data +// commitBatchWithProof has 4 parameters: batchDataInput, batchSignatureInput, _batchHeader, _batchProof +func parseCommitBatchWithProofTxData(txData []byte, rollupAbi *abi.ABI) (*bindings.IRollupBatchDataInput, *bindings.IRollupBatchSignatureInput, error) { + commitBatchWithProofMethod, ok := rollupAbi.Methods["commitBatchWithProof"] + if !ok { + return nil, nil, errors.New("commitBatchWithProof method not found in ABI") + } + + // Parse parameters + args, err := commitBatchWithProofMethod.Inputs.Unpack(txData[4:]) + if err != nil { + return nil, nil, err + } + + // Parse BatchDataInput using shared helper + batchDataInputRaw, err := parseBatchDataInputFromArgs(args) + if err != nil { + return nil, nil, fmt.Errorf("failed to parse batch data input: %w", err) + } + batchDataInput := convertBatchDataInput(batchDataInputRaw) + + // Parse BatchSignatureInput using shared helper + batchSignatureInputRaw, err := parseBatchSignatureInputFromArgs(args) + if err != nil { + return nil, nil, fmt.Errorf("failed to parse batch signature input: %w", err) + } + batchSignatureInput := convertBatchSignatureInput(batchSignatureInputRaw) + + // The third parameter is _batchHeader (bytes) + // The fourth parameter is _batchProof (bytes) + // These parameters don't need to be returned, but 
can be used for verification + + return batchDataInput, batchSignatureInput, nil +} diff --git a/tx-submitter/batch/batch_restart_test.go b/tx-submitter/batch/batch_restart_test.go new file mode 100644 index 000000000..7b6c6cb8b --- /dev/null +++ b/tx-submitter/batch/batch_restart_test.go @@ -0,0 +1,510 @@ +package batch + +import ( + "bytes" + "context" + "encoding/hex" + "errors" + "fmt" + "math/big" + "os" + "path/filepath" + "testing" + + "morph-l2/bindings/bindings" + "morph-l2/tx-submitter/db" + "morph-l2/tx-submitter/iface" + "morph-l2/tx-submitter/types" + + "github.com/morph-l2/go-ethereum/accounts/abi/bind" + "github.com/morph-l2/go-ethereum/common" + "github.com/morph-l2/go-ethereum/crypto" + "github.com/morph-l2/go-ethereum/ethclient" + "github.com/stretchr/testify/require" +) + +var ( + ErrBatchNotFound = errors.New("batch not found") +) + +var ( + rollupAddr = common.HexToAddress("0x0165878a594ca255338adfa4d48449f69242eb8f") + + l1ClientRpc = "http://localhost:9545" + l2ClientRpc = "http://localhost:8545" + l1Client, _ = ethclient.Dial(l1ClientRpc) + l2Client, _ = ethclient.Dial(l2ClientRpc) + + rollupContract *bindings.Rollup + + l2Caller *types.L2Caller +) + +func init() { + var err error + rollupContract, err = bindings.NewRollup(rollupAddr, l1Client) + if err != nil { + panic(err) + } + l2Caller, err = types.NewL2Caller([]iface.L2Client{l2Client}) + if err != nil { + panic(err) + } +} + +func Test_GetFinalizeBatchHeader(t *testing.T) { + testDir := filepath.Join(t.TempDir(), "testleveldb") + os.RemoveAll(testDir) + t.Cleanup(func() { + os.RemoveAll(testDir) + }) + testDB, err := db.New(testDir) + require.NoError(t, err) + + bc := NewBatchCache(nil, l1Client, []iface.L2Client{l2Client}, rollupContract, l2Caller, testDB) + headerBytes, err := bc.getLastFinalizeBatchHeaderFromRollupByIndex(0) + require.NoError(t, err) + t.Log("headerBytes", hex.EncodeToString(headerBytes.Bytes())) +} + +func Test_CommitBatchParse(t *testing.T) { + data, signature, 
err := getCommitBatchDataByIndex(5357) + require.NoError(t, err) + t.Log("data", data) + t.Log("signature", signature) + t.Log("data.Version", data.Version) + t.Log("data.ParentBatchHeader", hex.EncodeToString(data.ParentBatchHeader)) + t.Log("data.LastBlockNumber", data.LastBlockNumber) + t.Log("data.NumL1Messages", data.NumL1Messages) + t.Log("data.PrevStateRoot", hex.EncodeToString(data.PrevStateRoot[:])) + t.Log("data.PostStateRoot", hex.EncodeToString(data.PostStateRoot[:])) + t.Log("data.WithdrawalRoot", hex.EncodeToString(data.WithdrawalRoot[:])) +} + +func TestBatchRestartInit(t *testing.T) { + testDir := filepath.Join(t.TempDir(), "testleveldb") + os.RemoveAll(testDir) + t.Cleanup(func() { + os.RemoveAll(testDir) + }) + testDB, err := db.New(testDir) + require.NoError(t, err) + + sequencerSetBytes, sequencerSetVerifyHash, err := l2Caller.GetSequencerSetBytes(nil) + require.NoError(t, err) + t.Log("sequencer set verify hash", hex.EncodeToString(sequencerSetVerifyHash[:])) + ci, fi := getInfosFromContract() + t.Log("commit index", ci, " ", "finalize index", fi) + bc := NewBatchCache(nil, l1Client, []iface.L2Client{l2Client}, rollupContract, l2Caller, testDB) + startBlockNum, endBlockNum, err := getFirstUnFinalizeBatchBlockNumRange(fi) + require.NoError(t, err) + startBlockNum = new(big.Int).Add(startBlockNum, new(big.Int).SetUint64(1)) + t.Log("start block number", startBlockNum, "end block number", endBlockNum) + + // Get the latest finalized batch header + headerBytes, err := getLastFinalizeBatchHeaderByIndex(fi.Uint64()) + require.NoError(t, err, "failed to get last finalized batch header") + parentStateRoot, err := headerBytes.PostStateRoot() + require.NoError(t, err, "failed to get post state root") + + // Initialize BatchCache parent batch information + // prevStateRoot should be the parent batch's postStateRoot (i.e., the current finalized batch's postStateRoot) + bc.parentBatchHeader = headerBytes + bc.prevStateRoot = parentStateRoot // The current 
batch's prevStateRoot is the parent batch's postStateRoot + bc.lastPackedBlockHeight, err = headerBytes.LastBlockNumber() + if err != nil { + store, err := rollupContract.BatchDataStore(nil, fi) + require.NoError(t, err) + bc.lastPackedBlockHeight = store.BlockNumber.Uint64() + } + bc.totalL1MessagePopped, err = headerBytes.TotalL1MessagePopped() + require.NoError(t, err) + t.Logf("Restored batch header: batchIndex=%d, parentStateRoot=%x (will be used as prevStateRoot for next batch)", + fi, parentStateRoot[:]) + + // Query the first unfinalized batch's block range from rollup contract + firstUnfinalizedIndex := fi.Uint64() + 1 + t.Logf("First unfinalize batch index: %d, block range: %d - %d", firstUnfinalizedIndex, startBlockNum.Uint64(), endBlockNum.Uint64()) + + // Fetch blocks from L2 client in this range and assemble batchHeader + assembledBatchHeader, err := assembleBatchHeaderFromL2Blocks(bc, startBlockNum.Uint64(), endBlockNum.Uint64(), sequencerSetBytes, l2Client, l2Caller) + require.NoError(t, err, "failed to assemble batch header from L2 blocks") + t.Log("assembled batch header success", hex.EncodeToString(assembledBatchHeader.Bytes())) + // Verify the assembled batchHeader + assembledBatchIndex, err := assembledBatchHeader.BatchIndex() + require.NoError(t, err) + require.Equal(t, firstUnfinalizedIndex, assembledBatchIndex, "assembled batch index should match") + assembledBatchHash, err := assembledBatchHeader.Hash() + require.NoError(t, err) + + batchDataInput, batchSignatureInput, err := getCommitBatchDataByIndex(firstUnfinalizedIndex) + require.NoError(t, err) + t.Logf("batchDataInput.Version=%d", batchDataInput.Version) + require.Equal(t, hex.EncodeToString(batchDataInput.ParentBatchHeader), hex.EncodeToString(headerBytes.Bytes())) + t.Logf("batchDataInput.LastBlockNumber=%d, %d", batchDataInput.LastBlockNumber, endBlockNum) + l1MsgNum, err := assembledBatchHeader.L1MessagePopped() + require.NoError(t, err) + require.Equal(t, 
uint64(batchDataInput.NumL1Messages), l1MsgNum) + prevStateRoot, err := assembledBatchHeader.PrevStateRoot() + require.NoError(t, err) + require.Equal(t, batchDataInput.PrevStateRoot[:], prevStateRoot.Bytes()) + postStateRoot, err := assembledBatchHeader.PostStateRoot() + require.NoError(t, err) + require.Equal(t, batchDataInput.PostStateRoot[:], postStateRoot.Bytes()) + + // Compare assembledBatchHeader with the batch header built from commitBatch data + // Note: batchDataInput and batchSignatureInput can be used to verify data, but need to build a complete batch header + compareBatchHeaderWithCommitData(t, assembledBatchHeader, batchDataInput, batchSignatureInput, sequencerSetVerifyHash) + + committedBatchHash, err := rollupContract.CommittedBatches(nil, new(big.Int).SetUint64(assembledBatchIndex)) + require.NoError(t, err) + require.Equal(t, assembledBatchHash, common.Hash(committedBatchHash), "assembled batch hash should match") + t.Logf("Successfully assembled batch hash: %x", assembledBatchHash) + t.Logf("Successfully assembled batch header: batchIndex=%d", assembledBatchIndex) +} + +// compareBatchHeaderWithCommitData compares the assembled batch header with information extracted from commitBatch data +func compareBatchHeaderWithCommitData(t *testing.T, assembledBatchHeader *BatchHeaderBytes, batchDataInput *bindings.IRollupBatchDataInput, batchSignatureInput *bindings.IRollupBatchSignatureInput, sequencerSetVerifyHash common.Hash) { + t.Logf("\n=== Comparing assembled batch header with commitBatch data ===") + + // Compare Version + version, err := assembledBatchHeader.Version() + require.NoError(t, err) + if version != batchDataInput.Version { + t.Errorf("❌ Version mismatch: assembled=%d, commitBatch=%d", version, batchDataInput.Version) + } else { + t.Logf("✓ Version: %d (match)", version) + } + + // Compare ParentBatchHeader + // Note: We should use batch index instead of version, but we need to get batch index from assembledBatchHeader + batchIndex, err 
:= assembledBatchHeader.BatchIndex() + if err == nil && batchIndex > 0 { + parentBatchHeader, err := getLastFinalizeBatchHeaderByIndex(batchIndex - 1) + if err == nil { + parentBytes := parentBatchHeader.Bytes() + if !bytes.Equal(parentBytes, batchDataInput.ParentBatchHeader) { + t.Errorf("❌ ParentBatchHeader mismatch: assembled=%x, commitBatch=%x", parentBytes[:min(32, len(parentBytes))], batchDataInput.ParentBatchHeader[:min(32, len(batchDataInput.ParentBatchHeader))]) + } else { + t.Logf("✓ ParentBatchHeader: match") + } + } + } + + // Compare LastBlockNumber + lastBlock, err := assembledBatchHeader.LastBlockNumber() + if err == nil { + if lastBlock != batchDataInput.LastBlockNumber { + t.Errorf("❌ LastBlockNumber mismatch: assembled=%d, commitBatch=%d", lastBlock, batchDataInput.LastBlockNumber) + } else { + t.Logf("✓ LastBlockNumber: %d (match)", lastBlock) + } + } + + // Compare NumL1Messages + l1MsgPopped, err := assembledBatchHeader.L1MessagePopped() + require.NoError(t, err) + if l1MsgPopped != uint64(batchDataInput.NumL1Messages) { + t.Errorf("❌ NumL1Messages mismatch: assembled=%d, commitBatch=%d", l1MsgPopped, batchDataInput.NumL1Messages) + } else { + t.Logf("✓ NumL1Messages: %d (match)", l1MsgPopped) + } + + // 比较 PrevStateRoot + prevStateRoot, err := assembledBatchHeader.PrevStateRoot() + require.NoError(t, err) + prevStateRootFromCommit := common.BytesToHash(batchDataInput.PrevStateRoot[:]) + if prevStateRoot != prevStateRootFromCommit { + t.Errorf("❌ PrevStateRoot mismatch: assembled=%x, commitBatch=%x", prevStateRoot, prevStateRootFromCommit) + } else { + t.Logf("✓ PrevStateRoot: %x (match)", prevStateRoot) + } + + // 比较 PostStateRoot + postStateRoot, err := assembledBatchHeader.PostStateRoot() + require.NoError(t, err) + postStateRootFromCommit := common.BytesToHash(batchDataInput.PostStateRoot[:]) + if postStateRoot != postStateRootFromCommit { + t.Errorf("❌ PostStateRoot mismatch: assembled=%x, commitBatch=%x", postStateRoot, 
postStateRootFromCommit) + } else { + t.Logf("✓ PostStateRoot: %x (match)", postStateRoot) + } + + // 比较 WithdrawalRoot + withdrawRoot, err := assembledBatchHeader.WithdrawalRoot() + require.NoError(t, err) + withdrawRootFromCommit := common.BytesToHash(batchDataInput.WithdrawalRoot[:]) + if withdrawRoot != withdrawRootFromCommit { + t.Errorf("❌ WithdrawalRoot mismatch: assembled=%x, commitBatch=%x", withdrawRoot, withdrawRootFromCommit) + } else { + t.Logf("✓ WithdrawalRoot: %x (match)", withdrawRoot) + } + + // 比较 SequencerSetVerifyHash + sequencerSetsHash := crypto.Keccak256Hash(batchSignatureInput.SequencerSets) + seqHash, err := assembledBatchHeader.SequencerSetVerifyHash() + require.NoError(t, err) + if seqHash != sequencerSetsHash { + t.Errorf("❌ SequencerSetVerifyHash mismatch: assembled=%x, from SequencerSets=%x", seqHash, sequencerSetsHash) + } else { + t.Logf("✓ SequencerSetVerifyHash: %x (match)", seqHash) + } + + if seqHash != sequencerSetVerifyHash { + t.Errorf("❌ SequencerSetVerifyHash mismatch with provided hash: assembled=%x, provided=%x", seqHash, sequencerSetVerifyHash) + } else { + t.Logf("✓ SequencerSetVerifyHash matches provided hash: %x", sequencerSetVerifyHash) + } +} + +func min(a, b int) int { + if a < b { + return a + } + return b +} + +// getLastFinalizeBatchHeaderByIndex gets the batch header with the specified index from the rollup contract's FinalizeBatch event +// The finalizeBatch function only receives one parameter: batchHeader bytes, so it can be parsed directly from the transaction +// Query is limited to 10000 block heights, starting from the latest height and querying backwards until data is found +func getLastFinalizeBatchHeaderByIndex(index uint64) (*BatchHeaderBytes, error) { + // Get the current latest block height + latestBlock, err := l1Client.BlockNumber(context.Background()) + if err != nil { + return nil, fmt.Errorf("failed to get latest block number: %w", err) + } + + const blockRange = uint64(10000) // Query 10000 
blocks each time + var endBlock uint64 = latestBlock + var startBlock uint64 + + // Start from the latest height, query backwards 10000 blocks each time until data is found + for endBlock > 0 { + // Calculate the start block for this query + if endBlock >= blockRange { + startBlock = endBlock - blockRange + 1 + } else { + startBlock = 0 + } + + // Set query options + filterOpts := &bind.FilterOpts{ + Start: startBlock, + End: &endBlock, + } + + // Query the FinalizeBatch event with the corresponding index from the rollup contract + finalizeEventIter, err := rollupContract.FilterFinalizeBatch(filterOpts, []*big.Int{new(big.Int).SetUint64(index)}, nil) + if err != nil { + // If query fails, continue querying backwards + if endBlock < blockRange { + break // Already queried to block 0, exit loop + } + endBlock = startBlock - 1 + continue + } + + // Iterate through query results + for finalizeEventIter.Next() { + event := finalizeEventIter.Event + // Get transaction hash from event + txHash := event.Raw.TxHash + + // Get transaction details + tx, _, err := l1Client.TransactionByHash(context.Background(), txHash) + if err != nil { + continue // If getting transaction fails, try next event + } + + // Parse finalizeBatch transaction data to get batchHeader + batchHeader, err := parseFinalizeBatchTxData(tx.Data()) + if err != nil { + continue // If parsing fails, try next event + } + + // Verify if batch index matches + batchIndex, err := batchHeader.BatchIndex() + if err != nil { + continue + } + if batchIndex == index { + finalizeEventIter.Close() + return &batchHeader, nil + } + } + finalizeEventIter.Close() + + // Continue querying backwards + if endBlock < blockRange { + break // Already queried to block 0, exit loop + } + endBlock = startBlock - 1 + } + + return nil, ErrBatchNotFound +} + +func getInfosFromContract() (*big.Int, *big.Int) { + latestCommitBatchIndex, _ := rollupContract.LastCommittedBatchIndex(nil) + lastFinalizedBatchIndex, _ := 
rollupContract.LastFinalizedBatchIndex(nil) + return latestCommitBatchIndex, lastFinalizedBatchIndex +} + +func getFirstUnFinalizeBatchBlockNumRange(lastFinalizedBatchIndex *big.Int) (*big.Int, *big.Int, error) { + fis, err := rollupContract.BatchDataStore(nil, lastFinalizedBatchIndex) + if err != nil { + return nil, nil, err + } + ufis, err := rollupContract.BatchDataStore(nil, new(big.Int).SetUint64(lastFinalizedBatchIndex.Uint64()+1)) + if err != nil { + return nil, nil, err + } + + return fis.BlockNumber, ufis.BlockNumber, nil +} + +// getCommitBatchDataByIndex gets batchDataInput and batchSignatureInput with the specified index from the rollup contract's CommitBatch event +// Reference the implementation of getLastFinalizeBatchHeaderByIndex +// Query is limited to 10000 block heights, starting from the latest height and querying backwards until data is found +func getCommitBatchDataByIndex(index uint64) (*bindings.IRollupBatchDataInput, *bindings.IRollupBatchSignatureInput, error) { + // Get the current latest block height + latestBlock, err := l1Client.BlockNumber(context.Background()) + if err != nil { + return nil, nil, fmt.Errorf("failed to get latest block number: %w", err) + } + + const blockRange = uint64(10000) // Query 10000 blocks each time + var endBlock uint64 = latestBlock + var startBlock uint64 + + // Start from the latest height, query backwards 10000 blocks each time until data is found + for endBlock > 0 { + // Calculate the start block for this query + if endBlock >= blockRange { + startBlock = endBlock - blockRange + 1 + } else { + startBlock = 0 + } + + // Set query options + filterOpts := &bind.FilterOpts{ + Start: startBlock, + End: &endBlock, + } + + // Query the CommitBatch event with the corresponding index from the rollup contract + commitEventIter, err := rollupContract.FilterCommitBatch(filterOpts, []*big.Int{new(big.Int).SetUint64(index)}, nil) + if err != nil { + // If query fails, continue querying backwards + if endBlock < 
blockRange { + break // Already queried to block 0, exit loop + } + endBlock = startBlock - 1 + continue + } + + // Iterate through query results + for commitEventIter.Next() { + event := commitEventIter.Event + // Get transaction hash from event + txHash := event.Raw.TxHash + + // Get transaction details + tx, _, err := l1Client.TransactionByHash(context.Background(), txHash) + if err != nil { + return nil, nil, fmt.Errorf("failed to get transaction by hash: %w", err) + } + + // Parse commitBatch transaction data to get batchDataInput and batchSignatureInput + batchDataInput, batchSignatureInput, err := parseCommitBatchTxData(tx.Data()) + if err != nil { + return nil, nil, fmt.Errorf("failed to parse commit batch data: %w", err) + } + + // Verify if batch index matches (by checking batchIndex in parentBatchHeader) + if len(batchDataInput.ParentBatchHeader) > 0 { + parentHeader := BatchHeaderBytes(batchDataInput.ParentBatchHeader) + parentBatchIndex, err := parentHeader.BatchIndex() + if err == nil && parentBatchIndex+1 == index { + commitEventIter.Close() + return batchDataInput, batchSignatureInput, nil + } + } + } + commitEventIter.Close() + + // Continue querying backwards + if endBlock < blockRange { + break // Already queried to block 0, exit loop + } + endBlock = startBlock - 1 + } + + return nil, nil, ErrBatchNotFound +} + +// assembleBatchHeaderFromL2Blocks fetches blocks from L2 client in the specified range and assembles batchHeader +// Parameters: +// - bc: BatchCache instance (parentBatchHeader and prevStateRoot already initialized) +// - startBlockNum: starting block number +// - endBlockNum: ending block number +// - sequencerSetVerifyHash: sequencer set verification hash +// - l2Client: L2 client +// +// Returns: +// - batchHeader: assembled batchHeader +// - error: returns error if assembly fails +func assembleBatchHeaderFromL2Blocks( + bc *BatchCache, + startBlockNum, endBlockNum uint64, + sequencerBytes []byte, + l2Client iface.L2Client, + 
l2Caller *types.L2Caller, +) (*BatchHeaderBytes, error) { + ctx := context.Background() + + // Fetch blocks from L2 client in the specified range and accumulate to batch + for blockNum := startBlockNum; blockNum <= endBlockNum; blockNum++ { + root, err := l2Caller.GetTreeRoot(&bind.CallOpts{ + Context: ctx, + BlockNumber: new(big.Int).SetUint64(blockNum), + }) + if err != nil { + return nil, fmt.Errorf("failed to get withdraw root at block %d: %w", blockNum, err) + } + // Check capacity and store to current + exceeded, err := bc.CalculateCapWithProposalBlock(blockNum, root) + if err != nil { + return nil, fmt.Errorf("failed to calculate cap with block %d: %w", blockNum, err) + } + + // Pack current block (confirm and append to batch) + if err = bc.PackCurrentBlock(blockNum); err != nil { + return nil, fmt.Errorf("failed to pack block %d: %w", blockNum, err) + } + + // If capacity exceeds limit, can stop early (optional) + _ = exceeded // Checked but not used in this test + } + + // Get the last block's timestamp for packing + lastBlock, err := l2Client.BlockByNumber(ctx, big.NewInt(int64(endBlockNum))) + if err != nil { + return nil, fmt.Errorf("failed to get last block %d: %w", endBlockNum, err) + } + blockTimestamp := lastBlock.Time() + + // Seal batch and generate batchHeader + batchIndex, batchHeaderBytes, _, err := bc.SealBatch(sequencerBytes, blockTimestamp) + if err != nil { + return nil, fmt.Errorf("failed to seal batch: %w", err) + } + + // Get the sealed batch header + _, found := bc.GetSealedBatch(batchIndex) + if !found { + return nil, fmt.Errorf("sealed batch not found for index %d", batchIndex) + } + + return &batchHeaderBytes, nil +} diff --git a/tx-submitter/batch/batch_storage.go b/tx-submitter/batch/batch_storage.go new file mode 100644 index 000000000..1f7c1e0f7 --- /dev/null +++ b/tx-submitter/batch/batch_storage.go @@ -0,0 +1,386 @@ +package batch + +import ( + "bytes" + "encoding/binary" + "encoding/json" + "errors" + "fmt" + "sync" + + 
"morph-l2/tx-submitter/db" + + "github.com/morph-l2/go-ethereum/eth" + "github.com/morph-l2/go-ethereum/log" +) + +const ( + // Key prefixes for LevelDB storage + SealedBatchKeyPrefix = "sealed_batch_" + SealedBatchHeaderKeyPrefix = "sealed_batch_header_" + SealedBatchIndicesKey = "sealed_batch_indices" +) + +// BatchStorage handles persistence of sealed batches using JSON encoding +type BatchStorage struct { + db db.Database + mu sync.RWMutex +} + +// NewBatchStorage creates a new BatchStorage instance +func NewBatchStorage(db db.Database) *BatchStorage { + return &BatchStorage{ + db: db, + } +} + +// StoreSealedBatch stores a single sealed batch to LevelDB +// Uses JSON encoding for serialization +func (s *BatchStorage) StoreSealedBatch(batchIndex uint64, batch *eth.RPCRollupBatch) error { + s.mu.Lock() + defer s.mu.Unlock() + + // Serialize batch to JSON + encoded, err := json.Marshal(batch) + if err != nil { + return fmt.Errorf("failed to marshal sealed batch %d: %w", batchIndex, err) + } + + // Store batch data + key := encodeBatchKey(batchIndex) + if err := s.db.PutBytes(key, encoded); err != nil { + return fmt.Errorf("failed to store sealed batch %d: %w", batchIndex, err) + } + + // Update indices list + if err = s.updateBatchIndices(batchIndex, true); err != nil { + log.Warn("Failed to update batch indices", "batch_index", batchIndex, "error", err) + // Don't fail the operation if indices update fails + } + + return nil +} + +// LoadSealedBatch loads a single sealed batch from LevelDB +func (s *BatchStorage) LoadSealedBatch(batchIndex uint64) (*eth.RPCRollupBatch, error) { + s.mu.RLock() + defer s.mu.RUnlock() + + key := encodeBatchKey(batchIndex) + encoded, err := s.db.GetBytes(key) + if err != nil { + if errors.Is(err, db.ErrKeyNotFound) { + return nil, fmt.Errorf("sealed batch %d not found", batchIndex) + } + return nil, fmt.Errorf("failed to get sealed batch %d: %w", batchIndex, err) + } + + // Deserialize from JSON + var batch eth.RPCRollupBatch + if 
err := json.Unmarshal(encoded, &batch); err != nil { + return nil, fmt.Errorf("failed to unmarshal sealed batch %d: %w", batchIndex, err) + } + + return &batch, nil +} + +// LoadAllSealedBatches loads all sealed batches from LevelDB +// Returns a map of batchIndex -> RPCRollupBatch +func (s *BatchStorage) LoadAllSealedBatches() (map[uint64]*eth.RPCRollupBatch, []uint64, error) { + s.mu.RLock() + // Load batch indices + indices, err := s.loadBatchIndices() + s.mu.RUnlock() + if err != nil { + if errors.Is(err, db.ErrKeyNotFound) { + // No batches stored yet + return make(map[uint64]*eth.RPCRollupBatch), nil, nil + } + return nil, nil, fmt.Errorf("failed to load batch indices: %w", err) + } + + // Load each batch (without holding the lock to avoid deadlock) + batches := make(map[uint64]*eth.RPCRollupBatch, len(indices)) + for _, idx := range indices { + batch, err := s.LoadSealedBatch(idx) + if err != nil { + log.Warn("Failed to load sealed batch, skipping", + "batch_index", idx, "error", err) + continue + } + batches[idx] = batch + } + + return batches, indices, nil +} + +// LoadAllSealedBatchesAndHeader loads all sealed batches and batch header from LevelDB +func (s *BatchStorage) LoadAllSealedBatchesAndHeader() (map[uint64]*eth.RPCRollupBatch, map[uint64]*BatchHeaderBytes, []uint64, error) { + s.mu.RLock() + // Load batch indices + indices, err := s.loadBatchIndices() + s.mu.RUnlock() + if err != nil { + if errors.Is(err, db.ErrKeyNotFound) { + // No batches stored yet + return make(map[uint64]*eth.RPCRollupBatch), make(map[uint64]*BatchHeaderBytes), nil, nil + } + return nil, nil, nil, fmt.Errorf("failed to load batch indices: %w", err) + } + + // Load each batch (without holding the lock to avoid deadlock) + batches := make(map[uint64]*eth.RPCRollupBatch, len(indices)) + for i, idx := range indices { + batch, err := s.LoadSealedBatch(idx) + if err != nil { + log.Warn("Failed to load sealed batch, skipping", + "batch_index", idx, "error", err) + return nil, nil, 
nil, fmt.Errorf("failed to load batch: %w", err) + } + if i > 0 { + parentBatch := batches[idx-1] + parentBatchHash, err := BatchHeaderBytes(batch.ParentBatchHeader).Hash() + if err != nil { + log.Error("Failed to load parent batch header", "batch_index", idx, "error", err) + return nil, nil, nil, fmt.Errorf("failed to load batch header: %w", err) + } + if !bytes.Equal(parentBatch.Hash.Bytes(), parentBatchHash.Bytes()) { + log.Error("parent batch hash check failed", + "batch_index", idx, + "parent_batch_hash", parentBatch.Hash.String(), + "pre_batch_hash", parentBatchHash.String()) + return nil, nil, nil, fmt.Errorf("parent batch hash check failed") + } + } + batches[idx] = batch + } + // Load each batch header (without holding the lock to avoid deadlock) + headers := make(map[uint64]*BatchHeaderBytes, len(indices)) + for _, idx := range indices { + header, err := s.LoadSealedBatchHeader(idx) + if err != nil { + log.Warn("Failed to load sealed batch header, skipping", + "batch_index", idx, "error", err) + return nil, nil, nil, fmt.Errorf("failed to load batch header bytes: %w", err) + } + headers[idx] = header + headerHash, err := header.Hash() + if err != nil { + log.Warn("Failed to hash sealed batch header, skipping", + "batch_index", idx, "error", err) + return nil, nil, nil, fmt.Errorf("failed to load batch header bytes: %w", err) + } + // check header and batch hash equal + if !bytes.Equal(headerHash.Bytes(), batches[idx].Hash.Bytes()) { + log.Error("Sealed batch header bytes do not match", + "batch_index", idx, "expected", batches[idx].Hash, "actual", headerHash.Bytes()) + return nil, nil, nil, fmt.Errorf("sealed batch header bytes do not match") + } + } + return batches, headers, indices, nil +} + +// DeleteSealedBatch removes a sealed batch from LevelDB +func (s *BatchStorage) DeleteSealedBatch(batchIndex uint64) error { + s.mu.Lock() + defer s.mu.Unlock() + + key := encodeBatchKey(batchIndex) + if err := s.db.Delete(key); err != nil { + return 
fmt.Errorf("failed to delete sealed batch %d: %w", batchIndex, err) + } + + // Update indices list + if err := s.updateBatchIndices(batchIndex, false); err != nil { + log.Warn("Failed to update batch indices after deletion", + "batch_index", batchIndex, "error", err) + // Don't fail the operation if indices update fails + } + + return nil +} + +func (s *BatchStorage) DeleteAllSealedBatches() error { + s.mu.RLock() + // Load batch indices + indices, err := s.loadBatchIndices() + s.mu.RUnlock() + if err != nil { + if errors.Is(err, db.ErrKeyNotFound) { + // No batches stored yet + return nil + } + return fmt.Errorf("failed to load batch indices: %w", err) + } + + for _, idx := range indices { + err = s.DeleteSealedBatch(idx) + if err != nil { + log.Error("Failed to delete sealed batch", + "batch_index", idx, "error", err) + return err + } + err = s.DeleteSealedBatchHeader(idx) + if err != nil { + log.Error("Failed to delete sealed batch header", + "batch_index", idx, "error", err) + return err + } + } + + return nil +} + +// encodeBatchKey encodes batch index to a byte key +func encodeBatchKey(batchIndex uint64) []byte { + key := make([]byte, len(SealedBatchKeyPrefix)+8) + copy(key, SealedBatchKeyPrefix) + binary.BigEndian.PutUint64(key[len(SealedBatchKeyPrefix):], batchIndex) + return key +} + +// updateBatchIndices updates the list of stored batch indices +// add: true to add index, false to remove +func (s *BatchStorage) updateBatchIndices(batchIndex uint64, add bool) error { + indices, err := s.loadBatchIndices() + if err != nil { + if errors.Is(err, db.ErrKeyNotFound) { + indices = []uint64{} + } else { + return err + } + } + + if add { + // Add index if not already present + found := false + for _, idx := range indices { + if idx == batchIndex { + found = true + break + } + } + if !found { + indices = append(indices, batchIndex) + } + } else { + // Remove index + newIndices := make([]uint64, 0, len(indices)) + for _, idx := range indices { + if idx != 
batchIndex { + newIndices = append(newIndices, idx) + } + } + indices = newIndices + } + + return s.saveBatchIndices(indices) +} + +// loadBatchIndices loads the list of stored batch indices +func (s *BatchStorage) loadBatchIndices() ([]uint64, error) { + encoded, err := s.db.GetBytes([]byte(SealedBatchIndicesKey)) + if err != nil { + return nil, err + } + + var indices []uint64 + if err := json.Unmarshal(encoded, &indices); err != nil { + return nil, fmt.Errorf("failed to unmarshal batch indices: %w", err) + } + + return indices, nil +} + +// saveBatchIndices saves the list of batch indices +func (s *BatchStorage) saveBatchIndices(indices []uint64) error { + encoded, err := json.Marshal(indices) + if err != nil { + return fmt.Errorf("failed to marshal batch indices: %w", err) + } + + return s.db.PutBytes([]byte(SealedBatchIndicesKey), encoded) +} + +// StoreSealedBatchHeader stores a single sealed batch header to LevelDB +func (s *BatchStorage) StoreSealedBatchHeader(batchIndex uint64, header *BatchHeaderBytes) error { + s.mu.Lock() + defer s.mu.Unlock() + + // Store batch header data + key := encodeBatchHeaderKey(batchIndex) + if err := s.db.PutBytes(key, header.Bytes()); err != nil { + return fmt.Errorf("failed to store sealed batch header %d: %w", batchIndex, err) + } + + return nil +} + +// LoadSealedBatchHeader loads a single sealed batch header from LevelDB +func (s *BatchStorage) LoadSealedBatchHeader(batchIndex uint64) (*BatchHeaderBytes, error) { + s.mu.RLock() + defer s.mu.RUnlock() + + key := encodeBatchHeaderKey(batchIndex) + headerBytes, err := s.db.GetBytes(key) + if err != nil { + if errors.Is(err, db.ErrKeyNotFound) { + return nil, fmt.Errorf("sealed batch header %d not found", batchIndex) + } + return nil, fmt.Errorf("failed to get sealed batch header %d: %w", batchIndex, err) + } + + header := BatchHeaderBytes(headerBytes) + return &header, nil +} + +// LoadAllSealedBatchHeaders loads all sealed batch headers from LevelDB +// Returns a map of 
batchIndex -> BatchHeaderBytes +func (s *BatchStorage) LoadAllSealedBatchHeaders() (map[uint64]*BatchHeaderBytes, error) { + s.mu.RLock() + // Load batch indices + indices, err := s.loadBatchIndices() + s.mu.RUnlock() + if err != nil { + if errors.Is(err, db.ErrKeyNotFound) { + // No batches stored yet + return make(map[uint64]*BatchHeaderBytes), nil + } + return nil, fmt.Errorf("failed to load batch indices: %w", err) + } + + // Load each batch header (without holding the lock to avoid deadlock) + headers := make(map[uint64]*BatchHeaderBytes, len(indices)) + for _, idx := range indices { + header, err := s.LoadSealedBatchHeader(idx) + if err != nil { + log.Warn("Failed to load sealed batch header, skipping", + "batch_index", idx, "error", err) + continue + } + headers[idx] = header + } + + return headers, nil +} + +// DeleteSealedBatchHeader removes a sealed batch header from LevelDB +func (s *BatchStorage) DeleteSealedBatchHeader(batchIndex uint64) error { + s.mu.Lock() + defer s.mu.Unlock() + + key := encodeBatchHeaderKey(batchIndex) + if err := s.db.Delete(key); err != nil { + return fmt.Errorf("failed to delete sealed batch header %d: %w", batchIndex, err) + } + + return nil +} + +// encodeBatchHeaderKey encodes batch index to a byte key for batch header +func encodeBatchHeaderKey(batchIndex uint64) []byte { + key := make([]byte, len(SealedBatchHeaderKeyPrefix)+8) + copy(key, SealedBatchHeaderKeyPrefix) + binary.BigEndian.PutUint64(key[len(SealedBatchHeaderKeyPrefix):], batchIndex) + return key +} diff --git a/tx-submitter/batch/batch_storage_test.go b/tx-submitter/batch/batch_storage_test.go new file mode 100644 index 000000000..9346379a2 --- /dev/null +++ b/tx-submitter/batch/batch_storage_test.go @@ -0,0 +1,21 @@ +package batch + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "morph-l2/tx-submitter/iface" +) + +func Test_storageBatch(t *testing.T) { + testDB := setupTestDB(t) + cache := NewBatchCache(nil, l1Client, 
[]iface.L2Client{l2Client}, rollupContract, l2Caller, testDB) + err := cache.InitAndSyncFromRollup() + require.NoError(t, err) + + batches, _, err := cache.batchStorage.LoadAllSealedBatches() + require.NoError(t, err) + require.NotNil(t, batches) + t.Log("loaded batches count", len(batches)) +} diff --git a/tx-submitter/batch/blob.go b/tx-submitter/batch/blob.go new file mode 100644 index 000000000..399b0c15f --- /dev/null +++ b/tx-submitter/batch/blob.go @@ -0,0 +1,210 @@ +package batch + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + "morph-l2/node/zstd" + + eth "github.com/morph-l2/go-ethereum/core/types" + "github.com/morph-l2/go-ethereum/crypto/kzg4844" + "github.com/morph-l2/go-ethereum/rlp" +) + +const MaxBlobBytesSize = 4096 * 31 + +var ( + emptyBlob = new(kzg4844.Blob) + emptyBlobCommit, _ = kzg4844.BlobToCommitment(emptyBlob) + emptyBlobProof, _ = kzg4844.ComputeBlobProof(emptyBlob, emptyBlobCommit) +) + +// MakeBlobCanonical converts the raw blob data into the canonical blob representation of 4096 BLSFieldElements. +func MakeBlobCanonical(blobBytes []byte) (b *kzg4844.Blob, err error) { + if len(blobBytes) > MaxBlobBytesSize { + return nil, fmt.Errorf("data is too large for blob. len=%v", len(blobBytes)) + } + offset := 0 + b = new(kzg4844.Blob) + // encode (up to) 31 bytes of remaining input data at a time into the subsequent field element + for i := 0; i < 4096; i++ { + offset += copy(b[i*32+1:i*32+32], blobBytes[offset:]) + if offset == len(blobBytes) { + break + } + } + if offset < len(blobBytes) { + return nil, fmt.Errorf("failed to fit all data into blob. 
bytes remaining: %v", len(blobBytes)-offset) + } + return +} + +func RetrieveBlobBytes(blob *kzg4844.Blob) ([]byte, error) { + data := make([]byte, MaxBlobBytesSize) + for i := 0; i < 4096; i++ { + if blob[i*32] != 0 { + return nil, fmt.Errorf("invalid blob, found non-zero high order byte %x of field element %d", data[i*32], i) + } + copy(data[i*31:i*31+31], blob[i*32+1:i*32+32]) + } + return data, nil +} + +func makeBlobCommitment(bz []byte) (b kzg4844.Blob, c kzg4844.Commitment, err error) { + blob, err := MakeBlobCanonical(bz) + if err != nil { + return + } + b = *blob + c, err = kzg4844.BlobToCommitment(&b) + if err != nil { + return + } + return +} + +func MakeBlobTxSidecar(blobBytes []byte) (*eth.BlobTxSidecar, error) { + if len(blobBytes) == 0 { + return ð.BlobTxSidecar{ + Blobs: []kzg4844.Blob{*emptyBlob}, + Commitments: []kzg4844.Commitment{emptyBlobCommit}, + Proofs: []kzg4844.Proof{emptyBlobProof}, + }, nil + } + if len(blobBytes) > 2*MaxBlobBytesSize { + return nil, errors.New("only 2 blobs at most is allowed") + } + blobCount := len(blobBytes)/(MaxBlobBytesSize+1) + 1 + var ( + err error + blobs = make([]kzg4844.Blob, blobCount) + commitments = make([]kzg4844.Commitment, blobCount) + ) + switch blobCount { + case 1: + blobs[0], commitments[0], err = makeBlobCommitment(blobBytes) + if err != nil { + return nil, err + } + case 2: + blobs[0], commitments[0], err = makeBlobCommitment(blobBytes[:MaxBlobBytesSize]) + if err != nil { + return nil, err + } + blobs[1], commitments[1], err = makeBlobCommitment(blobBytes[MaxBlobBytesSize:]) + if err != nil { + return nil, err + } + } + return ð.BlobTxSidecar{ + Blobs: blobs, + Commitments: commitments, + }, nil +} + +func CompressBatchBytes(batchBytes []byte) ([]byte, error) { + if len(batchBytes) == 0 { + return nil, nil + } + compressedBatchBytes, err := zstd.CompressBatchBytes(batchBytes) + if err != nil { + return nil, fmt.Errorf("failed to compress batch bytes, err: %w", err) + } + return 
compressedBatchBytes, nil +} + +func DecodeTxsFromBytes(txsBytes []byte) (eth.Transactions, error) { + reader := bytes.NewReader(txsBytes) + txs := make(eth.Transactions, 0) + for { + var ( + firstByte byte + fullTxBytes []byte + innerTx eth.TxData + err error + ) + if err = binary.Read(reader, binary.BigEndian, &firstByte); err != nil { + // if the blob byte array is completely consumed, then break the loop + if err == io.EOF { + break + } + return nil, err + } + // zero byte is found after valid tx bytes, break the loop + if firstByte == 0 { + break + } + + switch firstByte { + case eth.AccessListTxType: + if err := binary.Read(reader, binary.BigEndian, &firstByte); err != nil { + return nil, err + } + innerTx = new(eth.AccessListTx) + case eth.DynamicFeeTxType: + if err := binary.Read(reader, binary.BigEndian, &firstByte); err != nil { + return nil, err + } + innerTx = new(eth.DynamicFeeTx) + case eth.SetCodeTxType: + if err := binary.Read(reader, binary.BigEndian, &firstByte); err != nil { + return nil, err + } + innerTx = new(eth.SetCodeTx) + case eth.MorphTxType: + if err := binary.Read(reader, binary.BigEndian, &firstByte); err != nil { + return nil, err + } + innerTx = new(eth.MorphTx) + default: + if firstByte <= 0xf7 { // legacy tx first byte must be greater than 0xf7(247) + return nil, fmt.Errorf("not supported tx type: %d", firstByte) + } + innerTx = new(eth.LegacyTx) + } + + // we support the tx types of LegacyTxType/AccessListTxType/DynamicFeeTxType + //if firstByte == eth.AccessListTxType || firstByte == eth.DynamicFeeTxType { + // // the firstByte here is used to indicate tx type, so skip it + // if err := binary.Read(reader, binary.BigEndian, &firstByte); err != nil { + // return nil, err + // } + //} else if firstByte <= 0xf7 { // legacy tx first byte must be greater than 0xf7(247) + // return nil, fmt.Errorf("not supported tx type: %d", firstByte) + //} + fullTxBytes, err = extractInnerTxFullBytes(firstByte, reader) + if err != nil { + return 
nil, err + } + if err = rlp.DecodeBytes(fullTxBytes, innerTx); err != nil { + return nil, err + } + txs = append(txs, eth.NewTx(innerTx)) + } + return txs, nil +} + +func extractInnerTxFullBytes(firstByte byte, reader io.Reader) ([]byte, error) { + //the occupied byte length for storing the size of the following rlp encoded bytes + sizeByteLen := firstByte - 0xf7 + + // the size of the following rlp encoded bytes + sizeByte := make([]byte, sizeByteLen) + if err := binary.Read(reader, binary.BigEndian, sizeByte); err != nil { + return nil, err + } + size := binary.BigEndian.Uint32(append(make([]byte, 4-len(sizeByte)), sizeByte...)) + + txRaw := make([]byte, size) + if err := binary.Read(reader, binary.BigEndian, txRaw); err != nil { + return nil, err + } + fullTxBytes := make([]byte, 1+uint32(sizeByteLen)+size) + copy(fullTxBytes[:1], []byte{firstByte}) + copy(fullTxBytes[1:1+sizeByteLen], sizeByte) + copy(fullTxBytes[1+sizeByteLen:], txRaw) + + return fullTxBytes, nil +} diff --git a/tx-submitter/batch/commit_test.go b/tx-submitter/batch/commit_test.go new file mode 100644 index 000000000..2acb26d37 --- /dev/null +++ b/tx-submitter/batch/commit_test.go @@ -0,0 +1,229 @@ +package batch + +import ( + "context" + "crypto/ecdsa" + "fmt" + "math/big" + "os" + "path/filepath" + "testing" + "time" + + "morph-l2/bindings/bindings" + "morph-l2/tx-submitter/db" + "morph-l2/tx-submitter/iface" + "morph-l2/tx-submitter/types" + "morph-l2/tx-submitter/utils" + + "github.com/holiman/uint256" + "github.com/morph-l2/go-ethereum/common" + "github.com/morph-l2/go-ethereum/consensus/misc/eip4844" + ethtypes "github.com/morph-l2/go-ethereum/core/types" + "github.com/morph-l2/go-ethereum/crypto" + "github.com/morph-l2/go-ethereum/eth" + "github.com/morph-l2/go-ethereum/ethclient" + "github.com/morph-l2/go-ethereum/log" + "github.com/stretchr/testify/require" +) + +var pk = "" + +func TestRollupWithProof(t *testing.T) { + testDir := filepath.Join(t.TempDir(), "testleveldb") + 
os.RemoveAll(testDir) + t.Cleanup(func() { + os.RemoveAll(testDir) + }) + testDB, err := db.New(testDir) + require.NoError(t, err) + + cache := NewBatchCache(nil, l1Client, []iface.L2Client{l2Client}, rollupContract, l2Caller, testDB) + err = cache.InitFromRollupByRange() + require.NoError(t, err) + + privateKey, err := crypto.HexToECDSA(pk[2:]) + require.NoError(t, err) + address := crypto.PubkeyToAddress(privateKey.PublicKey) + ctx := context.Background() + l1ChainId, err := l1Client.ChainID(ctx) + require.NoError(t, err) + rollup, err := bindings.NewRollup(rollupAddr, l1Client) + require.NoError(t, err) + abi, err := bindings.RollupMetaData.GetAbi() + require.NoError(t, err) + latestCommitBatchIndex, err := rollup.LastCommittedBatchIndex(nil) + require.NoError(t, err) + + batch, err := cache.Get(latestCommitBatchIndex.Uint64() + 1) + require.NoError(t, err) + h := crypto.Keccak256Hash(batch.CurrentSequencerSetBytes) + t.Log("sequencer verify hash:", h.String()) + + signature, err := buildSigInput(batch) + require.NoError(t, err) + rollupBatch := bindings.IRollupBatchDataInput{ + Version: uint8(batch.Version), + ParentBatchHeader: batch.ParentBatchHeader, + LastBlockNumber: batch.LastBlockNumber, + NumL1Messages: batch.NumL1Messages, + PrevStateRoot: batch.PrevStateRoot, + PostStateRoot: batch.PostStateRoot, + WithdrawalRoot: batch.WithdrawRoot, + } + tip, gasFeeCap, blobFee, head, err := getGasTipAndCap(l1Client) + require.NoError(t, err) + + calldata, err := abi.Pack("commitBatch", rollupBatch, *signature) + require.NoError(t, err) + nonce, err := l1Client.NonceAt(context.Background(), address, nil) + require.NoError(t, err) + tx, err := createBlobTx(l1Client, batch, nonce, 3200000, tip, gasFeeCap, blobFee, calldata, head) + require.NoError(t, err) + transaction, err := sign(tx, ethtypes.LatestSignerForChainID(l1ChainId), privateKey) + require.NoError(t, err) + t.Log("txHash", transaction.Hash().String()) + err = sendTx(l1Client, 500000000000000000, 
transaction) + require.NoError(t, err) + time.Sleep(2 * time.Second) + receipt, err := l1Client.TransactionReceipt(ctx, transaction.Hash()) + require.NoError(t, err) + t.Log("receipt status", receipt.Status) + t.Log("receipt", receipt) + +} + +func sign(tx *ethtypes.Transaction, signer ethtypes.Signer, prv *ecdsa.PrivateKey) (*ethtypes.Transaction, error) { + signedTx, err := ethtypes.SignTx(tx, signer, prv) + if err != nil { + return nil, fmt.Errorf("sign tx error:%v", err) + } + return signedTx, nil +} + +func createBlobTx(l1client *ethclient.Client, batch *eth.RPCRollupBatch, nonce, gas uint64, tip, gasFeeCap, blobFee *big.Int, calldata []byte, head *ethtypes.Header) (*ethtypes.Transaction, error) { + versionedHashes := types.BlobHashes(batch.Sidecar.Blobs, batch.Sidecar.Commitments) + sidecar := ðtypes.BlobTxSidecar{ + Blobs: batch.Sidecar.Blobs, + Commitments: batch.Sidecar.Commitments, + } + chainID, err := l1client.ChainID(context.Background()) + if err != nil { + return nil, err + } + switch types.DetermineBlobVersion(head, chainID.Uint64()) { + case ethtypes.BlobSidecarVersion0: + sidecar.Version = ethtypes.BlobSidecarVersion0 + proof, err := types.MakeBlobProof(sidecar.Blobs, sidecar.Commitments) + if err != nil { + return nil, fmt.Errorf("gen blob proof failed %v", err) + } + sidecar.Proofs = proof + case ethtypes.BlobSidecarVersion1: + sidecar.Version = ethtypes.BlobSidecarVersion1 + proof, err := types.MakeCellProof(sidecar.Blobs) + if err != nil { + return nil, fmt.Errorf("gen cell proof failed %v", err) + } + sidecar.Proofs = proof + default: + return nil, fmt.Errorf("unsupported blob version") + } + + return ethtypes.NewTx(ðtypes.BlobTx{ + ChainID: uint256.MustFromBig(chainID), + Nonce: nonce, + GasTipCap: uint256.MustFromBig(tip), + GasFeeCap: uint256.MustFromBig(gasFeeCap), + Gas: gas, + To: rollupAddr, + Data: calldata, + BlobFeeCap: uint256.MustFromBig(blobFee), + BlobHashes: versionedHashes, + Sidecar: sidecar, + }), nil +} + +func 
getGasTipAndCap(l1client *ethclient.Client) (*big.Int, *big.Int, *big.Int, *ethtypes.Header, error) { + head, err := l1client.HeaderByNumber(context.Background(), nil) + if err != nil { + return nil, nil, nil, nil, err + } + if head.BaseFee != nil { + log.Info("market fee info", "feecap", head.BaseFee) + } + + tip, err := l1client.SuggestGasTipCap(context.Background()) + if err != nil { + return nil, nil, nil, nil, err + } + log.Info("market fee info", "tip", tip) + + tip = new(big.Int).Mul(tip, big.NewInt(int64(200))) + tip = new(big.Int).Div(tip, big.NewInt(100)) + + var gasFeeCap *big.Int + if head.BaseFee != nil { + gasFeeCap = new(big.Int).Add( + tip, + new(big.Int).Mul(head.BaseFee, big.NewInt(2)), + ) + } else { + gasFeeCap = new(big.Int).Set(tip) + } + + // calc blob fee cap + var blobFee *big.Int + if head.ExcessBlobGas != nil { + id, err := l1client.ChainID(context.Background()) + if err != nil { + return nil, nil, nil, nil, err + } + log.Info("market blob fee info", "excess blob gas", *head.ExcessBlobGas) + blobConfig, exist := types.ChainConfigMap[id.Uint64()] + if !exist { + blobConfig = types.DefaultBlobConfig + } + blobFeeDenominator := types.GetBlobFeeDenominator(blobConfig, head.Time) + blobFee = eip4844.CalcBlobFee(*head.ExcessBlobGas, blobFeeDenominator.Uint64()) + // Set to 3x to handle blob market congestion + blobFee = new(big.Int).Mul(blobFee, big.NewInt(3)) + } + + return tip, gasFeeCap, blobFee, head, nil +} + +func buildSigInput(batch *eth.RPCRollupBatch) (*bindings.IRollupBatchSignatureInput, error) { + sigData := &bindings.IRollupBatchSignatureInput{ + SignedSequencersBitmap: common.Big0, + SequencerSets: batch.CurrentSequencerSetBytes, + Signature: []byte("0x"), + } + return sigData, nil +} + +// send tx to l1 with business logic check +func sendTx(client iface.Client, txFeeLimit uint64, tx *ethtypes.Transaction) error { + // fee limit + if txFeeLimit > 0 { + var fee uint64 + // calc tx gas fee + if tx.Type() == ethtypes.BlobTxType { + 
blobFee := new(big.Int).Mul(tx.BlobGasFeeCap(), new(big.Int).SetUint64(tx.BlobGas())) + txFee := new(big.Int).Mul(tx.GasPrice(), new(big.Int).SetUint64(tx.Gas())) + totalFee := new(big.Int).Add(blobFee, txFee) + if !totalFee.IsUint64() || totalFee.Uint64() > txFeeLimit { + return fmt.Errorf("%v:limit=%v,but got=%v", utils.ErrExceedFeeLimit, txFeeLimit, totalFee) + } + return client.SendTransaction(context.Background(), tx) + } else { + fee = tx.GasPrice().Uint64() * tx.Gas() + } + + if fee > txFeeLimit { + return fmt.Errorf("%v:limit=%v,but got=%v", utils.ErrExceedFeeLimit, txFeeLimit, fee) + } + } + + return client.SendTransaction(context.Background(), tx) +} diff --git a/tx-submitter/db/db.go b/tx-submitter/db/db.go index ce62bd6eb..13c2d34a2 100644 --- a/tx-submitter/db/db.go +++ b/tx-submitter/db/db.go @@ -68,6 +68,28 @@ func (d *Db) PutString(key, val string) error { defer d.m.Unlock() return d.db.Put([]byte(key), []byte(val)) } +func (d *Db) GetBytes(key []byte) ([]byte, error) { + d.m.Lock() + defer d.m.Unlock() + v, err := d.db.Get(key) + if err != nil { + if err == errors.ErrNotFound { + return nil, ErrKeyNotFound + } + return nil, fmt.Errorf("failed to get key from leveldb: %w", err) + } + return v, nil +} +func (d *Db) PutBytes(key, val []byte) error { + d.m.Lock() + defer d.m.Unlock() + return d.db.Put(key, val) +} +func (d *Db) Delete(key []byte) error { + d.m.Lock() + defer d.m.Unlock() + return d.db.Delete(key) +} func (d *Db) Close() error { return d.db.Close() } diff --git a/tx-submitter/db/interface.go b/tx-submitter/db/interface.go index 0bec57f6f..9b26d4795 100644 --- a/tx-submitter/db/interface.go +++ b/tx-submitter/db/interface.go @@ -6,5 +6,8 @@ type Database interface { PutString(key, val string) error GetFloat(key string) (float64, error) PutFloat(key string, val float64) error + GetBytes(key []byte) ([]byte, error) + PutBytes(key, val []byte) error + Delete(key []byte) error Close() error } diff --git a/tx-submitter/entry.go 
b/tx-submitter/entry.go index bc5675fcd..b67cf0da9 100644 --- a/tx-submitter/entry.go +++ b/tx-submitter/entry.go @@ -19,6 +19,7 @@ import ( "morph-l2/tx-submitter/l1checker" "morph-l2/tx-submitter/metrics" "morph-l2/tx-submitter/services" + "morph-l2/tx-submitter/types" "morph-l2/tx-submitter/utils" "github.com/morph-l2/externalsign" @@ -74,6 +75,7 @@ func Main() func(ctx *cli.Context) error { "max_tip", cfg.MaxTip, "max_base", cfg.MaxBaseFee, "tip_bump", cfg.TipFeeBump, + "seal_batch", cfg.SealBatch, ) ctx, cancel := context.WithCancel(context.Background()) @@ -203,9 +205,13 @@ func Main() func(ctx *cli.Context) error { // start rorator event indexer rotator.StartEventIndexer() - // blockmonitor + // block monitor bm := l1checker.NewBlockMonitor(cfg.BlockNotIncreasedThreshold, l1Client) + l2Caller, err := types.NewL2Caller(l2Clients) + if err != nil { + return err + } // new rollup service sr := services.NewRollup( ctx, @@ -225,6 +231,7 @@ func Main() func(ctx *cli.Context) error { ldb, bm, eventInfoStorage, + l2Caller, ) // metrics diff --git a/tx-submitter/event/storage.go b/tx-submitter/event/storage.go index e40b83ed8..dcec97af0 100644 --- a/tx-submitter/event/storage.go +++ b/tx-submitter/event/storage.go @@ -2,6 +2,7 @@ package event import ( "encoding/json" + "errors" "fmt" "sync" @@ -59,7 +60,7 @@ func (e *EventInfoStorage) Load() error { jsonStr, err := e.db.GetString(params.EventInfoKey) if err != nil { - if err == db.ErrKeyNotFound { + if errors.Is(err, db.ErrKeyNotFound) { // Initialize with default values if not found e.eventInfo = EventInfo{} return nil diff --git a/tx-submitter/flags/flags.go b/tx-submitter/flags/flags.go index e7de38ff6..2dedec0fe 100644 --- a/tx-submitter/flags/flags.go +++ b/tx-submitter/flags/flags.go @@ -159,7 +159,7 @@ var ( RollupInterval = cli.DurationFlag{ Name: "rollup_interval", Usage: "Interval for rollup", - Value: 500 * time.Millisecond, + Value: 5 * time.Second, EnvVar: prefixEnvVar("ROLLUP_INTERVAL"), } // finalize 
interval @@ -324,6 +324,13 @@ var ( Value: 5, EnvVar: prefixEnvVar("BLOCK_NOT_INCREASED_THRESHOLD"), } + + // seal batch + SealBatch = cli.BoolFlag{ + Name: "seal_batch", + Usage: "Enable seal batch", + EnvVar: prefixEnvVar("SEAL_BATCH"), + } ) var requiredFlags = []cli.Flag{ @@ -382,6 +389,8 @@ var optionalFlags = []cli.Flag{ EventIndexStepFlag, LeveldbPathNameFlag, BlockNotIncreasedThreshold, + + SealBatch, } // Flags contains the list of configuration options available to the binary. diff --git a/tx-submitter/go.mod b/tx-submitter/go.mod index ead642c78..2023dab85 100644 --- a/tx-submitter/go.mod +++ b/tx-submitter/go.mod @@ -2,14 +2,14 @@ module morph-l2/tx-submitter go 1.24.0 -replace github.com/tendermint/tendermint => github.com/morph-l2/tendermint v0.3.3 +replace github.com/tendermint/tendermint => github.com/morph-l2/tendermint v0.3.4-0.20260313040448-999449fd4d23 require ( github.com/consensys/gnark-crypto v0.16.0 github.com/crate-crypto/go-eth-kzg v1.4.0 github.com/holiman/uint256 v1.2.4 github.com/morph-l2/externalsign v0.3.1 - github.com/morph-l2/go-ethereum v1.10.14-0.20260211074551-4f0f6e6bd141 + github.com/morph-l2/go-ethereum v1.10.14-0.20260312125309-280bfb9cfd1d github.com/prometheus/client_golang v1.17.0 github.com/stretchr/testify v1.10.0 github.com/syndtr/goleveldb v1.0.1-0.20220614013038-64ee5596c38a @@ -70,7 +70,6 @@ require ( github.com/scroll-tech/zktrie v0.8.4 // indirect github.com/shirou/gopsutil v3.21.11+incompatible // indirect github.com/status-im/keycard-go v0.3.2 // indirect - github.com/stretchr/objx v0.5.2 // indirect github.com/supranational/blst v0.3.16-0.20250831170142-f48500c1fdbe // indirect github.com/tklauser/go-sysconf v0.3.13 // indirect github.com/tklauser/numcpus v0.7.0 // indirect diff --git a/tx-submitter/go.sum b/tx-submitter/go.sum index 725f75a79..2a49640c6 100644 --- a/tx-submitter/go.sum +++ b/tx-submitter/go.sum @@ -163,8 +163,8 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJ 
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/morph-l2/externalsign v0.3.1 h1:UYFDZFB0L85A4rDvuwLNBiGEi0kSmg9AZ2v8Q5O4dQo= github.com/morph-l2/externalsign v0.3.1/go.mod h1:b6NJ4GUiiG/gcSJsp3p8ExsIs4ZdphlrVALASnVoGJE= -github.com/morph-l2/go-ethereum v1.10.14-0.20260211074551-4f0f6e6bd141 h1:A8eygErKU6WKMipGWIemzwLeYkIGLd9yb/Ry3x+J9PQ= -github.com/morph-l2/go-ethereum v1.10.14-0.20260211074551-4f0f6e6bd141/go.mod h1:nkVzHjQWCOjvukQW8ittlwX+Xz9gmVHrP7mUi7zoHTs= +github.com/morph-l2/go-ethereum v1.10.14-0.20260312125309-280bfb9cfd1d h1:Qy3ytYw/PGnrPDAWen1MsMUhUXclk1F2Q36A07+bBv4= +github.com/morph-l2/go-ethereum v1.10.14-0.20260312125309-280bfb9cfd1d/go.mod h1:nkVzHjQWCOjvukQW8ittlwX+Xz9gmVHrP7mUi7zoHTs= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= @@ -231,7 +231,6 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+ github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= diff --git a/tx-submitter/iface/client.go b/tx-submitter/iface/client.go index 0c275bec3..6fafffdac 100644 --- a/tx-submitter/iface/client.go +++ b/tx-submitter/iface/client.go @@ -2,6 +2,7 @@ package iface import ( "context" + "errors" 
"math/big" "github.com/morph-l2/go-ethereum" @@ -32,3 +33,232 @@ type L2Client interface { GetBlockTraceByNumber(ctx context.Context, number *big.Int) (*types.BlockTrace, error) GetRollupBatchByIndex(ctx context.Context, batchIndex uint64) (*eth.RPCRollupBatch, error) } + +type L2Clients struct { + Clients []L2Client +} + +// getFirstClient returns the first available client, or an error if no clients are available +func (c *L2Clients) getFirstClient() (L2Client, error) { + if len(c.Clients) == 0 { + return nil, errors.New("no L2 clients available") + } + return c.Clients[0], nil +} + +// tryAllClients tries all clients until one succeeds, returns the last error if all fail +func (c *L2Clients) tryAllClients(fn func(L2Client) error) error { + if len(c.Clients) == 0 { + return errors.New("no L2 clients available") + } + var lastErr error + for _, client := range c.Clients { + if err := fn(client); err == nil { + return nil + } else { + lastErr = err + } + } + return lastErr +} + +// CodeAt implements bind.ContractCaller +func (c *L2Clients) CodeAt(ctx context.Context, contract common.Address, blockNumber *big.Int) ([]byte, error) { + var result []byte + err := c.tryAllClients(func(client L2Client) error { + var err error + result, err = client.CodeAt(ctx, contract, blockNumber) + return err + }) + return result, err +} + +// CallContract implements bind.ContractCaller +func (c *L2Clients) CallContract(ctx context.Context, call ethereum.CallMsg, blockNumber *big.Int) ([]byte, error) { + var result []byte + err := c.tryAllClients(func(client L2Client) error { + var err error + result, err = client.CallContract(ctx, call, blockNumber) + return err + }) + return result, err +} + +// PendingCodeAt implements bind.PendingContractCaller and bind.ContractTransactor +func (c *L2Clients) PendingCodeAt(ctx context.Context, account common.Address) ([]byte, error) { + client, err := c.getFirstClient() + if err != nil { + return nil, err + } + return client.PendingCodeAt(ctx, 
account) +} + +// PendingNonceAt implements bind.ContractTransactor +func (c *L2Clients) PendingNonceAt(ctx context.Context, account common.Address) (uint64, error) { + client, err := c.getFirstClient() + if err != nil { + return 0, err + } + return client.PendingNonceAt(ctx, account) +} + +// SuggestGasPrice implements bind.ContractTransactor +func (c *L2Clients) SuggestGasPrice(ctx context.Context) (*big.Int, error) { + client, err := c.getFirstClient() + if err != nil { + return nil, err + } + return client.SuggestGasPrice(ctx) +} + +// SuggestGasTipCap implements bind.ContractTransactor +func (c *L2Clients) SuggestGasTipCap(ctx context.Context) (*big.Int, error) { + client, err := c.getFirstClient() + if err != nil { + return nil, err + } + return client.SuggestGasTipCap(ctx) +} + +// EstimateGas implements bind.ContractTransactor +func (c *L2Clients) EstimateGas(ctx context.Context, call ethereum.CallMsg) (uint64, error) { + client, err := c.getFirstClient() + if err != nil { + return 0, err + } + return client.EstimateGas(ctx, call) +} + +// SendTransaction implements bind.ContractTransactor +func (c *L2Clients) SendTransaction(ctx context.Context, tx *types.Transaction) error { + client, err := c.getFirstClient() + if err != nil { + return err + } + return client.SendTransaction(ctx, tx) +} + +// FilterLogs implements bind.ContractFilterer +func (c *L2Clients) FilterLogs(ctx context.Context, query ethereum.FilterQuery) ([]types.Log, error) { + var result []types.Log + err := c.tryAllClients(func(client L2Client) error { + var err error + result, err = client.FilterLogs(ctx, query) + return err + }) + return result, err +} + +// SubscribeFilterLogs implements bind.ContractFilterer +func (c *L2Clients) SubscribeFilterLogs(ctx context.Context, query ethereum.FilterQuery, ch chan<- types.Log) (ethereum.Subscription, error) { + client, err := c.getFirstClient() + if err != nil { + return nil, err + } + return client.SubscribeFilterLogs(ctx, query, ch) +} + +// 
TransactionByHash implements Client +func (c *L2Clients) TransactionByHash(ctx context.Context, hash common.Hash) (tx *types.Transaction, isPending bool, err error) { + err = c.tryAllClients(func(client L2Client) error { + var e error + tx, isPending, e = client.TransactionByHash(ctx, hash) + return e + }) + return tx, isPending, err +} + +// BlockByNumber implements Client +func (c *L2Clients) BlockByNumber(ctx context.Context, number *big.Int) (*types.Block, error) { + var result *types.Block + err := c.tryAllClients(func(client L2Client) error { + var err error + result, err = client.BlockByNumber(ctx, number) + return err + }) + return result, err +} + +// NonceAt implements Client +func (c *L2Clients) NonceAt(ctx context.Context, account common.Address, blockNumber *big.Int) (uint64, error) { + var result uint64 + err := c.tryAllClients(func(client L2Client) error { + var err error + result, err = client.NonceAt(ctx, account, blockNumber) + return err + }) + return result, err +} + +// TransactionReceipt implements Client +func (c *L2Clients) TransactionReceipt(ctx context.Context, txHash common.Hash) (*types.Receipt, error) { + var result *types.Receipt + err := c.tryAllClients(func(client L2Client) error { + var err error + result, err = client.TransactionReceipt(ctx, txHash) + return err + }) + return result, err +} + +// BalanceAt implements Client +func (c *L2Clients) BalanceAt(ctx context.Context, account common.Address, blockNumber *big.Int) (*big.Int, error) { + var result *big.Int + err := c.tryAllClients(func(client L2Client) error { + var err error + result, err = client.BalanceAt(ctx, account, blockNumber) + return err + }) + return result, err +} + +// HeaderByNumber implements Client and bind.ContractTransactor +func (c *L2Clients) HeaderByNumber(ctx context.Context, number *big.Int) (*types.Header, error) { + var result *types.Header + err := c.tryAllClients(func(client L2Client) error { + var err error + result, err = client.HeaderByNumber(ctx, 
number) + return err + }) + return result, err +} + +// BlockNumber implements Client +func (c *L2Clients) BlockNumber(ctx context.Context) (uint64, error) { + var result uint64 + err := c.tryAllClients(func(client L2Client) error { + var err error + result, err = client.BlockNumber(ctx) + return err + }) + return result, err +} + +// GetBlockTraceByNumber implements L2Client +func (c *L2Clients) GetBlockTraceByNumber(ctx context.Context, number *big.Int) (*types.BlockTrace, error) { + var result *types.BlockTrace + err := c.tryAllClients(func(client L2Client) error { + var err error + result, err = client.GetBlockTraceByNumber(ctx, number) + return err + }) + return result, err +} + +// GetRollupBatchByIndex implements L2Client +func (c *L2Clients) GetRollupBatchByIndex(ctx context.Context, batchIndex uint64) (*eth.RPCRollupBatch, error) { + var result *eth.RPCRollupBatch + err := c.tryAllClients(func(client L2Client) error { + var err error + result, err = client.GetRollupBatchByIndex(ctx, batchIndex) + if err != nil { + return err + } + if result != nil && len(result.Signatures) > 0 { + return nil + } + return nil + }) + + return result, err +} diff --git a/tx-submitter/iface/rollup.go b/tx-submitter/iface/rollup.go index 1b2ea85a0..afa7d46a2 100644 --- a/tx-submitter/iface/rollup.go +++ b/tx-submitter/iface/rollup.go @@ -17,20 +17,36 @@ type IRollup interface { FinalizeBatch(*bind.TransactOpts, []byte) (*types.Transaction, error) BatchInsideChallengeWindow(opts *bind.CallOpts, batchIndex *big.Int) (bool, error) BatchExist(opts *bind.CallOpts, batchIndex *big.Int) (bool, error) + CommittedBatches(opts *bind.CallOpts, batchIndex *big.Int) ([32]byte, error) + BatchDataStore(opts *bind.CallOpts, batchIndex *big.Int) (struct { + OriginTimestamp *big.Int + FinalizeTimestamp *big.Int + BlockNumber *big.Int + SignedSequencersBitmap *big.Int + }, error) + + FilterCommitBatch(opts *bind.FilterOpts, batchIndex []*big.Int, batchHash [][32]byte) 
(*bindings.RollupCommitBatchIterator, error) + FilterFinalizeBatch(opts *bind.FilterOpts, batchIndex []*big.Int, batchHash [][32]byte) (*bindings.RollupFinalizeBatchIterator, error) } // IL2Sequencer is the interface for the sequencer on L2 type IL2Sequencer interface { - UpdateTime(opts *bind.CallOpts) (*big.Int, error) - GetSequencerSet2() ([]common.Address, error) + SequencerSetVerifyHash(opts *bind.CallOpts) ([32]byte, error) } type IL2Gov interface { RollupEpoch(opts *bind.CallOpts) (*big.Int, error) + BatchBlockInterval(opts *bind.CallOpts) (*big.Int, error) + BatchTimeout(opts *bind.CallOpts) (*big.Int, error) } + type IL1Staking interface { IsStaker(opts *bind.CallOpts, addr common.Address) (bool, error) GetStakersBitmap(opts *bind.CallOpts, _stakers []common.Address) (*big.Int, error) GetActiveStakers(opts *bind.CallOpts) ([]common.Address, error) GetStakers(opts *bind.CallOpts) ([255]common.Address, error) } + +type IL2MessagePasser interface { + GetTreeRoot(opts *bind.CallOpts) ([32]byte, error) +} diff --git a/tx-submitter/metrics/metrics.go b/tx-submitter/metrics/metrics.go index 6ac53b617..be9507270 100644 --- a/tx-submitter/metrics/metrics.go +++ b/tx-submitter/metrics/metrics.go @@ -23,6 +23,7 @@ type Metrics struct { LastCommittedBatch prometheus.Gauge LastFinalizedBatch prometheus.Gauge HasPendingFinalizeBatch prometheus.Gauge + LastCacheBatchIndex prometheus.Gauge reorgs prometheus.Counter reorgDepthVal uint64 reorgCountVal uint64 @@ -72,6 +73,10 @@ func NewMetrics() *Metrics { Name: "tx_submitter_last_finalized_batch", Help: "Latest batch finalized by the submitter", }), + LastCacheBatchIndex: prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "tx_submitter_last_batch_index", + Help: "Latest batch index by the submitter", + }), HasPendingFinalizeBatch: prometheus.NewGauge(prometheus.GaugeOpts{ Name: "tx_submitter_has_pending_finalize_batch", Help: "Whether there are batches pending finalization (1 = yes, 0 = no)", @@ -101,6 +106,7 @@ func 
NewMetrics() *Metrics { _ = prometheus.Register(m.IndexerBlockProcessed) _ = prometheus.Register(m.LastCommittedBatch) _ = prometheus.Register(m.LastFinalizedBatch) + _ = prometheus.Register(m.LastCacheBatchIndex) _ = prometheus.Register(m.HasPendingFinalizeBatch) _ = prometheus.Register(m.reorgs) _ = prometheus.Register(m.confirmedTxs) @@ -150,6 +156,11 @@ func (m *Metrics) SetLastFinalizedBatch(index uint64) { m.LastFinalizedBatch.Set(float64(index)) } +// SetLastCacheBatchIndex sets the last batch index metric +func (m *Metrics) SetLastCacheBatchIndex(index uint64) { + m.LastCacheBatchIndex.Set(float64(index)) +} + // SetHasPendingFinalizeBatch sets whether there are batches pending finalization // hasPending should be true if there are pending batches, false otherwise func (m *Metrics) SetHasPendingFinalizeBatch(hasPending bool) { diff --git a/tx-submitter/mock/db.go b/tx-submitter/mock/db.go index 2eb44844f..c6b25c809 100644 --- a/tx-submitter/mock/db.go +++ b/tx-submitter/mock/db.go @@ -50,6 +50,31 @@ func (d *MockDB) PutFloat(key string, val float64) error { return nil } +func (d *MockDB) GetBytes(key []byte) ([]byte, error) { + d.m.RLock() + defer d.m.RUnlock() + if val, ok := d.store[string(key)]; ok { + return []byte(val), nil + } + return nil, db.ErrKeyNotFound +} + +func (d *MockDB) PutBytes(key, val []byte) error { + d.m.Lock() + defer d.m.Unlock() + keyStr := string(key) + d.store[keyStr] = string(val) + return nil +} + +func (d *MockDB) Delete(key []byte) error { + d.m.Lock() + defer d.m.Unlock() + keyStr := string(key) + delete(d.store, keyStr) + return nil +} + func (d *MockDB) Close() error { return nil } diff --git a/tx-submitter/mock/rollup.go b/tx-submitter/mock/rollup.go index 6dd534ffd..dbf424a59 100644 --- a/tx-submitter/mock/rollup.go +++ b/tx-submitter/mock/rollup.go @@ -60,6 +60,41 @@ func (m *MockRollup) BatchExist(opts *bind.CallOpts, batchIndex *big.Int) (bool, return m.batchExists, nil } +// CommittedBatches implements IRollup +func 
(m *MockRollup) CommittedBatches(opts *bind.CallOpts, batchIndex *big.Int) ([32]byte, error) { + return [32]byte{}, nil +} + +// BatchDataStore implements IRollup +func (m *MockRollup) BatchDataStore(opts *bind.CallOpts, batchIndex *big.Int) (struct { + OriginTimestamp *big.Int + FinalizeTimestamp *big.Int + BlockNumber *big.Int + SignedSequencersBitmap *big.Int +}, error) { + return struct { + OriginTimestamp *big.Int + FinalizeTimestamp *big.Int + BlockNumber *big.Int + SignedSequencersBitmap *big.Int + }{ + OriginTimestamp: big.NewInt(0), + FinalizeTimestamp: big.NewInt(0), + BlockNumber: big.NewInt(0), + SignedSequencersBitmap: big.NewInt(0), + }, nil +} + +// FilterCommitBatch implements IRollup +func (m *MockRollup) FilterCommitBatch(opts *bind.FilterOpts, batchIndex []*big.Int, batchHash [][32]byte) (*bindings.RollupCommitBatchIterator, error) { + return nil, nil +} + +// FilterFinalizeBatch implements IRollup +func (m *MockRollup) FilterFinalizeBatch(opts *bind.FilterOpts, batchIndex []*big.Int, batchHash [][32]byte) (*bindings.RollupFinalizeBatchIterator, error) { + return nil, nil +} + // SetLastCommittedBatchIndex sets the mock value for LastCommittedBatchIndex func (m *MockRollup) SetLastCommittedBatchIndex(index *big.Int) { m.lastCommittedBatchIndex = index diff --git a/tx-submitter/services/batch_fetcher.go b/tx-submitter/services/batch_fecher.go similarity index 100% rename from tx-submitter/services/batch_fetcher.go rename to tx-submitter/services/batch_fecher.go diff --git a/tx-submitter/services/rollup.go b/tx-submitter/services/rollup.go index 3999477b2..e9d6b875e 100644 --- a/tx-submitter/services/rollup.go +++ b/tx-submitter/services/rollup.go @@ -26,6 +26,7 @@ import ( "github.com/morph-l2/go-ethereum/rpc" "morph-l2/bindings/bindings" + "morph-l2/tx-submitter/batch" "morph-l2/tx-submitter/constants" "morph-l2/tx-submitter/db" "morph-l2/tx-submitter/event" @@ -76,7 +77,8 @@ type Rollup struct { // collectedL1FeeSum collectedL1FeeSum float64 // 
batchcache - batchCache *types.BatchCache + batchCache *batch.BatchCache + batchCacheLegacy *types.BatchCacheLegacy bm *l1checker.BlockMonitor eventInfoStorage *event.EventInfoStorage reorgDetector iface.IReorgDetector @@ -102,8 +104,8 @@ func NewRollup( ldb *db.Db, bm *l1checker.BlockMonitor, eventInfoStorage *event.EventInfoStorage, + l2Caller *types.L2Caller, ) *Rollup { - batchFetcher := NewBatchFetcher(l2Clients) reorgDetector := NewReorgDetector(l1, metrics) r := &Rollup{ ctx: ctx, @@ -121,13 +123,17 @@ func NewRollup( cfg: cfg, signer: ethtypes.LatestSignerForChainID(chainId), externalRsaPriv: rsaPriv, - batchCache: types.NewBatchCache(batchFetcher), + batchCache: batch.NewBatchCache(nil, l1, l2Clients, rollup, l2Caller, ldb), ldb: ldb, bm: bm, eventInfoStorage: eventInfoStorage, reorgDetector: reorgDetector, ChainConfigMap: types.ChainConfigMap, } + if !cfg.SealBatch { + fetcher := NewBatchFetcher(l2Clients) + r.batchCacheLegacy = types.NewBatchCacheLegacy(fetcher) + } return r } @@ -166,7 +172,7 @@ func (r *Rollup) Start() error { // metrics go utils.Loop(r.ctx, 10*time.Second, func() { - // get balacnce of wallet + // get balance of wallet balance, err := r.L1Client.BalanceAt(context.Background(), r.WalletAddr(), nil) if err != nil { log.Error("get wallet balance error", "error", err) @@ -228,7 +234,6 @@ func (r *Rollup) Start() error { }) if r.cfg.Finalize { - go utils.Loop(r.ctx, r.cfg.FinalizeInterval, func() { r.rollupFinalizeMu.Lock() defer r.rollupFinalizeMu.Unlock() @@ -254,6 +259,40 @@ func (r *Rollup) Start() error { } } }) + + if r.cfg.SealBatch { + var batchCacheSyncMu sync.Mutex + + go func() { + batchCacheSyncMu.Lock() + defer batchCacheSyncMu.Unlock() + for { + + if err = r.batchCache.InitAndSyncFromDatabase(); err != nil { + log.Error("init and sync from database failed, wait for the next try", "error", err) + time.Sleep(5 * time.Second) + continue + } + break + } + }() + + go utils.Loop(r.ctx, r.cfg.TxProcessInterval, func() { + 
batchCacheSyncMu.Lock() + defer batchCacheSyncMu.Unlock() + if err = r.batchCache.AssembleCurrentBatchHeader(); err != nil { + log.Error("assemble current batch failed, wait for the next try", "error", err) + return + } + if index, err := r.batchCache.LatestBatchIndex(); err != nil { + log.Error("cannot get the latest batch index from batch cache", "error", err) + return + } else { + r.metrics.SetLastCacheBatchIndex(index) + } + }) + } + return nil } @@ -271,8 +310,8 @@ func (r *Rollup) ProcessTx() error { } // Check if this submitter should process transactions - if err := r.checkSubmitterTurn(); err != nil { - if err == errNotMyTurn { + if err = r.checkSubmitterTurn(); err != nil { + if errors.Is(err, errNotMyTurn) { // Get current submitter index for logging activeSubmitter, activeIndex, _ := r.rotator.CurrentSubmitter(r.L2Clients, r.Staking) @@ -454,9 +493,20 @@ func (r *Rollup) updateFeeMetrics(tx *ethtypes.Transaction, receipt *ethtypes.Re // Calculate and update L1 fee metrics batchIndex := utils.ParseParentBatchIndex(tx.Data()) + 1 - batch, ok := r.batchCache.Get(batchIndex) - if ok { - collectedL1Fee := new(big.Float).Quo(new(big.Float).SetInt(batch.CollectedL1Fee.ToInt()), new(big.Float).SetInt(big.NewInt(params.Ether))) + var rollupBatch *eth.RPCRollupBatch + exist := true + if r.cfg.SealBatch { + rollupBatch, err = r.batchCache.Get(batchIndex) + } else { + rollupBatch, exist = r.batchCacheLegacy.Get(batchIndex) + } + if err != nil || !exist || rollupBatch == nil { + log.Warn("rollupBatch not found in cache", "batch_index", batchIndex, "error", err) + } else { + if rollupBatch.CollectedL1Fee == nil { + return nil + } + collectedL1Fee := new(big.Float).Quo(new(big.Float).SetInt(rollupBatch.CollectedL1Fee.ToInt()), new(big.Float).SetInt(big.NewInt(params.Ether))) collectedL1FeeFloat, _ := collectedL1Fee.Float64() // Update metrics @@ -464,16 +514,13 @@ func (r *Rollup) updateFeeMetrics(tx *ethtypes.Transaction, receipt *ethtypes.Re 
r.metrics.CollectedL1FeeSum.Add(collectedL1FeeFloat) // Update leveldb - err := r.ldb.PutFloat(collectedL1FeeSumKey, r.collectedL1FeeSum) + err = r.ldb.PutFloat(collectedL1FeeSumKey, r.collectedL1FeeSum) if err != nil { log.Error("failed to update collected L1 fee sum in leveldb", "error", err) } - log.Info("Updated L1 fee metrics", "batch_index", batchIndex, "l1_fee_eth", collectedL1FeeFloat) - } else { - log.Warn("batch not found in cache", "batch_index", batchIndex) } } else if method == constants.MethodFinalizeBatch { r.finalizeFeeSum += txFeeFloat @@ -681,24 +728,37 @@ func (r *Rollup) handleDiscardedTx(txRecord *types.TxRecord, tx *ethtypes.Transa replacedTx, err := r.ReSubmitTx(true, tx) if err != nil { if utils.ErrStringMatch(err, core.ErrNonceTooLow) { - // Transaction was probably confirmed in a reorg + // The tx was probably confirmed in a reorg log.Info("Discarded transaction removed (nonce too low)", "hash", tx.Hash().String(), "nonce", tx.Nonce(), "method", method) - if err := r.pendingTxs.Remove(tx.Hash()); err != nil { + if err = r.pendingTxs.Remove(tx.Hash()); err != nil { log.Error("failed to remove transaction", "hash", tx.Hash().String(), "error", err) } return nil } - return fmt.Errorf("resend discarded tx: %w", err) + + // If resubmit failed, try to replace it with a simple transfer transaction + log.Warn("Resubmit failed, attempting to replace with simple transfer transaction", + "hash", tx.Hash().String(), + "nonce", tx.Nonce(), + "error", err) + + replacedTx, err = r.createReplacementTransferTx(tx) + if err != nil { + return fmt.Errorf("failed to create replacement transfer tx: %w", err) + } } - if err := r.pendingTxs.Remove(tx.Hash()); err != nil { + if err = r.pendingTxs.Remove(tx.Hash()); err != nil { log.Error("failed to remove transaction", "hash", tx.Hash().String(), "error", err) } - if err := r.pendingTxs.Add(replacedTx); err != nil { - log.Error("failed to add replaced transaction", "hash", replacedTx.Hash().String(), "error", err) 
+ record := r.pendingTxs.GetTxRecord(replacedTx.Hash()) + if record == nil { + if err = r.pendingTxs.Add(replacedTx); err != nil { + log.Error("failed to add replaced transaction", "hash", replacedTx.Hash().String(), "error", err) + } } log.Info("Successfully resubmitted discarded transaction", "old_tx", tx.Hash().String(), @@ -715,7 +775,7 @@ func (r *Rollup) handleConfirmedTx(txRecord *types.TxRecord, tx *ethtypes.Transa return fmt.Errorf("get tx status error: %w", err) } - // Get current block number for confirmation count + // Get the current block number for confirmation count currentBlock, err := r.L1Client.BlockNumber(context.Background()) if err != nil { return fmt.Errorf("get current block number error: %w", err) @@ -740,8 +800,6 @@ func (r *Rollup) handleConfirmedTx(txRecord *types.TxRecord, tx *ethtypes.Transa if batchIndex <= lastCommitted.Uint64() { // Another submitter has already committed this batch log.Warn("Batch commit transaction failed but batch is already committed by another submitter", "batch_index", batchIndex, "tx_hash", tx.Hash().String()) - // Clean up batch from cache since it's already committed - r.batchCache.Delete(batchIndex) } else { // Contract bug detected - batch is not committed by anyone else but our transaction failed log.Warn("Critical error: batch commit transaction failed and batch is not committed by anyone", "batch_index", batchIndex, "tx_hash", tx.Hash().String()) @@ -763,20 +821,27 @@ func (r *Rollup) handleConfirmedTx(txRecord *types.TxRecord, tx *ethtypes.Transa } } else { // Transaction succeeded // Get current block number for confirmation count only for successful transactions - currentBlock, err := r.L1Client.BlockNumber(context.Background()) + currentBlock, err = r.L1Client.BlockNumber(context.Background()) if err != nil { return fmt.Errorf("get current block number error: %w", err) } - confirmations := currentBlock - status.receipt.BlockNumber.Uint64() + confirmations = currentBlock - 
status.receipt.BlockNumber.Uint64() if method == constants.MethodCommitBatch { batchIndex := utils.ParseParentBatchIndex(tx.Data()) + 1 log.Info("Successfully committed batch", "batch_index", batchIndex, "tx_hash", tx.Hash().String(), "block_number", status.receipt.BlockNumber.Uint64(), "gas_used", status.receipt.GasUsed, "confirm", confirmations) - - // Clean up batch from cache after successful commit - r.batchCache.Delete(batchIndex) } else if method == constants.MethodFinalizeBatch { batchIndex := utils.ParseFBatchIndex(tx.Data()) + if batchIndex > 0 { + if r.cfg.SealBatch { + err = r.batchCache.Delete(batchIndex - 1) + if err != nil { + log.Error("failed to delete batch", "batch_index", batchIndex, "tx_hash", tx.Hash().String()) + } + } else { + r.batchCacheLegacy.Delete(batchIndex - 1) + } + } log.Info("Successfully finalized batch", "batch_index", batchIndex, "tx_hash", tx.Hash().String(), "block_number", status.receipt.BlockNumber.Uint64(), "gas_used", status.receipt.GasUsed, "confirm", confirmations) } } @@ -808,48 +873,60 @@ func (r *Rollup) finalize() error { } log.Info("finalize info", - "last_fianlzied", lastFinalized, + "last_finalized", lastFinalized, "last_committed", lastCommitted, "finalize_index", target, ) - // batch exist + // rollupBatch exists existed, err := r.Rollup.BatchExist(nil, target) if err != nil { - log.Error("query batch exist", "err", err) + log.Error("query rollupBatch exist", "err", err) return err } if !existed { - log.Warn("finalized batch not existed") + log.Warn("finalized rollupBatch not existed") return nil } - // in challenge window + // inside challenge window inWindow, err := r.Rollup.BatchInsideChallengeWindow(nil, target) if err != nil { - return fmt.Errorf("get batch inside challenge window error:%v", err) + return fmt.Errorf("get rollupBatch inside challenge window error:%v", err) } if inWindow { - log.Info("batch inside challenge window, wait") + log.Info("rollupBatch inside challenge window, wait") return nil } - 
// get next batch - nextBatchIndex := target.Uint64() + 1 - - batch, err := GetRollupBatchByIndex(nextBatchIndex, r.L2Clients) - if err != nil { - log.Error("get next batch by index error", - "batch_index", nextBatchIndex, - ) - return fmt.Errorf("get next batch by index err:%v", err) - } - if batch == nil { - log.Info("next batch is nil,wait next batch header to finalize", "next_batch_index", nextBatchIndex) - return nil + var headerBytes []byte + if r.cfg.SealBatch { + // get batch header + rollupBatchHeader, exist := r.batchCache.GetSealedBatchHeader(target.Uint64()) + if !exist { + log.Warn("get rollupBatch by index failed, rollupBatch not found", + "batch_index", target.Uint64(), + ) + return nil + } + if rollupBatchHeader == nil { + log.Info("next rollupBatch is nil,wait rollupBatch header to finalize", "batch_index", target.Uint64()) + return nil + } + headerBytes = rollupBatchHeader.Bytes() + } else { + nextBatchIndex := target.Uint64() + 1 + nextBatch, exist := r.batchCacheLegacy.Get(nextBatchIndex) + if !exist { + log.Warn("get next rollupBatch by index failed, rollupBatch not found", + "batch_index", nextBatchIndex, + ) + return nil + } + headerBytes = []byte(nextBatch.ParentBatchHeader) } // calldata - calldata, err := r.abi.Pack("finalizeBatch", []byte(batch.ParentBatchHeader)) + calldata, err := r.abi.Pack("finalizeBatch", headerBytes) if err != nil { return fmt.Errorf("pack finalizeBatch error:%v", err) } @@ -932,17 +1009,15 @@ func (r *Rollup) finalize() error { } return fmt.Errorf("send tx error:%v", err.Error()) } else { - log.Info("finalzie tx sent") + log.Info("finalize tx sent") r.pendingTxs.SetNonce(signedTx.Nonce()) r.pendingTxs.SetPFinalize(target.Uint64()) - if err := r.pendingTxs.Add(signedTx); err != nil { + if err = r.pendingTxs.Add(signedTx); err != nil { log.Error("failed to add signed transaction", "hash", signedTx.Hash().String(), "error", err) } } - return nil - } func (r *Rollup) rollup() error { @@ -961,7 +1036,7 @@ func (r 
*Rollup) rollup() error { "blocks_processed", r.eventInfoStorage.BlockProcessed(), "last_event_time", r.eventInfoStorage.BlockTime()) - // get current blocknumber + // get current block number blockNumber, err := r.L1Client.BlockNumber(context.Background()) if err != nil { return fmt.Errorf("failed to get block number in rollup: %w", err) @@ -1035,15 +1110,12 @@ func (r *Rollup) rollup() error { cindexBig, err := r.Rollup.LastCommittedBatchIndex(nil) if err != nil { - return fmt.Errorf("get last committed batch index error:%v", err) + return fmt.Errorf("get last committed rpcRollupBatch index error:%v", err) } cindex := cindexBig.Uint64() - - switch { - case r.pendingTxs.pindex != 0: + batchIndex = cindex + 1 + if len(r.pendingTxs.getAll()) != 0 && r.pendingTxs.pindex != 0 { batchIndex = max(cindex, r.pendingTxs.pindex) + 1 - default: - batchIndex = cindex + 1 } log.Debug("Batch status", @@ -1056,26 +1128,31 @@ func (r *Rollup) rollup() error { "batch_index", batchIndex) return nil } - - batch, ok := r.batchCache.Get(batchIndex) - if !ok { + var rpcRollupBatch *eth.RPCRollupBatch + exist := true + if r.cfg.SealBatch { + rpcRollupBatch, err = r.batchCache.Get(batchIndex) + } else { + rpcRollupBatch, exist = r.batchCacheLegacy.Get(batchIndex) + } + if err != nil || !exist || rpcRollupBatch == nil { log.Info("Batch not found in cache", "batch_index", batchIndex) return nil } - signature, err := r.buildSignatureInput(batch) + signature, err := r.buildSignatureInput(rpcRollupBatch) if err != nil { return err } rollupBatch := bindings.IRollupBatchDataInput{ - Version: uint8(batch.Version), - ParentBatchHeader: batch.ParentBatchHeader, - LastBlockNumber: batch.LastBlockNumber, - NumL1Messages: batch.NumL1Messages, - PrevStateRoot: batch.PrevStateRoot, - PostStateRoot: batch.PostStateRoot, - WithdrawalRoot: batch.WithdrawRoot, + Version: uint8(rpcRollupBatch.Version), + ParentBatchHeader: rpcRollupBatch.ParentBatchHeader, + LastBlockNumber: rpcRollupBatch.LastBlockNumber, 
+ NumL1Messages: rpcRollupBatch.NumL1Messages, + PrevStateRoot: rpcRollupBatch.PrevStateRoot, + PostStateRoot: rpcRollupBatch.PostStateRoot, + WithdrawalRoot: rpcRollupBatch.WithdrawRoot, } // tip and cap @@ -1093,9 +1170,9 @@ func (r *Rollup) rollup() error { gas, err := r.EstimateGas(r.WalletAddr(), r.rollupAddr, calldata, gasFeeCap, tip) if err != nil { log.Warn("Estimate gas failed", "batch_index", batchIndex, "error", err) - // Use rough estimation based on L1 message count + // Use estimation based on L1 message count if r.cfg.RoughEstimateGas { - msgcnt := utils.ParseL1MessageCnt(batch.BlockContexts) + msgcnt := utils.ParseL1MessageCnt(rpcRollupBatch.BlockContexts) gas = r.RoughRollupGasEstimate(msgcnt) log.Info("Using rough gas estimation", "batch_index", batchIndex, @@ -1116,7 +1193,7 @@ func (r *Rollup) rollup() error { } // Create and sign transaction - tx, err := r.createRollupTx(batch, nonce, gas, tip, gasFeeCap, blobFee, calldata, head) + tx, err := r.createRollupTx(rpcRollupBatch, nonce, gas, tip, gasFeeCap, blobFee, calldata, head) if err != nil { return fmt.Errorf("failed to create rollup tx: %w", err) } @@ -1232,28 +1309,8 @@ func (r *Rollup) logTxInfo(tx *ethtypes.Transaction, batchIndex uint64) { } func (r *Rollup) buildSignatureInput(batch *eth.RPCRollupBatch) (*bindings.IRollupBatchSignatureInput, error) { - blsSignatures := batch.Signatures - if len(blsSignatures) == 0 { - return nil, fmt.Errorf("invalid batch signature") - } - signers := make([]common.Address, len(blsSignatures)) - for i, bz := range blsSignatures { - if len(bz.Signature) > 0 { - signers[i] = bz.Signer - } - } - - // query bitmap of signers - bm, err := r.Staking.GetStakersBitmap(nil, signers) - if err != nil { - return nil, fmt.Errorf("query stakers bitmap error:%v", err) - } - if bm == nil { - return nil, errors.New("stakers bitmap is nil") - } - sigData := bindings.IRollupBatchSignatureInput{ - SignedSequencersBitmap: bm, + SignedSequencersBitmap: common.Big0, 
SequencerSets: batch.CurrentSequencerSetBytes, Signature: []byte("0x"), } @@ -1431,7 +1488,6 @@ func GetEpoch(addr common.Address, clients []iface.L2Client) (*big.Int, error) { // query sequencer set update time from sequencer contract on l2 func GetSequencerSetUpdateTime(addr common.Address, clients []iface.L2Client) (*big.Int, error) { - if len(clients) < 1 { return nil, fmt.Errorf("no client to query sequencer set update time") } @@ -1544,7 +1600,7 @@ func (r *Rollup) SendTx(tx *ethtypes.Transaction) error { // after send tx // add to pending txs if r.pendingTxs != nil { - if err := r.pendingTxs.Add(tx); err != nil { + if err = r.pendingTxs.Add(tx); err != nil { log.Error("failed to add transaction", "hash", tx.Hash().String(), "error", err) } } @@ -1655,12 +1711,15 @@ func (r *Rollup) ReSubmitTx(resend bool, tx *ethtypes.Transaction) (*ethtypes.Tr case ethtypes.BlobTxType: sidecar := tx.BlobTxSidecar() version := types.DetermineBlobVersion(head, r.chainId.Uint64()) - if sidecar.Version == ethtypes.BlobSidecarVersion0 && version == ethtypes.BlobSidecarVersion1 { - err = types.BlobSidecarVersionToV1(sidecar) - if err != nil { - return nil, err + if sidecar != nil { + if sidecar.Version == ethtypes.BlobSidecarVersion0 && version == ethtypes.BlobSidecarVersion1 { + err = types.BlobSidecarVersionToV1(sidecar) + if err != nil { + return nil, err + } } } + newTx = ethtypes.NewTx(ðtypes.BlobTx{ ChainID: uint256.MustFromBig(tx.ChainId()), Nonce: tx.Nonce(), @@ -1744,7 +1803,6 @@ func (r *Rollup) BumpGas(origin uint64) uint64 { } } -// for rollup func (r *Rollup) RoughRollupGasEstimate(msgcnt uint64) uint64 { return r.cfg.RollupTxGasBase + msgcnt*r.cfg.RollupTxGasPerL1Msg } @@ -1930,3 +1988,94 @@ func (r *Rollup) CancelTx(tx *ethtypes.Transaction) (*ethtypes.Transaction, erro return newTx, nil } + +// createReplacementTransferTx creates a simple transfer transaction with the same nonce +// to replace the original transaction. This is used when resubmission fails. 
+func (r *Rollup) createReplacementTransferTx(tx *ethtypes.Transaction) (*ethtypes.Transaction, error) { + if tx == nil { + return nil, errors.New("nil tx") + } + + log.Info("creating replacement transfer transaction", + "original_hash", tx.Hash().String(), + "nonce", tx.Nonce(), + ) + + // Get current gas prices + tip, gasFeeCap, _, head, err := r.GetGasTipAndCap() + if err != nil { + return nil, fmt.Errorf("get gas tip and cap error: %w", err) + } + + // Bump gas prices to ensure replacement + bumpedFeeCap := calcThresholdValue(tx.GasFeeCap(), false) + bumpedTip := calcThresholdValue(tx.GasTipCap(), false) + + if bumpedTip.Cmp(tip) > 0 { + tip = bumpedTip + } + if bumpedFeeCap.Cmp(gasFeeCap) > 0 { + gasFeeCap = bumpedFeeCap + } + + // Ensure minimum tip if configured + if r.cfg.MinTip > 0 && tip.Cmp(big.NewInt(int64(r.cfg.MinTip))) < 0 { + log.Info("replacement tip is too low, update tip to min tip", "tip", tip, "min_tip", r.cfg.MinTip) + tip = big.NewInt(int64(r.cfg.MinTip)) + if head.BaseFee != nil { + recalculatedFeecap := new(big.Int).Add( + tip, + new(big.Int).Mul(head.BaseFee, big.NewInt(2)), + ) + if recalculatedFeecap.Cmp(gasFeeCap) > 0 { + gasFeeCap = recalculatedFeecap + } + } + } + + // Get sender address (send to self) + senderAddr := r.WalletAddr() + + // Create a simple transfer transaction (send to self with empty calldata) + // Use minimum gas limit for a simple transfer (21000) + transferGas := uint64(21000) + + newTx := ethtypes.NewTx(ðtypes.DynamicFeeTx{ + ChainID: r.chainId, + To: &senderAddr, // Send it to self + Nonce: tx.Nonce(), // Same nonce as original transaction + GasFeeCap: gasFeeCap, + GasTipCap: tip, + Gas: transferGas, + Value: big.NewInt(0), // Zero value transfer + Data: []byte{}, // Empty call data + }) + + log.Info("replacement transfer tx info", + "tx_type", newTx.Type(), + "gas_tip_gwei", utils.WeiToGwei(tip), + "gas_fee_cap_gwei", utils.WeiToGwei(gasFeeCap), + "nonce", newTx.Nonce(), + "to", senderAddr.Hex(), + ) + + // Sign 
transaction + newTx, err = r.Sign(newTx) + if err != nil { + return nil, fmt.Errorf("sign tx error: %w", err) + } + + // Send transaction + err = r.SendTx(newTx) + if err != nil { + return nil, fmt.Errorf("send tx error: %w", err) + } + + log.Info("successfully sent replacement transfer transaction", + "original_hash", tx.Hash().String(), + "replacement_hash", newTx.Hash().String(), + "nonce", newTx.Nonce(), + ) + + return newTx, nil +} diff --git a/tx-submitter/services/rollup_handle_test.go b/tx-submitter/services/rollup_handle_test.go index 64f03b197..465965272 100644 --- a/tx-submitter/services/rollup_handle_test.go +++ b/tx-submitter/services/rollup_handle_test.go @@ -114,6 +114,7 @@ func setupTestRollup(t *testing.T) (*Rollup, *mock.L1ClientWrapper, *mock.L2Clie nil, nil, eventStorage, + nil, ) // Initialize pending transactions diff --git a/tx-submitter/types/batch_cache.go b/tx-submitter/types/batch_cache.go index 9be2d6031..7ebe49b81 100644 --- a/tx-submitter/types/batch_cache.go +++ b/tx-submitter/types/batch_cache.go @@ -9,15 +9,15 @@ import ( "github.com/morph-l2/go-ethereum/log" ) -type BatchCache struct { +type BatchCacheLegacy struct { m sync.RWMutex batchCache map[uint64]*eth.RPCRollupBatch fetcher iface.BatchFetcher } -// NewBatchCache creates a new batch cache instance -func NewBatchCache(fetcher iface.BatchFetcher) *BatchCache { - return &BatchCache{ +// NewBatchCacheLegacy creates a new batch cache instance +func NewBatchCacheLegacy(fetcher iface.BatchFetcher) *BatchCacheLegacy { + return &BatchCacheLegacy{ batchCache: make(map[uint64]*eth.RPCRollupBatch), fetcher: fetcher, } @@ -25,7 +25,7 @@ func NewBatchCache(fetcher iface.BatchFetcher) *BatchCache { // Get retrieves a batch from the cache by its index // If not found in cache, tries to fetch from node -func (b *BatchCache) Get(batchIndex uint64) (*eth.RPCRollupBatch, bool) { +func (b *BatchCacheLegacy) Get(batchIndex uint64) (*eth.RPCRollupBatch, bool) { // First try to get from cache 
b.m.RLock() batch, ok := b.batchCache[batchIndex] @@ -66,7 +66,7 @@ func (b *BatchCache) Get(batchIndex uint64) (*eth.RPCRollupBatch, bool) { return nil, false } -func (b *BatchCache) Set(batchIndex uint64, batch *eth.RPCRollupBatch) { +func (b *BatchCacheLegacy) Set(batchIndex uint64, batch *eth.RPCRollupBatch) { // Validate batch before caching - batch must exist and have signatures if batch == nil || len(batch.Signatures) == 0 { log.Debug("Refusing to cache invalid batch", @@ -82,7 +82,7 @@ func (b *BatchCache) Set(batchIndex uint64, batch *eth.RPCRollupBatch) { b.batchCache[batchIndex] = batch } -func (b *BatchCache) Delete(batchIndex uint64) { +func (b *BatchCacheLegacy) Delete(batchIndex uint64) { b.m.Lock() defer b.m.Unlock() @@ -90,7 +90,7 @@ func (b *BatchCache) Delete(batchIndex uint64) { } // Clear removes all entries from the batch cache -func (bc *BatchCache) Clear() { +func (bc *BatchCacheLegacy) Clear() { bc.m.Lock() defer bc.m.Unlock() bc.batchCache = make(map[uint64]*eth.RPCRollupBatch) diff --git a/tx-submitter/types/batch_cache_test.go b/tx-submitter/types/batch_cache_test.go deleted file mode 100644 index ae1449fd0..000000000 --- a/tx-submitter/types/batch_cache_test.go +++ /dev/null @@ -1,201 +0,0 @@ -package types - -import ( - "sync" - "testing" - - "github.com/morph-l2/go-ethereum/eth" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/mock" -) - -// MockBatchFetcher implements the BatchFetcher interface for testing -type MockBatchFetcher struct { - mock.Mock -} - -func (m *MockBatchFetcher) GetRollupBatchByIndex(index uint64) (*eth.RPCRollupBatch, error) { - args := m.Called(index) - if args.Get(0) == nil { - return nil, args.Error(1) - } - return args.Get(0).(*eth.RPCRollupBatch), args.Error(1) -} - -func TestBatchCache(t *testing.T) { - t.Run("Get non-existent batch - fetch from node", func(t *testing.T) { - mockFetcher := new(MockBatchFetcher) - cache := NewBatchCache(mockFetcher) - - expectedBatch := 
ð.RPCRollupBatch{ - Version: 1, - Signatures: []eth.RPCBatchSignature{ - { - Signature: []byte("signature"), - }, - }, - } - mockFetcher.On("GetRollupBatchByIndex", uint64(1)).Return(expectedBatch, nil) - - batch, ok := cache.Get(1) - assert.True(t, ok) - assert.Equal(t, expectedBatch, batch) - - mockFetcher.AssertExpectations(t) - - // Second get should use cache - batch, ok = cache.Get(1) - assert.True(t, ok) - assert.Equal(t, expectedBatch, batch) - }) - - t.Run("Get non-existent batch - fetch fails", func(t *testing.T) { - mockFetcher := new(MockBatchFetcher) - cache := NewBatchCache(mockFetcher) - - mockFetcher.On("GetRollupBatchByIndex", uint64(2)).Return(nil, assert.AnError).Once() - - batch, ok := cache.Get(2) - assert.False(t, ok) - assert.Nil(t, batch) - - mockFetcher.AssertExpectations(t) - }) - - t.Run("Set and Get batch", func(t *testing.T) { - mockFetcher := new(MockBatchFetcher) - cache := NewBatchCache(mockFetcher) - - batch := ð.RPCRollupBatch{ - Version: 1, - Signatures: []eth.RPCBatchSignature{ - { - Signature: []byte("signature"), - }, - }, - } - - // Add this line to set up the mock expectation - mockFetcher.On("GetRollupBatchByIndex", uint64(3)).Return(batch, nil).Maybe() - - cache.Set(3, batch) - - gotBatch, ok := cache.Get(3) - assert.True(t, ok) - assert.Equal(t, batch, gotBatch) - - mockFetcher.AssertExpectations(t) - }) - - t.Run("Delete batch", func(t *testing.T) { - mockFetcher := new(MockBatchFetcher) - cache := NewBatchCache(mockFetcher) - - batch := ð.RPCRollupBatch{ - Version: 1, - Signatures: []eth.RPCBatchSignature{ - { - Signature: []byte("signature"), - }, - }, - } - - cache.Set(4, batch) - gotBatch, ok := cache.Get(4) - assert.True(t, ok) - assert.Equal(t, batch, gotBatch) - - cache.Delete(4) - - // Setup mock for fetching after delete - mockFetcher.On("GetRollupBatchByIndex", uint64(4)).Return(nil, assert.AnError).Once() - - gotBatch, ok = cache.Get(4) - assert.False(t, ok) - assert.Nil(t, gotBatch) - - 
mockFetcher.AssertExpectations(t) - }) - - t.Run("Clear cache", func(t *testing.T) { - mockFetcher := new(MockBatchFetcher) - cache := NewBatchCache(mockFetcher) - - batch1 := ð.RPCRollupBatch{ - Version: 1, - Signatures: []eth.RPCBatchSignature{ - { - Signature: []byte("signature1"), - }, - }, - } - batch2 := ð.RPCRollupBatch{ - Version: 2, - Signatures: []eth.RPCBatchSignature{ - { - Signature: []byte("signature2"), - }, - }, - } - - cache.Set(5, batch1) - cache.Set(6, batch2) - - cache.Clear() - - // Setup mocks for fetching after clear - mockFetcher.On("GetRollupBatchByIndex", uint64(5)).Return(nil, assert.AnError).Once() - mockFetcher.On("GetRollupBatchByIndex", uint64(6)).Return(nil, assert.AnError).Once() - - gotBatch, ok := cache.Get(5) - assert.False(t, ok) - assert.Nil(t, gotBatch) - - gotBatch, ok = cache.Get(6) - assert.False(t, ok) - assert.Nil(t, gotBatch) - - mockFetcher.AssertExpectations(t) - }) - - t.Run("Concurrent access", func(t *testing.T) { - mockFetcher := new(MockBatchFetcher) - cache := NewBatchCache(mockFetcher) - - // Pre-set a batch to avoid nil pointer in concurrent access - testBatch := ð.RPCRollupBatch{ - Version: 7, - Signatures: []eth.RPCBatchSignature{ - { - Signature: []byte("signature"), - }, - }, - } - cache.Set(7, testBatch) - - // Setup mock expectation to allow any number of calls - mockFetcher.On("GetRollupBatchByIndex", uint64(7)).Return(testBatch, nil).Maybe() - - var wg sync.WaitGroup - for i := 0; i < 10; i++ { - wg.Add(1) - go func() { - defer wg.Done() - - batch, ok := cache.Get(7) - if ok && batch != nil { - cache.Set(7, batch) - } - }() - } - - wg.Wait() - - // Final validation of cache state - batch, ok := cache.Get(7) - assert.True(t, ok) - assert.NotNil(t, batch) - assert.Equal(t, testBatch.Version, batch.Version) - }) -} diff --git a/tx-submitter/types/batch_cache_validation_test.go b/tx-submitter/types/batch_cache_validation_test.go deleted file mode 100644 index c809b5e5e..000000000 --- 
a/tx-submitter/types/batch_cache_validation_test.go +++ /dev/null @@ -1,135 +0,0 @@ -package types - -import ( - "testing" - - "github.com/morph-l2/go-ethereum/common" - "github.com/morph-l2/go-ethereum/eth" - "github.com/stretchr/testify/assert" -) - -func TestBatchValidation(t *testing.T) { - t.Run("Get - Valid batch with signatures is cached", func(t *testing.T) { - mockFetcher := new(MockBatchFetcher) - cache := NewBatchCache(mockFetcher) - - // Create valid batch with signatures - validBatch := ð.RPCRollupBatch{ - Version: 1, - Signatures: []eth.RPCBatchSignature{ - { - Signer: common.HexToAddress("0x1234567890123456789012345678901234567890"), - Signature: []byte("test-signature"), - }, - }, - } - - mockFetcher.On("GetRollupBatchByIndex", uint64(1)).Return(validBatch, nil).Once() - - // Get should return the batch and cache it - batch, ok := cache.Get(1) - assert.True(t, ok) - assert.Equal(t, validBatch, batch) - assert.Equal(t, 1, len(batch.Signatures)) - - // Second get should use cache without calling fetcher - batch, ok = cache.Get(1) - assert.True(t, ok) - assert.Equal(t, validBatch, batch) - - mockFetcher.AssertExpectations(t) - }) - - t.Run("Get - Invalid batch without signatures is not cached", func(t *testing.T) { - mockFetcher := new(MockBatchFetcher) - cache := NewBatchCache(mockFetcher) - - // Create invalid batch without signatures - invalidBatch := ð.RPCRollupBatch{ - Version: 1, - Signatures: []eth.RPCBatchSignature{}, // Empty signatures - } - - mockFetcher.On("GetRollupBatchByIndex", uint64(2)).Return(invalidBatch, nil).Once() - mockFetcher.On("GetRollupBatchByIndex", uint64(2)).Return(invalidBatch, nil).Once() // Second call because not cached - - // Get should return the batch but not cache it - batch, ok := cache.Get(2) - assert.True(t, ok) // Still returns true because batch was found, just not cached - assert.Equal(t, invalidBatch, batch) - assert.Equal(t, 0, len(batch.Signatures)) - - // Second get should call fetcher again since it 
wasn't cached - batch, ok = cache.Get(2) - assert.True(t, ok) - assert.Equal(t, invalidBatch, batch) - - mockFetcher.AssertExpectations(t) - }) - - t.Run("Set - Valid batch with signatures is stored", func(t *testing.T) { - mockFetcher := new(MockBatchFetcher) - cache := NewBatchCache(mockFetcher) - - // Create valid batch with signatures - validBatch := ð.RPCRollupBatch{ - Version: 1, - Signatures: []eth.RPCBatchSignature{ - { - Signer: common.HexToAddress("0x1234567890123456789012345678901234567890"), - Signature: []byte("test-signature"), - }, - }, - } - - // Set should store the batch - cache.Set(3, validBatch) - - // Get should retrieve from cache - batch, ok := cache.Get(3) - assert.True(t, ok) - assert.Equal(t, validBatch, batch) - }) - - t.Run("Set - Invalid batch without signatures is not stored", func(t *testing.T) { - mockFetcher := new(MockBatchFetcher) - cache := NewBatchCache(mockFetcher) - - // Create invalid batch without signatures - invalidBatch := ð.RPCRollupBatch{ - Version: 1, - Signatures: []eth.RPCBatchSignature{}, // Empty signatures - } - - // Set should not store the batch - cache.Set(4, invalidBatch) - - // Setup mock for fetching since batch shouldn't be in cache - mockFetcher.On("GetRollupBatchByIndex", uint64(4)).Return(nil, assert.AnError).Once() - - // Get should try to fetch from node and fail - batch, ok := cache.Get(4) - assert.False(t, ok) - assert.Nil(t, batch) - - mockFetcher.AssertExpectations(t) - }) - - t.Run("Set - Nil batch is not stored", func(t *testing.T) { - mockFetcher := new(MockBatchFetcher) - cache := NewBatchCache(mockFetcher) - - // Set with nil batch should not store anything - cache.Set(5, nil) - - // Setup mock for fetching since nothing should be in cache - mockFetcher.On("GetRollupBatchByIndex", uint64(5)).Return(nil, assert.AnError).Once() - - // Get should try to fetch from node and fail - batch, ok := cache.Get(5) - assert.False(t, ok) - assert.Nil(t, batch) - - mockFetcher.AssertExpectations(t) - }) -} 
diff --git a/tx-submitter/types/converter.go b/tx-submitter/types/converter.go new file mode 100644 index 000000000..d5b16398d --- /dev/null +++ b/tx-submitter/types/converter.go @@ -0,0 +1,25 @@ +package types + +import ( + "encoding/binary" + "fmt" +) + +func Uint64ToBigEndianBytes(value uint64) []byte { + valueBytes := make([]byte, 8) + binary.BigEndian.PutUint64(valueBytes, value) + return valueBytes +} + +func Uint16ToBigEndianBytes(value uint16) []byte { + valueBytes := make([]byte, 2) + binary.BigEndian.PutUint16(valueBytes, value) + return valueBytes +} + +func HeightFromBlockContextBytes(blockContextBytes []byte) (uint64, error) { + if len(blockContextBytes) != 60 { + return 0, fmt.Errorf("wrong block context bytes length, input: %x", blockContextBytes) + } + return binary.BigEndian.Uint64(blockContextBytes[:8]), nil +} diff --git a/tx-submitter/types/l2Caller.go b/tx-submitter/types/l2Caller.go new file mode 100644 index 000000000..cef0ab903 --- /dev/null +++ b/tx-submitter/types/l2Caller.go @@ -0,0 +1,95 @@ +package types + +import ( + "bytes" + "fmt" + "math/big" + + "morph-l2/bindings/bindings" + "morph-l2/bindings/predeploys" + "morph-l2/tx-submitter/iface" + + "github.com/morph-l2/go-ethereum/accounts/abi/bind" + "github.com/morph-l2/go-ethereum/common" + "github.com/morph-l2/go-ethereum/common/hexutil" + "github.com/morph-l2/go-ethereum/crypto" +) + +type L2Caller struct { + l2Clients *iface.L2Clients + sequencerContract *bindings.SequencerCaller + l2MessagePasserContract *bindings.L2ToL1MessagePasserCaller + govContract *bindings.GovCaller +} + +func NewL2Caller(l2Clients []iface.L2Client) (*L2Caller, error) { + if len(l2Clients) == 0 { + return nil, fmt.Errorf("no l2clients provided") + } + for _, l2Client := range l2Clients { + if l2Client == nil { + return nil, fmt.Errorf("nil l2client") + } + } + clients := &iface.L2Clients{Clients: l2Clients} + + // Initialize Sequencer contract + sequencerContract, err := 
bindings.NewSequencerCaller(predeploys.SequencerAddr, clients) + if err != nil { + return nil, err + } + + // Initialize L2ToL1MessagePasser contract + l2MessagePasserContract, err := bindings.NewL2ToL1MessagePasserCaller(predeploys.L2ToL1MessagePasserAddr, clients) + if err != nil { + return nil, err + } + + // Initialize Gov contract + govContract, err := bindings.NewGovCaller(predeploys.GovAddr, clients) + if err != nil { + return nil, err + } + + return &L2Caller{ + l2Clients: clients, + sequencerContract: sequencerContract, + l2MessagePasserContract: l2MessagePasserContract, + govContract: govContract, + }, nil +} + +// SequencerSetVerifyHash gets the sequencer set verify hash from the Sequencer contract +func (c *L2Caller) SequencerSetVerifyHash(opts *bind.CallOpts) ([32]byte, error) { + return c.sequencerContract.SequencerSetVerifyHash(opts) +} + +// GetTreeRoot gets the tree root from the L2ToL1MessagePasser contract +func (c *L2Caller) GetTreeRoot(opts *bind.CallOpts) ([32]byte, error) { + return c.l2MessagePasserContract.GetTreeRoot(opts) +} + +// BatchBlockInterval gets the batch block interval from the Gov contract +func (c *L2Caller) BatchBlockInterval(opts *bind.CallOpts) (*big.Int, error) { + return c.govContract.BatchBlockInterval(opts) +} + +// BatchTimeout gets the batch timeout from the Gov contract +func (c *L2Caller) BatchTimeout(opts *bind.CallOpts) (*big.Int, error) { + return c.govContract.BatchTimeout(opts) +} + +func (c *L2Caller) GetSequencerSetBytes(opts *bind.CallOpts) ([]byte, common.Hash, error) { + hash, err := c.sequencerContract.SequencerSetVerifyHash(opts) + if err != nil { + return nil, common.Hash{}, err + } + setBytes, err := c.sequencerContract.GetSequencerSetBytes(opts) + if err != nil { + return nil, common.Hash{}, err + } + if bytes.Equal(hash[:], crypto.Keccak256Hash(setBytes).Bytes()) { + return setBytes, hash, nil + } + return nil, common.Hash{}, fmt.Errorf("sequencer set hash verify failed %v: %v", 
hexutil.Encode(setBytes), common.BytesToHash(hash[:]).String()) +} diff --git a/tx-submitter/utils/config.go b/tx-submitter/utils/config.go index 31bd9c9ea..a24d5604b 100644 --- a/tx-submitter/utils/config.go +++ b/tx-submitter/utils/config.go @@ -110,6 +110,8 @@ type Config struct { // leveldb path name LeveldbPathName string BlockNotIncreasedThreshold int64 + // enable seal batch + SealBatch bool } // NewConfig parses the DriverConfig from the provided flags or environment variables. @@ -183,6 +185,8 @@ func NewConfig(ctx *cli.Context) (Config, error) { LeveldbPathName: ctx.GlobalString(flags.LeveldbPathNameFlag.Name), // BlockNotIncreasedThreshold BlockNotIncreasedThreshold: ctx.GlobalInt64(flags.BlockNotIncreasedThreshold.Name), + // SealBatch + SealBatch: ctx.GlobalBool(flags.SealBatch.Name), } return cfg, nil diff --git a/tx-submitter/utils/methods.go b/tx-submitter/utils/methods.go index 65a96c15a..b8b4514af 100644 --- a/tx-submitter/utils/methods.go +++ b/tx-submitter/utils/methods.go @@ -69,7 +69,7 @@ func ParseStringToType[T any](s string) (T, error) { var result T var err error - // 获取目标类型的名称 + // Get target type name switch any(result).(type) { case int: var v int64