op-geth

diff: ignored:
+4242
-377
+8709
-128

This is an overview of the changes in op-geth, a fork of go-ethereum, part of the OP-stack.

The OP-stack architecture is modular, following the Consensus/Execution split of post-Merge Ethereum L1:

  • op-node implements most rollup-specific functionality as the Consensus-Layer, similar to an L1 beacon-node.
  • op-geth implements the Execution-Layer, with minimal changes for a secure Ethereum-equivalent application environment.

Related op-stack specifications:

The Bedrock upgrade introduces a Deposit transaction-type (0x7E) to enable both users and the rollup system itself to change the L2 state based on L1 events and system rules as specified.

diff --git go-ethereum/core/types/deposit_tx.go op-geth/core/types/deposit_tx.go new file mode 100644 index 0000000000000000000000000000000000000000..4131ee7af037275dcacd795baab4e0d55a1acb07 --- /dev/null +++ op-geth/core/types/deposit_tx.go @@ -0,0 +1,103 @@ +// Copyright 2021 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. + +package types + +import ( + "bytes" + "math/big" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/rlp" +) + +const DepositTxType = 0x7E + +type DepositTx struct { + // SourceHash uniquely identifies the source of the deposit + SourceHash common.Hash + // From is exposed through the types.Signer, not through TxData + From common.Address + // nil means contract creation + To *common.Address `rlp:"nil"` + // Mint is minted on L2, locked on L1, nil if no minting. + Mint *big.Int `rlp:"nil"` + // Value is transferred from L2 balance, executed after Mint (if any) + Value *big.Int + // gas limit + Gas uint64 + // Field indicating if this transaction is exempt from the L2 gas limit. + IsSystemTransaction bool + // Normal Tx data + Data []byte +} + +// copy creates a deep copy of the transaction data and initializes all fields. 
+func (tx *DepositTx) copy() TxData { + cpy := &DepositTx{ + SourceHash: tx.SourceHash, + From: tx.From, + To: copyAddressPtr(tx.To), + Mint: nil, + Value: new(big.Int), + Gas: tx.Gas, + IsSystemTransaction: tx.IsSystemTransaction, + Data: common.CopyBytes(tx.Data), + } + if tx.Mint != nil { + cpy.Mint = new(big.Int).Set(tx.Mint) + } + if tx.Value != nil { + cpy.Value.Set(tx.Value) + } + return cpy +} + +// accessors for innerTx. +func (tx *DepositTx) txType() byte { return DepositTxType } +func (tx *DepositTx) chainID() *big.Int { return common.Big0 } +func (tx *DepositTx) accessList() AccessList { return nil } +func (tx *DepositTx) data() []byte { return tx.Data } +func (tx *DepositTx) gas() uint64 { return tx.Gas } +func (tx *DepositTx) gasFeeCap() *big.Int { return new(big.Int) } +func (tx *DepositTx) gasTipCap() *big.Int { return new(big.Int) } +func (tx *DepositTx) gasPrice() *big.Int { return new(big.Int) } +func (tx *DepositTx) value() *big.Int { return tx.Value } +func (tx *DepositTx) nonce() uint64 { return 0 } +func (tx *DepositTx) to() *common.Address { return tx.To } +func (tx *DepositTx) isSystemTx() bool { return tx.IsSystemTransaction } + +func (tx *DepositTx) effectiveGasPrice(dst *big.Int, baseFee *big.Int) *big.Int { + return dst.Set(new(big.Int)) +} + +func (tx *DepositTx) effectiveNonce() *uint64 { return nil } + +func (tx *DepositTx) rawSignatureValues() (v, r, s *big.Int) { + return common.Big0, common.Big0, common.Big0 +} + +func (tx *DepositTx) setSignatureValues(chainID, v, r, s *big.Int) { + // this is a noop for deposit transactions +} + +func (tx *DepositTx) encode(b *bytes.Buffer) error { + return rlp.Encode(b, tx) +} + +func (tx *DepositTx) decode(input []byte) error { + return rlp.DecodeBytes(input, tx) +}
diff --git go-ethereum/core/types/transaction_marshalling.go op-geth/core/types/transaction_marshalling.go index 4d5b2bcdd4ce375d6c9896cbd321ff7949b071fb..253e4f03de82c53bfe2b48037a728d6397e1a6ec 100644 --- go-ethereum/core/types/transaction_marshalling.go +++ op-geth/core/types/transaction_marshalling.go @@ -19,11 +19,13 @@ import ( "encoding/json" "errors" + "io" "math/big"   "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/crypto/kzg4844" + "github.com/ethereum/go-ethereum/rlp" "github.com/holiman/uint256" )   @@ -47,6 +49,12 @@ V *hexutil.Big `json:"v"` R *hexutil.Big `json:"r"` S *hexutil.Big `json:"s"` YParity *hexutil.Uint64 `json:"yParity,omitempty"` + + // Deposit transaction fields + SourceHash *common.Hash `json:"sourceHash,omitempty"` + From *common.Address `json:"from,omitempty"` + Mint *hexutil.Big `json:"mint,omitempty"` + IsSystemTx *bool `json:"isSystemTx,omitempty"`   // Blob transaction sidecar encoding: Blobs []kzg4844.Blob `json:"blobs,omitempty"` @@ -153,6 +161,19 @@ enc.Blobs = itx.Sidecar.Blobs enc.Commitments = itx.Sidecar.Commitments enc.Proofs = itx.Sidecar.Proofs } + + case *DepositTx: + enc.Gas = (*hexutil.Uint64)(&itx.Gas) + enc.Value = (*hexutil.Big)(itx.Value) + enc.Input = (*hexutil.Bytes)(&itx.Data) + enc.To = tx.To() + enc.SourceHash = &itx.SourceHash + enc.From = &itx.From + if itx.Mint != nil { + enc.Mint = (*hexutil.Big)(itx.Mint) + } + enc.IsSystemTx = &itx.IsSystemTransaction + // other fields will show up as null. 
} return json.Marshal(&enc) } @@ -409,6 +430,54 @@ return err } }   + case DepositTxType: + if dec.AccessList != nil || dec.MaxFeePerGas != nil || + dec.MaxPriorityFeePerGas != nil { + return errors.New("unexpected field(s) in deposit transaction") + } + if dec.GasPrice != nil && dec.GasPrice.ToInt().Cmp(common.Big0) != 0 { + return errors.New("deposit transaction GasPrice must be 0") + } + if (dec.V != nil && dec.V.ToInt().Cmp(common.Big0) != 0) || + (dec.R != nil && dec.R.ToInt().Cmp(common.Big0) != 0) || + (dec.S != nil && dec.S.ToInt().Cmp(common.Big0) != 0) { + return errors.New("deposit transaction signature must be 0 or unset") + } + var itx DepositTx + inner = &itx + if dec.To != nil { + itx.To = dec.To + } + if dec.Gas == nil { + return errors.New("missing required field 'gas' for txdata") + } + itx.Gas = uint64(*dec.Gas) + if dec.Value == nil { + return errors.New("missing required field 'value' in transaction") + } + itx.Value = (*big.Int)(dec.Value) + // mint may be omitted or nil if there is nothing to mint. + itx.Mint = (*big.Int)(dec.Mint) + if dec.Input == nil { + return errors.New("missing required field 'input' in transaction") + } + itx.Data = *dec.Input + if dec.From == nil { + return errors.New("missing required field 'from' in transaction") + } + itx.From = *dec.From + if dec.SourceHash == nil { + return errors.New("missing required field 'sourceHash' in transaction") + } + itx.SourceHash = *dec.SourceHash + // IsSystemTx may be omitted. Defaults to false. + if dec.IsSystemTx != nil { + itx.IsSystemTransaction = *dec.IsSystemTx + } + + if dec.Nonce != nil { + inner = &depositTxWithNonce{DepositTx: itx, EffectiveNonce: uint64(*dec.Nonce)} + } default: return ErrTxTypeNotSupported } @@ -419,3 +488,15 @@ // TODO: check hash here? return nil } + +type depositTxWithNonce struct { + DepositTx + EffectiveNonce uint64 +} + +// EncodeRLP ensures that RLP encoding this transaction excludes the nonce. 
Otherwise, the tx Hash would change +func (tx *depositTxWithNonce) EncodeRLP(w io.Writer) error { + return rlp.Encode(w, tx.DepositTx) +} + +func (tx *depositTxWithNonce) effectiveNonce() *uint64 { return &tx.EffectiveNonce }
diff --git go-ethereum/core/types/transaction_signing.go op-geth/core/types/transaction_signing.go index 9e26642f753db4f0b370aef0ce84f327defbf7b7..0ffe8c92d770bdb938ce10a0a253404cbc32824b 100644 --- go-ethereum/core/types/transaction_signing.go +++ op-geth/core/types/transaction_signing.go @@ -40,7 +40,7 @@ // MakeSigner returns a Signer based on the given chain config and block number. func MakeSigner(config *params.ChainConfig, blockNumber *big.Int, blockTime uint64) Signer { var signer Signer switch { - case config.IsCancun(blockNumber, blockTime): + case config.IsCancun(blockNumber, blockTime) && !config.IsOptimism(): signer = NewCancunSigner(config.ChainID) case config.IsLondon(blockNumber): signer = NewLondonSigner(config.ChainID) @@ -65,7 +65,7 @@ // Use this in transaction-handling code where the current block number is unknown. If you // have the current block number available, use MakeSigner instead. func LatestSigner(config *params.ChainConfig) Signer { if config.ChainID != nil { - if config.CancunTime != nil { + if config.CancunTime != nil && !config.IsOptimism() { return NewCancunSigner(config.ChainID) } if config.LondonBlock != nil { @@ -256,6 +256,14 @@ return londonSigner{eip2930Signer{NewEIP155Signer(chainId)}} }   func (s londonSigner) Sender(tx *Transaction) (common.Address, error) { + if tx.Type() == DepositTxType { + switch tx.inner.(type) { + case *DepositTx: + return tx.inner.(*DepositTx).From, nil + case *depositTxWithNonce: + return tx.inner.(*depositTxWithNonce).From, nil + } + } if tx.Type() != DynamicFeeTxType { return s.eip2930Signer.Sender(tx) } @@ -275,6 +283,9 @@ return ok && x.chainId.Cmp(s.chainId) == 0 }   func (s londonSigner) SignatureValues(tx *Transaction, sig []byte) (R, S, V *big.Int, err error) { + if tx.Type() == DepositTxType { + return nil, nil, nil, fmt.Errorf("deposits do not have a signature") + } txdata, ok := tx.inner.(*DynamicFeeTx) if !ok { return s.eip2930Signer.SignatureValues(tx, sig) @@ -292,6 +303,9 @@ // 
Hash returns the hash to be signed by the sender. // It does not uniquely identify the transaction. func (s londonSigner) Hash(tx *Transaction) common.Hash { + if tx.Type() == DepositTxType { + panic("deposits cannot be signed and do not have a signing hash") + } if tx.Type() != DynamicFeeTxType { return s.eip2930Signer.Hash(tx) }

The Transaction type now exposes the deposit-transaction and L1-cost properties required for the rollup.

diff --git go-ethereum/core/types/transaction.go op-geth/core/types/transaction.go index 7d2e9d5325a642ea87fe388f59f592a264e287f8..95ecac5fae971e8096865aea25b9955dc34e0ad5 100644 --- go-ethereum/core/types/transaction.go +++ op-geth/core/types/transaction.go @@ -28,6 +28,7 @@ "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rlp" )   @@ -60,6 +61,9 @@ // caches hash atomic.Value size atomic.Value from atomic.Value + + // cache of details to compute the data availability fee + rollupCostData atomic.Value }   // NewTx creates a new transaction. @@ -86,6 +90,7 @@ gasFeeCap() *big.Int value() *big.Int nonce() uint64 to() *common.Address + isSystemTx() bool   rawSignatureValues() (v, r, s *big.Int) setSignatureValues(chainID, v, r, s *big.Int) @@ -206,6 +211,8 @@ case DynamicFeeTxType: inner = new(DynamicFeeTx) case BlobTxType: inner = new(BlobTx) + case DepositTxType: + inner = new(DepositTx) default: return nil, ErrTxTypeNotSupported } @@ -302,6 +309,20 @@ func (tx *Transaction) Value() *big.Int { return new(big.Int).Set(tx.inner.value()) }   // Nonce returns the sender account nonce of the transaction. func (tx *Transaction) Nonce() uint64 { return tx.inner.nonce() } + +// EffectiveNonce returns the nonce that was actually used as part of transaction execution +// Returns nil if the effective nonce is not known +func (tx *Transaction) EffectiveNonce() *uint64 { + type txWithEffectiveNonce interface { + effectiveNonce() *uint64 + } + + if itx, ok := tx.inner.(txWithEffectiveNonce); ok { + return itx.effectiveNonce() + } + nonce := tx.inner.nonce() + return &nonce +}   // To returns the recipient address of the transaction. // For contract-creation transactions, To returns nil. 
@@ -309,6 +330,36 @@ func (tx *Transaction) To() *common.Address { return copyAddressPtr(tx.inner.to()) }   +// SourceHash returns the hash that uniquely identifies the source of the deposit tx, +// e.g. a user deposit event, or a L1 info deposit included in a specific L2 block height. +// Non-deposit transactions return a zeroed hash. +func (tx *Transaction) SourceHash() common.Hash { + if dep, ok := tx.inner.(*DepositTx); ok { + return dep.SourceHash + } + return common.Hash{} +} + +// Mint returns the ETH to mint in the deposit tx. +// This returns nil if there is nothing to mint, or if this is not a deposit tx. +func (tx *Transaction) Mint() *big.Int { + if dep, ok := tx.inner.(*DepositTx); ok { + return dep.Mint + } + return nil +} + +// IsDepositTx returns true if the transaction is a deposit tx type. +func (tx *Transaction) IsDepositTx() bool { + return tx.Type() == DepositTxType +} + +// IsSystemTx returns true for deposits that are system transactions. These transactions +// are executed in an unmetered environment & do not contribute to the block gas limit. +func (tx *Transaction) IsSystemTx() bool { + return tx.inner.isSystemTx() +} + // Cost returns (gas * gasPrice) + (blobGas * blobGasPrice) + value. func (tx *Transaction) Cost() *big.Int { total := new(big.Int).Mul(tx.GasPrice(), new(big.Int).SetUint64(tx.Gas())) @@ -319,6 +370,23 @@ total.Add(total, tx.Value()) return total }   +// RollupCostData caches the information needed to efficiently compute the data availability fee +func (tx *Transaction) RollupCostData() RollupCostData { + if tx.Type() == DepositTxType { + return RollupCostData{} + } + if v := tx.rollupCostData.Load(); v != nil { + return v.(RollupCostData) + } + data, err := tx.MarshalBinary() + if err != nil { // Silent error, invalid txs will not be marshalled/unmarshalled for batch submission anyway. 
+ log.Error("failed to encode tx for L1 cost computation", "err", err) + } + out := NewRollupCostData(data) + tx.rollupCostData.Store(out) + return out +} + // RawSignatureValues returns the V, R, S signature values of the transaction. // The return values should not be modified by the caller. // The return values may be nil or zero, if the transaction is unsigned. @@ -350,6 +418,9 @@ // EffectiveGasTip returns the effective miner gasTipCap for the given base fee. // Note: if the effective gasTipCap is negative, this method returns both error // the actual negative value, _and_ ErrGasFeeCapTooLow func (tx *Transaction) EffectiveGasTip(baseFee *big.Int) (*big.Int, error) { + if tx.Type() == DepositTxType { + return new(big.Int), nil + } if baseFee == nil { return tx.GasTipCap(), nil } @@ -413,6 +484,18 @@ func (tx *Transaction) BlobTxSidecar() *BlobTxSidecar { if blobtx, ok := tx.inner.(*BlobTx); ok { return blobtx.Sidecar } + return nil +} + +// SetBlobTxSidecar sets the sidecar of a transaction. +// The sidecar should match the blob-tx versioned hashes, or the transaction will be invalid. +// This allows tools to easily re-attach blob sidecars to signed transactions that omit the sidecar. +func (tx *Transaction) SetBlobTxSidecar(sidecar *BlobTxSidecar) error { + blobtx, ok := tx.inner.(*BlobTx) + if !ok { + return fmt.Errorf("not a blob tx, type = %d", tx.Type()) + } + blobtx.Sidecar = sidecar return nil }
diff --git go-ethereum/core/types/tx_access_list.go op-geth/core/types/tx_access_list.go index 730a77b752866afb5e6ba3d7ea8f4a5af6456d00..59880b0eabb182cd889a27b58d6c449187f4701a 100644 --- go-ethereum/core/types/tx_access_list.go +++ op-geth/core/types/tx_access_list.go @@ -107,6 +107,7 @@ func (tx *AccessListTx) gasFeeCap() *big.Int { return tx.GasPrice } func (tx *AccessListTx) value() *big.Int { return tx.Value } func (tx *AccessListTx) nonce() uint64 { return tx.Nonce } func (tx *AccessListTx) to() *common.Address { return tx.To } +func (tx *AccessListTx) isSystemTx() bool { return false }   func (tx *AccessListTx) effectiveGasPrice(dst *big.Int, baseFee *big.Int) *big.Int { return dst.Set(tx.GasPrice)
diff --git go-ethereum/core/types/tx_blob.go op-geth/core/types/tx_blob.go index 25a85695efc907e24aa76cdd99f350a08082b864..ceaeedb20d219cc3b075256695b9b101b72d5ea3 100644 --- go-ethereum/core/types/tx_blob.go +++ op-geth/core/types/tx_blob.go @@ -162,6 +162,7 @@ func (tx *BlobTx) value() *big.Int { return tx.Value.ToBig() } func (tx *BlobTx) nonce() uint64 { return tx.Nonce } func (tx *BlobTx) to() *common.Address { tmp := tx.To; return &tmp } func (tx *BlobTx) blobGas() uint64 { return params.BlobTxBlobGasPerBlob * uint64(len(tx.BlobHashes)) } +func (tx *BlobTx) isSystemTx() bool { return false }   func (tx *BlobTx) effectiveGasPrice(dst *big.Int, baseFee *big.Int) *big.Int { if baseFee == nil {
diff --git go-ethereum/core/types/tx_dynamic_fee.go op-geth/core/types/tx_dynamic_fee.go index 8b5b514fdec5b58fc8058e582a0a7c355e0ccba8..9c52cde57754d4826e6d673f297238ce02212657 100644 --- go-ethereum/core/types/tx_dynamic_fee.go +++ op-geth/core/types/tx_dynamic_fee.go @@ -96,6 +96,7 @@ func (tx *DynamicFeeTx) gasPrice() *big.Int { return tx.GasFeeCap } func (tx *DynamicFeeTx) value() *big.Int { return tx.Value } func (tx *DynamicFeeTx) nonce() uint64 { return tx.Nonce } func (tx *DynamicFeeTx) to() *common.Address { return tx.To } +func (tx *DynamicFeeTx) isSystemTx() bool { return false }   func (tx *DynamicFeeTx) effectiveGasPrice(dst *big.Int, baseFee *big.Int) *big.Int { if baseFee == nil {
diff --git go-ethereum/core/types/tx_legacy.go op-geth/core/types/tx_legacy.go index 71025b78fc060692c1d034f84b59b4cdc8e6cf65..bcb65c39349a3a1bef78948af48a30a1e14c71ba 100644 --- go-ethereum/core/types/tx_legacy.go +++ op-geth/core/types/tx_legacy.go @@ -103,6 +103,7 @@ func (tx *LegacyTx) gasFeeCap() *big.Int { return tx.GasPrice } func (tx *LegacyTx) value() *big.Int { return tx.Value } func (tx *LegacyTx) nonce() uint64 { return tx.Nonce } func (tx *LegacyTx) to() *common.Address { return tx.To } +func (tx *LegacyTx) isSystemTx() bool { return false }   func (tx *LegacyTx) effectiveGasPrice(dst *big.Int, baseFee *big.Int) *big.Int { return dst.Set(tx.GasPrice)

Transactions must pay an additional L1 cost based on the amount of rollup-data-gas they consume, estimated based on gas-price-oracle information and encoded tx size.

diff --git go-ethereum/core/evm.go op-geth/core/evm.go index 73f6d7bc20a09ad5e2e9ae4f4a81018a5efe1444..d1e826d2a343a656650c26648e19cfe292998000 100644 --- go-ethereum/core/evm.go +++ op-geth/core/evm.go @@ -24,6 +24,7 @@ "github.com/ethereum/go-ethereum/consensus" "github.com/ethereum/go-ethereum/consensus/misc/eip4844" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/params" "github.com/holiman/uint256" )   @@ -38,7 +39,7 @@ GetHeader(common.Hash, uint64) *types.Header }   // NewEVMBlockContext creates a new context for use in the EVM. -func NewEVMBlockContext(header *types.Header, chain ChainContext, author *common.Address) vm.BlockContext { +func NewEVMBlockContext(header *types.Header, chain ChainContext, author *common.Address, config *params.ChainConfig, statedb types.StateGetter) vm.BlockContext { var ( beneficiary common.Address baseFee *big.Int @@ -73,6 +74,7 @@ BaseFee: baseFee, BlobBaseFee: blobBaseFee, GasLimit: header.GasLimit, Random: random, + L1CostFunc: types.NewL1CostFunc(config, statedb), } }
diff --git go-ethereum/core/state_prefetcher.go op-geth/core/state_prefetcher.go index ff867309de302b90dc3e071e97cbe3705d34c970..bb6835202b9758e49611f8da677ddee3edc346f1 100644 --- go-ethereum/core/state_prefetcher.go +++ op-geth/core/state_prefetcher.go @@ -51,7 +51,7 @@ func (p *statePrefetcher) Prefetch(block *types.Block, statedb *state.StateDB, cfg vm.Config, interrupt *atomic.Bool) { var ( header = block.Header() gaspool = new(GasPool).AddGas(block.GasLimit()) - blockContext = NewEVMBlockContext(header, p.bc, nil) + blockContext = NewEVMBlockContext(header, p.bc, nil, p.config, statedb) evm = vm.NewEVM(blockContext, vm.TxContext{}, statedb, p.config, cfg) signer = types.MakeSigner(p.config, header.Number, header.Time) )
diff --git go-ethereum/core/state_processor.go op-geth/core/state_processor.go index 9e32ab4e569600636e88bd8c6579771bfdb18733..e11041a87c50a614c1c30c1c3f9a427a845af64e 100644 --- go-ethereum/core/state_processor.go +++ op-geth/core/state_processor.go @@ -71,8 +71,9 @@ // Mutate the block and state according to any hard-fork specs if p.config.DAOForkSupport && p.config.DAOForkBlock != nil && p.config.DAOForkBlock.Cmp(block.Number()) == 0 { misc.ApplyDAOHardFork(statedb) } + misc.EnsureCreate2Deployer(p.config, block.Time(), statedb) var ( - context = NewEVMBlockContext(header, p.bc, nil) + context = NewEVMBlockContext(header, p.bc, nil, p.config, statedb) vmenv = vm.NewEVM(context, vm.TxContext{}, statedb, p.config, cfg) signer = types.MakeSigner(p.config, header.Number, header.Time) ) @@ -109,6 +110,11 @@ // Create a new context to be used in the EVM environment. txContext := NewEVMTxContext(msg) evm.Reset(txContext, statedb)   + nonce := tx.Nonce() + if msg.IsDepositTx && config.IsOptimismRegolith(evm.Context.Time) { + nonce = statedb.GetNonce(msg.From) + } + // Apply the transaction to the current state (included in the env). result, err := ApplyMessage(evm, msg, gp) if err != nil { @@ -135,6 +141,17 @@ } receipt.TxHash = tx.Hash() receipt.GasUsed = result.UsedGas   + if msg.IsDepositTx && config.IsOptimismRegolith(evm.Context.Time) { + // The actual nonce for deposit transactions is only recorded from Regolith onwards and + // otherwise must be nil. + receipt.DepositNonce = &nonce + // The DepositReceiptVersion for deposit transactions is only recorded from Canyon onwards + // and otherwise must be nil. 
+ if config.IsOptimismCanyon(evm.Context.Time) { + receipt.DepositReceiptVersion = new(uint64) + *receipt.DepositReceiptVersion = types.CanyonDepositReceiptVersion + } + } if tx.Type() == types.BlobTxType { receipt.BlobGasUsed = uint64(len(tx.BlobHashes()) * params.BlobTxBlobGasPerBlob) receipt.BlobGasPrice = evm.Context.BlobBaseFee @@ -142,7 +159,7 @@ }   // If the transaction created a contract, store the creation address in the receipt. if msg.To == nil { - receipt.ContractAddress = crypto.CreateAddress(evm.TxContext.Origin, tx.Nonce()) + receipt.ContractAddress = crypto.CreateAddress(evm.TxContext.Origin, nonce) }   // Set the receipt logs and create the bloom filter. @@ -164,7 +181,7 @@ if err != nil { return nil, err } // Create a new context to be used in the EVM environment - blockContext := NewEVMBlockContext(header, bc, author) + blockContext := NewEVMBlockContext(header, bc, author, config, statedb) txContext := NewEVMTxContext(msg) vmenv := vm.NewEVM(blockContext, txContext, statedb, config, cfg) return applyTransaction(msg, config, gp, statedb, header.Number, header.Hash(), tx, usedGas, vmenv)
diff --git go-ethereum/core/types/rollup_cost.go op-geth/core/types/rollup_cost.go new file mode 100644 index 0000000000000000000000000000000000000000..e0bc26d92f0e6c747787bfdd8b2e632af7ad4a1e --- /dev/null +++ op-geth/core/types/rollup_cost.go @@ -0,0 +1,473 @@ +// Copyright 2022 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. + +package types + +import ( + "bytes" + "encoding/binary" + "fmt" + "math/big" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/params" +) + +const ( + // The two 4-byte Ecotone fee scalar values are packed into the same storage slot as the 8-byte + // sequence number and have the following Solidity offsets within the slot. Note that Solidity + // offsets correspond to the last byte of the value in the slot, counting backwards from the + // end of the slot. For example, The 8-byte sequence number has offset 0, and is therefore + // stored as big-endian format in bytes [24:32) of the slot. + BaseFeeScalarSlotOffset = 12 // bytes [16:20) of the slot + BlobBaseFeeScalarSlotOffset = 8 // bytes [20:24) of the slot + + // scalarSectionStart is the beginning of the scalar values segment in the slot + // array. 
baseFeeScalar is in the first four bytes of the segment, blobBaseFeeScalar the next + // four. + scalarSectionStart = 32 - BaseFeeScalarSlotOffset - 4 +) + +func init() { + if BlobBaseFeeScalarSlotOffset != BaseFeeScalarSlotOffset-4 { + panic("this code assumes the scalars are at adjacent positions in the scalars slot") + } +} + +var ( + // BedrockL1AttributesSelector is the function selector indicating Bedrock style L1 gas + // attributes. + BedrockL1AttributesSelector = []byte{0x01, 0x5d, 0x8e, 0xb9} + // EcotoneL1AttributesSelector is the selector indicating Ecotone style L1 gas attributes. + EcotoneL1AttributesSelector = []byte{0x44, 0x0a, 0x5e, 0x20} + + // L1BlockAddr is the address of the L1Block contract which stores the L1 gas attributes. + L1BlockAddr = common.HexToAddress("0x4200000000000000000000000000000000000015") + + L1BaseFeeSlot = common.BigToHash(big.NewInt(1)) + OverheadSlot = common.BigToHash(big.NewInt(5)) + ScalarSlot = common.BigToHash(big.NewInt(6)) + + // L1BlobBaseFeeSlot was added with the Ecotone upgrade and stores the blobBaseFee L1 gas + // attribute. + L1BlobBaseFeeSlot = common.BigToHash(big.NewInt(7)) + // L1FeeScalarsSlot as of the Ecotone upgrade stores the 32-bit basefeeScalar and + // blobBaseFeeScalar L1 gas attributes at offsets `BaseFeeScalarSlotOffset` and + // `BlobBaseFeeScalarSlotOffset` respectively. + L1FeeScalarsSlot = common.BigToHash(big.NewInt(3)) + + oneMillion = big.NewInt(1_000_000) + ecotoneDivisor = big.NewInt(1_000_000 * 16) + fjordDivisor = big.NewInt(1_000_000_000_000) + sixteen = big.NewInt(16) + + L1CostIntercept = big.NewInt(-42_585_600) + L1CostFastlzCoef = big.NewInt(836_500) + + MinTransactionSize = big.NewInt(100) + MinTransactionSizeScaled = new(big.Int).Mul(MinTransactionSize, big.NewInt(1e6)) + + emptyScalars = make([]byte, 8) +) + +// RollupCostData is a transaction structure that caches data for quickly computing the data +// availability costs for the transaction. 
+type RollupCostData struct { + Zeroes, Ones uint64 + FastLzSize uint64 +} + +type StateGetter interface { + GetState(common.Address, common.Hash) common.Hash +} + +// L1CostFunc is used in the state transition to determine the data availability fee charged to the +// sender of non-Deposit transactions. It returns nil if no data availability fee is charged. +type L1CostFunc func(rcd RollupCostData, blockTime uint64) *big.Int + +// l1CostFunc is an internal version of L1CostFunc that also returns the gasUsed for use in +// receipts. +type l1CostFunc func(rcd RollupCostData) (fee, gasUsed *big.Int) + +func NewRollupCostData(data []byte) (out RollupCostData) { + for _, b := range data { + if b == 0 { + out.Zeroes++ + } else { + out.Ones++ + } + } + out.FastLzSize = uint64(FlzCompressLen(data)) + return out +} + +// NewL1CostFunc returns a function used for calculating data availability fees, or nil if this is +// not an op-stack chain. +func NewL1CostFunc(config *params.ChainConfig, statedb StateGetter) L1CostFunc { + if config.Optimism == nil { + return nil + } + forBlock := ^uint64(0) + var cachedFunc l1CostFunc + selectFunc := func(blockTime uint64) l1CostFunc { + if !config.IsOptimismEcotone(blockTime) { + return newL1CostFuncBedrock(config, statedb, blockTime) + } + + // Note: the various state variables below are not initialized from the DB until this + // point to allow deposit transactions from the block to be processed first by state + // transition. This behavior is consensus critical! + l1FeeScalars := statedb.GetState(L1BlockAddr, L1FeeScalarsSlot).Bytes() + l1BlobBaseFee := statedb.GetState(L1BlockAddr, L1BlobBaseFeeSlot).Big() + l1BaseFee := statedb.GetState(L1BlockAddr, L1BaseFeeSlot).Big() + + // Edge case: the very first Ecotone block requires we use the Bedrock cost + // function. We detect this scenario by checking if the Ecotone parameters are + // unset. 
Note here we rely on assumption that the scalar parameters are adjacent + // in the buffer and l1BaseFeeScalar comes first. We need to check this prior to + // other forks, as the first block of Fjord and Ecotone could be the same block. + firstEcotoneBlock := l1BlobBaseFee.BitLen() == 0 && + bytes.Equal(emptyScalars, l1FeeScalars[scalarSectionStart:scalarSectionStart+8]) + if firstEcotoneBlock { + log.Info("using bedrock l1 cost func for first Ecotone block", "time", blockTime) + return newL1CostFuncBedrock(config, statedb, blockTime) + } + + l1BaseFeeScalar, l1BlobBaseFeeScalar := extractEcotoneFeeParams(l1FeeScalars) + + if config.IsOptimismFjord(blockTime) { + return NewL1CostFuncFjord( + l1BaseFee, + l1BlobBaseFee, + l1BaseFeeScalar, + l1BlobBaseFeeScalar, + ) + } else { + return newL1CostFuncEcotone(l1BaseFee, l1BlobBaseFee, l1BaseFeeScalar, l1BlobBaseFeeScalar) + } + } + + return func(rollupCostData RollupCostData, blockTime uint64) *big.Int { + if rollupCostData == (RollupCostData{}) { + return nil // Do not charge if there is no rollup cost-data (e.g. RPC call or deposit). + } + if forBlock != blockTime { + if forBlock != ^uint64(0) { + // best practice is not to re-use l1 cost funcs across different blocks, but we + // make it work just in case. + log.Info("l1 cost func re-used for different L1 block", "oldTime", forBlock, "newTime", blockTime) + } + forBlock = blockTime + cachedFunc = selectFunc(blockTime) + } + fee, _ := cachedFunc(rollupCostData) + return fee + } +} + +// newL1CostFuncBedrock returns an L1 cost function suitable for Bedrock, Regolith, and the first +// block only of the Ecotone upgrade. 
+func newL1CostFuncBedrock(config *params.ChainConfig, statedb StateGetter, blockTime uint64) l1CostFunc { + l1BaseFee := statedb.GetState(L1BlockAddr, L1BaseFeeSlot).Big() + overhead := statedb.GetState(L1BlockAddr, OverheadSlot).Big() + scalar := statedb.GetState(L1BlockAddr, ScalarSlot).Big() + isRegolith := config.IsRegolith(blockTime) + return newL1CostFuncBedrockHelper(l1BaseFee, overhead, scalar, isRegolith) +} + +// newL1CostFuncBedrockHelper is lower level version of newL1CostFuncBedrock that expects already +// extracted parameters +func newL1CostFuncBedrockHelper(l1BaseFee, overhead, scalar *big.Int, isRegolith bool) l1CostFunc { + return func(rollupCostData RollupCostData) (fee, gasUsed *big.Int) { + if rollupCostData == (RollupCostData{}) { + return nil, nil // Do not charge if there is no rollup cost-data (e.g. RPC call or deposit) + } + gas := rollupCostData.Zeroes * params.TxDataZeroGas + if isRegolith { + gas += rollupCostData.Ones * params.TxDataNonZeroGasEIP2028 + } else { + gas += (rollupCostData.Ones + 68) * params.TxDataNonZeroGasEIP2028 + } + gasWithOverhead := new(big.Int).SetUint64(gas) + gasWithOverhead.Add(gasWithOverhead, overhead) + l1Cost := l1CostHelper(gasWithOverhead, l1BaseFee, scalar) + return l1Cost, gasWithOverhead + } +} + +// newL1CostFuncEcotone returns an l1 cost function suitable for the Ecotone upgrade except for the +// very first block of the upgrade. +func newL1CostFuncEcotone(l1BaseFee, l1BlobBaseFee, l1BaseFeeScalar, l1BlobBaseFeeScalar *big.Int) l1CostFunc { + return func(costData RollupCostData) (fee, calldataGasUsed *big.Int) { + calldataGasUsed = bedrockCalldataGasUsed(costData) + + // Ecotone L1 cost function: + // + // (calldataGas/16)*(l1BaseFee*16*l1BaseFeeScalar + l1BlobBaseFee*l1BlobBaseFeeScalar)/1e6 + // + // We divide "calldataGas" by 16 to change from units of calldata gas to "estimated # of bytes when + // compressed". Known as "compressedTxSize" in the spec. 
+ // + // Function is actually computed as follows for better precision under integer arithmetic: + // + // calldataGas*(l1BaseFee*16*l1BaseFeeScalar + l1BlobBaseFee*l1BlobBaseFeeScalar)/16e6 + + calldataCostPerByte := new(big.Int).Set(l1BaseFee) + calldataCostPerByte = calldataCostPerByte.Mul(calldataCostPerByte, sixteen) + calldataCostPerByte = calldataCostPerByte.Mul(calldataCostPerByte, l1BaseFeeScalar) + + blobCostPerByte := new(big.Int).Set(l1BlobBaseFee) + blobCostPerByte = blobCostPerByte.Mul(blobCostPerByte, l1BlobBaseFeeScalar) + + fee = new(big.Int).Add(calldataCostPerByte, blobCostPerByte) + fee = fee.Mul(fee, calldataGasUsed) + fee = fee.Div(fee, ecotoneDivisor) + + return fee, calldataGasUsed + } +} + +type gasParams struct { + l1BaseFee *big.Int + l1BlobBaseFee *big.Int + costFunc l1CostFunc + feeScalar *big.Float // pre-ecotone + l1BaseFeeScalar *uint32 // post-ecotone + l1BlobBaseFeeScalar *uint32 // post-ecotone +} + +// intToScaledFloat returns scalar/10e6 as a float +func intToScaledFloat(scalar *big.Int) *big.Float { + fscalar := new(big.Float).SetInt(scalar) + fdivisor := new(big.Float).SetUint64(1_000_000) // 10**6, i.e. 6 decimals + return new(big.Float).Quo(fscalar, fdivisor) +} + +// extractL1GasParams extracts the gas parameters necessary to compute gas costs from L1 block info +func extractL1GasParams(config *params.ChainConfig, time uint64, data []byte) (gasParams, error) { + // edge case: for the very first Ecotone block we still need to use the Bedrock + // function. 
We detect this edge case by seeing if the function selector is the old one + // If so, fall through to the pre-ecotone format + // Both Ecotone and Fjord use the same function selector + if config.IsEcotone(time) && len(data) >= 4 && !bytes.Equal(data[0:4], BedrockL1AttributesSelector) { + p, err := extractL1GasParamsPostEcotone(data) + if err != nil { + return gasParams{}, err + } + + if config.IsFjord(time) { + p.costFunc = NewL1CostFuncFjord( + p.l1BaseFee, + p.l1BlobBaseFee, + big.NewInt(int64(*p.l1BaseFeeScalar)), + big.NewInt(int64(*p.l1BlobBaseFeeScalar)), + ) + } else { + p.costFunc = newL1CostFuncEcotone( + p.l1BaseFee, + p.l1BlobBaseFee, + big.NewInt(int64(*p.l1BaseFeeScalar)), + big.NewInt(int64(*p.l1BlobBaseFeeScalar)), + ) + } + + return p, nil + } + return extractL1GasParamsPreEcotone(config, time, data) +} + +func extractL1GasParamsPreEcotone(config *params.ChainConfig, time uint64, data []byte) (gasParams, error) { + // data consists of func selector followed by 7 ABI-encoded parameters (32 bytes each) + if len(data) < 4+32*8 { + return gasParams{}, fmt.Errorf("expected at least %d L1 info bytes, got %d", 4+32*8, len(data)) + } + data = data[4:] // trim function selector + l1BaseFee := new(big.Int).SetBytes(data[32*2 : 32*3]) // arg index 2 + overhead := new(big.Int).SetBytes(data[32*6 : 32*7]) // arg index 6 + scalar := new(big.Int).SetBytes(data[32*7 : 32*8]) // arg index 7 + feeScalar := intToScaledFloat(scalar) // legacy: format fee scalar as big Float + costFunc := newL1CostFuncBedrockHelper(l1BaseFee, overhead, scalar, config.IsRegolith(time)) + return gasParams{ + l1BaseFee: l1BaseFee, + costFunc: costFunc, + feeScalar: feeScalar, + }, nil +} + +// extractL1GasParamsPostEcotone extracts the gas parameters necessary to compute gas from L1 attribute +// info calldata after the Ecotone upgrade, but not for the very first Ecotone block. 
+func extractL1GasParamsPostEcotone(data []byte) (gasParams, error) { + if len(data) != 164 { + return gasParams{}, fmt.Errorf("expected 164 L1 info bytes, got %d", len(data)) + } + // data layout assumed for Ecotone: + // offset type varname + // 0 <selector> + // 4 uint32 _basefeeScalar + // 8 uint32 _blobBaseFeeScalar + // 12 uint64 _sequenceNumber, + // 20 uint64 _timestamp, + // 28 uint64 _l1BlockNumber + // 36 uint256 _basefee, + // 68 uint256 _blobBaseFee, + // 100 bytes32 _hash, + // 132 bytes32 _batcherHash, + l1BaseFee := new(big.Int).SetBytes(data[36:68]) + l1BlobBaseFee := new(big.Int).SetBytes(data[68:100]) + l1BaseFeeScalar := binary.BigEndian.Uint32(data[4:8]) + l1BlobBaseFeeScalar := binary.BigEndian.Uint32(data[8:12]) + return gasParams{ + l1BaseFee: l1BaseFee, + l1BlobBaseFee: l1BlobBaseFee, + l1BaseFeeScalar: &l1BaseFeeScalar, + l1BlobBaseFeeScalar: &l1BlobBaseFeeScalar, + }, nil +} + +// L1Cost computes the the data availability fee for transactions in blocks prior to the Ecotone +// upgrade. It is used by e2e tests so must remain exported. 
+func L1Cost(rollupDataGas uint64, l1BaseFee, overhead, scalar *big.Int) *big.Int { + l1GasUsed := new(big.Int).SetUint64(rollupDataGas) + l1GasUsed.Add(l1GasUsed, overhead) + return l1CostHelper(l1GasUsed, l1BaseFee, scalar) +} + +func l1CostHelper(gasWithOverhead, l1BaseFee, scalar *big.Int) *big.Int { + fee := new(big.Int).Set(gasWithOverhead) + fee.Mul(fee, l1BaseFee).Mul(fee, scalar).Div(fee, oneMillion) + return fee +} + +// NewL1CostFuncFjord returns an l1 cost function suitable for the Fjord upgrade +func NewL1CostFuncFjord(l1BaseFee, l1BlobBaseFee, baseFeeScalar, blobFeeScalar *big.Int) l1CostFunc { + return func(costData RollupCostData) (fee, calldataGasUsed *big.Int) { + // Fjord L1 cost function: + //l1FeeScaled = baseFeeScalar*l1BaseFee*16 + blobFeeScalar*l1BlobBaseFee + //estimatedSize = max(minTransactionSize, intercept + fastlzCoef*fastlzSize) + //l1Cost = estimatedSize * l1FeeScaled / 1e12 + + scaledL1BaseFee := new(big.Int).Mul(baseFeeScalar, l1BaseFee) + calldataCostPerByte := new(big.Int).Mul(scaledL1BaseFee, sixteen) + blobCostPerByte := new(big.Int).Mul(blobFeeScalar, l1BlobBaseFee) + l1FeeScaled := new(big.Int).Add(calldataCostPerByte, blobCostPerByte) + + fastLzSize := new(big.Int).SetUint64(costData.FastLzSize) + estimatedSize := new(big.Int).Add(L1CostIntercept, new(big.Int).Mul(L1CostFastlzCoef, fastLzSize)) + + if estimatedSize.Cmp(MinTransactionSizeScaled) < 0 { + estimatedSize.Set(MinTransactionSizeScaled) + } + + l1CostScaled := new(big.Int).Mul(estimatedSize, l1FeeScaled) + l1Cost := new(big.Int).Div(l1CostScaled, fjordDivisor) + + calldataGasUsed = new(big.Int).Mul(estimatedSize, new(big.Int).SetUint64(params.TxDataNonZeroGasEIP2028)) + calldataGasUsed.Div(calldataGasUsed, big.NewInt(1e6)) + + return l1Cost, calldataGasUsed + } +} + +func extractEcotoneFeeParams(l1FeeParams []byte) (l1BaseFeeScalar, l1BlobBaseFeeScalar *big.Int) { + offset := scalarSectionStart + l1BaseFeeScalar = new(big.Int).SetBytes(l1FeeParams[offset : 
offset+4]) + l1BlobBaseFeeScalar = new(big.Int).SetBytes(l1FeeParams[offset+4 : offset+8]) + return +} + +func bedrockCalldataGasUsed(costData RollupCostData) (calldataGasUsed *big.Int) { + calldataGas := (costData.Zeroes * params.TxDataZeroGas) + (costData.Ones * params.TxDataNonZeroGasEIP2028) + return new(big.Int).SetUint64(calldataGas) +} + +// FlzCompressLen returns the length of the data after compression through FastLZ, based on +// https://github.com/Vectorized/solady/blob/5315d937d79b335c668896d7533ac603adac5315/js/solady.js +func FlzCompressLen(ib []byte) uint32 { + n := uint32(0) + ht := make([]uint32, 8192) + u24 := func(i uint32) uint32 { + return uint32(ib[i]) | (uint32(ib[i+1]) << 8) | (uint32(ib[i+2]) << 16) + } + cmp := func(p uint32, q uint32, e uint32) uint32 { + l := uint32(0) + for e -= q; l < e; l++ { + if ib[p+l] != ib[q+l] { + e = 0 + } + } + return l + } + literals := func(r uint32) { + n += 0x21 * (r / 0x20) + r %= 0x20 + if r != 0 { + n += r + 1 + } + } + match := func(l uint32) { + l-- + n += 3 * (l / 262) + if l%262 >= 6 { + n += 3 + } else { + n += 2 + } + } + hash := func(v uint32) uint32 { + return ((2654435769 * v) >> 19) & 0x1fff + } + setNextHash := func(ip uint32) uint32 { + ht[hash(u24(ip))] = ip + return ip + 1 + } + a := uint32(0) + ipLimit := uint32(len(ib)) - 13 + if len(ib) < 13 { + ipLimit = 0 + } + for ip := a + 2; ip < ipLimit; { + r := uint32(0) + d := uint32(0) + for { + s := u24(ip) + h := hash(s) + r = ht[h] + ht[h] = ip + d = ip - r + if ip >= ipLimit { + break + } + ip++ + if d <= 0x1fff && s == u24(r) { + break + } + } + if ip >= ipLimit { + break + } + ip-- + if ip > a { + literals(ip - a) + } + l := cmp(r+3, ip+3, ipLimit+9) + match(l) + ip = setNextHash(setNextHash(ip + l)) + a = ip + } + literals(uint32(len(ib)) - a) + return n +}
diff --git go-ethereum/core/vm/evm.go op-geth/core/vm/evm.go index 16cc8549080ac0bb0210975ee96693e085ca384a..c1b0ff0d0fac6e4442e752a6033587a9293ae537 100644 --- go-ethereum/core/vm/evm.go +++ op-geth/core/vm/evm.go @@ -20,11 +20,12 @@ import ( "math/big" "sync/atomic"   + "github.com/holiman/uint256" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/params" - "github.com/holiman/uint256" )   type ( @@ -40,6 +41,8 @@ func (evm *EVM) precompile(addr common.Address) (PrecompiledContract, bool) { var precompiles map[common.Address]PrecompiledContract switch { + case evm.chainRules.IsOptimismFjord: + precompiles = PrecompiledContractsFjord case evm.chainRules.IsCancun: precompiles = PrecompiledContractsCancun case evm.chainRules.IsBerlin: @@ -52,6 +55,13 @@ default: precompiles = PrecompiledContractsHomestead } p, ok := precompiles[addr] + // Restrict overrides to known precompiles + if ok && evm.chainConfig.IsOptimism() && evm.Config.OptimismPrecompileOverrides != nil { + override, ok := evm.Config.OptimismPrecompileOverrides(evm.chainRules, p, addr) + if ok { + return override, ok + } + } return p, ok }   @@ -65,6 +75,8 @@ // Transfer transfers ether from one account to the other Transfer TransferFunc // GetHash returns the hash corresponding to n GetHash GetHashFunc + // L1CostFunc returns the L1 cost of the rollup message, the function may be nil, or return nil + L1CostFunc types.L1CostFunc   // Block information Coinbase common.Address // Provides information for COINBASE

Deposit transactions have special processing rules: gas is pre-paid on L1, and deposits that fail in the EVM are still included in the block, with their state changes rolled back (except for the mint). For regular transactions, at the end of the state transition, the EIP-1559 base-fee burn and the L1 data cost are routed to fee vaults.

diff --git go-ethereum/core/state_transition.go op-geth/core/state_transition.go index 9c4f76d1c58538b9fe8ead870c989f27bfbd70b6..a86e4c6d582d5a38fbd5a8473f0401a81e712b5c 100644 --- go-ethereum/core/state_transition.go +++ op-geth/core/state_transition.go @@ -145,20 +145,30 @@ // When SkipAccountChecks is true, the message nonce is not checked against the // account nonce in state. It also disables checking that the sender is an EOA. // This field will be set to true for operations like RPC eth_call. SkipAccountChecks bool + + IsSystemTx bool // IsSystemTx indicates the message, if also a deposit, does not emit gas usage. + IsDepositTx bool // IsDepositTx indicates the message is force-included and can persist a mint. + Mint *big.Int // Mint is the amount to mint before EVM processing, or nil if there is no minting. + RollupCostData types.RollupCostData // RollupCostData caches data to compute the fee we charge for data availability }   // TransactionToMessage converts a transaction into a Message. 
func TransactionToMessage(tx *types.Transaction, s types.Signer, baseFee *big.Int) (*Message, error) { msg := &Message{ - Nonce: tx.Nonce(), - GasLimit: tx.Gas(), - GasPrice: new(big.Int).Set(tx.GasPrice()), - GasFeeCap: new(big.Int).Set(tx.GasFeeCap()), - GasTipCap: new(big.Int).Set(tx.GasTipCap()), - To: tx.To(), - Value: tx.Value(), - Data: tx.Data(), - AccessList: tx.AccessList(), + Nonce: tx.Nonce(), + GasLimit: tx.Gas(), + GasPrice: new(big.Int).Set(tx.GasPrice()), + GasFeeCap: new(big.Int).Set(tx.GasFeeCap()), + GasTipCap: new(big.Int).Set(tx.GasTipCap()), + To: tx.To(), + Value: tx.Value(), + Data: tx.Data(), + AccessList: tx.AccessList(), + IsSystemTx: tx.IsSystemTx(), + IsDepositTx: tx.IsDepositTx(), + Mint: tx.Mint(), + RollupCostData: tx.RollupCostData(), + SkipAccountChecks: false, BlobHashes: tx.BlobHashes(), BlobGasFeeCap: tx.BlobGasFeeCap(), @@ -235,11 +245,21 @@ func (st *StateTransition) buyGas() error { mgval := new(big.Int).SetUint64(st.msg.GasLimit) mgval = mgval.Mul(mgval, st.msg.GasPrice) + var l1Cost *big.Int + if st.evm.Context.L1CostFunc != nil && !st.msg.SkipAccountChecks { + l1Cost = st.evm.Context.L1CostFunc(st.msg.RollupCostData, st.evm.Context.Time) + if l1Cost != nil { + mgval = mgval.Add(mgval, l1Cost) + } + } balanceCheck := new(big.Int).Set(mgval) if st.msg.GasFeeCap != nil { balanceCheck.SetUint64(st.msg.GasLimit) balanceCheck = balanceCheck.Mul(balanceCheck, st.msg.GasFeeCap) balanceCheck.Add(balanceCheck, st.msg.Value) + if l1Cost != nil { + balanceCheck.Add(balanceCheck, l1Cost) + } } if st.evm.ChainConfig().IsCancun(st.evm.Context.BlockNumber, st.evm.Context.Time) { if blobGas := st.blobGasUsed(); blobGas > 0 { @@ -272,6 +292,21 @@ return nil }   func (st *StateTransition) preCheck() error { + if st.msg.IsDepositTx { + // No fee fields to check, no nonce to check, and no need to check if EOA (L1 already verified it for us) + // Gas is free, but no refunds! 
+ st.initialGas = st.msg.GasLimit + st.gasRemaining += st.msg.GasLimit // Add gas here in order to be able to execute calls. + // Don't touch the gas pool for system transactions + if st.msg.IsSystemTx { + if st.evm.ChainConfig().IsOptimismRegolith(st.evm.Context.Time) { + return fmt.Errorf("%w: address %v", ErrSystemTxNotSupported, + st.msg.From.Hex()) + } + return nil + } + return st.gp.SubGas(st.msg.GasLimit) // gas used by deposits may not be used by other txs + } // Only check transactions that are not fake msg := st.msg if !msg.SkipAccountChecks { @@ -365,6 +400,41 @@ // // However if any consensus issue encountered, return the error directly with // nil evm execution result. func (st *StateTransition) TransitionDb() (*ExecutionResult, error) { + if mint := st.msg.Mint; mint != nil { + mintU256, overflow := uint256.FromBig(mint) + if overflow { + return nil, fmt.Errorf("mint value exceeds uint256: %d", mintU256) + } + st.state.AddBalance(st.msg.From, mintU256) + } + snap := st.state.Snapshot() + + result, err := st.innerTransitionDb() + // Failed deposits must still be included. Unless we cannot produce the block at all due to the gas limit. + // On deposit failure, we rewind any state changes from after the minting, and increment the nonce. + if err != nil && err != ErrGasLimitReached && st.msg.IsDepositTx { + st.state.RevertToSnapshot(snap) + // Even though we revert the state changes, always increment the nonce for the next deposit transaction + st.state.SetNonce(st.msg.From, st.state.GetNonce(st.msg.From)+1) + // Record deposits as using all their gas (matches the gas pool) + // System Transactions are special & are not recorded as using any gas (anywhere) + // Regolith changes this behaviour so the actual gas used is reported. + // In this case the tx is invalid so is recorded as using all gas. 
+ gasUsed := st.msg.GasLimit + if st.msg.IsSystemTx && !st.evm.ChainConfig().IsRegolith(st.evm.Context.Time) { + gasUsed = 0 + } + result = &ExecutionResult{ + UsedGas: gasUsed, + Err: fmt.Errorf("failed deposit: %w", err), + ReturnData: nil, + } + err = nil + } + return result, err +} + +func (st *StateTransition) innerTransitionDb() (*ExecutionResult, error) { // First check this message satisfies all consensus rules before // applying the message. The rules include these clauses // @@ -383,7 +453,11 @@ if tracer := st.evm.Config.Tracer; tracer != nil { tracer.CaptureTxStart(st.initialGas) defer func() { - tracer.CaptureTxEnd(st.gasRemaining) + if st.msg.IsDepositTx { + tracer.CaptureTxEnd(0) + } else { + tracer.CaptureTxEnd(st.gasRemaining) + } }() }   @@ -435,6 +509,24 @@ st.state.SetNonce(msg.From, st.state.GetNonce(sender.Address())+1) ret, st.gasRemaining, vmerr = st.evm.Call(sender, st.to(), msg.Data, st.gasRemaining, value) }   + // if deposit: skip refunds, skip tipping coinbase + // Regolith changes this behaviour to report the actual gasUsed instead of always reporting all gas used. + if st.msg.IsDepositTx && !rules.IsOptimismRegolith { + // Record deposits as using all their gas (matches the gas pool) + // System Transactions are special & are not recorded as using any gas (anywhere) + gasUsed := st.msg.GasLimit + if st.msg.IsSystemTx { + gasUsed = 0 + } + return &ExecutionResult{ + UsedGas: gasUsed, + Err: vmerr, + ReturnData: ret, + }, nil + } + // Note for deposit tx there is no ETH refunded for unused gas, but that's taken care of by the fact that gasPrice + // is always 0 for deposit tx. 
So calling refundGas will ensure the gasUsed accounting is correct without actually + // changing the sender's balance var gasRefund uint64 if !rules.IsLondon { // Before EIP-3529: refunds were capped to gasUsed / 2 @@ -443,6 +535,15 @@ } else { // After EIP-3529: refunds are capped to gasUsed / 5 gasRefund = st.refundGas(params.RefundQuotientEIP3529) } + if st.msg.IsDepositTx && rules.IsOptimismRegolith { + // Skip coinbase payments for deposit tx in Regolith + return &ExecutionResult{ + UsedGas: st.gasUsed(), + RefundedGas: gasRefund, + Err: vmerr, + ReturnData: ret, + }, nil + } effectiveTip := msg.GasPrice if rules.IsLondon { effectiveTip = cmath.BigMin(msg.GasTipCap, new(big.Int).Sub(msg.GasFeeCap, st.evm.Context.BaseFee)) @@ -457,6 +558,24 @@ } else { fee := new(uint256.Int).SetUint64(st.gasUsed()) fee.Mul(fee, effectiveTipU256) st.state.AddBalance(st.evm.Context.Coinbase, fee) + } + + // Check that we are post bedrock to enable op-geth to be able to create pseudo pre-bedrock blocks (these are pre-bedrock, but don't follow l2 geth rules) + // Note optimismConfig will not be nil if rules.IsOptimismBedrock is true + if optimismConfig := st.evm.ChainConfig().Optimism; optimismConfig != nil && rules.IsOptimismBedrock && !st.msg.IsDepositTx { + gasCost := new(big.Int).Mul(new(big.Int).SetUint64(st.gasUsed()), st.evm.Context.BaseFee) + amtU256, overflow := uint256.FromBig(gasCost) + if overflow { + return nil, fmt.Errorf("optimism gas cost overflows U256: %d", gasCost) + } + st.state.AddBalance(params.OptimismBaseFeeRecipient, amtU256) + if l1Cost := st.evm.Context.L1CostFunc(st.msg.RollupCostData, st.evm.Context.Time); l1Cost != nil { + amtU256, overflow = uint256.FromBig(l1Cost) + if overflow { + return nil, fmt.Errorf("optimism l1 cost overflows U256: %d", l1Cost) + } + st.state.AddBalance(params.OptimismL1FeeRecipient, amtU256) + } }   return &ExecutionResult{
diff --git go-ethereum/core/error.go op-geth/core/error.go index 72cacf8c78d2fdb4ee98563edbfda8c094057521..8c691b17ff761c541093a5de23db70edba74e5d7 100644 --- go-ethereum/core/error.go +++ op-geth/core/error.go @@ -110,4 +110,7 @@ ErrMissingBlobHashes = errors.New("blob transaction missing blob hashes")   // ErrBlobTxCreate is returned if a blob transaction has no explicit to field. ErrBlobTxCreate = errors.New("blob transaction of type create") + + // ErrSystemTxNotSupported is returned for any deposit tx with IsSystemTx=true after the Regolith fork + ErrSystemTxNotSupported = errors.New("system tx not supported") )

The gaslimit is free to be set by the Engine API caller, instead of enforcing adjustments of the gaslimit in increments of 1/1024 of the previous gaslimit. The gaslimit is changed (and limited) through the SystemConfig contract.

diff --git go-ethereum/consensus/misc/eip1559/eip1559.go op-geth/consensus/misc/eip1559/eip1559.go index 84b82c4c492ec1351da2129b51b99fc50d3c6b28..a66298af692fbb7bb477847452aff5937af55c83 100644 --- go-ethereum/consensus/misc/eip1559/eip1559.go +++ op-geth/consensus/misc/eip1559/eip1559.go @@ -37,15 +37,17 @@ parentGasLimit := parent.GasLimit if !config.IsLondon(parent.Number) { parentGasLimit = parent.GasLimit * config.ElasticityMultiplier() } - if err := misc.VerifyGaslimit(parentGasLimit, header.GasLimit); err != nil { - return err + if config.Optimism == nil { // gasLimit can adjust instantly in optimism + if err := misc.VerifyGaslimit(parentGasLimit, header.GasLimit); err != nil { + return err + } } // Verify the header is not malformed if header.BaseFee == nil { return errors.New("header is missing baseFee") } // Verify the baseFee is correct based on the parent header. - expectedBaseFee := CalcBaseFee(config, parent) + expectedBaseFee := CalcBaseFee(config, parent, header.Time) if header.BaseFee.Cmp(expectedBaseFee) != 0 { return fmt.Errorf("invalid baseFee: have %s, want %s, parentBaseFee %s, parentGasUsed %d", header.BaseFee, expectedBaseFee, parent.BaseFee, parent.GasUsed) @@ -54,7 +56,8 @@ return nil }   // CalcBaseFee calculates the basefee of the header. -func CalcBaseFee(config *params.ChainConfig, parent *types.Header) *big.Int { +// The time belongs to the new block to check if Canyon is activted or not +func CalcBaseFee(config *params.ChainConfig, parent *types.Header, time uint64) *big.Int { // If the current block is the first EIP-1559 block, return the InitialBaseFee. 
if !config.IsLondon(parent.Number) { return new(big.Int).SetUint64(params.InitialBaseFee) @@ -77,7 +80,7 @@ // max(1, parentBaseFee * gasUsedDelta / parentGasTarget / baseFeeChangeDenominator) num.SetUint64(parent.GasUsed - parentGasTarget) num.Mul(num, parent.BaseFee) num.Div(num, denom.SetUint64(parentGasTarget)) - num.Div(num, denom.SetUint64(config.BaseFeeChangeDenominator())) + num.Div(num, denom.SetUint64(config.BaseFeeChangeDenominator(time))) baseFeeDelta := math.BigMax(num, common.Big1)   return num.Add(parent.BaseFee, baseFeeDelta) @@ -87,7 +90,7 @@ // max(0, parentBaseFee * gasUsedDelta / parentGasTarget / baseFeeChangeDenominator) num.SetUint64(parentGasTarget - parent.GasUsed) num.Mul(num, parent.BaseFee) num.Div(num, denom.SetUint64(parentGasTarget)) - num.Div(num, denom.SetUint64(config.BaseFeeChangeDenominator())) + num.Div(num, denom.SetUint64(config.BaseFeeChangeDenominator(time))) baseFee := num.Sub(parent.BaseFee, num)   return math.BigMax(baseFee, common.Big0)

The Engine API is activated at the Merge transition, with a Terminal Total Difficulty (TTD). The rollup starts post-merge, and thus sets the TTD to 0. The TTD is always “reached” starting at the bedrock block.

diff --git go-ethereum/consensus/beacon/consensus.go op-geth/consensus/beacon/consensus.go index a350e383a293223be8f50e236d808c6e5fbe0345..ba4980b8d4ebf46a9d33a70951a3754f719ddf79 100644 --- go-ethereum/consensus/beacon/consensus.go +++ op-geth/consensus/beacon/consensus.go @@ -59,6 +59,7 @@ // The beacon here is a half-functional consensus engine with partial functions which // is only used for necessary consensus checks. The legacy consensus engine can be any // engine implements the consensus interface (except the beacon itself). type Beacon struct { + // For migrated OP chains (OP mainnet, OP Goerli), ethone is a dummy legacy pre-Bedrock consensus ethone consensus.Engine // Original consensus engine used in eth1, e.g. ethash or clique }   @@ -106,12 +107,27 @@ } return errs }   +// OP-Stack Bedrock variant of splitHeaders: the total-terminal difficulty is terminated at bedrock transition, but also reset to 0. +// So just use the bedrock fork check to split the headers, to simplify the splitting. +// The returned slices are slices over the input. The input must be sorted. +func (beacon *Beacon) splitBedrockHeaders(chain consensus.ChainHeaderReader, headers []*types.Header) ([]*types.Header, []*types.Header, error) { + for i, h := range headers { + if chain.Config().IsBedrock(h.Number) { + return headers[:i], headers[i:], nil + } + } + return headers, nil, nil +} + // splitHeaders splits the provided header batch into two parts according to // the configured ttd. It requires the parent of header batch along with its // td are stored correctly in chain. If ttd is not configured yet, all headers // will be treated legacy PoW headers. // Note, this function will not verify the header validity but just split them. 
func (beacon *Beacon) splitHeaders(chain consensus.ChainHeaderReader, headers []*types.Header) ([]*types.Header, []*types.Header, error) { + if chain.Config().Optimism != nil { + return beacon.splitBedrockHeaders(chain, headers) + } // TTD is not defined yet, all headers should be in legacy format. ttd := chain.Config().TerminalTotalDifficulty if ttd == nil { @@ -447,6 +463,10 @@ func (beacon *Beacon) InnerEngine() consensus.Engine { return beacon.ethone }   +func (beacon *Beacon) SwapInner(inner consensus.Engine) { + beacon.ethone = inner +} + // SetThreads updates the mining threads. Delegate the call // to the eth1 engine if it's threaded. func (beacon *Beacon) SetThreads(threads int) { @@ -462,8 +482,16 @@ // IsTTDReached checks if the TotalTerminalDifficulty has been surpassed on the `parentHash` block. // It depends on the parentHash already being stored in the database. // If the parentHash is not stored in the database a UnknownAncestor error is returned. func IsTTDReached(chain consensus.ChainHeaderReader, parentHash common.Hash, parentNumber uint64) (bool, error) { + if cfg := chain.Config(); cfg.Optimism != nil { + // If OP-Stack then bedrock activation number determines when TTD (eth Merge) has been reached. + // Note: some tests/utils will set parentNumber == max_uint64 as "parent" of the genesis block, this is fine. + return cfg.IsBedrock(new(big.Int).SetUint64(parentNumber + 1)), nil + } if chain.Config().TerminalTotalDifficulty == nil { return false, nil + } + if common.Big0.Cmp(chain.Config().TerminalTotalDifficulty) == 0 { // in case TTD is reached at genesis. + return true, nil } td := chain.GetTd(parentHash, parentNumber) if td == nil {

Pre-Bedrock OP-mainnet and OP-Goerli had differently formatted block-headers, loosely compatible with the geth types (since it was based on Clique). However, due to differences like the extra-data length (97+ bytes), these legacy block-headers need special verification. The pre-merge “consensus” fallback is set to this custom but basic verifier, to accept these headers when syncing a pre-bedrock part of the chain, independent of any clique code or configuration (which may be removed from geth at a later point). All the custom verifier has to do is accept the headers, as the headers are already verified by block-hash through the reverse-header-sync.

diff --git go-ethereum/consensus/beacon/oplegacy.go op-geth/consensus/beacon/oplegacy.go new file mode 100644 index 0000000000000000000000000000000000000000..62810cb03f15680aeef90074196bdb1dbefb5dee --- /dev/null +++ op-geth/consensus/beacon/oplegacy.go @@ -0,0 +1,91 @@ +package beacon + +import ( + "fmt" + "math/big" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/consensus" + "github.com/ethereum/go-ethereum/core/state" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/rpc" +) + +type OpLegacy struct{} + +func (o *OpLegacy) Author(header *types.Header) (common.Address, error) { + return header.Coinbase, nil +} + +func (o *OpLegacy) VerifyHeader(chain consensus.ChainHeaderReader, header *types.Header) error { + // Short circuit if the header is known, or its parent not + number := header.Number.Uint64() + if chain.GetHeader(header.Hash(), number) != nil { + return nil + } + parent := chain.GetHeader(header.ParentHash, number-1) + if parent == nil { + return consensus.ErrUnknownAncestor + } + // legacy chain is verified by block-hash reverse sync otherwise + return nil +} + +func (o *OpLegacy) VerifyHeaders(chain consensus.ChainHeaderReader, headers []*types.Header) (chan<- struct{}, <-chan error) { + abort := make(chan struct{}) + results := make(chan error, len(headers)) + + for i := range headers { + // legacy chain is verified by block-hash reverse sync + var parent *types.Header + if i == 0 { + parent = chain.GetHeader(headers[0].ParentHash, headers[0].Number.Uint64()-1) + } else if headers[i-1].Hash() == headers[i].ParentHash { + parent = headers[i-1] + } + var err error + if parent == nil { + err = consensus.ErrUnknownAncestor + } + results <- err + } + return abort, results +} + +func (o *OpLegacy) VerifyUncles(chain consensus.ChainReader, block *types.Block) error { + return nil +} + +func (o *OpLegacy) Prepare(chain consensus.ChainHeaderReader, header *types.Header) error { + return 
fmt.Errorf("cannot prepare for legacy block header: %s (num %d)", header.Hash(), header.Number) +} + +func (o *OpLegacy) Finalize(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, uncles []*types.Header, withdrawals []*types.Withdrawal) { + panic(fmt.Errorf("cannot finalize legacy block header: %s (num %d)", header.Hash(), header.Number)) +} + +func (o *OpLegacy) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, uncles []*types.Header, receipts []*types.Receipt, withdrawals []*types.Withdrawal) (*types.Block, error) { + return nil, fmt.Errorf("cannot finalize and assemble for legacy block header: %s (num %d)", header.Hash(), header.Number) +} + +func (o *OpLegacy) Seal(chain consensus.ChainHeaderReader, block *types.Block, results chan<- *types.Block, stop <-chan struct{}) error { + return fmt.Errorf("cannot seal legacy block header: %s (num %d)", block.Hash(), block.Number()) +} + +func (o *OpLegacy) SealHash(header *types.Header) common.Hash { + panic(fmt.Errorf("cannot compute pow/poa seal-hash for legacy block header: %s (num %d)", header.Hash(), header.Number)) +} + +func (o *OpLegacy) CalcDifficulty(chain consensus.ChainHeaderReader, time uint64, parent *types.Header) *big.Int { + return big.NewInt(0) +} + +func (o *OpLegacy) APIs(chain consensus.ChainHeaderReader) []rpc.API { + return nil +} + +func (o *OpLegacy) Close() error { + return nil +} + +var _ consensus.Engine = (*OpLegacy)(nil)

The Engine API is extended to insert transactions into the block and optionally exclude the tx-pool, to reproduce the exact block of the sequencer from just the inputs, as derived from L1 by the rollup-node. See L2 execution engine specs.

diff --git go-ethereum/beacon/engine/gen_blockparams.go op-geth/beacon/engine/gen_blockparams.go index b1f01b50ff8e3b1adf0454ff989356a6f612a337..c343e5890668558738a5f31b287a89a427888f66 100644 --- go-ethereum/beacon/engine/gen_blockparams.go +++ op-geth/beacon/engine/gen_blockparams.go @@ -21,6 +21,9 @@ Random common.Hash `json:"prevRandao" gencodec:"required"` SuggestedFeeRecipient common.Address `json:"suggestedFeeRecipient" gencodec:"required"` Withdrawals []*types.Withdrawal `json:"withdrawals"` BeaconRoot *common.Hash `json:"parentBeaconBlockRoot"` + Transactions []hexutil.Bytes `json:"transactions,omitempty" gencodec:"optional"` + NoTxPool bool `json:"noTxPool,omitempty" gencodec:"optional"` + GasLimit *hexutil.Uint64 `json:"gasLimit,omitempty" gencodec:"optional"` } var enc PayloadAttributes enc.Timestamp = hexutil.Uint64(p.Timestamp) @@ -28,6 +31,14 @@ enc.Random = p.Random enc.SuggestedFeeRecipient = p.SuggestedFeeRecipient enc.Withdrawals = p.Withdrawals enc.BeaconRoot = p.BeaconRoot + if p.Transactions != nil { + enc.Transactions = make([]hexutil.Bytes, len(p.Transactions)) + for k, v := range p.Transactions { + enc.Transactions[k] = v + } + } + enc.NoTxPool = p.NoTxPool + enc.GasLimit = (*hexutil.Uint64)(p.GasLimit) return json.Marshal(&enc) }   @@ -39,6 +50,9 @@ Random *common.Hash `json:"prevRandao" gencodec:"required"` SuggestedFeeRecipient *common.Address `json:"suggestedFeeRecipient" gencodec:"required"` Withdrawals []*types.Withdrawal `json:"withdrawals"` BeaconRoot *common.Hash `json:"parentBeaconBlockRoot"` + Transactions []hexutil.Bytes `json:"transactions,omitempty" gencodec:"optional"` + NoTxPool *bool `json:"noTxPool,omitempty" gencodec:"optional"` + GasLimit *hexutil.Uint64 `json:"gasLimit,omitempty" gencodec:"optional"` } var dec PayloadAttributes if err := json.Unmarshal(input, &dec); err != nil { @@ -61,6 +75,18 @@ p.Withdrawals = dec.Withdrawals } if dec.BeaconRoot != nil { p.BeaconRoot = dec.BeaconRoot + } + if dec.Transactions != nil 
{ + p.Transactions = make([][]byte, len(dec.Transactions)) + for k, v := range dec.Transactions { + p.Transactions[k] = v + } + } + if dec.NoTxPool != nil { + p.NoTxPool = *dec.NoTxPool + } + if dec.GasLimit != nil { + p.GasLimit = (*uint64)(dec.GasLimit) } return nil }
diff --git go-ethereum/beacon/engine/types.go op-geth/beacon/engine/types.go index 60accc3c7917cd162b6212e8932259c640fa1c0b..9a3ea8d0778feacac33ab42b002ad378edac2d74 100644 --- go-ethereum/beacon/engine/types.go +++ op-geth/beacon/engine/types.go @@ -46,11 +46,22 @@ Random common.Hash `json:"prevRandao" gencodec:"required"` SuggestedFeeRecipient common.Address `json:"suggestedFeeRecipient" gencodec:"required"` Withdrawals []*types.Withdrawal `json:"withdrawals"` BeaconRoot *common.Hash `json:"parentBeaconBlockRoot"` + + // Transactions is a field for rollups: the transactions list is forced into the block + Transactions [][]byte `json:"transactions,omitempty" gencodec:"optional"` + // NoTxPool is a field for rollups: if true, the no transactions are taken out of the tx-pool, + // only transactions from the above Transactions list will be included. + NoTxPool bool `json:"noTxPool,omitempty" gencodec:"optional"` + // GasLimit is a field for rollups: if set, this sets the exact gas limit the block produced with. + GasLimit *uint64 `json:"gasLimit,omitempty" gencodec:"optional"` }   // JSON type overrides for PayloadAttributes. 
type payloadAttributesMarshaling struct { Timestamp hexutil.Uint64 + + Transactions []hexutil.Bytes + GasLimit *hexutil.Uint64 }   //go:generate go run github.com/fjl/gencodec -type ExecutableData -field-override executableDataMarshaling -out gen_ed.go @@ -97,6 +108,9 @@ ExecutionPayload *ExecutableData `json:"executionPayload" gencodec:"required"` BlockValue *big.Int `json:"blockValue" gencodec:"required"` BlobsBundle *BlobsBundleV1 `json:"blobsBundle"` Override bool `json:"shouldOverrideBuilder"` + + // OP-Stack: Ecotone specific fields + ParentBeaconBlockRoot *common.Hash `json:"parentBeaconBlockRoot,omitempty"` }   type BlobsBundleV1 struct { @@ -295,7 +309,13 @@ bundle.Commitments = append(bundle.Commitments, hexutil.Bytes(sidecar.Commitments[j][:])) bundle.Proofs = append(bundle.Proofs, hexutil.Bytes(sidecar.Proofs[j][:])) } } - return &ExecutionPayloadEnvelope{ExecutionPayload: data, BlockValue: fees, BlobsBundle: &bundle, Override: false} + return &ExecutionPayloadEnvelope{ + ExecutionPayload: data, + BlockValue: fees, + BlobsBundle: &bundle, + Override: false, + ParentBeaconBlockRoot: block.BeaconRoot(), + } }   // ExecutionPayloadBodyV1 is used in the response to GetPayloadBodiesByHashV1 and GetPayloadBodiesByRangeV1
diff --git go-ethereum/eth/catalyst/api.go op-geth/eth/catalyst/api.go index 58566a47fc6c36d3eca9f2dcb1f52c5de8010fa5..18862bad630f2d98536e7ac1cc3059aa996c0029 100644 --- go-ethereum/eth/catalyst/api.go +++ op-geth/eth/catalyst/api.go @@ -327,7 +327,7 @@ } else if api.eth.BlockChain().CurrentBlock().Hash() == update.HeadBlockHash { // If the specified head matches with our local head, do nothing and keep // generating the payload. It's a special corner case that a few slots are // missing and we are requested to generate the payload in slot. - } else { + } else if api.eth.BlockChain().Config().Optimism == nil { // minor Engine API divergence: allow proposers to reorg their own chain // If the head block is already in our canonical chain, the beacon client is // probably resyncing. Ignore the update. log.Info("Ignoring beacon update to old head", "number", block.NumberU64(), "hash", update.HeadBlockHash, "age", common.PrettyAge(time.Unix(int64(block.Time()), 0)), "have", api.eth.BlockChain().CurrentBlock().Number) @@ -371,6 +371,17 @@ // If payload generation was requested, create a new block to be potentially // sealed by the beacon client. The payload will be requested later, and we // will replace it arbitrarily many times in between. 
if payloadAttributes != nil { + if api.eth.BlockChain().Config().Optimism != nil && payloadAttributes.GasLimit == nil { + return engine.STATUS_INVALID, engine.InvalidPayloadAttributes.With(errors.New("gasLimit parameter is required")) + } + transactions := make(types.Transactions, 0, len(payloadAttributes.Transactions)) + for i, otx := range payloadAttributes.Transactions { + var tx types.Transaction + if err := tx.UnmarshalBinary(otx); err != nil { + return engine.STATUS_INVALID, fmt.Errorf("transaction %d is not valid: %v", i, err) + } + transactions = append(transactions, &tx) + } args := &miner.BuildPayloadArgs{ Parent: update.HeadBlockHash, Timestamp: payloadAttributes.Timestamp, @@ -378,6 +389,9 @@ FeeRecipient: payloadAttributes.SuggestedFeeRecipient, Random: payloadAttributes.Random, Withdrawals: payloadAttributes.Withdrawals, BeaconRoot: payloadAttributes.BeaconRoot, + NoTxPool: payloadAttributes.NoTxPool, + Transactions: transactions, + GasLimit: payloadAttributes.GasLimit, Version: payloadVersion, } id := args.Id() @@ -759,6 +773,9 @@ // user that something might be off with their consensus node. // // TODO(karalabe): Spin this goroutine down somehow func (api *ConsensusAPI) heartbeat() { + if api.eth.BlockChain().Config().Optimism != nil { // don't start the api heartbeat, there is no transition + return + } // Sleep a bit on startup since there's obviously no beacon client yet // attached, so no need to print scary warnings to the user. time.Sleep(beaconUpdateStartupTimeout)

The block-building code (located in the “miner” package due to Ethereum's Proof-of-Work legacy) implements the changes required to support the transaction-inclusion, tx-pool toggle, and gas limit parameters of the Engine API.

diff --git go-ethereum/miner/miner.go op-geth/miner/miner.go index 58bb71b557b8d6dd7aabbb63751f6877877ae344..85b3d8d13d58582f442ca17798122c83b5946aca 100644 --- go-ethereum/miner/miner.go +++ op-geth/miner/miner.go @@ -18,6 +18,7 @@ // Package miner implements Ethereum block creation and mining. package miner   import ( + "context" "fmt" "math/big" "sync" @@ -31,6 +32,7 @@ "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/txpool" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/eth/downloader" + "github.com/ethereum/go-ethereum/eth/tracers" "github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" @@ -41,6 +43,10 @@ // to offer all the functions here. type Backend interface { BlockChain() *core.BlockChain TxPool() *txpool.TxPool +} + +type BackendWithHistoricalState interface { + StateAtBlock(ctx context.Context, block *types.Block, reexec uint64, base *state.StateDB, readOnly bool, preferDisk bool) (*state.StateDB, tracers.StateReleaseFunc, error) }   // Config is the configuration parameters of mining. @@ -53,12 +59,15 @@ GasPrice *big.Int // Minimum gas price for mining a transaction Recommit time.Duration // The time interval for miner to re-create mining work.   NewPayloadTimeout time.Duration // The maximum time allowance for creating a new payload + + RollupComputePendingBlock bool // Compute the pending block from tx-pool, instead of copying the latest-block + EffectiveGasCeil uint64 // if non-zero, a gas ceiling to apply independent of the header's gaslimit value }   // DefaultConfig contains default settings for miner. var DefaultConfig = Config{ GasCeil: 30000000, - GasPrice: big.NewInt(params.GWei), + GasPrice: big.NewInt(params.Wei),   // The default recommit time is chosen as two seconds since // consensus-layer usually will wait a half slot of time(6s)
diff --git go-ethereum/miner/payload_building.go op-geth/miner/payload_building.go index 719736c4795c03b461a3a8b0fdde6b1cd9495670..833652669ae60fb6561c0100ca20a9d0c0f5677f 100644 --- go-ethereum/miner/payload_building.go +++ op-geth/miner/payload_building.go @@ -19,8 +19,10 @@ import ( "crypto/sha256" "encoding/binary" + "errors" "math/big" "sync" + "sync/atomic" "time"   "github.com/ethereum/go-ethereum/beacon/engine" @@ -42,6 +44,10 @@ Random common.Hash // The provided randomness value Withdrawals types.Withdrawals // The provided withdrawals BeaconRoot *common.Hash // The provided beaconRoot (Cancun) Version engine.PayloadVersion // Versioning byte for payload id calculation. + + NoTxPool bool // Optimism addition: option to disable tx pool contents from being included + Transactions []*types.Transaction // Optimism addition: txs forced into the block via engine API + GasLimit *uint64 // Optimism addition: override gas limit of the block to build }   // Id computes an 8-byte identifier by hashing the components of the payload arguments. @@ -56,6 +62,19 @@ rlp.Encode(hasher, args.Withdrawals) if args.BeaconRoot != nil { hasher.Write(args.BeaconRoot[:]) } + + if args.NoTxPool || len(args.Transactions) > 0 { // extend if extra payload attributes are used + binary.Write(hasher, binary.BigEndian, args.NoTxPool) + binary.Write(hasher, binary.BigEndian, uint64(len(args.Transactions))) + for _, tx := range args.Transactions { + h := tx.Hash() + hasher.Write(h[:]) + } + } + if args.GasLimit != nil { + binary.Write(hasher, binary.BigEndian, *args.GasLimit) + } + var out engine.PayloadID copy(out[:], hasher.Sum(nil)[:8]) out[0] = byte(args.Version) @@ -76,6 +95,10 @@ fullFees *big.Int stop chan struct{} lock sync.Mutex cond *sync.Cond + + err error + stopOnce sync.Once + interrupt *atomic.Int32 // interrupt signal shared with worker }   // newPayload initializes the payload object. 
@@ -84,12 +107,16 @@ payload := &Payload{ id: id, empty: empty, stop: make(chan struct{}), + + interrupt: new(atomic.Int32), } log.Info("Starting work on payload", "id", payload.id) payload.cond = sync.NewCond(&payload.lock) return payload }   +var errInterruptedUpdate = errors.New("interrupted payload update") + // update updates the full-block with latest built version. func (payload *Payload) update(r *newPayloadResult, elapsed time.Duration) { payload.lock.Lock() @@ -100,6 +127,19 @@ case <-payload.stop: return // reject stale update default: } + + defer payload.cond.Broadcast() // fire signal for notifying any full block result + + if errors.Is(r.err, errInterruptedUpdate) { + log.Debug("Ignoring interrupted payload update", "id", payload.id) + return + } else if r.err != nil { + log.Warn("Error building payload update", "id", payload.id, "err", r.err) + payload.err = r.err // record latest error + return + } + log.Debug("New payload update", "id", payload.id, "elapsed", common.PrettyDuration(elapsed)) + // Ensure the newly provided full block has a higher transaction fee. // In post-merge stage, there is no uncle reward anymore and transaction // fee(apart from the mev revenue) is the only indicator for comparison. @@ -121,24 +161,12 @@ "root", r.block.Root(), "elapsed", common.PrettyDuration(elapsed), ) } - payload.cond.Broadcast() // fire signal for notifying full block }   // Resolve returns the latest built payload and also terminates the background // thread for updating payload. It's safe to be called multiple times. 
func (payload *Payload) Resolve() *engine.ExecutionPayloadEnvelope { - payload.lock.Lock() - defer payload.lock.Unlock() - - select { - case <-payload.stop: - default: - close(payload.stop) - } - if payload.full != nil { - return engine.BlockToExecutableData(payload.full, payload.fullFees, payload.sidecars) - } - return engine.BlockToExecutableData(payload.empty, big.NewInt(0), nil) + return payload.resolve(false) }   // ResolveEmpty is basically identical to Resolve, but it expects empty block only. @@ -153,10 +181,24 @@ // ResolveFull is basically identical to Resolve, but it expects full block only. // Don't call Resolve until ResolveFull returns, otherwise it might block forever. func (payload *Payload) ResolveFull() *engine.ExecutionPayloadEnvelope { + return payload.resolve(true) +} + +func (payload *Payload) WaitFull() { payload.lock.Lock() defer payload.lock.Unlock() + payload.cond.Wait() +}   - if payload.full == nil { +func (payload *Payload) resolve(onlyFull bool) *engine.ExecutionPayloadEnvelope { + payload.lock.Lock() + defer payload.lock.Unlock() + + // We interrupt any active building block to prevent it from adding more transactions, + // and if it is an update, don't attempt to seal the block. + payload.interruptBuilding() + + if payload.full == nil && (onlyFull || payload.empty == nil) { select { case <-payload.stop: return nil @@ -167,21 +209,82 @@ // forever if Resolve is called in the meantime which // terminates the background construction process. payload.cond.Wait() } - // Terminate the background payload construction - select { - case <-payload.stop: - default: - close(payload.stop) + + // Now we can signal the building routine to stop. 
+ payload.stopBuilding() + + if payload.full != nil { + return engine.BlockToExecutableData(payload.full, payload.fullFees, payload.sidecars) + } else if !onlyFull && payload.empty != nil { + return engine.BlockToExecutableData(payload.empty, big.NewInt(0), nil) + } else if err := payload.err; err != nil { + log.Error("Error building any payload", "id", payload.id, "err", err) } - return engine.BlockToExecutableData(payload.full, payload.fullFees, payload.sidecars) + return nil +} + +// interruptBuilding sets an interrupt for a potentially ongoing +// block building process. +// This will prevent it from adding new transactions to the block, and if it is +// building an update, the block will also not be sealed, as we would discard +// the update anyways. +// interruptBuilding is safe to be called concurrently. +func (payload *Payload) interruptBuilding() { + // Set the interrupt if not interrupted already. + // It's ok if it has either already been interrupted by payload resolution earlier, + // or by the timeout timer set to commitInterruptTimeout. + if payload.interrupt.CompareAndSwap(commitInterruptNone, commitInterruptResolve) { + log.Debug("Interrupted payload building.", "id", payload.id) + } else { + log.Debug("Payload building already interrupted.", + "id", payload.id, "interrupt", payload.interrupt.Load()) + } +} + +// stopBuilding signals to the block updating routine to stop. An ongoing payload +// building job will still complete. It can be interrupted to stop filling new +// transactions with interruptBuilding. +// stopBuilding is safe to be called concurrently. +func (payload *Payload) stopBuilding() { + // Concurrent Resolve calls should only stop once. + payload.stopOnce.Do(func() { + log.Debug("Stop payload building.", "id", payload.id) + close(payload.stop) + }) }   // buildPayload builds the payload according to the provided parameters. 
func (w *worker) buildPayload(args *BuildPayloadArgs) (*Payload, error) { - // Build the initial version with no transaction included. It should be fast - // enough to run. The empty payload can at least make sure there is something - // to deliver for not missing slot. - emptyParams := &generateParams{ + if args.NoTxPool { // don't start the background payload updating job if there is no tx pool to pull from + // Build the initial version with no transaction included. It should be fast + // enough to run. The empty payload can at least make sure there is something + // to deliver for not missing slot. + // In OP-Stack, the "empty" block is constructed from provided txs only, i.e. no tx-pool usage. + emptyParams := &generateParams{ + timestamp: args.Timestamp, + forceTime: true, + parentHash: args.Parent, + coinbase: args.FeeRecipient, + random: args.Random, + withdrawals: args.Withdrawals, + beaconRoot: args.BeaconRoot, + noTxs: true, + txs: args.Transactions, + gasLimit: args.GasLimit, + } + empty := w.getSealingBlock(emptyParams) + if empty.err != nil { + return nil, empty.err + } + payload := newPayload(empty.block, args.Id()) + // make sure to make it appear as full, otherwise it will wait indefinitely for payload building to complete. + payload.full = empty.block + payload.fullFees = empty.fees + payload.cond.Broadcast() // unblocks Resolve + return payload, nil + } + + fullParams := &generateParams{ timestamp: args.Timestamp, forceTime: true, parentHash: args.Parent, @@ -189,15 +292,21 @@ coinbase: args.FeeRecipient, random: args.Random, withdrawals: args.Withdrawals, beaconRoot: args.BeaconRoot, - noTxs: true, + noTxs: false, + txs: args.Transactions, + gasLimit: args.GasLimit, } - empty := w.getSealingBlock(emptyParams) - if empty.err != nil { - return nil, empty.err + + // Since we skip building the empty block when using the tx pool, we need to explicitly + // validate the BuildPayloadArgs here. 
+ blockTime, err := w.validateParams(fullParams) + if err != nil { + return nil, err }   - // Construct a payload object for return. - payload := newPayload(empty.block, args.Id()) + payload := newPayload(nil, args.Id()) + // set shared interrupt + fullParams.interrupt = payload.interrupt   // Spin up a routine for updating the payload in background. This strategy // can maximum the revenue for including transactions with highest fee. @@ -207,36 +316,59 @@ // for triggering process immediately. timer := time.NewTimer(0) defer timer.Stop()   - // Setup the timer for terminating the process if SECONDS_PER_SLOT (12s in - // the Mainnet configuration) have passed since the point in time identified - // by the timestamp parameter. - endTimer := time.NewTimer(time.Second * 12) + start := time.Now() + // Setup the timer for terminating the payload building process as determined + // by validateParams. + endTimer := time.NewTimer(blockTime) + defer endTimer.Stop()   - fullParams := &generateParams{ - timestamp: args.Timestamp, - forceTime: true, - parentHash: args.Parent, - coinbase: args.FeeRecipient, - random: args.Random, - withdrawals: args.Withdrawals, - beaconRoot: args.BeaconRoot, - noTxs: false, + timeout := time.Now().Add(blockTime) + + stopReason := "delivery" + defer func() { + log.Info("Stopping work on payload", + "id", payload.id, + "reason", stopReason, + "elapsed", common.PrettyDuration(time.Since(start))) + }() + + updatePayload := func() time.Duration { + start := time.Now() + // getSealingBlock is interrupted by shared interrupt + r := w.getSealingBlock(fullParams) + dur := time.Since(start) + // update handles error case + payload.update(r, dur) + if r.err == nil { + // after first successful pass, we're updating + fullParams.isUpdate = true + } + timer.Reset(w.recommit) + return dur }   + var lastDuration time.Duration for { select { case <-timer.C: - start := time.Now() - r := w.getSealingBlock(fullParams) - if r.err == nil { - payload.update(r, 
time.Since(start)) + // We have to prioritize the stop signal because the recommit timer + // might have fired while stop also got closed. + select { + case <-payload.stop: + return + default: + } + // Assuming last payload building duration as lower bound for next one, + // skip new update if we're too close to the timeout anyways. + if lastDuration > 0 && time.Now().Add(lastDuration).After(timeout) { + stopReason = "near-timeout" + return } - timer.Reset(w.recommit) + lastDuration = updatePayload() case <-payload.stop: - log.Info("Stopping work on payload", "id", payload.id, "reason", "delivery") return case <-endTimer.C: - log.Info("Stopping work on payload", "id", payload.id, "reason", "timeout") + stopReason = "timeout" return } }
diff --git go-ethereum/miner/payload_building_test.go op-geth/miner/payload_building_test.go index 708072b5ecf2b593d22a6447d535f76618ee8b7f..3696a60f452d2aeaed6d053850577059a61832fe 100644 --- go-ethereum/miner/payload_building_test.go +++ op-geth/miner/payload_building_test.go @@ -17,6 +17,7 @@ package miner   import ( + "math/big" "reflect" "testing" "time" @@ -30,6 +31,14 @@ "github.com/ethereum/go-ethereum/params" )   func TestBuildPayload(t *testing.T) { + t.Run("no-tx-pool", func(t *testing.T) { testBuildPayload(t, true, false) }) + // no-tx-pool case with interrupt not interesting because no-tx-pool doesn't run + // the builder routine + t.Run("with-tx-pool", func(t *testing.T) { testBuildPayload(t, false, false) }) + t.Run("with-tx-pool-interrupt", func(t *testing.T) { testBuildPayload(t, false, true) }) +} + +func testBuildPayload(t *testing.T, noTxPool, interrupt bool) { t.Parallel() var ( db = rawdb.NewMemoryDatabase() @@ -38,18 +47,33 @@ ) w, b := newTestWorker(t, params.TestChainConfig, ethash.NewFaker(), db, 0) defer w.close()   + const numInterruptTxs = 256 + if interrupt { + // when doing interrupt testing, create a large pool so interruption will + // definitely be visible. 
+ txs := genTxs(1, numInterruptTxs) + b.txPool.Add(txs, true, false) + } + timestamp := uint64(time.Now().Unix()) args := &BuildPayloadArgs{ Parent: b.chain.CurrentBlock().Hash(), Timestamp: timestamp, Random: common.Hash{}, FeeRecipient: recipient, + NoTxPool: noTxPool, } + // payload resolution now interrupts block building, so we have to + // wait for the payloading building process to build its first block payload, err := w.buildPayload(args) if err != nil { t.Fatalf("Failed to build payload %v", err) } verify := func(outer *engine.ExecutionPayloadEnvelope, txs int) { + t.Helper() + if outer == nil { + t.Fatal("ExecutionPayloadEnvelope is nil") + } payload := outer.ExecutionPayload if payload.ParentHash != b.chain.CurrentBlock().Hash() { t.Fatal("Unexpected parent hash") @@ -63,15 +87,27 @@ } if payload.FeeRecipient != recipient { t.Fatal("Unexpected fee recipient") } - if len(payload.Transactions) != txs { - t.Fatal("Unexpected transaction set") + if !interrupt && len(payload.Transactions) != txs { + t.Fatalf("Unexpect transaction set: got %d, expected %d", len(payload.Transactions), txs) + } else if interrupt && len(payload.Transactions) >= txs { + t.Fatalf("Unexpect transaction set: got %d, expected less than %d", len(payload.Transactions), txs) } } - empty := payload.ResolveEmpty() - verify(empty, 0)   - full := payload.ResolveFull() - verify(full, len(pendingTxs)) + if noTxPool { + // we only build the empty block when ignoring the tx pool + empty := payload.ResolveEmpty() + verify(empty, 0) + full := payload.ResolveFull() + verify(full, 0) + } else if interrupt { + full := payload.ResolveFull() + verify(full, len(pendingTxs)+numInterruptTxs) + } else { // tx-pool and no interrupt + payload.WaitFull() + full := payload.ResolveFull() + verify(full, len(pendingTxs)) + }   // Ensure resolve can be called multiple times and the // result should be unchanged @@ -80,6 +116,22 @@ dataTwo := payload.Resolve() if !reflect.DeepEqual(dataOne, dataTwo) { 
t.Fatal("Unexpected payload data") } +} + +func genTxs(startNonce, count uint64) types.Transactions { + txs := make(types.Transactions, 0, count) + signer := types.LatestSigner(params.TestChainConfig) + for nonce := startNonce; nonce < startNonce+count; nonce++ { + txs = append(txs, types.MustSignNewTx(testBankKey, signer, &types.AccessListTx{ + ChainID: params.TestChainConfig.ChainID, + Nonce: nonce, + To: &testUserAddress, + Value: big.NewInt(1000), + Gas: params.TxGas, + GasPrice: big.NewInt(params.InitialBaseFee), + })) + } + return txs }   func TestPayloadId(t *testing.T) {
diff --git go-ethereum/miner/worker.go op-geth/miner/worker.go index 9a36106231e9828713c628fb33f9cbaf1003c491..a69e5644105f19ff1f2a59ee11fd0da493c27181 100644 --- go-ethereum/miner/worker.go +++ op-geth/miner/worker.go @@ -17,6 +17,7 @@ package miner   import ( + "context" "errors" "fmt" "math/big" @@ -26,6 +27,7 @@ "time"   "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/consensus" + "github.com/ethereum/go-ethereum/consensus/misc" "github.com/ethereum/go-ethereum/consensus/misc/eip1559" "github.com/ethereum/go-ethereum/consensus/misc/eip4844" "github.com/ethereum/go-ethereum/core" @@ -33,6 +35,7 @@ "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/txpool" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/eth/tracers" "github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" @@ -56,7 +59,7 @@ resubmitAdjustChanSize = 10   // minRecommitInterval is the minimal time interval to recreate the sealing block with // any newly arrived transactions. - minRecommitInterval = 1 * time.Second + minRecommitInterval = 100 * time.Millisecond   // maxRecommitInterval is the maximum time interval to recreate the sealing block with // any newly arrived transactions. 
@@ -78,6 +81,7 @@ var ( errBlockInterruptedByNewHead = errors.New("new head arrived while building block") errBlockInterruptedByRecommit = errors.New("recommit interrupt while building block") errBlockInterruptedByTimeout = errors.New("timeout while building block") + errBlockInterruptedByResolve = errors.New("payload resolution while building block") )   // environment is the worker's current environment and holds all @@ -142,6 +146,7 @@ commitInterruptNone int32 = iota commitInterruptNewHead commitInterruptResubmit commitInterruptTimeout + commitInterruptResolve )   // newWorkReq represents a request for new sealing work submitting with relative interrupt notifier. @@ -348,6 +353,9 @@ // pending returns the pending state and corresponding block. The returned // values can be nil in case the pending block is not initialized. func (w *worker) pending() (*types.Block, *state.StateDB) { + if w.chainConfig.Optimism != nil && !w.config.RollupComputePendingBlock { + return nil, nil // when not computing the pending block, there is never a pending state + } w.snapshotMu.RLock() defer w.snapshotMu.RUnlock() if w.snapshotState == nil { @@ -359,6 +367,12 @@ // pendingBlock returns pending block. The returned block can be nil in case the // pending block is not initialized. func (w *worker) pendingBlock() *types.Block { + if w.chainConfig.Optimism != nil && !w.config.RollupComputePendingBlock { + // For compatibility when not computing a pending block, we serve the latest block as "pending" + headHeader := w.eth.BlockChain().CurrentHeader() + headBlock := w.eth.BlockChain().GetBlock(headHeader.Hash(), headHeader.Number.Uint64()) + return headBlock + } w.snapshotMu.RLock() defer w.snapshotMu.RUnlock() return w.snapshotBlock @@ -367,6 +381,9 @@ // pendingBlockAndReceipts returns pending block and corresponding receipts. // The returned values can be nil in case the pending block is not initialized. 
func (w *worker) pendingBlockAndReceipts() (*types.Block, types.Receipts) { + if w.chainConfig.Optimism != nil && !w.config.RollupComputePendingBlock { + return nil, nil // when not computing the pending block, there are no pending receipts, and thus no pending logs + } w.snapshotMu.RLock() defer w.snapshotMu.RUnlock() return w.snapshotBlock, w.snapshotReceipts @@ -421,6 +438,19 @@ // newWorkLoop is a standalone goroutine to submit new sealing work upon received events. func (w *worker) newWorkLoop(recommit time.Duration) { defer w.wg.Done() + if w.chainConfig.Optimism != nil && !w.config.RollupComputePendingBlock { + for { // do not update the pending-block, instead drain work without doing it, to keep producers from blocking. + select { + case <-w.startCh: + case <-w.chainHeadCh: + case <-w.resubmitIntervalCh: + case <-w.resubmitAdjustCh: + case <-w.exitCh: + return + } + } + } + var ( interrupt *atomic.Int32 minRecommit = recommit // minimal resubmit interval specified by user. @@ -538,6 +568,9 @@ case req := <-w.getWorkCh: req.result <- w.generateWork(req.params)   case ev := <-w.txsCh: + if w.chainConfig.Optimism != nil && !w.config.RollupComputePendingBlock { + continue // don't update the pending-block snapshot if we are not computing the pending block + } // Apply transactions to the pending state if we're not sealing // // Note all transactions received may not be continuous with transactions @@ -720,6 +753,15 @@ func (w *worker) makeEnv(parent *types.Header, header *types.Header, coinbase common.Address) (*environment, error) { // Retrieve the parent state to execute on top and start a prefetcher for // the miner to speed block sealing up a bit. 
state, err := w.chain.StateAt(parent.Root) + if err != nil && w.chainConfig.Optimism != nil { // Allow the miner to reorg its own chain arbitrarily deep + if historicalBackend, ok := w.eth.(BackendWithHistoricalState); ok { + var release tracers.StateReleaseFunc + parentBlock := w.eth.BlockChain().GetBlockByHash(parent.Hash()) + state, release, err = historicalBackend.StateAtBlock(context.Background(), parentBlock, ^uint64(0), nil, false, false) + state = state.Copy() + release() + } + } if err != nil { return nil, err } @@ -933,6 +975,43 @@ random common.Hash // The randomness generated by beacon chain, empty before the merge withdrawals types.Withdrawals // List of withdrawals to include in block. beaconRoot *common.Hash // The beacon root (cancun field). noTxs bool // Flag whether an empty block without any transaction is expected + + txs types.Transactions // Deposit transactions to include at the start of the block + gasLimit *uint64 // Optional gas limit override + interrupt *atomic.Int32 // Optional interruption signal to pass down to worker.generateWork + isUpdate bool // Optional flag indicating that this is building a discardable update +} + +// validateParams validates the given parameters. +// It currently checks that the parent block is known and that the timestamp is valid, +// i.e., after the parent block's timestamp. +// It returns an upper bound of the payload building duration as computed +// by the difference in block timestamps between the parent and genParams. 
+func (w *worker) validateParams(genParams *generateParams) (time.Duration, error) { + w.mu.RLock() + defer w.mu.RUnlock() + + // Find the parent block for sealing task + parent := w.chain.CurrentBlock() + if genParams.parentHash != (common.Hash{}) { + block := w.chain.GetBlockByHash(genParams.parentHash) + if block == nil { + return 0, fmt.Errorf("missing parent %v", genParams.parentHash) + } + parent = block.Header() + } + + // Sanity check the timestamp correctness + blockTime := int64(genParams.timestamp) - int64(parent.Time) + if blockTime <= 0 && genParams.forceTime { + return 0, fmt.Errorf("invalid timestamp, parent %d given %d", parent.Time, genParams.timestamp) + } + + // minimum payload build time of 2s + if blockTime < 2 { + blockTime = 2 + } + return time.Duration(blockTime) * time.Second, nil }   // prepareWork constructs the sealing task according to the given parameters, @@ -969,7 +1048,7 @@ Time: timestamp, Coinbase: genParams.coinbase, } // Set the extra field. - if len(w.extra) != 0 { + if len(w.extra) != 0 && w.chainConfig.Optimism == nil { // Optimism chains must not set any extra data. header.Extra = w.extra } // Set the randomness field from the beacon chain if it's available. 
@@ -978,12 +1057,18 @@ header.MixDigest = genParams.random } // Set baseFee and GasLimit if we are on an EIP-1559 chain if w.chainConfig.IsLondon(header.Number) { - header.BaseFee = eip1559.CalcBaseFee(w.chainConfig, parent) + header.BaseFee = eip1559.CalcBaseFee(w.chainConfig, parent, header.Time) if !w.chainConfig.IsLondon(parent.Number) { parentGasLimit := parent.GasLimit * w.chainConfig.ElasticityMultiplier() header.GasLimit = core.CalcGasLimit(parentGasLimit, w.config.GasCeil) } } + if genParams.gasLimit != nil { // override gas limit if specified + header.GasLimit = *genParams.gasLimit + } else if w.chain.Config().Optimism != nil && w.config.GasCeil != 0 { + // configure the gas limit of pending blocks with the miner gas limit config when using optimism + header.GasLimit = w.config.GasCeil + } // Apply EIP-4844, EIP-4788. if w.chainConfig.IsCancun(header.Number, header.Time) { var excessBlobGas uint64 @@ -1011,7 +1096,7 @@ log.Error("Failed to create sealing context", "err", err) return nil, err } if header.ParentBeaconRoot != nil { - context := core.NewEVMBlockContext(header, w.chain, nil) + context := core.NewEVMBlockContext(header, w.chain, nil, w.chainConfig, env.state) vmenv := vm.NewEVM(context, vm.TxContext{}, env.state, w.chainConfig, vm.Config{}) core.ProcessBeaconBlockRoot(*header.ParentBeaconRoot, vmenv, env.state) } @@ -1077,26 +1162,56 @@ return nil }   // generateWork generates a sealing block based on the given parameters. 
-func (w *worker) generateWork(params *generateParams) *newPayloadResult { - work, err := w.prepareWork(params) +func (w *worker) generateWork(genParams *generateParams) *newPayloadResult { + work, err := w.prepareWork(genParams) if err != nil { return &newPayloadResult{err: err} } defer work.discard() + if work.gasPool == nil { + gasLimit := w.config.EffectiveGasCeil + if gasLimit == 0 || gasLimit > work.header.GasLimit { + gasLimit = work.header.GasLimit + } + work.gasPool = new(core.GasPool).AddGas(gasLimit) + }   - if !params.noTxs { - interrupt := new(atomic.Int32) + misc.EnsureCreate2Deployer(w.chainConfig, work.header.Time, work.state) + + for _, tx := range genParams.txs { + from, _ := types.Sender(work.signer, tx) + work.state.SetTxContext(tx.Hash(), work.tcount) + _, err := w.commitTransaction(work, tx) + if err != nil { + return &newPayloadResult{err: fmt.Errorf("failed to force-include tx: %s type: %d sender: %s nonce: %d, err: %w", tx.Hash(), tx.Type(), from, tx.Nonce(), err)} + } + work.tcount++ + } + + // forced transactions done, fill rest of block with transactions + if !genParams.noTxs { + // use shared interrupt if present + interrupt := genParams.interrupt + if interrupt == nil { + interrupt = new(atomic.Int32) + } timer := time.AfterFunc(w.newpayloadTimeout, func() { interrupt.Store(commitInterruptTimeout) }) - defer timer.Stop()   err := w.fillTransactions(interrupt, work) + timer.Stop() // don't need timeout interruption any more if errors.Is(err, errBlockInterruptedByTimeout) { log.Warn("Block building is interrupted", "allowance", common.PrettyDuration(w.newpayloadTimeout)) + } else if errors.Is(err, errBlockInterruptedByResolve) { + log.Info("Block building got interrupted by payload resolution") } } - block, err := w.engine.FinalizeAndAssemble(w.chain, work.header, work.state, work.txs, nil, work.receipts, params.withdrawals) + if intr := genParams.interrupt; intr != nil && genParams.isUpdate && intr.Load() != commitInterruptNone { + 
return &newPayloadResult{err: errInterruptedUpdate} + } + + block, err := w.engine.FinalizeAndAssemble(w.chain, work.header, work.state, work.txs, nil, work.receipts, genParams.withdrawals) if err != nil { return &newPayloadResult{err: err} } @@ -1272,6 +1387,8 @@ case commitInterruptResubmit: return errBlockInterruptedByRecommit case commitInterruptTimeout: return errBlockInterruptedByTimeout + case commitInterruptResolve: + return errBlockInterruptedByResolve default: panic(fmt.Errorf("undefined signal %d", signal)) }
diff --git go-ethereum/miner/worker_test.go op-geth/miner/worker_test.go index 9dba12ae51a26b94888a2e3edb874f211c6ff9e9..b5235c21383108dbcff71beb1f53b6b744f75daf 100644 --- go-ethereum/miner/worker_test.go +++ op-geth/miner/worker_test.go @@ -303,7 +303,8 @@ min := float64(3 * time.Second.Nanoseconds()) estimate = estimate*(1-intervalAdjustRatio) + intervalAdjustRatio*(min-intervalAdjustBias) wantMinInterval, wantRecommitInterval = 3*time.Second, time.Duration(estimate)*time.Nanosecond case 3: - wantMinInterval, wantRecommitInterval = time.Second, time.Second + // lower than upstream test, since enforced min recommit interval is lower + wantMinInterval, wantRecommitInterval = 500*time.Millisecond, 500*time.Millisecond }   // Check interval

Transaction queueing and inclusion need to account for the L1 cost component.

diff --git go-ethereum/core/txpool/blobpool/blobpool.go op-geth/core/txpool/blobpool/blobpool.go index 3ed698c1b18fcc4bfa0d8622208917cdd7c1ad33..15e5b8c2c4e55eaa0539cafb7647d37464e206dc 100644 --- go-ethereum/core/txpool/blobpool/blobpool.go +++ op-geth/core/txpool/blobpool/blobpool.go @@ -401,7 +401,7 @@ for addr := range p.index { p.recheck(addr, nil) } var ( - basefee = uint256.MustFromBig(eip1559.CalcBaseFee(p.chain.Config(), p.head)) + basefee = uint256.MustFromBig(eip1559.CalcBaseFee(p.chain.Config(), p.head, p.head.Time+1)) blobfee = uint256.NewInt(params.BlobTxMinBlobGasprice) ) if p.head.ExcessBlobGas != nil { @@ -822,7 +822,7 @@ p.limbo.finalize(p.chain.CurrentFinalBlock()) } // Reset the price heap for the new set of basefee/blobfee pairs var ( - basefee = uint256.MustFromBig(eip1559.CalcBaseFee(p.chain.Config(), newHead)) + basefee = uint256.MustFromBig(eip1559.CalcBaseFee(p.chain.Config(), newHead, newHead.Time+1)) blobfee = uint256.MustFromBig(big.NewInt(params.BlobTxMinBlobGasprice)) ) if newHead.ExcessBlobGas != nil {
diff --git go-ethereum/core/txpool/blobpool/blobpool_test.go op-geth/core/txpool/blobpool/blobpool_test.go index f7644c1d0ab61c8faa954c668da30abf20e3d8ee..92e2f0580958d0511cb904e08de26c56aba4b401 100644 --- go-ethereum/core/txpool/blobpool/blobpool_test.go +++ op-geth/core/txpool/blobpool/blobpool_test.go @@ -102,7 +102,7 @@ Number: blockNumber, GasLimit: gasLimit, GasUsed: 0, BaseFee: mid, - }).Cmp(bc.basefee.ToBig()) > 0 { + }, blockTime).Cmp(bc.basefee.ToBig()) > 0 { hi = mid } else { lo = mid
diff --git go-ethereum/core/txpool/legacypool/legacypool.go op-geth/core/txpool/legacypool/legacypool.go index 4e1d26acf4056712a3a4bff2f8bc796fc81f8759..161e3e84018f3a0ffd8e8fc32fb025d7bffb7a37 100644 --- go-ethereum/core/txpool/legacypool/legacypool.go +++ op-geth/core/txpool/legacypool/legacypool.go @@ -126,6 +126,10 @@ NoLocals bool // Whether local transaction handling should be disabled Journal string // Journal of local transactions to survive node restarts Rejournal time.Duration // Time interval to regenerate the local transaction journal   + // JournalRemote controls whether journaling includes remote transactions or not. + // When true, all transactions loaded from the journal are treated as remote. + JournalRemote bool + PriceLimit uint64 // Minimum gas price to enforce for acceptance into the pool PriceBump uint64 // Minimum price bump percentage to replace an already existing transaction (nonce)   @@ -135,6 +139,8 @@ AccountQueue uint64 // Maximum number of non-executable transaction slots permitted per account GlobalQueue uint64 // Maximum number of non-executable transaction slots for all accounts   Lifetime time.Duration // Maximum amount of time non-executable transaction are queued + + EffectiveGasCeil uint64 // if non-zero, a gas ceiling to enforce independent of the header's gaslimit value }   // DefaultConfig contains the default configurations for the transaction pool. @@ -231,6 +237,8 @@ wg sync.WaitGroup // tracks loop, scheduleReorgLoop initDoneCh chan struct{} // is closed once the pool is initialized (for tests)   changesSinceReorg int // A counter for how many drops we've performed in-between reorg. + + l1CostFn txpool.L1CostFunc // To apply L1 costs as rollup, optional field, may be nil. 
}   type txpoolResetRequest struct { @@ -267,7 +275,7 @@ pool.locals.add(addr) } pool.priced = newPricedList(pool.all)   - if !config.NoLocals && config.Journal != "" { + if (!config.NoLocals || config.JournalRemote) && config.Journal != "" { pool.journal = newTxJournal(config.Journal) } return pool @@ -316,10 +324,14 @@ go pool.scheduleReorgLoop()   // If local transactions and journaling is enabled, load from disk if pool.journal != nil { - if err := pool.journal.load(pool.addLocals); err != nil { + add := pool.addLocals + if pool.config.JournalRemote { + add = pool.addRemotesSync // Use sync version to match pool.AddLocals + } + if err := pool.journal.load(add); err != nil { log.Warn("Failed to load transaction journal", "err", err) } - if err := pool.journal.rotate(pool.local()); err != nil { + if err := pool.journal.rotate(pool.toJournal()); err != nil { log.Warn("Failed to rotate transaction journal", "err", err) } } @@ -389,7 +401,7 @@ // Handle local transaction journal rotation case <-journal.C: if pool.journal != nil { pool.mu.Lock() - if err := pool.journal.rotate(pool.local()); err != nil { + if err := pool.journal.rotate(pool.toJournal()); err != nil { log.Warn("Failed to rotate local tx journal", "err", err) } pool.mu.Unlock() @@ -583,6 +595,23 @@ return pool.locals.flatten() }   +// toJournal retrieves all transactions that should be included in the journal, +// grouped by origin account and sorted by nonce. +// The returned transaction set is a copy and can be freely modified by calling code. +func (pool *LegacyPool) toJournal() map[common.Address]types.Transactions { + if !pool.config.JournalRemote { + return pool.local() + } + txs := make(map[common.Address]types.Transactions) + for addr, pending := range pool.pending { + txs[addr] = append(txs[addr], pending.Flatten()...) + } + for addr, queued := range pool.queue { + txs[addr] = append(txs[addr], queued.Flatten()...) 
+ } + return txs +} + // local retrieves all currently known local transactions, grouped by origin // account and sorted by nonce. The returned transaction set is a copy and can be // freely modified by calling code. @@ -610,8 +639,9 @@ Accept: 0 | 1<<types.LegacyTxType | 1<<types.AccessListTxType | 1<<types.DynamicFeeTxType, - MaxSize: txMaxSize, - MinTip: pool.gasTip.Load().ToBig(), + MaxSize: txMaxSize, + MinTip: pool.gasTip.Load().ToBig(), + EffectiveGasCeil: pool.config.EffectiveGasCeil, } if local { opts.MinTip = new(big.Int) @@ -648,11 +678,18 @@ }, ExistingCost: func(addr common.Address, nonce uint64) *big.Int { if list := pool.pending[addr]; list != nil { if tx := list.txs.Get(nonce); tx != nil { - return tx.Cost() + cost := tx.Cost() + if pool.l1CostFn != nil { + if l1Cost := pool.l1CostFn(tx.RollupCostData()); l1Cost != nil { // add rollup cost + cost = cost.Add(cost, l1Cost) + } + } + return cost } } return nil }, + L1CostFn: pool.l1CostFn, } if err := txpool.ValidateTransactionWithState(tx, pool.signer, opts); err != nil { return err @@ -775,7 +812,7 @@ // Try to replace an existing transaction in the pending pool if list := pool.pending[from]; list != nil && list.Contains(tx.Nonce()) { // Nonce already pending, check if required price bump is met - inserted, old := list.Add(tx, pool.config.PriceBump) + inserted, old := list.Add(tx, pool.config.PriceBump, pool.l1CostFn) if !inserted { pendingDiscardMeter.Mark(1) return false, txpool.ErrReplaceUnderpriced @@ -849,7 +886,7 @@ from, _ := types.Sender(pool.signer, tx) // already validated if pool.queue[from] == nil { pool.queue[from] = newList(false) } - inserted, old := pool.queue[from].Add(tx, pool.config.PriceBump) + inserted, old := pool.queue[from].Add(tx, pool.config.PriceBump, pool.l1CostFn) if !inserted { // An older transaction was better, discard this queuedDiscardMeter.Mark(1) @@ -884,7 +921,7 @@ // journalTx adds the specified transaction to the local disk journal if it is // deemed to have 
been sent from a local account. func (pool *LegacyPool) journalTx(from common.Address, tx *types.Transaction) { // Only journal if it's enabled and the transaction is local - if pool.journal == nil || !pool.locals.contains(from) { + if pool.journal == nil || (!pool.config.JournalRemote && !pool.locals.contains(from)) { return } if err := pool.journal.insert(tx); err != nil { @@ -903,7 +940,7 @@ pool.pending[addr] = newList(true) } list := pool.pending[addr]   - inserted, old := list.Add(tx, pool.config.PriceBump) + inserted, old := list.Add(tx, pool.config.PriceBump, pool.l1CostFn) if !inserted { // An older transaction was better, discard this pool.all.Remove(hash) @@ -1298,7 +1335,7 @@ if reset != nil { pool.demoteUnexecutables() if reset.newHead != nil { if pool.chainconfig.IsLondon(new(big.Int).Add(reset.newHead.Number, big.NewInt(1))) { - pendingBaseFee := eip1559.CalcBaseFee(pool.chainconfig, reset.newHead) + pendingBaseFee := eip1559.CalcBaseFee(pool.chainconfig, reset.newHead, reset.newHead.Time+1) pool.priced.SetBaseFee(pendingBaseFee) } else { pool.priced.Reheap() @@ -1430,12 +1467,36 @@ pool.currentHead.Store(newHead) pool.currentState = statedb pool.pendingNonces = newNoncer(statedb)   + if costFn := types.NewL1CostFunc(pool.chainconfig, statedb); costFn != nil { + pool.l1CostFn = func(rollupCostData types.RollupCostData) *big.Int { + return costFn(rollupCostData, newHead.Time) + } + } + // Inject any transactions discarded due to reorgs log.Debug("Reinjecting stale transactions", "count", len(reinject)) core.SenderCacher.Recover(pool.signer, reinject) pool.addTxsLocked(reinject, false) }   +// reduceBalanceByL1Cost returns the given balance, reduced by the L1Cost of the first transaction in list if applicable +// Other txs will get filtered out necessary. 
+func (pool *LegacyPool) reduceBalanceByL1Cost(list *list, balance *uint256.Int) *uint256.Int { + if !list.Empty() && pool.l1CostFn != nil { + el := list.txs.FirstElement() + if l1Cost := pool.l1CostFn(el.RollupCostData()); l1Cost != nil { + l1Cost256 := uint256.MustFromBig(l1Cost) + if l1Cost256.Cmp(balance) >= 0 { + // Avoid underflow + balance = uint256.NewInt(0) + } else { + balance = new(uint256.Int).Sub(balance, l1Cost256) + } + } + } + return balance +} + // promoteExecutables moves transactions that have become processable from the // future queue to the set of pending transactions. During this process, all // invalidated transactions (low nonce, low balance) are deleted. @@ -1444,7 +1505,7 @@ // Track the promoted transactions to broadcast them at once var promoted []*types.Transaction   // Iterate over all accounts and promote any executable transactions - gasLimit := pool.currentHead.Load().GasLimit + gasLimit := txpool.EffectiveGasLimit(pool.chainconfig, pool.currentHead.Load().GasLimit, pool.config.EffectiveGasCeil) for _, addr := range accounts { list := pool.queue[addr] if list == nil { @@ -1457,8 +1518,10 @@ hash := tx.Hash() pool.all.Remove(hash) } log.Trace("Removed old queued transactions", "count", len(forwards)) + balance := pool.currentState.GetBalance(addr) + balance = pool.reduceBalanceByL1Cost(list, balance) // Drop all transactions that are too costly (low balance or out of gas) - drops, _ := list.Filter(pool.currentState.GetBalance(addr), gasLimit) + drops, _ := list.Filter(balance, gasLimit) for _, tx := range drops { hash := tx.Hash() pool.all.Remove(hash) @@ -1647,7 +1710,7 @@ // is always explicitly triggered by SetBaseFee and it would be unnecessary and wasteful // to trigger a re-heap is this function func (pool *LegacyPool) demoteUnexecutables() { // Iterate over all accounts and demote any non-executable transactions - gasLimit := pool.currentHead.Load().GasLimit + gasLimit := txpool.EffectiveGasLimit(pool.chainconfig, 
pool.currentHead.Load().GasLimit, pool.config.EffectiveGasCeil) for addr, list := range pool.pending { nonce := pool.currentState.GetNonce(addr)   @@ -1658,8 +1721,10 @@ hash := tx.Hash() pool.all.Remove(hash) log.Trace("Removed old pending transaction", "hash", hash) } + balance := pool.currentState.GetBalance(addr) + balance = pool.reduceBalanceByL1Cost(list, balance) // Drop all transactions that are too costly (low balance or out of gas), and queue any invalids back for later - drops, invalids := list.Filter(pool.currentState.GetBalance(addr), gasLimit) + drops, invalids := list.Filter(balance, gasLimit) for _, tx := range drops { hash := tx.Hash() log.Trace("Removed unpayable pending transaction", "hash", hash)
diff --git go-ethereum/core/txpool/legacypool/legacypool_test.go op-geth/core/txpool/legacypool/legacypool_test.go index 7ffbf745bb8e954271132df7c2f552e695477be1..427f95096d3dba3c7d64b6ddcd8ea548fbf1d49c 100644 --- go-ethereum/core/txpool/legacypool/legacypool_test.go +++ op-geth/core/txpool/legacypool/legacypool_test.go @@ -2343,10 +2343,13 @@ }   // Tests that local transactions are journaled to disk, but remote transactions // get discarded between restarts. -func TestJournaling(t *testing.T) { testJournaling(t, false) } -func TestJournalingNoLocals(t *testing.T) { testJournaling(t, true) } +func TestJournaling(t *testing.T) { testJournaling(t, false, false) } +func TestJournalingNoLocals(t *testing.T) { testJournaling(t, true, false) } + +func TestJournalingRemotes(t *testing.T) { testJournaling(t, false, true) } +func TestJournalingRemotesNoLocals(t *testing.T) { testJournaling(t, true, true) }   -func testJournaling(t *testing.T, nolocals bool) { +func testJournaling(t *testing.T, nolocals bool, journalRemotes bool) { t.Parallel()   // Create a temporary file for the journal @@ -2367,6 +2370,7 @@ blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed))   config := testTxPoolConfig config.NoLocals = nolocals + config.JournalRemote = journalRemotes config.Journal = journal config.Rejournal = time.Second   @@ -2415,10 +2419,14 @@ pending, queued = pool.Stats() if queued != 0 { t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 0) } - if nolocals { + if nolocals && !journalRemotes { if pending != 0 { t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 0) } + } else if journalRemotes { + if pending != 3 { + t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 3) + } } else { if pending != 2 { t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 2) @@ -2439,10 +2447,16 @@ pool = New(config, blockchain) pool.Init(config.PriceLimit, blockchain.CurrentBlock(), 
makeAddressReserver())   pending, queued = pool.Stats() - if pending != 0 { - t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 0) + if journalRemotes { + if pending != 1 { // Remove the 2 replaced local transactions, but preserve the remote + t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 1) + } + } else { + if pending != 0 { + t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 0) + } } - if nolocals { + if nolocals && !journalRemotes { if queued != 0 { t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 0) }
diff --git go-ethereum/core/txpool/legacypool/list.go op-geth/core/txpool/legacypool/list.go index 7db9c98ace638b979e0d2f6f257526d714015f2e..380001cabc96ec9f0ffc8e0142a7014ee0e8412e 100644 --- go-ethereum/core/txpool/legacypool/list.go +++ op-geth/core/txpool/legacypool/list.go @@ -26,6 +26,7 @@ "sync/atomic" "time"   "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/txpool" "github.com/ethereum/go-ethereum/core/types" "github.com/holiman/uint256" "golang.org/x/exp/slices" @@ -265,6 +266,15 @@ cache := m.flatten() return cache[len(cache)-1] }   +// FirstElement returns the first element from the heap (guaranteed to be lowest), thus, the +// transaction with the lowest nonce. Returns nil if there are no elements. +func (m *sortedMap) FirstElement() *types.Transaction { + if m.Len() == 0 { + return nil + } + return m.Get((*m.index)[0]) +} + // list is a "list" of transactions belonging to an account, sorted by account // nonce. The same type can be used both for storing contiguous transactions for // the executable/pending queue; and for storing gapped transactions for the non- @@ -300,7 +310,7 @@ // transaction was accepted, and if yes, any previous transaction it replaced. // // If the new transaction is accepted into the list, the lists' cost and gas // thresholds are also potentially updated. -func (l *list) Add(tx *types.Transaction, priceBump uint64) (bool, *types.Transaction) { +func (l *list) Add(tx *types.Transaction, priceBump uint64, l1CostFn txpool.L1CostFunc) (bool, *types.Transaction) { // If there's an older better transaction, abort old := l.txs.Get(tx.Nonce()) if old != nil { @@ -332,7 +342,11 @@ if overflow { return false, nil } l.totalcost.Add(l.totalcost, cost) - + if l1CostFn != nil { + if l1Cost := l1CostFn(tx.RollupCostData()); l1Cost != nil { // add rollup cost + l.totalcost.Add(l.totalcost, cost) + } + } // Otherwise overwrite the old transaction with the current one l.txs.Put(tx) if l.costcap.Cmp(cost) < 0 {
diff --git go-ethereum/core/txpool/legacypool/list_test.go op-geth/core/txpool/legacypool/list_test.go index 8587c66f7d22a07f5776d7e25f90499eddc239cc..9f341a7f2026a9ef79944f542fbde08d558c21e2 100644 --- go-ethereum/core/txpool/legacypool/list_test.go +++ op-geth/core/txpool/legacypool/list_test.go @@ -40,7 +40,7 @@ } // Insert the transactions in a random order list := newList(true) for _, v := range rand.Perm(len(txs)) { - list.Add(txs[v], DefaultConfig.PriceBump) + list.Add(txs[v], DefaultConfig.PriceBump, nil) } // Verify internal state if len(list.txs.items) != len(txs) { @@ -64,7 +64,7 @@ gasprice, _ := new(big.Int).SetString("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", 0) gaslimit := uint64(i) tx, _ := types.SignTx(types.NewTransaction(uint64(i), common.Address{}, value, gaslimit, gasprice, nil), types.HomesteadSigner{}, key) t.Logf("cost: %x bitlen: %d\n", tx.Cost(), tx.Cost().BitLen()) - list.Add(tx, DefaultConfig.PriceBump) + list.Add(tx, DefaultConfig.PriceBump, nil) } }   @@ -82,7 +82,7 @@ b.ResetTimer() for i := 0; i < b.N; i++ { list := newList(true) for _, v := range rand.Perm(len(txs)) { - list.Add(txs[v], DefaultConfig.PriceBump) + list.Add(txs[v], DefaultConfig.PriceBump, nil) list.Filter(priceLimit, DefaultConfig.PriceBump) } } @@ -102,7 +102,7 @@ for i := 0; i < b.N; i++ { list := newList(true) // Insert the transactions in a random order for _, v := range rand.Perm(len(txs)) { - list.Add(txs[v], DefaultConfig.PriceBump) + list.Add(txs[v], DefaultConfig.PriceBump, nil) } b.StartTimer() list.Cap(list.Len() - 1)
diff --git go-ethereum/core/txpool/txpool.go op-geth/core/txpool/txpool.go index be7435247d92749027ccbee4e92f0d44153b4a4b..ff31ffc200643fa203b16a4ce20321061fc7da90 100644 --- go-ethereum/core/txpool/txpool.go +++ op-geth/core/txpool/txpool.go @@ -30,6 +30,8 @@ "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/metrics" )   +type L1CostFunc func(dataGas types.RollupCostData) *big.Int + // TxStatus is the current status of a transaction as seen by the pool. type TxStatus uint
diff --git go-ethereum/core/txpool/validation.go op-geth/core/txpool/validation.go index 8913859e846e73c032120dd9110791e6a1954d9d..0cf916228b7e7a131643c7828cd19f8256fb2750 100644 --- go-ethereum/core/txpool/validation.go +++ op-geth/core/txpool/validation.go @@ -30,12 +30,31 @@ "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" )   +// L1 Info Gas Overhead is the amount of gas the the L1 info deposit consumes. +// It is removed from the tx pool max gas to better indicate that L2 transactions +// are not able to consume all of the gas in a L2 block as the L1 info deposit is always present. +const l1InfoGasOverhead = uint64(70_000) + var ( // blobTxMinBlobGasPrice is the big.Int version of the configured protocol // parameter to avoid constucting a new big integer for every transaction. blobTxMinBlobGasPrice = big.NewInt(params.BlobTxMinBlobGasprice) )   +func EffectiveGasLimit(chainConfig *params.ChainConfig, gasLimit uint64, effectiveLimit uint64) uint64 { + if effectiveLimit != 0 && effectiveLimit < gasLimit { + gasLimit = effectiveLimit + } + if chainConfig.Optimism != nil { + if l1InfoGasOverhead < gasLimit { + gasLimit -= l1InfoGasOverhead + } else { + gasLimit = 0 + } + } + return gasLimit +} + // ValidationOptions define certain differences between transaction validation // across the different pools without having to duplicate those checks. 
type ValidationOptions struct { @@ -44,6 +63,8 @@ Accept uint8 // Bitmap of transaction types that should be accepted for the calling pool MaxSize uint64 // Maximum size of a transaction that the caller can meaningfully handle MinTip *big.Int // Minimum gas tip needed to allow a transaction into the caller pool + + EffectiveGasCeil uint64 // if non-zero, a gas ceiling to enforce independent of the header's gaslimit value }   // ValidateTransaction is a helper method to check whether a transaction is valid @@ -53,6 +74,15 @@ // // This check is public to allow different transaction pools to check the basic // rules without duplicating code and running the risk of missed updates. func ValidateTransaction(tx *types.Transaction, head *types.Header, signer types.Signer, opts *ValidationOptions) error { + // No unauthenticated deposits allowed in the transaction pool. + // This is for spam protection, not consensus, + // as the external engine-API user authenticates deposits. + if tx.Type() == types.DepositTxType { + return core.ErrTxTypeNotSupported + } + if opts.Config.IsOptimism() && tx.Type() == types.BlobTxType { + return core.ErrTxTypeNotSupported + } // Ensure transactions not implemented by the calling pool are rejected if opts.Accept&(1<<tx.Type()) == 0 { return fmt.Errorf("%w: tx type %v not supported by this pool", core.ErrTxTypeNotSupported, tx.Type()) @@ -82,7 +112,7 @@ if tx.Value().Sign() < 0 { return ErrNegativeValue } // Ensure the transaction doesn't exceed the current block limit gas - if head.GasLimit < tx.Gas() { + if EffectiveGasLimit(opts.Config, head.GasLimit, opts.EffectiveGasCeil) < tx.Gas() { return ErrGasLimit } // Sanity check for extremely large numbers (supported by RLP or RPC) @@ -191,6 +221,9 @@ // ExistingCost is a mandatory callback to retrieve an already pooled // transaction's cost with the given nonce to check for overdrafts. 
ExistingCost func(addr common.Address, nonce uint64) *big.Int + + // L1CostFn is an optional extension, to validate L1 rollup costs of a tx + L1CostFn L1CostFunc }   // ValidateTransactionWithState is a helper method to check whether a transaction @@ -221,6 +254,11 @@ var ( balance = opts.State.GetBalance(from).ToBig() cost = tx.Cost() ) + if opts.L1CostFn != nil { + if l1Cost := opts.L1CostFn(tx.RollupCostData()); l1Cost != nil { // add rollup cost + cost = cost.Add(cost, l1Cost) + } + } if balance.Cmp(cost) < 0 { return fmt.Errorf("%w: balance %v, tx cost %v, overshot %v", core.ErrInsufficientFunds, balance, cost, new(big.Int).Sub(cost, balance)) }

The rollup functionality is enabled with the optimism field in the chain config. The EIP-1559 parameters are configurable to adjust for faster, more frequent, and smaller blocks. The parameters can be overridden for testing.

diff --git go-ethereum/core/genesis.go op-geth/core/genesis.go index 54570ac61e4c7d3de57a00ad9e34d22aa02a18a3..934eb6e234d06291af5537530038015bc67a6086 100644 --- go-ethereum/core/genesis.go +++ op-geth/core/genesis.go @@ -24,6 +24,8 @@ "fmt" "math/big" "strings"   + "github.com/ethereum-optimism/superchain-registry/superchain" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/common/math" @@ -72,6 +74,11 @@ ParentHash common.Hash `json:"parentHash"` BaseFee *big.Int `json:"baseFeePerGas"` // EIP-1559 ExcessBlobGas *uint64 `json:"excessBlobGas"` // EIP-4844 BlobGasUsed *uint64 `json:"blobGasUsed"` // EIP-4844 + + // StateHash represents the genesis state, to allow instantiation of a chain with missing initial state. + // Chains with history pruning, or extraordinarily large genesis allocation (e.g. after a regenesis event) + // may utilize this to get started, and then state-sync the latest state, while still verifying the header chain. + StateHash *common.Hash `json:"stateHash,omitempty"` }   func ReadGenesis(db ethdb.Database) (*Genesis, error) { @@ -108,6 +115,10 @@ genesis.Coinbase = genesisHeader.Coinbase genesis.BaseFee = genesisHeader.BaseFee genesis.ExcessBlobGas = genesisHeader.ExcessBlobGas genesis.BlobGasUsed = genesisHeader.BlobGasUsed + if genesis.Alloc == nil { + h := genesisHeader.Hash() + genesis.StateHash = &h + }   return &genesis, nil } @@ -210,6 +221,12 @@ // ChainOverrides contains the changes to chain config. type ChainOverrides struct { OverrideCancun *uint64 OverrideVerkle *uint64 + // optimism + OverrideOptimismCanyon *uint64 + OverrideOptimismEcotone *uint64 + OverrideOptimismFjord *uint64 + ApplySuperchainUpgrades bool + OverrideOptimismInterop *uint64 }   // SetupGenesisBlock writes or updates the genesis block in db. 
@@ -235,12 +252,50 @@ return params.AllEthashProtocolChanges, common.Hash{}, errGenesisNoConfig } applyOverrides := func(config *params.ChainConfig) { if config != nil { + // If applying the superchain-registry to a known OP-Stack chain, + // then override the local chain-config with that from the registry. + if overrides != nil && overrides.ApplySuperchainUpgrades && config.IsOptimism() && config.ChainID != nil && config.ChainID.IsUint64() { + if _, ok := superchain.OPChains[config.ChainID.Uint64()]; ok { + conf, err := params.LoadOPStackChainConfig(config.ChainID.Uint64()) + if err != nil { + log.Warn("failed to load chain config from superchain-registry, skipping override", "err", err, "chain_id", config.ChainID) + } else { + *config = *conf + } + } + } + + if config.IsOptimism() && config.ChainID != nil && config.ChainID.Cmp(big.NewInt(params.OPGoerliChainID)) == 0 { + // Apply Optimism Goerli regolith time + config.RegolithTime = &params.OptimismGoerliRegolithTime + } + if config.IsOptimism() && config.ChainID != nil && config.ChainID.Cmp(big.NewInt(params.BaseGoerliChainID)) == 0 { + // Apply Base Goerli regolith time + config.RegolithTime = &params.BaseGoerliRegolithTime + } if overrides != nil && overrides.OverrideCancun != nil { config.CancunTime = overrides.OverrideCancun } if overrides != nil && overrides.OverrideVerkle != nil { config.VerkleTime = overrides.OverrideVerkle } + if overrides != nil && overrides.OverrideOptimismCanyon != nil { + config.CanyonTime = overrides.OverrideOptimismCanyon + config.ShanghaiTime = overrides.OverrideOptimismCanyon + if config.Optimism != nil && config.Optimism.EIP1559DenominatorCanyon == 0 { + config.Optimism.EIP1559DenominatorCanyon = 250 + } + } + if overrides != nil && overrides.OverrideOptimismEcotone != nil { + config.EcotoneTime = overrides.OverrideOptimismEcotone + config.CancunTime = overrides.OverrideOptimismEcotone + } + if overrides != nil && overrides.OverrideOptimismFjord != nil { + config.FjordTime = 
overrides.OverrideOptimismFjord + } + if overrides != nil && overrides.OverrideOptimismInterop != nil { + config.InteropTime = overrides.OverrideOptimismInterop + } } } // Just commit the new block if there is no stored genesis block. @@ -263,8 +318,13 @@ // The genesis block is present(perhaps in ancient database) while the // state database is not initialized yet. It can happen that the node // is initialized with an external ancient store. Commit genesis state // in this case. + // If the bedrock block is not 0, that implies that the network was migrated at the bedrock block. + // In this case the genesis state may not be in the state database (e.g. op-geth is performing a snap + // sync without an existing datadir) & even if it were, would not be useful as op-geth is not able to + // execute the pre-bedrock STF. header := rawdb.ReadHeader(db, stored, 0) - if header.Root != types.EmptyRootHash && !triedb.Initialized(header.Root) { + transitionedNetwork := genesis != nil && genesis.Config != nil && genesis.Config.BedrockBlock != nil && genesis.Config.BedrockBlock.Uint64() != 0 + if header.Root != types.EmptyRootHash && !triedb.Initialized(header.Root) && !transitionedNetwork { if genesis == nil { genesis = DefaultGenesisBlock() } @@ -385,8 +445,15 @@ }   // ToBlock returns the genesis block according to genesis specification. func (g *Genesis) ToBlock() *types.Block { - root, err := hashAlloc(&g.Alloc, g.IsVerkle()) - if err != nil { + var root common.Hash + var err error + if g.StateHash != nil { + if len(g.Alloc) > 0 { + panic(fmt.Errorf("cannot both have genesis hash %s "+ + "and non-empty state-allocation", *g.StateHash)) + } + root = *g.StateHash + } else if root, err = hashAlloc(&g.Alloc, g.IsVerkle()); err != nil { panic(err) } head := &types.Header{
diff --git go-ethereum/params/config.go op-geth/params/config.go index 21ede457fd6820869a935c8b195473519073a58d..b3a96a96af5d8aaad615faeac05c5e155dbfe2e2 100644 --- go-ethereum/params/config.go +++ op-geth/params/config.go @@ -32,6 +32,32 @@ SepoliaGenesisHash = common.HexToHash("0x25a5cc106eea7138acab33231d7160d69cb777ee0c2c553fcddf5138993e6dd9") GoerliGenesisHash = common.HexToHash("0xbf7e331f7f7c1dd2e05159666b3bf8bc7a8a3a9eb1d518969eab529dd9b88c1a") )   +const ( + OPMainnetChainID = 10 + OPGoerliChainID = 420 + BaseMainnetChainID = 8453 + BaseGoerliChainID = 84531 + baseSepoliaChainID = 84532 + baseGoerliDevnetChainID = 11763071 + pgnSepoliaChainID = 58008 + devnetChainID = 997 + chaosnetChainID = 888 +) + +// OP Stack chain config +var ( + // March 17, 2023 @ 7:00:00 pm UTC + OptimismGoerliRegolithTime = uint64(1679079600) + // May 4, 2023 @ 5:00:00 pm UTC + BaseGoerliRegolithTime = uint64(1683219600) + // Apr 21, 2023 @ 6:30:00 pm UTC + baseGoerliDevnetRegolithTime = uint64(1682101800) + // March 5, 2023 @ 2:48:00 am UTC + devnetRegolithTime = uint64(1677984480) + // August 16, 2023 @ 3:34:22 am UTC + chaosnetRegolithTime = uint64(1692156862) +) + func newUint64(val uint64) *uint64 { return &val }   var ( @@ -307,6 +333,16 @@ Ethash: new(EthashConfig), Clique: nil, } TestRules = TestChainConfig.Rules(new(big.Int), false, 0) + + // This is an Optimism chain config with bedrock starting a block 5, introduced for historical endpoint testing, largely based on the clique config + OptimismTestConfig = func() *ChainConfig { + conf := *AllCliqueProtocolChanges // copy the config + conf.Clique = nil + conf.TerminalTotalDifficultyPassed = true + conf.BedrockBlock = big.NewInt(5) + conf.Optimism = &OptimismConfig{EIP1559Elasticity: 50, EIP1559Denominator: 10} + return &conf + }() )   // NetworkNames are user friendly names to use in the chain spec banner. 
@@ -353,6 +389,15 @@ CancunTime *uint64 `json:"cancunTime,omitempty"` // Cancun switch time (nil = no fork, 0 = already on cancun) PragueTime *uint64 `json:"pragueTime,omitempty"` // Prague switch time (nil = no fork, 0 = already on prague) VerkleTime *uint64 `json:"verkleTime,omitempty"` // Verkle switch time (nil = no fork, 0 = already on verkle)   + BedrockBlock *big.Int `json:"bedrockBlock,omitempty"` // Bedrock switch block (nil = no fork, 0 = already on optimism bedrock) + RegolithTime *uint64 `json:"regolithTime,omitempty"` // Regolith switch time (nil = no fork, 0 = already on optimism regolith) + CanyonTime *uint64 `json:"canyonTime,omitempty"` // Canyon switch time (nil = no fork, 0 = already on optimism canyon) + // Delta: the Delta upgrade does not affect the execution-layer, and is thus not configurable in the chain config. + EcotoneTime *uint64 `json:"ecotoneTime,omitempty"` // Ecotone switch time (nil = no fork, 0 = already on optimism ecotone) + FjordTime *uint64 `json:"fjordTime,omitempty"` // Fjord switch time (nil = no fork, 0 = already on Optimism Fjord) + + InteropTime *uint64 `json:"interopTime,omitempty"` // Interop switch time (nil = no fork, 0 = already on optimism interop) + // TerminalTotalDifficulty is the amount of total difficulty reached by // the network that triggers the consensus upgrade. TerminalTotalDifficulty *big.Int `json:"terminalTotalDifficulty,omitempty"` @@ -365,6 +410,9 @@ // Various consensus engines Ethash *EthashConfig `json:"ethash,omitempty"` Clique *CliqueConfig `json:"clique,omitempty"` + + // Optimism config, nil if not active + Optimism *OptimismConfig `json:"optimism,omitempty"` }   // EthashConfig is the consensus engine configs for proof-of-work based sealing. @@ -386,6 +434,18 @@ func (c *CliqueConfig) String() string { return "clique" }   +// OptimismConfig is the optimism config. 
+type OptimismConfig struct { + EIP1559Elasticity uint64 `json:"eip1559Elasticity"` + EIP1559Denominator uint64 `json:"eip1559Denominator"` + EIP1559DenominatorCanyon uint64 `json:"eip1559DenominatorCanyon"` +} + +// String implements the stringer interface, returning the optimism fee config details. +func (o *OptimismConfig) String() string { + return "optimism" +} + // Description returns a human-readable description of ChainConfig. func (c *ChainConfig) Description() string { var banner string @@ -397,6 +457,8 @@ network = "unknown" } banner += fmt.Sprintf("Chain ID: %v (%s)\n", c.ChainID, network) switch { + case c.Optimism != nil: + banner += "Consensus: Optimism\n" case c.Ethash != nil: if c.TerminalTotalDifficulty == nil { banner += "Consensus: Ethash (proof-of-work)\n" @@ -475,6 +537,21 @@ } if c.VerkleTime != nil { banner += fmt.Sprintf(" - Verkle: @%-10v\n", *c.VerkleTime) } + if c.RegolithTime != nil { + banner += fmt.Sprintf(" - Regolith: @%-10v\n", *c.RegolithTime) + } + if c.CanyonTime != nil { + banner += fmt.Sprintf(" - Canyon: @%-10v\n", *c.CanyonTime) + } + if c.EcotoneTime != nil { + banner += fmt.Sprintf(" - Ecotone: @%-10v\n", *c.EcotoneTime) + } + if c.FjordTime != nil { + banner += fmt.Sprintf(" - Fjord: @%-10v\n", *c.FjordTime) + } + if c.InteropTime != nil { + banner += fmt.Sprintf(" - Interop: @%-10v\n", *c.InteropTime) + } return banner }   @@ -578,6 +655,62 @@ func (c *ChainConfig) IsVerkle(num *big.Int, time uint64) bool { return c.IsLondon(num) && isTimestampForked(c.VerkleTime, time) }   +// IsBedrock returns whether num is either equal to the Bedrock fork block or greater. 
+func (c *ChainConfig) IsBedrock(num *big.Int) bool { + return isBlockForked(c.BedrockBlock, num) +} + +func (c *ChainConfig) IsRegolith(time uint64) bool { + return isTimestampForked(c.RegolithTime, time) +} + +func (c *ChainConfig) IsCanyon(time uint64) bool { + return isTimestampForked(c.CanyonTime, time) +} + +func (c *ChainConfig) IsEcotone(time uint64) bool { + return isTimestampForked(c.EcotoneTime, time) +} + +func (c *ChainConfig) IsFjord(time uint64) bool { + return isTimestampForked(c.FjordTime, time) +} + +func (c *ChainConfig) IsInterop(time uint64) bool { + return isTimestampForked(c.InteropTime, time) +} + +// IsOptimism returns whether the node is an optimism node or not. +func (c *ChainConfig) IsOptimism() bool { + return c.Optimism != nil +} + +// IsOptimismBedrock returns true iff this is an optimism node & bedrock is active +func (c *ChainConfig) IsOptimismBedrock(num *big.Int) bool { + return c.IsOptimism() && c.IsBedrock(num) +} + +func (c *ChainConfig) IsOptimismRegolith(time uint64) bool { + return c.IsOptimism() && c.IsRegolith(time) +} + +func (c *ChainConfig) IsOptimismCanyon(time uint64) bool { + return c.IsOptimism() && c.IsCanyon(time) +} + +func (c *ChainConfig) IsOptimismEcotone(time uint64) bool { + return c.IsOptimism() && c.IsEcotone(time) +} + +func (c *ChainConfig) IsOptimismFjord(time uint64) bool { + return c.IsOptimism() && c.IsFjord(time) +} + +// IsOptimismPreBedrock returns true iff this is an optimism node & bedrock is not yet active +func (c *ChainConfig) IsOptimismPreBedrock(num *big.Int) bool { + return c.IsOptimism() && !c.IsBedrock(num) +} + // CheckCompatible checks whether scheduled fork transitions have been imported // with a mismatching chain configuration. func (c *ChainConfig) CheckCompatible(newcfg *ChainConfig, height uint64, time uint64) *ConfigCompatError { @@ -743,12 +876,22 @@ return nil }   // BaseFeeChangeDenominator bounds the amount the base fee can change between blocks. 
-func (c *ChainConfig) BaseFeeChangeDenominator() uint64 { +// The time parameter is the timestamp of the block to determine if Canyon is active or not +func (c *ChainConfig) BaseFeeChangeDenominator(time uint64) uint64 { + if c.Optimism != nil { + if c.IsCanyon(time) { + return c.Optimism.EIP1559DenominatorCanyon + } + return c.Optimism.EIP1559Denominator + } return DefaultBaseFeeChangeDenominator }   // ElasticityMultiplier bounds the maximum gas limit an EIP-1559 block may have. func (c *ChainConfig) ElasticityMultiplier() uint64 { + if c.Optimism != nil { + return c.Optimism.EIP1559Elasticity + } return DefaultElasticityMultiplier }   @@ -902,6 +1045,8 @@ IsByzantium, IsConstantinople, IsPetersburg, IsIstanbul bool IsBerlin, IsLondon bool IsMerge, IsShanghai, IsCancun, IsPrague bool IsVerkle bool + IsOptimismBedrock, IsOptimismRegolith bool + IsOptimismCanyon, IsOptimismFjord bool }   // Rules ensures c's ChainID is not nil. @@ -929,5 +1074,10 @@ IsShanghai: isMerge && c.IsShanghai(num, timestamp), IsCancun: isMerge && c.IsCancun(num, timestamp), IsPrague: isMerge && c.IsPrague(num, timestamp), IsVerkle: isMerge && c.IsVerkle(num, timestamp), + // Optimism + IsOptimismBedrock: isMerge && c.IsOptimismBedrock(num), + IsOptimismRegolith: isMerge && c.IsOptimismRegolith(timestamp), + IsOptimismCanyon: isMerge && c.IsOptimismCanyon(timestamp), + IsOptimismFjord: isMerge && c.IsOptimismFjord(timestamp), } }
diff --git go-ethereum/params/protocol_params.go op-geth/params/protocol_params.go index 7eb63e89ac619c28b9f96bdf7617aa0a2d0fe504..5e2a0632afbeeb1ae2341791f076615cea6fe092 100644 --- go-ethereum/params/protocol_params.go +++ op-geth/params/protocol_params.go @@ -22,6 +22,13 @@ "github.com/ethereum/go-ethereum/common" )   +var ( + // The base fee portion of the transaction fee accumulates at this predeploy + OptimismBaseFeeRecipient = common.HexToAddress("0x4200000000000000000000000000000000000019") + // The L1 portion of the transaction fee accumulates at this predeploy + OptimismL1FeeRecipient = common.HexToAddress("0x420000000000000000000000000000000000001A") +) + const ( GasLimitBoundDivisor uint64 = 1024 // The bound divisor of the gas limit, used in update calculations. MinGasLimit uint64 = 5000 // Minimum the gas limit may ever be. @@ -158,6 +165,8 @@ Bls12381PairingBaseGas uint64 = 115000 // Base gas price for BLS12-381 elliptic curve pairing check Bls12381PairingPerPairGas uint64 = 23000 // Per-point pair gas price for BLS12-381 elliptic curve pairing check Bls12381MapG1Gas uint64 = 5500 // Gas price for BLS12-381 mapping field element to G1 operation Bls12381MapG2Gas uint64 = 110000 // Gas price for BLS12-381 mapping field element to G2 operation + + P256VerifyGas uint64 = 3450 // secp256r1 elliptic curve signature verifier gas price   // The Refund Quotient is the cap on how much of the used gas can be refunded. Before EIP-3529, // up to half the consumed gas could be refunded. Redefined as 1/5th in EIP-3529

The Optimism Goerli testnet used clique-config data to make geth internals accept blocks. Post-Bedrock, the beacon consensus (i.e. following the Engine API) is used instead, and the clique config is removed.

diff --git go-ethereum/core/rawdb/accessors_metadata.go op-geth/core/rawdb/accessors_metadata.go index 859566f722f5960b0ce417514d6f20ff35a5cbe9..7dae87a98f2f5383b3b14f0d04012eb64c98a91b 100644 --- go-ethereum/core/rawdb/accessors_metadata.go +++ op-geth/core/rawdb/accessors_metadata.go @@ -64,6 +64,9 @@ if err := json.Unmarshal(data, &config); err != nil { log.Error("Invalid chain config JSON", "hash", hash, "err", err) return nil } + if config.Optimism != nil { + config.Clique = nil // get rid of legacy clique data in chain config (optimism goerli issue) + } return &config }
diff --git go-ethereum/core/gen_genesis.go op-geth/core/gen_genesis.go index b8acf9df7c91cc03c8cfda2fcba226b4f8d365be..3a57ec65e8c35e151f4fe3b792a2d5b7290744a4 100644 --- go-ethereum/core/gen_genesis.go +++ op-geth/core/gen_genesis.go @@ -34,6 +34,7 @@ ParentHash common.Hash `json:"parentHash"` BaseFee *math.HexOrDecimal256 `json:"baseFeePerGas"` ExcessBlobGas *math.HexOrDecimal64 `json:"excessBlobGas"` BlobGasUsed *math.HexOrDecimal64 `json:"blobGasUsed"` + StateHash *common.Hash `json:"stateHash,omitempty"` } var enc Genesis enc.Config = g.Config @@ -56,27 +57,29 @@ enc.ParentHash = g.ParentHash enc.BaseFee = (*math.HexOrDecimal256)(g.BaseFee) enc.ExcessBlobGas = (*math.HexOrDecimal64)(g.ExcessBlobGas) enc.BlobGasUsed = (*math.HexOrDecimal64)(g.BlobGasUsed) + enc.StateHash = g.StateHash return json.Marshal(&enc) }   // UnmarshalJSON unmarshals from JSON. func (g *Genesis) UnmarshalJSON(input []byte) error { type Genesis struct { - Config *params.ChainConfig `json:"config"` - Nonce *math.HexOrDecimal64 `json:"nonce"` - Timestamp *math.HexOrDecimal64 `json:"timestamp"` - ExtraData *hexutil.Bytes `json:"extraData"` - GasLimit *math.HexOrDecimal64 `json:"gasLimit" gencodec:"required"` - Difficulty *math.HexOrDecimal256 `json:"difficulty" gencodec:"required"` - Mixhash *common.Hash `json:"mixHash"` - Coinbase *common.Address `json:"coinbase"` - Alloc map[common.UnprefixedAddress]types.Account `json:"alloc" gencodec:"required"` - Number *math.HexOrDecimal64 `json:"number"` - GasUsed *math.HexOrDecimal64 `json:"gasUsed"` - ParentHash *common.Hash `json:"parentHash"` - BaseFee *math.HexOrDecimal256 `json:"baseFeePerGas"` - ExcessBlobGas *math.HexOrDecimal64 `json:"excessBlobGas"` - BlobGasUsed *math.HexOrDecimal64 `json:"blobGasUsed"` + Config *params.ChainConfig `json:"config"` + Nonce *math.HexOrDecimal64 `json:"nonce"` + Timestamp *math.HexOrDecimal64 `json:"timestamp"` + ExtraData *hexutil.Bytes `json:"extraData"` + GasLimit *math.HexOrDecimal64 `json:"gasLimit" 
gencodec:"required"` + Difficulty *math.HexOrDecimal256 `json:"difficulty" gencodec:"required"` + Mixhash *common.Hash `json:"mixHash"` + Coinbase *common.Address `json:"coinbase"` + Alloc map[common.UnprefixedAddress]GenesisAccount `json:"alloc" gencodec:"required"` + Number *math.HexOrDecimal64 `json:"number"` + GasUsed *math.HexOrDecimal64 `json:"gasUsed"` + ParentHash *common.Hash `json:"parentHash"` + BaseFee *math.HexOrDecimal256 `json:"baseFeePerGas"` + ExcessBlobGas *math.HexOrDecimal64 `json:"excessBlobGas"` + BlobGasUsed *math.HexOrDecimal64 `json:"blobGasUsed"` + StateHash *common.Hash `json:"stateHash,omitempty"` } var dec Genesis if err := json.Unmarshal(input, &dec); err != nil { @@ -132,6 +135,9 @@ g.ExcessBlobGas = (*uint64)(dec.ExcessBlobGas) } if dec.BlobGasUsed != nil { g.BlobGasUsed = (*uint64)(dec.BlobGasUsed) + } + if dec.StateHash != nil { + g.StateHash = dec.StateHash } return nil }

Testing of the superchain configuration

diff --git go-ethereum/core/superchain.go op-geth/core/superchain.go new file mode 100644 index 0000000000000000000000000000000000000000..47b160c51964b97fd30b73e836b6533676c65a3b --- /dev/null +++ op-geth/core/superchain.go @@ -0,0 +1,101 @@ +package core + +import ( + "fmt" + "math/big" + + "github.com/ethereum-optimism/superchain-registry/superchain" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/params" +) + +func LoadOPStackGenesis(chainID uint64) (*Genesis, error) { + chConfig, ok := superchain.OPChains[chainID] + if !ok { + return nil, fmt.Errorf("unknown chain ID: %d", chainID) + } + + cfg, err := params.LoadOPStackChainConfig(chainID) + if err != nil { + return nil, fmt.Errorf("failed to load params.ChainConfig for chain %d: %w", chainID, err) + } + + gen, err := superchain.LoadGenesis(chainID) + if err != nil { + return nil, fmt.Errorf("failed to load genesis definition for chain %d: %w", chainID, err) + } + + genesis := &Genesis{ + Config: cfg, + Nonce: gen.Nonce, + Timestamp: gen.Timestamp, + ExtraData: gen.ExtraData, + GasLimit: gen.GasLimit, + Difficulty: (*big.Int)(gen.Difficulty), + Mixhash: common.Hash(gen.Mixhash), + Coinbase: common.Address(gen.Coinbase), + Alloc: make(GenesisAlloc), + Number: gen.Number, + GasUsed: gen.GasUsed, + ParentHash: common.Hash(gen.ParentHash), + BaseFee: (*big.Int)(gen.BaseFee), + ExcessBlobGas: gen.ExcessBlobGas, + BlobGasUsed: gen.BlobGasUsed, + } + + for addr, acc := range gen.Alloc { + var code []byte + if acc.CodeHash != ([32]byte{}) { + dat, err := superchain.LoadContractBytecode(acc.CodeHash) + if err != nil { + return nil, fmt.Errorf("failed to load bytecode %s of address %s in chain %d: %w", acc.CodeHash, addr, chainID, err) + } + code = dat + } + var storage map[common.Hash]common.Hash + if len(acc.Storage) > 0 { + storage = make(map[common.Hash]common.Hash) + for k, v := range acc.Storage { + storage[common.Hash(k)] = common.Hash(v) + } + } + bal := common.Big0 + if 
acc.Balance != nil { + bal = (*big.Int)(acc.Balance) + } + genesis.Alloc[common.Address(addr)] = GenesisAccount{ + Code: code, + Storage: storage, + Balance: bal, + Nonce: acc.Nonce, + } + } + if gen.StateHash != nil { + if len(gen.Alloc) > 0 { + return nil, fmt.Errorf("chain definition unexpectedly contains both allocation (%d) and state-hash %s", len(gen.Alloc), *gen.StateHash) + } + genesis.StateHash = (*common.Hash)(gen.StateHash) + } + + genesisBlock := genesis.ToBlock() + genesisBlockHash := genesisBlock.Hash() + expectedHash := common.Hash([32]byte(chConfig.Genesis.L2.Hash)) + + // Verify we correctly produced the genesis config by recomputing the genesis-block-hash, + // and check the genesis matches the chain genesis definition. + if chConfig.Genesis.L2.Number != genesisBlock.NumberU64() { + switch chainID { + case params.OPMainnetChainID: + expectedHash = common.HexToHash("0x7ca38a1916c42007829c55e69d3e9a73265554b586a499015373241b8a3fa48b") + case params.OPGoerliChainID: + expectedHash = common.HexToHash("0xc1fc15cd51159b1f1e5cbc4b82e85c1447ddfa33c52cf1d98d14fba0d6354be1") + default: + return nil, fmt.Errorf("unknown stateless genesis definition for chain %d", chainID) + } + } + if expectedHash != genesisBlockHash { + return nil, fmt.Errorf("chainID=%d: produced genesis with hash %s but expected %s", chainID, genesisBlockHash, expectedHash) + } + return genesis, nil +}
diff --git go-ethereum/params/superchain.go op-geth/params/superchain.go new file mode 100644 index 0000000000000000000000000000000000000000..bcce62832f9e25c8820fe65b06435b517d1e2bcb --- /dev/null +++ op-geth/params/superchain.go @@ -0,0 +1,278 @@ +package params + +import ( + "encoding/binary" + "fmt" + "math/big" + "sort" + "strings" + + "github.com/ethereum-optimism/superchain-registry/superchain" + "github.com/ethereum/go-ethereum/common" +) + +var OPStackSupport = ProtocolVersionV0{Build: [8]byte{}, Major: 7, Minor: 0, Patch: 0, PreRelease: 0}.Encode() + +func init() { + for id, ch := range superchain.OPChains { + NetworkNames[fmt.Sprintf("%d", id)] = ch.Name + } +} + +func OPStackChainIDByName(name string) (uint64, error) { + for id, ch := range superchain.OPChains { + if ch.Chain+"-"+ch.Superchain == name { + return id, nil + } + } + return 0, fmt.Errorf("unknown chain %q", name) +} + +func OPStackChainNames() (out []string) { + for _, ch := range superchain.OPChains { + out = append(out, ch.Chain+"-"+ch.Superchain) + } + sort.Strings(out) + return +} + +func LoadOPStackChainConfig(chainID uint64) (*ChainConfig, error) { + chConfig, ok := superchain.OPChains[chainID] + if !ok { + return nil, fmt.Errorf("unknown chain ID: %d", chainID) + } + + genesisActivation := uint64(0) + out := &ChainConfig{ + ChainID: new(big.Int).SetUint64(chainID), + HomesteadBlock: common.Big0, + DAOForkBlock: nil, + DAOForkSupport: false, + EIP150Block: common.Big0, + EIP155Block: common.Big0, + EIP158Block: common.Big0, + ByzantiumBlock: common.Big0, + ConstantinopleBlock: common.Big0, + PetersburgBlock: common.Big0, + IstanbulBlock: common.Big0, + MuirGlacierBlock: common.Big0, + BerlinBlock: common.Big0, + LondonBlock: common.Big0, + ArrowGlacierBlock: common.Big0, + GrayGlacierBlock: common.Big0, + MergeNetsplitBlock: common.Big0, + ShanghaiTime: chConfig.CanyonTime, // Shanghai activates with Canyon + CancunTime: chConfig.EcotoneTime, // Cancun activates with Ecotone + 
PragueTime: nil, + BedrockBlock: common.Big0, + RegolithTime: &genesisActivation, + CanyonTime: chConfig.CanyonTime, + EcotoneTime: chConfig.EcotoneTime, + FjordTime: chConfig.FjordTime, + TerminalTotalDifficulty: common.Big0, + TerminalTotalDifficultyPassed: true, + Ethash: nil, + Clique: nil, + Optimism: &OptimismConfig{ + EIP1559Elasticity: 6, + EIP1559Denominator: 50, + EIP1559DenominatorCanyon: 250, + }, + } + + // special overrides for OP-Stack chains with pre-Regolith upgrade history + switch chainID { + case OPGoerliChainID: + out.LondonBlock = big.NewInt(4061224) + out.ArrowGlacierBlock = big.NewInt(4061224) + out.GrayGlacierBlock = big.NewInt(4061224) + out.MergeNetsplitBlock = big.NewInt(4061224) + out.BedrockBlock = big.NewInt(4061224) + out.RegolithTime = &OptimismGoerliRegolithTime + out.Optimism.EIP1559Elasticity = 10 + case OPMainnetChainID: + out.BerlinBlock = big.NewInt(3950000) + out.LondonBlock = big.NewInt(105235063) + out.ArrowGlacierBlock = big.NewInt(105235063) + out.GrayGlacierBlock = big.NewInt(105235063) + out.MergeNetsplitBlock = big.NewInt(105235063) + out.BedrockBlock = big.NewInt(105235063) + case BaseGoerliChainID: + out.RegolithTime = &BaseGoerliRegolithTime + out.Optimism.EIP1559Elasticity = 10 + case baseSepoliaChainID: + out.Optimism.EIP1559Elasticity = 10 + case baseGoerliDevnetChainID: + out.RegolithTime = &baseGoerliDevnetRegolithTime + case pgnSepoliaChainID: + out.Optimism.EIP1559Elasticity = 2 + out.Optimism.EIP1559Denominator = 8 + case devnetChainID: + out.RegolithTime = &devnetRegolithTime + out.Optimism.EIP1559Elasticity = 10 + case chaosnetChainID: + out.RegolithTime = &chaosnetRegolithTime + out.Optimism.EIP1559Elasticity = 10 + } + + return out, nil +} + +// ProtocolVersion encodes the OP-Stack protocol version. See OP-Stack superchain-upgrade specification. 
+type ProtocolVersion [32]byte + +func (p ProtocolVersion) MarshalText() ([]byte, error) { + return common.Hash(p).MarshalText() +} + +func (p *ProtocolVersion) UnmarshalText(input []byte) error { + return (*common.Hash)(p).UnmarshalText(input) +} + +func (p ProtocolVersion) Parse() (versionType uint8, build [8]byte, major, minor, patch, preRelease uint32) { + versionType = p[0] + if versionType != 0 { + return + } + // bytes 1:8 reserved for future use + copy(build[:], p[8:16]) // differentiates forks and custom-builds of standard protocol + major = binary.BigEndian.Uint32(p[16:20]) // incompatible API changes + minor = binary.BigEndian.Uint32(p[20:24]) // identifies additional functionality in backwards compatible manner + patch = binary.BigEndian.Uint32(p[24:28]) // identifies backward-compatible bug-fixes + preRelease = binary.BigEndian.Uint32(p[28:32]) // identifies unstable versions that may not satisfy the above + return +} + +func (p ProtocolVersion) String() string { + versionType, build, major, minor, patch, preRelease := p.Parse() + if versionType != 0 { + return "v0.0.0-unknown." + common.Hash(p).String() + } + ver := fmt.Sprintf("v%d.%d.%d", major, minor, patch) + if preRelease != 0 { + ver += fmt.Sprintf("-%d", preRelease) + } + if build != ([8]byte{}) { + if humanBuildTag(build) { + ver += fmt.Sprintf("+%s", strings.TrimRight(string(build[:]), "\x00")) + } else { + ver += fmt.Sprintf("+0x%x", build) + } + } + return ver +} + +// humanBuildTag identifies which build tag we can stringify for human-readable versions +func humanBuildTag(v [8]byte) bool { + for i, c := range v { // following semver.org advertised regex, alphanumeric with '-' and '.', except leading '.'. + if c == 0 { // trailing zeroed are allowed + for _, d := range v[i:] { + if d != 0 { + return false + } + } + return true + } + if !((c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z') || (c >= '0' && c <= '9') || c == '-' || (c == '.' 
&& i > 0)) { + return false + } + } + return true +} + +// ProtocolVersionComparison is used to identify how far ahead/outdated a protocol version is relative to another. +// This value is used in metrics and switch comparisons, to easily identify each type of version difference. +// Negative values mean the version is outdated. +// Positive values mean the version is up-to-date. +// Matching versions have a 0. +type ProtocolVersionComparison int + +const ( + AheadMajor ProtocolVersionComparison = 4 + OutdatedMajor ProtocolVersionComparison = -4 + AheadMinor ProtocolVersionComparison = 3 + OutdatedMinor ProtocolVersionComparison = -3 + AheadPatch ProtocolVersionComparison = 2 + OutdatedPatch ProtocolVersionComparison = -2 + AheadPrerelease ProtocolVersionComparison = 1 + OutdatedPrerelease ProtocolVersionComparison = -1 + Matching ProtocolVersionComparison = 0 + DiffVersionType ProtocolVersionComparison = 100 + DiffBuild ProtocolVersionComparison = 101 + EmptyVersion ProtocolVersionComparison = 102 + InvalidVersion ProtocolVersionComparison = 103 +) + +func (p ProtocolVersion) Compare(other ProtocolVersion) (cmp ProtocolVersionComparison) { + if p == (ProtocolVersion{}) || (other == (ProtocolVersion{})) { + return EmptyVersion + } + aVersionType, aBuild, aMajor, aMinor, aPatch, aPreRelease := p.Parse() + bVersionType, bBuild, bMajor, bMinor, bPatch, bPreRelease := other.Parse() + if aVersionType != bVersionType { + return DiffVersionType + } + if aBuild != bBuild { + return DiffBuild + } + // max values are reserved, consider versions with these values invalid + if aMajor == ^uint32(0) || aMinor == ^uint32(0) || aPatch == ^uint32(0) || aPreRelease == ^uint32(0) || + bMajor == ^uint32(0) || bMinor == ^uint32(0) || bPatch == ^uint32(0) || bPreRelease == ^uint32(0) { + return InvalidVersion + } + fn := func(a, b uint32, ahead, outdated ProtocolVersionComparison) ProtocolVersionComparison { + if a == b { + return Matching + } + if a > b { + return ahead + } + return 
outdated + } + if aPreRelease != 0 { // if A is a pre-release, then decrement the version before comparison + if aPatch != 0 { + aPatch -= 1 + } else if aMinor != 0 { + aMinor -= 1 + aPatch = ^uint32(0) // max value + } else if aMajor != 0 { + aMajor -= 1 + aMinor = ^uint32(0) // max value + } + } + if bPreRelease != 0 { // if B is a pre-release, then decrement the version before comparison + if bPatch != 0 { + bPatch -= 1 + } else if bMinor != 0 { + bMinor -= 1 + bPatch = ^uint32(0) // max value + } else if bMajor != 0 { + bMajor -= 1 + bMinor = ^uint32(0) // max value + } + } + if c := fn(aMajor, bMajor, AheadMajor, OutdatedMajor); c != Matching { + return c + } + if c := fn(aMinor, bMinor, AheadMinor, OutdatedMinor); c != Matching { + return c + } + if c := fn(aPatch, bPatch, AheadPatch, OutdatedPatch); c != Matching { + return c + } + return fn(aPreRelease, bPreRelease, AheadPrerelease, OutdatedPrerelease) +} + +type ProtocolVersionV0 struct { + Build [8]byte + Major, Minor, Patch, PreRelease uint32 +} + +func (v ProtocolVersionV0) Encode() (out ProtocolVersion) { + copy(out[8:16], v.Build[:]) + binary.BigEndian.PutUint32(out[16:20], v.Major) + binary.BigEndian.PutUint32(out[20:24], v.Minor) + binary.BigEndian.PutUint32(out[24:28], v.Patch) + binary.BigEndian.PutUint32(out[28:32], v.PreRelease) + return +}

Changes to the node configuration and services.

Flag changes:

- Transactions can be forwarded to an RPC for sequencing.
- Historical calls can be forwarded to a legacy node.
- The tx pool propagation can be enabled/disabled.
- The Optimism Bedrock fork activation can be changed for testing.

diff --git go-ethereum/cmd/geth/config.go op-geth/cmd/geth/config.go index 5f52f1df54423289b352083d1fcd672296d79571..6a4dbc2c2e6b0b77910d17a6c453c64fd454a02f 100644 --- go-ethereum/cmd/geth/config.go +++ op-geth/cmd/geth/config.go @@ -173,10 +173,32 @@ if ctx.IsSet(utils.OverrideCancun.Name) { v := ctx.Uint64(utils.OverrideCancun.Name) cfg.Eth.OverrideCancun = &v } + + if ctx.IsSet(utils.OverrideOptimismCanyon.Name) { + v := ctx.Uint64(utils.OverrideOptimismCanyon.Name) + cfg.Eth.OverrideOptimismCanyon = &v + } + + if ctx.IsSet(utils.OverrideOptimismEcotone.Name) { + v := ctx.Uint64(utils.OverrideOptimismEcotone.Name) + cfg.Eth.OverrideOptimismEcotone = &v + } + + if ctx.IsSet(utils.OverrideOptimismFjord.Name) { + v := ctx.Uint64(utils.OverrideOptimismFjord.Name) + cfg.Eth.OverrideOptimismFjord = &v + } + + if ctx.IsSet(utils.OverrideOptimismInterop.Name) { + v := ctx.Uint64(utils.OverrideOptimismInterop.Name) + cfg.Eth.OverrideOptimismInterop = &v + } + if ctx.IsSet(utils.OverrideVerkle.Name) { v := ctx.Uint64(utils.OverrideVerkle.Name) cfg.Eth.OverrideVerkle = &v } + backend, eth := utils.RegisterEthService(stack, &cfg.Eth)   // Create gauge with geth system and build information
diff --git go-ethereum/cmd/geth/main.go op-geth/cmd/geth/main.go index 2f7d37fdd7e7b7a8d69f2a7be749bdd9d34a21bb..d987364ca16887d5cea85d88a3e07e3080d82390 100644 --- go-ethereum/cmd/geth/main.go +++ op-geth/cmd/geth/main.go @@ -67,10 +67,15 @@ utils.USBFlag, utils.SmartCardDaemonPathFlag, utils.OverrideCancun, utils.OverrideVerkle, + utils.OverrideOptimismCanyon, + utils.OverrideOptimismEcotone, + utils.OverrideOptimismFjord, + utils.OverrideOptimismInterop, utils.EnablePersonal, utils.TxPoolLocalsFlag, utils.TxPoolNoLocalsFlag, utils.TxPoolJournalFlag, + utils.TxPoolJournalRemotesFlag, utils.TxPoolRejournalFlag, utils.TxPoolPriceLimitFlag, utils.TxPoolPriceBumpFlag, @@ -118,6 +123,7 @@ utils.MaxPeersFlag, utils.MaxPendingPeersFlag, utils.MiningEnabledFlag, utils.MinerGasLimitFlag, + utils.MinerEffectiveGasLimitFlag, utils.MinerGasPriceFlag, utils.MinerEtherbaseFlag, utils.MinerExtraDataFlag, @@ -143,6 +149,14 @@ utils.GpoBlocksFlag, utils.GpoPercentileFlag, utils.GpoMaxGasPriceFlag, utils.GpoIgnoreGasPriceFlag, + utils.GpoMinSuggestedPriorityFeeFlag, + utils.RollupSequencerHTTPFlag, + utils.RollupHistoricalRPCFlag, + utils.RollupHistoricalRPCTimeoutFlag, + utils.RollupDisableTxPoolGossipFlag, + utils.RollupComputePendingBlock, + utils.RollupHaltOnIncompatibleProtocolVersionFlag, + utils.RollupSuperchainUpgradesFlag, configFileFlag, utils.LogDebugFlag, utils.LogBacktraceAtFlag, @@ -305,6 +319,9 @@ 5. Networking is disabled; there is no listen-address, the maximum number of peers is set to 0, and discovery is disabled. `)   + case ctx.IsSet(utils.OPNetworkFlag.Name): + log.Info("Starting geth on an OP network...", "network", ctx.String(utils.OPNetworkFlag.Name)) + case !ctx.IsSet(utils.NetworkIdFlag.Name): log.Info("Starting Geth on Ethereum mainnet...") } @@ -316,7 +333,14 @@ !ctx.IsSet(utils.SepoliaFlag.Name) && !ctx.IsSet(utils.GoerliFlag.Name) && !ctx.IsSet(utils.DeveloperFlag.Name) { // Nope, we're really on mainnet. Bump that cache up! 
+ // Note: If we don't set the OPNetworkFlag and have already initialized the database, we may hit this case. log.Info("Bumping default cache on mainnet", "provided", ctx.Int(utils.CacheFlag.Name), "updated", 4096) + ctx.Set(utils.CacheFlag.Name, strconv.Itoa(4096)) + } + } else if ctx.String(utils.SyncModeFlag.Name) != "light" && !ctx.IsSet(utils.CacheFlag.Name) && ctx.IsSet(utils.OPNetworkFlag.Name) { + // We haven't set the cache, but since the OP network flag is set we may be on an OP-Stack mainnet. + if strings.Contains(ctx.String(utils.OPNetworkFlag.Name), "mainnet") { + log.Info("Bumping default cache on mainnet", "provided", ctx.Int(utils.CacheFlag.Name), "updated", 4096, "network", ctx.String(utils.OPNetworkFlag.Name)) ctx.Set(utils.CacheFlag.Name, strconv.Itoa(4096)) } }
diff --git go-ethereum/cmd/utils/flags.go op-geth/cmd/utils/flags.go index b813e52970d82a99205240866c8d291044b18161..f4ecd35287b786583f4330f146ac6ff5c9fd50de 100644 --- go-ethereum/cmd/utils/flags.go +++ op-geth/cmd/utils/flags.go @@ -155,6 +155,15 @@ Name: "holesky", Usage: "Holesky network: pre-configured proof-of-stake test network", Category: flags.EthCategory, } + + OPNetworkFlag = &cli.StringFlag{ + Name: "op-network", + Aliases: []string{"beta.op-network"}, + Usage: "Select a pre-configured OP-Stack network (warning: op-mainnet and op-goerli require special sync," + + " datadir is recommended), options: " + strings.Join(params.OPStackChainNames(), ", "), + Category: flags.EthCategory, + } + // Dev mode DeveloperFlag = &cli.BoolFlag{ Name: "dev", @@ -252,6 +261,26 @@ Name: "override.verkle", Usage: "Manually specify the Verkle fork timestamp, overriding the bundled setting", Category: flags.EthCategory, } + OverrideOptimismCanyon = &cli.Uint64Flag{ + Name: "override.canyon", + Usage: "Manually specify the Optimism Canyon fork timestamp, overriding the bundled setting", + Category: flags.EthCategory, + } + OverrideOptimismEcotone = &cli.Uint64Flag{ + Name: "override.ecotone", + Usage: "Manually specify the Optimism Ecotone fork timestamp, overriding the bundled setting", + Category: flags.EthCategory, + } + OverrideOptimismFjord = &cli.Uint64Flag{ + Name: "override.fjord", + Usage: "Manually specify the Optimism Fjord fork timestamp, overriding the bundled setting", + Category: flags.EthCategory, + } + OverrideOptimismInterop = &cli.Uint64Flag{ + Name: "override.interop", + Usage: "Manually specify the Optimsim Interop feature-set fork timestamp, overriding the bundled setting", + Category: flags.EthCategory, + } SyncModeFlag = &flags.TextMarshalerFlag{ Name: "syncmode", Usage: `Blockchain sync mode ("snap" or "full")`, @@ -296,6 +325,11 @@ TxPoolJournalFlag = &cli.StringFlag{ Name: "txpool.journal", Usage: "Disk journal for local transaction to survive node 
restarts", Value: ethconfig.Defaults.TxPool.Journal, + Category: flags.TxPoolCategory, + } + TxPoolJournalRemotesFlag = &cli.BoolFlag{ + Name: "txpool.journalremotes", + Usage: "Includes remote transactions in the journal", Category: flags.TxPoolCategory, } TxPoolRejournalFlag = &cli.DurationFlag{ @@ -434,6 +468,12 @@ MinerGasLimitFlag = &cli.Uint64Flag{ Name: "miner.gaslimit", Usage: "Target gas ceiling for mined blocks", Value: ethconfig.Defaults.Miner.GasCeil, + Category: flags.MinerCategory, + } + MinerEffectiveGasLimitFlag = &cli.Uint64Flag{ + Name: "miner.effectivegaslimit", + Usage: "If non-zero, an effective gas limit to apply in addition to the block header gaslimit.", + Value: 0, Category: flags.MinerCategory, } MinerGasPriceFlag = &flags.BigFlag{ @@ -749,13 +789,14 @@ Name: "discovery.v4", Aliases: []string{"discv4"}, Usage: "Enables the V4 discovery mechanism", Category: flags.NetworkingCategory, - Value: true, + Value: false, } DiscoveryV5Flag = &cli.BoolFlag{ Name: "discovery.v5", Aliases: []string{"discv5"}, Usage: "Enables the experimental RLPx V5 (Topic Discovery) mechanism", Category: flags.NetworkingCategory, + Value: true, } NetrestrictFlag = &cli.StringFlag{ Name: "netrestrict", @@ -813,6 +854,60 @@ Usage: "Gas price below which gpo will ignore transactions", Value: ethconfig.Defaults.GPO.IgnorePrice.Int64(), Category: flags.GasPriceCategory, } + GpoMinSuggestedPriorityFeeFlag = &cli.Int64Flag{ + Name: "gpo.minsuggestedpriorityfee", + Usage: "Minimum transaction priority fee to suggest. 
Used on OP chains when blocks are not full.", + Value: ethconfig.Defaults.GPO.MinSuggestedPriorityFee.Int64(), + Category: flags.GasPriceCategory, + } + + // Rollup Flags + RollupSequencerHTTPFlag = &cli.StringFlag{ + Name: "rollup.sequencerhttp", + Usage: "HTTP endpoint for the sequencer mempool", + Category: flags.RollupCategory, + } + + RollupHistoricalRPCFlag = &cli.StringFlag{ + Name: "rollup.historicalrpc", + Usage: "RPC endpoint for historical data.", + Category: flags.RollupCategory, + } + + RollupHistoricalRPCTimeoutFlag = &cli.StringFlag{ + Name: "rollup.historicalrpctimeout", + Usage: "Timeout for historical RPC requests.", + Value: "5s", + Category: flags.RollupCategory, + } + + RollupDisableTxPoolGossipFlag = &cli.BoolFlag{ + Name: "rollup.disabletxpoolgossip", + Usage: "Disable transaction pool gossip.", + Category: flags.RollupCategory, + } + RollupEnableTxPoolAdmissionFlag = &cli.BoolFlag{ + Name: "rollup.enabletxpooladmission", + Usage: "Add RPC-submitted transactions to the txpool (on by default if --rollup.sequencerhttp is not set).", + Category: flags.RollupCategory, + } + RollupComputePendingBlock = &cli.BoolFlag{ + Name: "rollup.computependingblock", + Usage: "By default the pending block equals the latest block to save resources and not leak txs from the tx-pool, this flag enables computing of the pending block from the tx-pool instead.", + Category: flags.RollupCategory, + } + RollupHaltOnIncompatibleProtocolVersionFlag = &cli.StringFlag{ + Name: "rollup.halt", + Usage: "Opt-in option to halt on incompatible protocol version requirements of the given level (major/minor/patch/none), as signaled through the Engine API by the rollup node", + Category: flags.RollupCategory, + } + RollupSuperchainUpgradesFlag = &cli.BoolFlag{ + Name: "rollup.superchain-upgrades", + Aliases: []string{"beta.rollup.superchain-upgrades"}, + Usage: "Apply superchain-registry config changes to the local chain-configuration", + Category: flags.RollupCategory, + Value: 
true, + }   // Metrics flags MetricsEnabledFlag = &cli.BoolFlag{ @@ -918,7 +1013,7 @@ SepoliaFlag, HoleskyFlag, } // NetworkFlags is the flag group of all built-in supported networks. - NetworkFlags = append([]cli.Flag{MainnetFlag}, TestnetFlags...) + NetworkFlags = append([]cli.Flag{MainnetFlag, OPNetworkFlag}, TestnetFlags...)   // DatabaseFlags is the flag group of all database flags. DatabaseFlags = []cli.Flag{ @@ -945,6 +1040,9 @@ } if ctx.Bool(HoleskyFlag.Name) { return filepath.Join(path, "holesky") } + if ctx.IsSet(OPNetworkFlag.Name) { + return filepath.Join(path, ctx.String(OPNetworkFlag.Name)) + } return path } Fatalf("Cannot determine default data directory, please set manually (--datadir)") @@ -1036,6 +1134,13 @@ case ctx.IsSet(BootnodesFlag.Name): urls = SplitAndTrim(ctx.String(BootnodesFlag.Name)) case cfg.BootstrapNodesV5 != nil: return // already set, don't apply defaults. + case ctx.IsSet(OPNetworkFlag.Name): + network := ctx.String(OPNetworkFlag.Name) + if strings.Contains(strings.ToLower(network), "mainnet") { + urls = params.V5OPBootnodes + } else { + urls = params.V5OPTestnetBootnodes + } }   cfg.BootstrapNodesV5 = make([]*enode.Node, 0, len(urls)) @@ -1434,6 +1539,8 @@ case ctx.Bool(SepoliaFlag.Name) && cfg.DataDir == node.DefaultDataDir(): cfg.DataDir = filepath.Join(node.DefaultDataDir(), "sepolia") case ctx.Bool(HoleskyFlag.Name) && cfg.DataDir == node.DefaultDataDir(): cfg.DataDir = filepath.Join(node.DefaultDataDir(), "holesky") + case ctx.IsSet(OPNetworkFlag.Name) && cfg.DataDir == node.DefaultDataDir(): + cfg.DataDir = filepath.Join(node.DefaultDataDir(), ctx.String(OPNetworkFlag.Name)) } }   @@ -1450,6 +1557,9 @@ } if ctx.IsSet(GpoIgnoreGasPriceFlag.Name) { cfg.IgnorePrice = big.NewInt(ctx.Int64(GpoIgnoreGasPriceFlag.Name)) } + if ctx.IsSet(GpoMinSuggestedPriorityFeeFlag.Name) { + cfg.MinSuggestedPriorityFee = big.NewInt(ctx.Int64(GpoMinSuggestedPriorityFeeFlag.Name)) + } }   func setTxPool(ctx *cli.Context, cfg *legacypool.Config) { 
@@ -1469,6 +1579,9 @@ } if ctx.IsSet(TxPoolJournalFlag.Name) { cfg.Journal = ctx.String(TxPoolJournalFlag.Name) } + if ctx.IsSet(TxPoolJournalRemotesFlag.Name) { + cfg.JournalRemote = ctx.Bool(TxPoolJournalRemotesFlag.Name) + } if ctx.IsSet(TxPoolRejournalFlag.Name) { cfg.Rejournal = ctx.Duration(TxPoolRejournalFlag.Name) } @@ -1493,6 +1606,11 @@ } if ctx.IsSet(TxPoolLifetimeFlag.Name) { cfg.Lifetime = ctx.Duration(TxPoolLifetimeFlag.Name) } + if ctx.IsSet(MinerEffectiveGasLimitFlag.Name) { + // While technically this is a miner config parameter, we also want the txpool to enforce + // it to avoid accepting transactions that can never be included in a block. + cfg.EffectiveGasCeil = ctx.Uint64(MinerEffectiveGasLimitFlag.Name) + } }   func setMiner(ctx *cli.Context, cfg *miner.Config) { @@ -1502,6 +1620,9 @@ } if ctx.IsSet(MinerGasLimitFlag.Name) { cfg.GasCeil = ctx.Uint64(MinerGasLimitFlag.Name) } + if ctx.IsSet(MinerEffectiveGasLimitFlag.Name) { + cfg.EffectiveGasCeil = ctx.Uint64(MinerEffectiveGasLimitFlag.Name) + } if ctx.IsSet(MinerGasPriceFlag.Name) { cfg.GasPrice = flags.GlobalBig(ctx, MinerGasPriceFlag.Name) } @@ -1510,6 +1631,9 @@ cfg.Recommit = ctx.Duration(MinerRecommitIntervalFlag.Name) } if ctx.IsSet(MinerNewPayloadTimeout.Name) { cfg.NewPayloadTimeout = ctx.Duration(MinerNewPayloadTimeout.Name) + } + if ctx.IsSet(RollupComputePendingBlock.Name) { + cfg.RollupComputePendingBlock = ctx.Bool(RollupComputePendingBlock.Name) } }   @@ -1585,7 +1709,7 @@ // SetEthConfig applies eth-related command line flags to the config. 
func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) { // Avoid conflicting network flags - CheckExclusive(ctx, MainnetFlag, DeveloperFlag, GoerliFlag, SepoliaFlag, HoleskyFlag) + CheckExclusive(ctx, MainnetFlag, DeveloperFlag, GoerliFlag, SepoliaFlag, HoleskyFlag, OPNetworkFlag) CheckExclusive(ctx, DeveloperFlag, ExternalSignerFlag) // Can't use both ephemeral unlocked and external signer   // Set configurations from CLI flags @@ -1728,6 +1852,20 @@ } else { cfg.EthDiscoveryURLs = SplitAndTrim(urls) } } + // Only configure sequencer http flag if we're running in verifier mode i.e. --mine is disabled. + if ctx.IsSet(RollupSequencerHTTPFlag.Name) && !ctx.IsSet(MiningEnabledFlag.Name) { + cfg.RollupSequencerHTTP = ctx.String(RollupSequencerHTTPFlag.Name) + } + if ctx.IsSet(RollupHistoricalRPCFlag.Name) { + cfg.RollupHistoricalRPC = ctx.String(RollupHistoricalRPCFlag.Name) + } + if ctx.IsSet(RollupHistoricalRPCTimeoutFlag.Name) { + cfg.RollupHistoricalRPCTimeout = ctx.Duration(RollupHistoricalRPCTimeoutFlag.Name) + } + cfg.RollupDisableTxPoolGossip = ctx.Bool(RollupDisableTxPoolGossipFlag.Name) + cfg.RollupDisableTxPoolAdmission = cfg.RollupSequencerHTTP != "" && !ctx.Bool(RollupEnableTxPoolAdmissionFlag.Name) + cfg.RollupHaltOnIncompatibleProtocolVersion = ctx.String(RollupHaltOnIncompatibleProtocolVersionFlag.Name) + cfg.ApplySuperchainUpgrades = ctx.Bool(RollupSuperchainUpgradesFlag.Name) // Override any default configs for hard coded networks. 
switch { case ctx.Bool(MainnetFlag.Name): @@ -1829,6 +1967,12 @@ } if !ctx.IsSet(MinerGasPriceFlag.Name) { cfg.Miner.GasPrice = big.NewInt(1) } + case ctx.IsSet(OPNetworkFlag.Name): + genesis := MakeGenesis(ctx) + if !ctx.IsSet(NetworkIdFlag.Name) { + cfg.NetworkId = genesis.Config.ChainID.Uint64() + } + cfg.Genesis = genesis default: if cfg.NetworkId == 1 { SetDNSDiscoveryDefaults(cfg, params.MainnetGenesisHash) @@ -2020,8 +2164,7 @@ }   func IsNetworkPreset(ctx *cli.Context) bool { for _, flag := range NetworkFlags { - bFlag, _ := flag.(*cli.BoolFlag) - if ctx.IsSet(bFlag.Name) { + if ctx.IsSet(flag.Names()[0]) { return true } } @@ -2063,6 +2206,17 @@ case ctx.Bool(SepoliaFlag.Name): genesis = core.DefaultSepoliaGenesisBlock() case ctx.Bool(GoerliFlag.Name): genesis = core.DefaultGoerliGenesisBlock() + case ctx.IsSet(OPNetworkFlag.Name): + name := ctx.String(OPNetworkFlag.Name) + ch, err := params.OPStackChainIDByName(name) + if err != nil { + Fatalf("failed to load OP-Stack chain %q: %v", name, err) + } + genesis, err := core.LoadOPStackGenesis(ch) + if err != nil { + Fatalf("failed to load genesis for OP-Stack chain %q (%d): %v", name, ch, err) + } + return genesis case ctx.Bool(DeveloperFlag.Name): Fatalf("Developer chains are ephemeral") }
diff --git go-ethereum/internal/flags/categories.go op-geth/internal/flags/categories.go index 3ff0767921b9f97755b09d818ca7ca55e4f79c8a..fe2e6d29d4583b57ad9fdbb875dfc68e1eb12faa 100644 --- go-ethereum/internal/flags/categories.go +++ op-geth/internal/flags/categories.go @@ -32,6 +32,7 @@ NetworkingCategory = "NETWORKING" MinerCategory = "MINER" GasPriceCategory = "GAS PRICE ORACLE" VMCategory = "VIRTUAL MACHINE" + RollupCategory = "ROLLUP NODE" LoggingCategory = "LOGGING AND DEBUGGING" MetricsCategory = "METRICS AND STATS" MiscCategory = "MISC"

Version reporting: expose both the op-geth release version and the upstream go-ethereum version it is based on.

diff --git go-ethereum/build/ci.go op-geth/build/ci.go index 4d8dba6ce23405e8ab0ecf9d9cb6fadec1819f48..452b863332b73330a1a7ce944e2736c2effc2549 100644 --- go-ethereum/build/ci.go +++ op-geth/build/ci.go @@ -247,6 +247,9 @@ if env.Commit != "" { ld = append(ld, "-X", "github.com/ethereum/go-ethereum/internal/version.gitCommit="+env.Commit) ld = append(ld, "-X", "github.com/ethereum/go-ethereum/internal/version.gitDate="+env.Date) } + if env.Tag != "" { + ld = append(ld, "-X", "github.com/ethereum/go-ethereum/params.gitTag="+env.Tag) + } // Strip DWARF on darwin. This used to be required for certain things, // and there is no downside to this, so we just keep doing it. if runtime.GOOS == "darwin" { @@ -541,7 +544,7 @@ switch { case env.Branch == "master": tags = []string{"latest"} case strings.HasPrefix(env.Tag, "v1."): - tags = []string{"stable", fmt.Sprintf("release-1.%d", params.VersionMinor), "v" + params.Version} + tags = []string{"stable", fmt.Sprintf("release-1.%d", params.OPVersionMinor), "v" + params.Version} } // If architecture specific image builds are requested, build and push them if *image { @@ -833,10 +836,10 @@ return wdflag }   func isUnstableBuild(env build.Environment) bool { - if env.Tag != "" { - return false + if env.Tag == "untagged" || env.Tag == "" { + return true } - return true + return false }   type debPackage struct {
diff --git go-ethereum/cmd/geth/misccmd.go op-geth/cmd/geth/misccmd.go index f3530c30fb69fe08024d9f62a1a377d416c684d9..e7fb108ebfb6a065eb1232ced8fcdf1c49eee4c5 100644 --- go-ethereum/cmd/geth/misccmd.go +++ op-geth/cmd/geth/misccmd.go @@ -80,6 +80,7 @@ } if git.Date != "" { fmt.Println("Git Commit Date:", git.Date) } + fmt.Println("Upstream Version:", params.GethVersionWithMeta) fmt.Println("Architecture:", runtime.GOARCH) fmt.Println("Go Version:", runtime.Version()) fmt.Println("Operating System:", runtime.GOOS)
diff --git go-ethereum/params/version.go op-geth/params/version.go index a2c258ff58cb604141f6489e5d6d7211d10763c5..86adc36db7229844c6eb8b039fbf5422c0f6383d 100644 --- go-ethereum/params/version.go +++ op-geth/params/version.go @@ -18,8 +18,11 @@ package params   import ( "fmt" + "regexp" + "strconv" )   +// Version is the version of upstream geth const ( VersionMajor = 1 // Major version component of the current release VersionMinor = 13 // Minor version component of the current release @@ -27,14 +30,58 @@ VersionPatch = 15 // Patch version component of the current release VersionMeta = "stable" // Version metadata to append to the version string )   +// OPVersion is the version of op-geth +var ( + OPVersionMajor = 0 // Major version component of the current release + OPVersionMinor = 1 // Minor version component of the current release + OPVersionPatch = 0 // Patch version component of the current release + OPVersionMeta = "untagged" // Version metadata to append to the version string +) + +// This is set at build-time by the linker when the build is done by build/ci.go. +var gitTag string + +// Override the version variables if the gitTag was set at build time. +var _ = func() (_ string) { + semver := regexp.MustCompile(`^v([0-9]+)\.([0-9]+)\.([0-9]+)(?:-([0-9A-Za-z-]+(?:\.[0-9A-Za-z-]+)*))?(?:\+[0-9A-Za-z-]+)?$`) + version := semver.FindStringSubmatch(gitTag) + if version == nil { + return + } + if version[4] == "" { + version[4] = "stable" + } + OPVersionMajor, _ = strconv.Atoi(version[1]) + OPVersionMinor, _ = strconv.Atoi(version[2]) + OPVersionPatch, _ = strconv.Atoi(version[3]) + OPVersionMeta = version[4] + return +}() + // Version holds the textual version string. 
var Version = func() string { - return fmt.Sprintf("%d.%d.%d", VersionMajor, VersionMinor, VersionPatch) + if OPVersionMeta == "untagged" { + return OPVersionMeta + } + return fmt.Sprintf("%d.%d.%d", OPVersionMajor, OPVersionMinor, OPVersionPatch) }()   // VersionWithMeta holds the textual version string including the metadata. var VersionWithMeta = func() string { - v := Version + if OPVersionMeta != "untagged" { + return Version + "-" + OPVersionMeta + } + return Version +}() + +// GethVersion holds the textual geth version string. +var GethVersion = func() string { + return fmt.Sprintf("%d.%d.%d", VersionMajor, VersionMinor, VersionPatch) +}() + +// GethVersionWithMeta holds the textual geth version string including the metadata. +var GethVersionWithMeta = func() string { + v := GethVersion if VersionMeta != "" { v += "-" + VersionMeta } @@ -46,8 +93,8 @@ // "1.8.11-dea1ce05" for stable releases, or "1.8.13-unstable-21c059b6" for unstable // releases. func ArchiveVersion(gitCommit string) string { vsn := Version - if VersionMeta != "stable" { - vsn += "-" + VersionMeta + if OPVersionMeta != "stable" { + vsn += "-" + OPVersionMeta } if len(gitCommit) >= 8 { vsn += "-" + gitCommit[:8] @@ -60,7 +107,7 @@ vsn := VersionWithMeta if len(gitCommit) >= 8 { vsn += "-" + gitCommit[:8] } - if (VersionMeta != "stable") && (gitDate != "") { + if (OPVersionMeta != "stable") && (gitDate != "") { vsn += "-" + gitDate } return vsn
diff --git go-ethereum/eth/ethconfig/config.go op-geth/eth/ethconfig/config.go index ad664afb5bd1fa68165f23bfd30ef5c82eb0a7e0..fa1bfd4642c07d874e3dc4b7c1d28bb29aee4106 100644 --- go-ethereum/eth/ethconfig/config.go +++ op-geth/eth/ethconfig/config.go @@ -38,12 +38,13 @@ )   // FullNodeGPO contains default gasprice oracle settings for full node. var FullNodeGPO = gasprice.Config{ - Blocks: 20, - Percentile: 60, - MaxHeaderHistory: 1024, - MaxBlockHistory: 1024, - MaxPrice: gasprice.DefaultMaxPrice, - IgnorePrice: gasprice.DefaultIgnorePrice, + Blocks: 20, + Percentile: 60, + MaxHeaderHistory: 1024, + MaxBlockHistory: 1024, + MaxPrice: gasprice.DefaultMaxPrice, + IgnorePrice: gasprice.DefaultIgnorePrice, + MinSuggestedPriorityFee: gasprice.DefaultMinSuggestedPriorityFee, }   // Defaults contains default settings for use on the Ethereum main net. @@ -159,12 +160,33 @@ OverrideCancun *uint64 `toml:",omitempty"`   // OverrideVerkle (TODO: remove after the fork) OverrideVerkle *uint64 `toml:",omitempty"` + + OverrideOptimismCanyon *uint64 `toml:",omitempty"` + + OverrideOptimismEcotone *uint64 `toml:",omitempty"` + + OverrideOptimismFjord *uint64 `toml:",omitempty"` + + OverrideOptimismInterop *uint64 `toml:",omitempty"` + + // ApplySuperchainUpgrades requests the node to load chain-configuration from the superchain-registry. + ApplySuperchainUpgrades bool `toml:",omitempty"` + + RollupSequencerHTTP string + RollupHistoricalRPC string + RollupHistoricalRPCTimeout time.Duration + RollupDisableTxPoolGossip bool + RollupDisableTxPoolAdmission bool + RollupHaltOnIncompatibleProtocolVersion string }   // CreateConsensusEngine creates a consensus engine for the given chain config. // Clique is allowed for now to live standalone, but ethash is forbidden and can // only exist on already merged networks. 
func CreateConsensusEngine(config *params.ChainConfig, db ethdb.Database) (consensus.Engine, error) { + if config.Optimism != nil { + return beacon.New(&beacon.OpLegacy{}), nil + } // If proof-of-authority is requested, set it up if config.Clique != nil { return beacon.New(clique.New(config.Clique, db)), nil
diff --git go-ethereum/eth/handler.go op-geth/eth/handler.go index 0343a5787012498a764d8814a48a7c2488c52519..79115c505eefe0caa3804748715a4e8d58938981 100644 --- go-ethereum/eth/handler.go +++ op-geth/eth/handler.go @@ -93,6 +93,7 @@ Sync downloader.SyncMode // Whether to snap or full sync BloomCache uint64 // Megabytes to alloc for snap sync bloom EventMux *event.TypeMux // Legacy event mux, deprecate for `feed` RequiredBlocks map[uint64]common.Hash // Hard coded map of required block hashes for sync challenges + NoTxGossip bool // Disable P2P transaction gossip }   type handler struct { @@ -106,6 +107,8 @@ database ethdb.Database txpool txPool chain *core.BlockChain maxPeers int + + noTxGossip bool   downloader *downloader.Downloader blockFetcher *fetcher.BlockFetcher @@ -142,6 +145,7 @@ forkFilter: forkid.NewFilter(config.Chain), eventMux: config.EventMux, database: config.Database, txpool: config.TxPool, + noTxGossip: config.NoTxGossip, chain: config.Chain, peers: newPeerSet(), merger: config.Merger, @@ -169,9 +173,10 @@ log.Warn("Switch sync mode from full sync to snap sync", "reason", "head state missing") } } else { head := h.chain.CurrentBlock() - if head.Number.Uint64() > 0 && h.chain.HasState(head.Root) { + if head.Number.Uint64() > 0 && h.chain.HasState(head.Root) && (!config.Chain.Config().IsOptimism() || head.Number.Cmp(config.Chain.Config().BedrockBlock) != 0) { // Print warning log if database is not empty to run snap sync. - log.Warn("Switch sync mode from snap sync to full sync", "reason", "snap sync complete") + // For OP chains, snap sync from bedrock block is allowed. 
+ log.Warn("Switch sync mode from snap sync to full sync") } else { // If snap sync was requested and our database is empty, grant it h.snapSync.Store(true) @@ -182,8 +187,14 @@ // If snap sync is requested but snapshots are disabled, fail loudly if h.snapSync.Load() && config.Chain.Snapshots() == nil { return nil, errors.New("snap sync not supported with snapshots disabled") } + // if the chainID is set, pass it to the downloader for use in sync + // this might not be set in tests + var chainID uint64 + if cid := h.chain.Config().ChainID; cid != nil { + chainID = cid.Uint64() + } // Construct the downloader (long sync) - h.downloader = downloader.New(config.Database, h.eventMux, h.chain, nil, h.removePeer, h.enableSyncedFeatures) + h.downloader = downloader.New(config.Database, h.eventMux, h.chain, nil, h.removePeer, h.enableSyncedFeatures, chainID) if ttd := h.chain.Config().TerminalTotalDifficulty; ttd != nil { if h.chain.Config().TerminalTotalDifficultyPassed { log.Info("Chain post-merge, sync via beacon client")
diff --git go-ethereum/eth/handler_eth.go op-geth/eth/handler_eth.go index f1284c10e637e5d3dd9ee47a3652aa3cfaaba60e..6a11bf3689ed954ee2f7c415cb7aac8a19264b24 100644 --- go-ethereum/eth/handler_eth.go +++ op-geth/eth/handler_eth.go @@ -34,7 +34,20 @@ // packets that are sent as replies or broadcasts. type ethHandler handler   func (h *ethHandler) Chain() *core.BlockChain { return h.chain } -func (h *ethHandler) TxPool() eth.TxPool { return h.txpool } + +// NilPool satisfies the TxPool interface but does not return any tx in the +// pool. It is used to disable transaction gossip. +type NilPool struct{} + +// NilPool Get always returns nil +func (n NilPool) Get(hash common.Hash) *types.Transaction { return nil } + +func (h *ethHandler) TxPool() eth.TxPool { + if h.noTxGossip { + return &NilPool{} + } + return h.txpool +}   // RunPeer is invoked when a peer joins on the `eth` protocol. func (h *ethHandler) RunPeer(peer *eth.Peer, hand eth.Handler) error { @@ -52,6 +65,9 @@ // AcceptTxs retrieves whether transaction processing is enabled on the node // or if inbound transactions should simply be dropped. func (h *ethHandler) AcceptTxs() bool { + if h.noTxGossip { + return false + } return h.synced.Load() }
diff --git go-ethereum/core/blockchain.go op-geth/core/blockchain.go index b1bbc3d5982ded15ea3659b8141a85f5fc842a65..9fd20ad74458bd87e80fff2b19ee157afff74fc4 100644 --- go-ethereum/core/blockchain.go +++ op-geth/core/blockchain.go @@ -286,6 +286,10 @@ } log.Info(strings.Repeat("-", 153)) log.Info("")   + if chainConfig.IsOptimism() && chainConfig.RegolithTime == nil { + log.Warn("Optimism RegolithTime has not been set") + } + bc := &BlockChain{ chainConfig: chainConfig, cacheConfig: cacheConfig,
diff --git go-ethereum/eth/catalyst/superchain.go op-geth/eth/catalyst/superchain.go new file mode 100644 index 0000000000000000000000000000000000000000..503519889f4f050ad0aa739525022044fa842bc6 --- /dev/null +++ op-geth/eth/catalyst/superchain.go @@ -0,0 +1,66 @@ +package catalyst + +import ( + "fmt" + + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/metrics" + "github.com/ethereum/go-ethereum/params" +) + +var ( + requiredProtocolDeltaGauge = metrics.NewRegisteredGauge("superchain/required/delta", nil) + recommendedProtocolDeltaGauge = metrics.NewRegisteredGauge("superchain/recommended/delta", nil) +) + +type SuperchainSignal struct { + Recommended params.ProtocolVersion `json:"recommended"` + Required params.ProtocolVersion `json:"required"` +} + +func (api *ConsensusAPI) SignalSuperchainV1(signal *SuperchainSignal) (params.ProtocolVersion, error) { + if signal == nil { + log.Info("Received empty superchain version signal", "local", params.OPStackSupport) + return params.OPStackSupport, nil + } + // update metrics and log any warnings/info + requiredProtocolDeltaGauge.Update(int64(params.OPStackSupport.Compare(signal.Required))) + recommendedProtocolDeltaGauge.Update(int64(params.OPStackSupport.Compare(signal.Recommended))) + logger := log.New("local", params.OPStackSupport, "required", signal.Required, "recommended", signal.Recommended) + LogProtocolVersionSupport(logger, params.OPStackSupport, signal.Recommended, "recommended") + LogProtocolVersionSupport(logger, params.OPStackSupport, signal.Required, "required") + + if err := api.eth.HandleRequiredProtocolVersion(signal.Required); err != nil { + log.Error("Failed to handle required protocol version", "err", err, "required", signal.Required) + return params.OPStackSupport, err + } + + return params.OPStackSupport, nil +} + +func LogProtocolVersionSupport(logger log.Logger, local, other params.ProtocolVersion, name string) { + switch local.Compare(other) { + case params.AheadMajor: + 
logger.Info(fmt.Sprintf("Ahead with major %s protocol version change", name)) + case params.AheadMinor, params.AheadPatch, params.AheadPrerelease: + logger.Debug(fmt.Sprintf("Ahead with compatible %s protocol version change", name)) + case params.Matching: + logger.Debug(fmt.Sprintf("Latest %s protocol version is supported", name)) + case params.OutdatedMajor: + logger.Error(fmt.Sprintf("Outdated with major %s protocol change", name)) + case params.OutdatedMinor: + logger.Warn(fmt.Sprintf("Outdated with minor backward-compatible %s protocol change", name)) + case params.OutdatedPatch: + logger.Info(fmt.Sprintf("Outdated with support backward-compatible %s protocol change", name)) + case params.OutdatedPrerelease: + logger.Debug(fmt.Sprintf("New %s protocol pre-release is available", name)) + case params.DiffBuild: + logger.Debug(fmt.Sprintf("Ignoring %s protocolversion signal, local build is different", name)) + case params.DiffVersionType: + logger.Warn(fmt.Sprintf("Failed to recognize %s protocol version signal version-type", name)) + case params.EmptyVersion: + logger.Debug(fmt.Sprintf("No %s protocol version available to check", name)) + case params.InvalidVersion: + logger.Warn(fmt.Sprintf("Invalid protocol version comparison with %s", name)) + } +}

Snap-sync does not serve unprefixed code by default.

diff --git go-ethereum/core/blockchain_reader.go op-geth/core/blockchain_reader.go index 9e8e3bd4195af62293c8db16002bed95779ae3fa..0677175c62a2c808461d9e6088e3f6a22ce6e2b4 100644 --- go-ethereum/core/blockchain_reader.go +++ op-geth/core/blockchain_reader.go @@ -346,6 +346,14 @@ // in Verkle scheme. Fix it once snap-sync is supported for Verkle. return bc.stateCache.(codeReader).ContractCodeWithPrefix(common.Address{}, hash) }   +// ContractCode retrieves a blob of data associated with a contract hash +// either from ephemeral in-memory cache, or from persistent storage. +// This is a legacy-method, replaced by ContractCodeWithPrefix, +// but required for old databases to serve snap-sync. +func (bc *BlockChain) ContractCode(hash common.Hash) ([]byte, error) { + return bc.stateCache.ContractCode(common.Address{}, hash) +} + // State returns a new mutable state based on the current HEAD block. func (bc *BlockChain) State() (*state.StateDB, error) { return bc.StateAt(bc.CurrentBlock().Root)
diff --git go-ethereum/eth/protocols/snap/handler.go op-geth/eth/protocols/snap/handler.go index bd7ce9e71543c944c1a8e1ff2dd2dfcaa95d236f..0d1aac0cefb71a0894099fbdc40ad7cd4f0b204f 100644 --- go-ethereum/eth/protocols/snap/handler.go +++ op-geth/eth/protocols/snap/handler.go @@ -469,7 +469,7 @@ if hash == types.EmptyCodeHash { // Peers should not request the empty code, but if they do, at // least sent them back a correct response without db lookups codes = append(codes, []byte{}) - } else if blob, err := chain.ContractCodeWithPrefix(hash); err == nil { + } else if blob, err := chain.ContractCode(hash); err == nil { codes = append(codes, blob) bytes += uint64(len(blob)) }

Snap-sync has access to trusted deposit-transaction nonce data, used to correct receipts downloaded from peers.

diff --git go-ethereum/eth/downloader/downloader.go op-geth/eth/downloader/downloader.go index 6e7c5dcf02c8ee82e8dec503bebbcb0a259ce195..185aeed6d04d7902c34d0ecd339570239e88801f 100644 --- go-ethereum/eth/downloader/downloader.go +++ op-geth/eth/downloader/downloader.go @@ -156,6 +156,9 @@ // Progress reporting metrics syncStartBlock uint64 // Head snap block when Geth was started syncStartTime time.Time // Time instance when chain sync started syncLogTime time.Time // Time instance when status was last reported + + // Chain ID for downloaders to reference + chainID uint64 }   // LightChain encapsulates functions required to synchronise a light chain. @@ -216,7 +219,7 @@ TrieDB() *triedb.Database }   // New creates a new downloader to fetch hashes and blocks from remote peers. -func New(stateDb ethdb.Database, mux *event.TypeMux, chain BlockChain, lightchain LightChain, dropPeer peerDropFn, success func()) *Downloader { +func New(stateDb ethdb.Database, mux *event.TypeMux, chain BlockChain, lightchain LightChain, dropPeer peerDropFn, success func(), chainID uint64) *Downloader { if lightchain == nil { lightchain = chain } @@ -233,6 +236,7 @@ quitCh: make(chan struct{}), SnapSyncer: snap.NewSyncer(stateDb, chain.TrieDB().Scheme()), stateSyncStart: make(chan *stateSync), syncStartBlock: chain.CurrentSnapBlock().Number.Uint64(), + chainID: chainID, } // Create the post-merge skeleton syncer and start the process dl.skeleton = newSkeleton(stateDb, dl.peers, dropPeer, newBeaconBackfiller(dl, success)) @@ -1719,7 +1723,7 @@ blocks := make([]*types.Block, len(results)) receipts := make([]types.Receipts, len(results)) for i, result := range results { blocks[i] = types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.Uncles).WithWithdrawals(result.Withdrawals) - receipts[i] = result.Receipts + receipts[i] = correctReceipts(result.Receipts, result.Transactions, blocks[i].NumberU64(), d.chainID) } if index, err := d.blockchain.InsertReceiptChain(blocks, 
receipts, d.ancientLimit); err != nil { log.Debug("Downloaded item processing failed", "number", results[index].Header.Number, "hash", results[index].Header.Hash(), "err", err)
diff --git go-ethereum/eth/downloader/receiptreference.go op-geth/eth/downloader/receiptreference.go new file mode 100644 index 0000000000000000000000000000000000000000..b4c5623ddc9033ac6c03e48710f44e5c00cf1d94 --- /dev/null +++ op-geth/eth/downloader/receiptreference.go @@ -0,0 +1,144 @@ +package downloader + +import ( + "bytes" + "embed" + "encoding/gob" + "fmt" + "path" + "strings" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/log" +) + +// userDepositNonces is a struct to hold the reference data for user deposits +// The reference data is used to correct the deposit nonce in the receipts +type userDepositNonces struct { + ChainID uint64 + First uint64 + Last uint64 // non inclusive + Results map[uint64][]uint64 +} + +var ( + systemAddress = common.HexToAddress("0xDeaDDEaDDeAdDeAdDEAdDEaddeAddEAdDEAd0001") + receiptReferencePath = "userDepositData" + //go:embed userDepositData/*.gob + receiptReference embed.FS + userDepositNoncesAlreadySearched = map[uint64]bool{} + userDepositNoncesReference = map[uint64]userDepositNonces{} +) + +// lazy load the reference data for the requested chain +// if this chain data was already requested, returns early +func initReceiptReferences(chainID uint64) { + // if already loaded, return + if userDepositNoncesAlreadySearched[chainID] { + return + } + // look for a file prefixed by the chainID + fPrefix := fmt.Sprintf("%d.", chainID) + files, err := receiptReference.ReadDir(receiptReferencePath) + if err != nil { + log.Warn("Receipt Correction: Failed to read reference directory", "err", err) + return + } + // mark as loaded so we don't try again, even if no files match + userDepositNoncesAlreadySearched[chainID] = true + for _, file := range files { + // skip files which don't match the prefix + if !strings.HasPrefix(file.Name(), fPrefix) { + continue + } + fpath := path.Join(receiptReferencePath, file.Name()) + bs, err := 
receiptReference.ReadFile(fpath) + if err != nil { + log.Warn("Receipt Correction: Failed to read reference data", "err", err) + continue + } + udns := userDepositNonces{} + err = gob.NewDecoder(bytes.NewReader(bs)).Decode(&udns) + if err != nil { + log.Warn("Receipt Correction: Failed to decode reference data", "err", err) + continue + } + userDepositNoncesReference[udns.ChainID] = udns + return + } +} + +// correctReceipts corrects the deposit nonce in the receipts using the reference data +// prior to Canyon Hard Fork, DepositNonces were not cryptographically verifiable. +// As a consequence, the deposit nonces found during Snap Sync may be incorrect. +// This function inspects transaction data for user deposits, and if it is found to be incorrect, it is corrected. +// The data used to correct the deposit nonce is stored in the userDepositData directory, +// and was generated with the receipt reference tool in the optimism monorepo. +func correctReceipts(receipts types.Receipts, transactions types.Transactions, blockNumber uint64, chainID uint64) types.Receipts { + initReceiptReferences(chainID) + // if there is no data even after initialization, return the receipts as is + depositNoncesForChain, ok := userDepositNoncesReference[chainID] + if !ok { + log.Trace("Receipt Correction: No data source for chain", "chainID", chainID) + return receipts + } + + // check that the block number being examined is within the range of the reference data + if blockNumber < depositNoncesForChain.First || blockNumber > depositNoncesForChain.Last { + log.Trace("Receipt Correction: Block is out of range for receipt reference", + "blockNumber", blockNumber, + "start", depositNoncesForChain.First, + "end", depositNoncesForChain.Last) + return receipts + } + + // get the block nonces + blockNonces, ok := depositNoncesForChain.Results[blockNumber] + if !ok { + log.Trace("Receipt Correction: Block does not contain user deposits", "blockNumber", blockNumber) + return receipts + } + + // 
iterate through the receipts and transactions to correct the deposit nonce + // user deposits should always be at the front of the block, but we will check all transactions to be sure + udCount := 0 + for i := 0; i < len(receipts); i++ { + r := receipts[i] + tx := transactions[i] + from, err := types.Sender(types.LatestSignerForChainID(tx.ChainId()), tx) + if err != nil { + log.Warn("Receipt Correction: Failed to determine sender", "err", err) + continue + } + // break as soon as a non deposit is found + if r.Type != types.DepositTxType { + break + } + // ignore any transactions from the system address + if from != systemAddress { + // prevent index out of range (indicates a problem with the reference data or the block data) + if udCount >= len(blockNonces) { + log.Warn("Receipt Correction: More user deposits in block than included in reference data", "in_reference", len(blockNonces)) + break + } + nonce := blockNonces[udCount] + udCount++ + log.Trace("Receipt Correction: User Deposit detected", "address", from, "nonce", nonce) + if nonce != *r.DepositNonce { + // correct the deposit nonce + // warn because this should not happen unless the data was modified by corruption or a malicious peer + // by correcting the nonce, the entire block is still valid for use + log.Warn("Receipt Correction: Corrected deposit nonce", "nonce", *r.DepositNonce, "corrected", nonce) + r.DepositNonce = &nonce + } + } + } + // check for unused reference data (indicates a problem with the reference data or the block data) + if udCount < len(blockNonces) { + log.Warn("Receipt Correction: More user deposits in reference data than found in block", "in_reference", len(blockNonces), "in_block", udCount) + } + + log.Trace("Receipt Correction: Completed", "blockNumber", blockNumber, "userDeposits", udCount, "receipts", len(receipts), "transactions", len(transactions)) + return receipts +}

Fix the discv5 option so that discv5 can act as an active source for node discovery.

diff --git go-ethereum/p2p/server.go op-geth/p2p/server.go index 975a3bb916624a229a8425465ec37c6efb855ea2..b398c51eb4b7bf01b00811469eb59171ec42c1d8 100644 --- go-ethereum/p2p/server.go +++ op-geth/p2p/server.go @@ -579,6 +579,7 @@ srv.DiscV5, err = discover.ListenV5(sconn, srv.localnode, cfg) if err != nil { return err } + srv.discmix.AddSource(srv.DiscV5.RandomNodes()) }   // Add protocol-specific discovery sources.

Discovery bootnode addresses.

diff --git go-ethereum/params/bootnodes.go op-geth/params/bootnodes.go index 5e2c7c21810234011be103fbd4c03b0d1c57adde..df05fc7a2f594a569aac44d6957a81101aca4810 100644 --- go-ethereum/params/bootnodes.go +++ op-geth/params/bootnodes.go @@ -87,6 +87,29 @@ "enr:-LK4QA8FfhaAjlb_BXsXxSfiysR7R52Nhi9JBt4F8SPssu8hdE1BXQQEtVDC3qStCW60LSO7hEsVHv5zm8_6Vnjhcn0Bh2F0dG5ldHOIAAAAAAAAAACEZXRoMpC1MD8qAAAAAP__________gmlkgnY0gmlwhAN4aBKJc2VjcDI1NmsxoQJerDhsJ-KxZ8sHySMOCmTO6sHM3iCFQ6VMvLTe948MyYN0Y3CCI4yDdWRwgiOM", // 3.120.104.18 | aws-eu-central-1-frankfurt "enr:-LK4QKWrXTpV9T78hNG6s8AM6IO4XH9kFT91uZtFg1GcsJ6dKovDOr1jtAAFPnS2lvNltkOGA9k29BUN7lFh_sjuc9QBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpC1MD8qAAAAAP__________gmlkgnY0gmlwhANAdd-Jc2VjcDI1NmsxoQLQa6ai7y9PMN5hpLe5HmiJSlYzMuzP7ZhwRiwHvqNXdoN0Y3CCI4yDdWRwgiOM", // 3.64.117.223 | aws-eu-central-1-frankfurt} }   +var V5OPBootnodes = []string{ + // OP Labs + "enode://ca2774c3c401325850b2477fd7d0f27911efbf79b1e8b335066516e2bd8c4c9e0ba9696a94b1cb030a88eac582305ff55e905e64fb77fe0edcd70a4e5296d3ec@34.65.175.185:30305", + "enode://dd751a9ef8912be1bfa7a5e34e2c3785cc5253110bd929f385e07ba7ac19929fb0e0c5d93f77827291f4da02b2232240fbc47ea7ce04c46e333e452f8656b667@34.65.107.0:30305", + "enode://c5d289b56a77b6a2342ca29956dfd07aadf45364dde8ab20d1dc4efd4d1bc6b4655d902501daea308f4d8950737a4e93a4dfedd17b49cd5760ffd127837ca965@34.65.202.239:30305", + // Base + "enode://87a32fd13bd596b2ffca97020e31aef4ddcc1bbd4b95bb633d16c1329f654f34049ed240a36b449fda5e5225d70fe40bc667f53c304b71f8e68fc9d448690b51@3.231.138.188:30301", + "enode://ca21ea8f176adb2e229ce2d700830c844af0ea941a1d8152a9513b966fe525e809c3a6c73a2c18a12b74ed6ec4380edf91662778fe0b79f6a591236e49e176f9@184.72.129.189:30301", + "enode://acf4507a211ba7c1e52cdf4eef62cdc3c32e7c9c47998954f7ba024026f9a6b2150cd3f0b734d9c78e507ab70d59ba61dfe5c45e1078c7ad0775fb251d7735a2@3.220.145.177:30301", + 
"enode://8a5a5006159bf079d06a04e5eceab2a1ce6e0f721875b2a9c96905336219dbe14203d38f70f3754686a6324f786c2f9852d8c0dd3adac2d080f4db35efc678c5@3.231.11.52:30301", + "enode://cdadbe835308ad3557f9a1de8db411da1a260a98f8421d62da90e71da66e55e98aaa8e90aa7ce01b408a54e4bd2253d701218081ded3dbe5efbbc7b41d7cef79@54.198.153.150:30301", +} + +var V5OPTestnetBootnodes = []string{ + // OP Labs + "enode://2bd2e657bb3c8efffb8ff6db9071d9eb7be70d7c6d7d980ff80fc93b2629675c5f750bc0a5ef27cd788c2e491b8795a7e9a4a6e72178c14acc6753c0e5d77ae4@34.65.205.244:30305", + "enode://db8e1cab24624cc62fc35dbb9e481b88a9ef0116114cd6e41034c55b5b4f18755983819252333509bd8e25f6b12aadd6465710cd2e956558faf17672cce7551f@34.65.173.88:30305", + "enode://bfda2e0110cfd0f4c9f7aa5bf5ec66e6bd18f71a2db028d36b8bf8b0d6fdb03125c1606a6017b31311d96a36f5ef7e1ad11604d7a166745e6075a715dfa67f8a@34.65.229.245:30305", + // Base + "enode://548f715f3fc388a7c917ba644a2f16270f1ede48a5d88a4d14ea287cc916068363f3092e39936f1a3e7885198bef0e5af951f1d7b1041ce8ba4010917777e71f@18.210.176.114:30301", + "enode://6f10052847a966a725c9f4adf6716f9141155b99a0fb487fea3f51498f4c2a2cb8d534e680ee678f9447db85b93ff7c74562762c3714783a7233ac448603b25f@107.21.251.55:30301", +} + const dnsPrefix = "enrtree://AKA3AM6LPBYEUDMVNU3BSVQJ5AD45Y7YPOHJLEF6W26QOE4VTUDPE@"   // KnownDNSNetwork returns the address of a public DNS-based node list for the given
diff --git go-ethereum/eth/ethconfig/gen_config.go op-geth/eth/ethconfig/gen_config.go index 2abddc9e0d3878344f5ac547a0ac369db0d4a985..a80fb68fa411c6b8e5d8b926568e5b2a6a3feaf7 100644 --- go-ethereum/eth/ethconfig/gen_config.go +++ op-geth/eth/ethconfig/gen_config.go @@ -17,45 +17,56 @@ // MarshalTOML marshals as TOML. func (c Config) MarshalTOML() (interface{}, error) { type Config struct { - Genesis *core.Genesis `toml:",omitempty"` - NetworkId uint64 - SyncMode downloader.SyncMode - EthDiscoveryURLs []string - SnapDiscoveryURLs []string - NoPruning bool - NoPrefetch bool - TxLookupLimit uint64 `toml:",omitempty"` - TransactionHistory uint64 `toml:",omitempty"` - StateHistory uint64 `toml:",omitempty"` - StateScheme string `toml:",omitempty"` - RequiredBlocks map[uint64]common.Hash `toml:"-"` - LightServ int `toml:",omitempty"` - LightIngress int `toml:",omitempty"` - LightEgress int `toml:",omitempty"` - LightPeers int `toml:",omitempty"` - LightNoPrune bool `toml:",omitempty"` - LightNoSyncServe bool `toml:",omitempty"` - SkipBcVersionCheck bool `toml:"-"` - DatabaseHandles int `toml:"-"` - DatabaseCache int - DatabaseFreezer string - TrieCleanCache int - TrieDirtyCache int - TrieTimeout time.Duration - SnapshotCache int - Preimages bool - FilterLogCacheSize int - Miner miner.Config - TxPool legacypool.Config - BlobPool blobpool.Config - GPO gasprice.Config - EnablePreimageRecording bool - DocRoot string `toml:"-"` - RPCGasCap uint64 - RPCEVMTimeout time.Duration - RPCTxFeeCap float64 - OverrideCancun *uint64 `toml:",omitempty"` - OverrideVerkle *uint64 `toml:",omitempty"` + Genesis *core.Genesis `toml:",omitempty"` + NetworkId uint64 + SyncMode downloader.SyncMode + EthDiscoveryURLs []string + SnapDiscoveryURLs []string + NoPruning bool + NoPrefetch bool + TxLookupLimit uint64 `toml:",omitempty"` + TransactionHistory uint64 `toml:",omitempty"` + StateHistory uint64 `toml:",omitempty"` + StateScheme string `toml:",omitempty"` + RequiredBlocks 
map[uint64]common.Hash `toml:"-"` + LightServ int `toml:",omitempty"` + LightIngress int `toml:",omitempty"` + LightEgress int `toml:",omitempty"` + LightPeers int `toml:",omitempty"` + LightNoPrune bool `toml:",omitempty"` + LightNoSyncServe bool `toml:",omitempty"` + SkipBcVersionCheck bool `toml:"-"` + DatabaseHandles int `toml:"-"` + DatabaseCache int + DatabaseFreezer string + TrieCleanCache int + TrieDirtyCache int + TrieTimeout time.Duration + SnapshotCache int + Preimages bool + FilterLogCacheSize int + Miner miner.Config + TxPool legacypool.Config + BlobPool blobpool.Config + GPO gasprice.Config + EnablePreimageRecording bool + DocRoot string `toml:"-"` + RPCGasCap uint64 + RPCEVMTimeout time.Duration + RPCTxFeeCap float64 + OverrideCancun *uint64 `toml:",omitempty"` + OverrideVerkle *uint64 `toml:",omitempty"` + OverrideOptimismCanyon *uint64 `toml:",omitempty"` + OverrideOptimismEcotone *uint64 `toml:",omitempty"` + OverrideOptimismFjord *uint64 `toml:",omitempty"` + OverrideOptimismInterop *uint64 `toml:",omitempty"` + ApplySuperchainUpgrades bool `toml:",omitempty"` + RollupSequencerHTTP string + RollupHistoricalRPC string + RollupHistoricalRPCTimeout time.Duration + RollupDisableTxPoolGossip bool + RollupDisableTxPoolAdmission bool + RollupHaltOnIncompatibleProtocolVersion string } var enc Config enc.Genesis = c.Genesis @@ -97,51 +108,73 @@ enc.RPCEVMTimeout = c.RPCEVMTimeout enc.RPCTxFeeCap = c.RPCTxFeeCap enc.OverrideCancun = c.OverrideCancun enc.OverrideVerkle = c.OverrideVerkle + enc.OverrideOptimismCanyon = c.OverrideOptimismCanyon + enc.OverrideOptimismEcotone = c.OverrideOptimismEcotone + enc.OverrideOptimismFjord = c.OverrideOptimismFjord + enc.OverrideOptimismInterop = c.OverrideOptimismInterop + enc.ApplySuperchainUpgrades = c.ApplySuperchainUpgrades + enc.RollupSequencerHTTP = c.RollupSequencerHTTP + enc.RollupHistoricalRPC = c.RollupHistoricalRPC + enc.RollupHistoricalRPCTimeout = c.RollupHistoricalRPCTimeout + 
enc.RollupDisableTxPoolGossip = c.RollupDisableTxPoolGossip + enc.RollupDisableTxPoolAdmission = c.RollupDisableTxPoolAdmission + enc.RollupHaltOnIncompatibleProtocolVersion = c.RollupHaltOnIncompatibleProtocolVersion return &enc, nil }   // UnmarshalTOML unmarshals from TOML. func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error { type Config struct { - Genesis *core.Genesis `toml:",omitempty"` - NetworkId *uint64 - SyncMode *downloader.SyncMode - EthDiscoveryURLs []string - SnapDiscoveryURLs []string - NoPruning *bool - NoPrefetch *bool - TxLookupLimit *uint64 `toml:",omitempty"` - TransactionHistory *uint64 `toml:",omitempty"` - StateHistory *uint64 `toml:",omitempty"` - StateScheme *string `toml:",omitempty"` - RequiredBlocks map[uint64]common.Hash `toml:"-"` - LightServ *int `toml:",omitempty"` - LightIngress *int `toml:",omitempty"` - LightEgress *int `toml:",omitempty"` - LightPeers *int `toml:",omitempty"` - LightNoPrune *bool `toml:",omitempty"` - LightNoSyncServe *bool `toml:",omitempty"` - SkipBcVersionCheck *bool `toml:"-"` - DatabaseHandles *int `toml:"-"` - DatabaseCache *int - DatabaseFreezer *string - TrieCleanCache *int - TrieDirtyCache *int - TrieTimeout *time.Duration - SnapshotCache *int - Preimages *bool - FilterLogCacheSize *int - Miner *miner.Config - TxPool *legacypool.Config - BlobPool *blobpool.Config - GPO *gasprice.Config - EnablePreimageRecording *bool - DocRoot *string `toml:"-"` - RPCGasCap *uint64 - RPCEVMTimeout *time.Duration - RPCTxFeeCap *float64 - OverrideCancun *uint64 `toml:",omitempty"` - OverrideVerkle *uint64 `toml:",omitempty"` + Genesis *core.Genesis `toml:",omitempty"` + NetworkId *uint64 + SyncMode *downloader.SyncMode + EthDiscoveryURLs []string + SnapDiscoveryURLs []string + NoPruning *bool + NoPrefetch *bool + TxLookupLimit *uint64 `toml:",omitempty"` + TransactionHistory *uint64 `toml:",omitempty"` + StateHistory *uint64 `toml:",omitempty"` + StateScheme *string `toml:",omitempty"` + RequiredBlocks 
map[uint64]common.Hash `toml:"-"` + LightServ *int `toml:",omitempty"` + LightIngress *int `toml:",omitempty"` + LightEgress *int `toml:",omitempty"` + LightPeers *int `toml:",omitempty"` + LightNoPrune *bool `toml:",omitempty"` + LightNoSyncServe *bool `toml:",omitempty"` + SkipBcVersionCheck *bool `toml:"-"` + DatabaseHandles *int `toml:"-"` + DatabaseCache *int + DatabaseFreezer *string + TrieCleanCache *int + TrieDirtyCache *int + TrieTimeout *time.Duration + SnapshotCache *int + Preimages *bool + FilterLogCacheSize *int + Miner *miner.Config + TxPool *legacypool.Config + BlobPool *blobpool.Config + GPO *gasprice.Config + EnablePreimageRecording *bool + DocRoot *string `toml:"-"` + RPCGasCap *uint64 + RPCEVMTimeout *time.Duration + RPCTxFeeCap *float64 + OverrideCancun *uint64 `toml:",omitempty"` + OverrideVerkle *uint64 `toml:",omitempty"` + OverrideOptimismCanyon *uint64 `toml:",omitempty"` + OverrideOptimismEcotone *uint64 `toml:",omitempty"` + OverrideOptimismFjord *uint64 `toml:",omitempty"` + OverrideOptimismInterop *uint64 `toml:",omitempty"` + ApplySuperchainUpgrades *bool `toml:",omitempty"` + RollupSequencerHTTP *string + RollupHistoricalRPC *string + RollupHistoricalRPCTimeout *time.Duration + RollupDisableTxPoolGossip *bool + RollupDisableTxPoolAdmission *bool + RollupHaltOnIncompatibleProtocolVersion *string } var dec Config if err := unmarshal(&dec); err != nil { @@ -263,6 +296,39 @@ c.OverrideCancun = dec.OverrideCancun } if dec.OverrideVerkle != nil { c.OverrideVerkle = dec.OverrideVerkle + } + if dec.OverrideOptimismCanyon != nil { + c.OverrideOptimismCanyon = dec.OverrideOptimismCanyon + } + if dec.OverrideOptimismEcotone != nil { + c.OverrideOptimismEcotone = dec.OverrideOptimismEcotone + } + if dec.OverrideOptimismFjord != nil { + c.OverrideOptimismFjord = dec.OverrideOptimismFjord + } + if dec.OverrideOptimismInterop != nil { + c.OverrideOptimismInterop = dec.OverrideOptimismInterop + } + if dec.ApplySuperchainUpgrades != nil { + 
c.ApplySuperchainUpgrades = *dec.ApplySuperchainUpgrades + } + if dec.RollupSequencerHTTP != nil { + c.RollupSequencerHTTP = *dec.RollupSequencerHTTP + } + if dec.RollupHistoricalRPC != nil { + c.RollupHistoricalRPC = *dec.RollupHistoricalRPC + } + if dec.RollupHistoricalRPCTimeout != nil { + c.RollupHistoricalRPCTimeout = *dec.RollupHistoricalRPCTimeout + } + if dec.RollupDisableTxPoolGossip != nil { + c.RollupDisableTxPoolGossip = *dec.RollupDisableTxPoolGossip + } + if dec.RollupDisableTxPoolAdmission != nil { + c.RollupDisableTxPoolAdmission = *dec.RollupDisableTxPoolAdmission + } + if dec.RollupHaltOnIncompatibleProtocolVersion != nil { + c.RollupHaltOnIncompatibleProtocolVersion = *dec.RollupHaltOnIncompatibleProtocolVersion } return nil }

Encode the Deposit Tx properties, the L1 costs, and daisy-chain RPC-calls for pre-Bedrock historical data.

Pre-Bedrock L1-cost receipt data is loaded from the database if available, and post-Bedrock the L1-cost metadata is hydrated on-the-fly based on the L1 fee information in the corresponding block.

diff --git go-ethereum/core/rawdb/accessors_chain.go op-geth/core/rawdb/accessors_chain.go index 4686f82cf0970ae1b808c176055b46a310d19ed2..2b201f3290d24ed4a7834736e0cbb8acbb29dad8 100644 --- go-ethereum/core/rawdb/accessors_chain.go +++ op-geth/core/rawdb/accessors_chain.go @@ -626,7 +626,6 @@ log.Error("Missing body but have receipt", "hash", hash, "number", number) return nil } header := ReadHeader(db, hash, number) - var baseFee *big.Int if header == nil { baseFee = big.NewInt(0) @@ -676,6 +675,15 @@ type storedReceiptRLP struct { PostStateOrStatus []byte CumulativeGasUsed uint64 Logs []*types.Log + + // Remaining fields are declared to allow the receipt RLP to be parsed without errors. + // However, they must not be used as they may not be populated correctly due to multiple receipt formats + // being combined into a single list of optional fields which can be mistaken for each other. + // DepositNonce (*uint64) from Regolith deposit tx receipts will be parsed into L1GasUsed + L1GasUsed *big.Int `rlp:"optional"` // OVM legacy + L1GasPrice *big.Int `rlp:"optional"` // OVM legacy + L1Fee *big.Int `rlp:"optional"` // OVM legacy + FeeScalar string `rlp:"optional"` // OVM legacy }   // ReceiptLogs is a barebone version of ReceiptForStorage which only keeps
diff --git go-ethereum/core/types/gen_receipt_json.go op-geth/core/types/gen_receipt_json.go index 4c641a972795f70ce27e98baf1b3c2e032fe89c2..4e544a0b52b61bac03f57c71581117f04280f81e 100644 --- go-ethereum/core/types/gen_receipt_json.go +++ op-geth/core/types/gen_receipt_json.go @@ -16,21 +16,30 @@ // MarshalJSON marshals as JSON. func (r Receipt) MarshalJSON() ([]byte, error) { type Receipt struct { - Type hexutil.Uint64 `json:"type,omitempty"` - PostState hexutil.Bytes `json:"root"` - Status hexutil.Uint64 `json:"status"` - CumulativeGasUsed hexutil.Uint64 `json:"cumulativeGasUsed" gencodec:"required"` - Bloom Bloom `json:"logsBloom" gencodec:"required"` - Logs []*Log `json:"logs" gencodec:"required"` - TxHash common.Hash `json:"transactionHash" gencodec:"required"` - ContractAddress common.Address `json:"contractAddress"` - GasUsed hexutil.Uint64 `json:"gasUsed" gencodec:"required"` - EffectiveGasPrice *hexutil.Big `json:"effectiveGasPrice"` - BlobGasUsed hexutil.Uint64 `json:"blobGasUsed,omitempty"` - BlobGasPrice *hexutil.Big `json:"blobGasPrice,omitempty"` - BlockHash common.Hash `json:"blockHash,omitempty"` - BlockNumber *hexutil.Big `json:"blockNumber,omitempty"` - TransactionIndex hexutil.Uint `json:"transactionIndex"` + Type hexutil.Uint64 `json:"type,omitempty"` + PostState hexutil.Bytes `json:"root"` + Status hexutil.Uint64 `json:"status"` + CumulativeGasUsed hexutil.Uint64 `json:"cumulativeGasUsed" gencodec:"required"` + Bloom Bloom `json:"logsBloom" gencodec:"required"` + Logs []*Log `json:"logs" gencodec:"required"` + TxHash common.Hash `json:"transactionHash" gencodec:"required"` + ContractAddress common.Address `json:"contractAddress"` + GasUsed hexutil.Uint64 `json:"gasUsed" gencodec:"required"` + EffectiveGasPrice *hexutil.Big `json:"effectiveGasPrice"` + BlobGasUsed hexutil.Uint64 `json:"blobGasUsed,omitempty"` + BlobGasPrice *hexutil.Big `json:"blobGasPrice,omitempty"` + DepositNonce *hexutil.Uint64 `json:"depositNonce,omitempty"` + 
DepositReceiptVersion *hexutil.Uint64 `json:"depositReceiptVersion,omitempty"` + BlockHash common.Hash `json:"blockHash,omitempty"` + BlockNumber *hexutil.Big `json:"blockNumber,omitempty"` + TransactionIndex hexutil.Uint `json:"transactionIndex"` + L1GasPrice *hexutil.Big `json:"l1GasPrice,omitempty"` + L1BlobBaseFee *hexutil.Big `json:"l1BlobBaseFee,omitempty"` + L1GasUsed *hexutil.Big `json:"l1GasUsed,omitempty"` + L1Fee *hexutil.Big `json:"l1Fee,omitempty"` + FeeScalar *big.Float `json:"l1FeeScalar,omitempty"` + L1BaseFeeScalar *hexutil.Uint64 `json:"l1BaseFeeScalar,omitempty"` + L1BlobBaseFeeScalar *hexutil.Uint64 `json:"l1BlobBaseFeeScalar,omitempty"` } var enc Receipt enc.Type = hexutil.Uint64(r.Type) @@ -45,30 +54,48 @@ enc.GasUsed = hexutil.Uint64(r.GasUsed) enc.EffectiveGasPrice = (*hexutil.Big)(r.EffectiveGasPrice) enc.BlobGasUsed = hexutil.Uint64(r.BlobGasUsed) enc.BlobGasPrice = (*hexutil.Big)(r.BlobGasPrice) + enc.DepositNonce = (*hexutil.Uint64)(r.DepositNonce) + enc.DepositReceiptVersion = (*hexutil.Uint64)(r.DepositReceiptVersion) enc.BlockHash = r.BlockHash enc.BlockNumber = (*hexutil.Big)(r.BlockNumber) enc.TransactionIndex = hexutil.Uint(r.TransactionIndex) + enc.L1GasPrice = (*hexutil.Big)(r.L1GasPrice) + enc.L1BlobBaseFee = (*hexutil.Big)(r.L1BlobBaseFee) + enc.L1GasUsed = (*hexutil.Big)(r.L1GasUsed) + enc.L1Fee = (*hexutil.Big)(r.L1Fee) + enc.FeeScalar = r.FeeScalar + enc.L1BaseFeeScalar = (*hexutil.Uint64)(r.L1BaseFeeScalar) + enc.L1BlobBaseFeeScalar = (*hexutil.Uint64)(r.L1BlobBaseFeeScalar) return json.Marshal(&enc) }   // UnmarshalJSON unmarshals from JSON. 
func (r *Receipt) UnmarshalJSON(input []byte) error { type Receipt struct { - Type *hexutil.Uint64 `json:"type,omitempty"` - PostState *hexutil.Bytes `json:"root"` - Status *hexutil.Uint64 `json:"status"` - CumulativeGasUsed *hexutil.Uint64 `json:"cumulativeGasUsed" gencodec:"required"` - Bloom *Bloom `json:"logsBloom" gencodec:"required"` - Logs []*Log `json:"logs" gencodec:"required"` - TxHash *common.Hash `json:"transactionHash" gencodec:"required"` - ContractAddress *common.Address `json:"contractAddress"` - GasUsed *hexutil.Uint64 `json:"gasUsed" gencodec:"required"` - EffectiveGasPrice *hexutil.Big `json:"effectiveGasPrice"` - BlobGasUsed *hexutil.Uint64 `json:"blobGasUsed,omitempty"` - BlobGasPrice *hexutil.Big `json:"blobGasPrice,omitempty"` - BlockHash *common.Hash `json:"blockHash,omitempty"` - BlockNumber *hexutil.Big `json:"blockNumber,omitempty"` - TransactionIndex *hexutil.Uint `json:"transactionIndex"` + Type *hexutil.Uint64 `json:"type,omitempty"` + PostState *hexutil.Bytes `json:"root"` + Status *hexutil.Uint64 `json:"status"` + CumulativeGasUsed *hexutil.Uint64 `json:"cumulativeGasUsed" gencodec:"required"` + Bloom *Bloom `json:"logsBloom" gencodec:"required"` + Logs []*Log `json:"logs" gencodec:"required"` + TxHash *common.Hash `json:"transactionHash" gencodec:"required"` + ContractAddress *common.Address `json:"contractAddress"` + GasUsed *hexutil.Uint64 `json:"gasUsed" gencodec:"required"` + EffectiveGasPrice *hexutil.Big `json:"effectiveGasPrice"` + BlobGasUsed *hexutil.Uint64 `json:"blobGasUsed,omitempty"` + BlobGasPrice *hexutil.Big `json:"blobGasPrice,omitempty"` + DepositNonce *hexutil.Uint64 `json:"depositNonce,omitempty"` + DepositReceiptVersion *hexutil.Uint64 `json:"depositReceiptVersion,omitempty"` + BlockHash *common.Hash `json:"blockHash,omitempty"` + BlockNumber *hexutil.Big `json:"blockNumber,omitempty"` + TransactionIndex *hexutil.Uint `json:"transactionIndex"` + L1GasPrice *hexutil.Big `json:"l1GasPrice,omitempty"` + 
L1BlobBaseFee *hexutil.Big `json:"l1BlobBaseFee,omitempty"` + L1GasUsed *hexutil.Big `json:"l1GasUsed,omitempty"` + L1Fee *hexutil.Big `json:"l1Fee,omitempty"` + FeeScalar *big.Float `json:"l1FeeScalar,omitempty"` + L1BaseFeeScalar *hexutil.Uint64 `json:"l1BaseFeeScalar,omitempty"` + L1BlobBaseFeeScalar *hexutil.Uint64 `json:"l1BlobBaseFeeScalar,omitempty"` } var dec Receipt if err := json.Unmarshal(input, &dec); err != nil { @@ -115,6 +142,12 @@ } if dec.BlobGasPrice != nil { r.BlobGasPrice = (*big.Int)(dec.BlobGasPrice) } + if dec.DepositNonce != nil { + r.DepositNonce = (*uint64)(dec.DepositNonce) + } + if dec.DepositReceiptVersion != nil { + r.DepositReceiptVersion = (*uint64)(dec.DepositReceiptVersion) + } if dec.BlockHash != nil { r.BlockHash = *dec.BlockHash } @@ -123,6 +156,27 @@ r.BlockNumber = (*big.Int)(dec.BlockNumber) } if dec.TransactionIndex != nil { r.TransactionIndex = uint(*dec.TransactionIndex) + } + if dec.L1GasPrice != nil { + r.L1GasPrice = (*big.Int)(dec.L1GasPrice) + } + if dec.L1BlobBaseFee != nil { + r.L1BlobBaseFee = (*big.Int)(dec.L1BlobBaseFee) + } + if dec.L1GasUsed != nil { + r.L1GasUsed = (*big.Int)(dec.L1GasUsed) + } + if dec.L1Fee != nil { + r.L1Fee = (*big.Int)(dec.L1Fee) + } + if dec.FeeScalar != nil { + r.FeeScalar = dec.FeeScalar + } + if dec.L1BaseFeeScalar != nil { + r.L1BaseFeeScalar = (*uint64)(dec.L1BaseFeeScalar) + } + if dec.L1BlobBaseFeeScalar != nil { + r.L1BlobBaseFeeScalar = (*uint64)(dec.L1BlobBaseFeeScalar) } return nil }
diff --git go-ethereum/core/types/receipt.go op-geth/core/types/receipt.go index 4f96fde59c44278beee06e9846e158c2501644e6..4bc83bf988f84dbe37f3fc0a04577c2838f9363a 100644 --- go-ethereum/core/types/receipt.go +++ op-geth/core/types/receipt.go @@ -46,6 +46,9 @@ ReceiptStatusFailed = uint64(0)   // ReceiptStatusSuccessful is the status code of a transaction if execution succeeded. ReceiptStatusSuccessful = uint64(1) + + // The version number for post-canyon deposit receipts. + CanyonDepositReceiptVersion = uint64(1) )   // Receipt represents the results of a transaction. @@ -58,7 +61,8 @@ CumulativeGasUsed uint64 `json:"cumulativeGasUsed" gencodec:"required"` Bloom Bloom `json:"logsBloom" gencodec:"required"` Logs []*Log `json:"logs" gencodec:"required"`   - // Implementation fields: These fields are added by geth when processing a transaction. + // Implementation fields: These fields are added by geth when processing a transaction or retrieving a receipt. + // gencodec annotated fields: these are stored in the chain database. TxHash common.Hash `json:"transactionHash" gencodec:"required"` ContractAddress common.Address `json:"contractAddress"` GasUsed uint64 `json:"gasUsed" gencodec:"required"` @@ -66,11 +70,28 @@ EffectiveGasPrice *big.Int `json:"effectiveGasPrice"` // required, but tag omitted for backwards compatibility BlobGasUsed uint64 `json:"blobGasUsed,omitempty"` BlobGasPrice *big.Int `json:"blobGasPrice,omitempty"`   + // DepositNonce was introduced in Regolith to store the actual nonce used by deposit transactions + // The state transition process ensures this is only set for Regolith deposit transactions. + DepositNonce *uint64 `json:"depositNonce,omitempty"` + // DepositReceiptVersion was introduced in Canyon to indicate an update to how receipt hashes + // should be computed when set. The state transition process ensures this is only set for + // post-Canyon deposit transactions. 
+ DepositReceiptVersion *uint64 `json:"depositReceiptVersion,omitempty"` + // Inclusion information: These fields provide information about the inclusion of the // transaction corresponding to this receipt. BlockHash common.Hash `json:"blockHash,omitempty"` BlockNumber *big.Int `json:"blockNumber,omitempty"` TransactionIndex uint `json:"transactionIndex"` + + // Optimism: extend receipts with L1 fee info + L1GasPrice *big.Int `json:"l1GasPrice,omitempty"` // Present from pre-bedrock. L1 Basefee after Bedrock + L1BlobBaseFee *big.Int `json:"l1BlobBaseFee,omitempty"` // Always nil prior to the Ecotone hardfork + L1GasUsed *big.Int `json:"l1GasUsed,omitempty"` // Present from pre-bedrock, deprecated as of Fjord + L1Fee *big.Int `json:"l1Fee,omitempty"` // Present from pre-bedrock + FeeScalar *big.Float `json:"l1FeeScalar,omitempty"` // Present from pre-bedrock to Ecotone. Nil after Ecotone + L1BaseFeeScalar *uint64 `json:"l1BaseFeeScalar,omitempty"` // Always nil prior to the Ecotone hardfork + L1BlobBaseFeeScalar *uint64 `json:"l1BlobBaseFeeScalar,omitempty"` // Always nil prior to the Ecotone hardfork }   type receiptMarshaling struct { @@ -84,6 +105,17 @@ BlobGasUsed hexutil.Uint64 BlobGasPrice *hexutil.Big BlockNumber *hexutil.Big TransactionIndex hexutil.Uint + + // Optimism + L1GasPrice *hexutil.Big + L1BlobBaseFee *hexutil.Big + L1GasUsed *hexutil.Big + L1Fee *hexutil.Big + FeeScalar *big.Float + L1BaseFeeScalar *hexutil.Uint64 + L1BlobBaseFeeScalar *hexutil.Uint64 + DepositNonce *hexutil.Uint64 + DepositReceiptVersion *hexutil.Uint64 }   // receiptRLP is the consensus encoding of a receipt. @@ -94,11 +126,98 @@ Bloom Bloom Logs []*Log }   +type depositReceiptRLP struct { + PostStateOrStatus []byte + CumulativeGasUsed uint64 + Bloom Bloom + Logs []*Log + // DepositNonce was introduced in Regolith to store the actual nonce used by deposit transactions. + // Must be nil for any transactions prior to Regolith or that aren't deposit transactions. 
+ DepositNonce *uint64 `rlp:"optional"` + // Receipt hash post-Regolith but pre-Canyon inadvertently did not include the above + // DepositNonce. Post Canyon, receipts will have a non-empty DepositReceiptVersion indicating + // which post-Canyon receipt hash function to invoke. + DepositReceiptVersion *uint64 `rlp:"optional"` +} + // storedReceiptRLP is the storage encoding of a receipt. type storedReceiptRLP struct { PostStateOrStatus []byte CumulativeGasUsed uint64 Logs []*Log + // DepositNonce was introduced in Regolith to store the actual nonce used by deposit transactions. + // Must be nil for any transactions prior to Regolith or that aren't deposit transactions. + DepositNonce *uint64 `rlp:"optional"` + // Receipt hash post-Regolith but pre-Canyon inadvertently did not include the above + // DepositNonce. Post Canyon, receipts will have a non-empty DepositReceiptVersion indicating + // which post-Canyon receipt hash function to invoke. + DepositReceiptVersion *uint64 `rlp:"optional"` +} + +// LegacyOptimismStoredReceiptRLP is the pre bedrock storage encoding of a +// receipt. It will only exist in the database if it was migrated using the +// migration tool. Nodes that sync using snap-sync will not have any of these +// entries. +type LegacyOptimismStoredReceiptRLP struct { + PostStateOrStatus []byte + CumulativeGasUsed uint64 + Logs []*LogForStorage + L1GasUsed *big.Int + L1GasPrice *big.Int + L1Fee *big.Int + FeeScalar string +} + +// LogForStorage is a wrapper around a Log that handles +// backward compatibility with prior storage formats. +type LogForStorage Log + +// EncodeRLP implements rlp.Encoder. 
+func (l *LogForStorage) EncodeRLP(w io.Writer) error { + rl := Log{Address: l.Address, Topics: l.Topics, Data: l.Data} + return rlp.Encode(w, &rl) +} + +type legacyRlpStorageLog struct { + Address common.Address + Topics []common.Hash + Data []byte + BlockNumber uint64 + TxHash common.Hash + TxIndex uint + BlockHash common.Hash + Index uint +} + +// DecodeRLP implements rlp.Decoder. +// +// Note some redundant fields(e.g. block number, tx hash etc) will be assembled later. +func (l *LogForStorage) DecodeRLP(s *rlp.Stream) error { + blob, err := s.Raw() + if err != nil { + return err + } + var dec Log + err = rlp.DecodeBytes(blob, &dec) + if err == nil { + *l = LogForStorage{ + Address: dec.Address, + Topics: dec.Topics, + Data: dec.Data, + } + } else { + // Try to decode log with previous definition. + var dec legacyRlpStorageLog + err = rlp.DecodeBytes(blob, &dec) + if err == nil { + *l = LogForStorage{ + Address: dec.Address, + Topics: dec.Topics, + Data: dec.Data, + } + } + } + return err }   // NewReceipt creates a barebone transaction receipt, copying the init fields. @@ -136,7 +255,13 @@ // encodeTyped writes the canonical encoding of a typed receipt to w. func (r *Receipt) encodeTyped(data *receiptRLP, w *bytes.Buffer) error { w.WriteByte(r.Type) - return rlp.Encode(w, data) + switch r.Type { + case DepositTxType: + withNonce := &depositReceiptRLP{data.PostStateOrStatus, data.CumulativeGasUsed, data.Bloom, data.Logs, r.DepositNonce, r.DepositReceiptVersion} + return rlp.Encode(w, withNonce) + default: + return rlp.Encode(w, data) + } }   // MarshalBinary returns the consensus encoding of the receipt. 
@@ -212,6 +337,16 @@ return err } r.Type = b[0] return r.setFromRLP(data) + case DepositTxType: + var data depositReceiptRLP + err := rlp.DecodeBytes(b[1:], &data) + if err != nil { + return err + } + r.Type = b[0] + r.DepositNonce = data.DepositNonce + r.DepositReceiptVersion = data.DepositReceiptVersion + return r.setFromRLP(receiptRLP{data.PostStateOrStatus, data.CumulativeGasUsed, data.Bloom, data.Logs}) default: return ErrTxTypeNotSupported } @@ -275,6 +410,12 @@ return err } } w.ListEnd(logList) + if r.DepositNonce != nil { + w.WriteUint64(*r.DepositNonce) + if r.DepositReceiptVersion != nil { + w.WriteUint64(*r.DepositReceiptVersion) + } + } w.ListEnd(outerList) return w.Flush() } @@ -282,8 +423,51 @@ // DecodeRLP implements rlp.Decoder, and loads both consensus and implementation // fields of a receipt from an RLP stream. func (r *ReceiptForStorage) DecodeRLP(s *rlp.Stream) error { + // Retrieve the entire receipt blob as we need to try multiple decoders + blob, err := s.Raw() + if err != nil { + return err + } + // First try to decode the latest receipt database format, try the pre-bedrock Optimism legacy format otherwise. 
+ if err := decodeStoredReceiptRLP(r, blob); err == nil { + return nil + } + return decodeLegacyOptimismReceiptRLP(r, blob) +} + +func decodeLegacyOptimismReceiptRLP(r *ReceiptForStorage, blob []byte) error { + var stored LegacyOptimismStoredReceiptRLP + if err := rlp.DecodeBytes(blob, &stored); err != nil { + return err + } + if err := (*Receipt)(r).setStatus(stored.PostStateOrStatus); err != nil { + return err + } + r.CumulativeGasUsed = stored.CumulativeGasUsed + r.Logs = make([]*Log, len(stored.Logs)) + for i, log := range stored.Logs { + r.Logs[i] = (*Log)(log) + } + r.Bloom = CreateBloom(Receipts{(*Receipt)(r)}) + // UsingOVM + scalar := new(big.Float) + if stored.FeeScalar != "" { + var ok bool + scalar, ok = scalar.SetString(stored.FeeScalar) + if !ok { + return errors.New("cannot parse fee scalar") + } + } + r.L1GasUsed = stored.L1GasUsed + r.L1GasPrice = stored.L1GasPrice + r.L1Fee = stored.L1Fee + r.FeeScalar = scalar + return nil +} + +func decodeStoredReceiptRLP(r *ReceiptForStorage, blob []byte) error { var stored storedReceiptRLP - if err := s.Decode(&stored); err != nil { + if err := rlp.DecodeBytes(blob, &stored); err != nil { return err } if err := (*Receipt)(r).setStatus(stored.PostStateOrStatus); err != nil { @@ -292,7 +476,10 @@ } r.CumulativeGasUsed = stored.CumulativeGasUsed r.Logs = stored.Logs r.Bloom = CreateBloom(Receipts{(*Receipt)(r)}) - + if stored.DepositNonce != nil { + r.DepositNonce = stored.DepositNonce + r.DepositReceiptVersion = stored.DepositReceiptVersion + } return nil }   @@ -302,7 +489,10 @@ // Len returns the number of receipts in this list. func (rs Receipts) Len() int { return len(rs) }   -// EncodeIndex encodes the i'th receipt to w. +// EncodeIndex encodes the i'th receipt to w. 
For DepositTxType receipts with non-nil DepositNonce +// but nil DepositReceiptVersion, the output will differ from calling r.MarshalBinary(); this +// behavior difference should not be changed to preserve backwards compatibility of receipt-root +// hash computation. func (rs Receipts) EncodeIndex(i int, w *bytes.Buffer) { r := rs[i] data := &receiptRLP{r.statusEncoding(), r.CumulativeGasUsed, r.Bloom, r.Logs} @@ -314,6 +504,14 @@ w.WriteByte(r.Type) switch r.Type { case AccessListTxType, DynamicFeeTxType, BlobTxType: rlp.Encode(w, data) + case DepositTxType: + if r.DepositReceiptVersion != nil { + // post-canyon receipt hash computation update + depositData := &depositReceiptRLP{data.PostStateOrStatus, data.CumulativeGasUsed, r.Bloom, r.Logs, r.DepositNonce, r.DepositReceiptVersion} + rlp.Encode(w, depositData) + } else { + rlp.Encode(w, data) + } default: // For unsupported types, write nothing. Since this is for // DeriveSha, the error will be caught matching the derived hash @@ -351,7 +549,11 @@ // The contract address can be derived from the transaction itself if txs[i].To() == nil { // Deriving the signer is expensive, only do if it's actually needed from, _ := Sender(signer, txs[i]) - rs[i].ContractAddress = crypto.CreateAddress(from, txs[i].Nonce()) + nonce := txs[i].Nonce() + if rs[i].DepositNonce != nil { + nonce = *rs[i].DepositNonce + } + rs[i].ContractAddress = crypto.CreateAddress(from, nonce) } else { rs[i].ContractAddress = common.Address{} } @@ -373,5 +575,30 @@ rs[i].Logs[j].Index = logIndex logIndex++ } } + if config.Optimism != nil && len(txs) >= 2 && config.IsBedrock(new(big.Int).SetUint64(number)) { // need at least an info tx and a non-info tx + gasParams, err := extractL1GasParams(config, time, txs[0].Data()) + if err != nil { + return err + } + for i := 0; i < len(rs); i++ { + if txs[i].IsDepositTx() { + continue + } + rs[i].L1GasPrice = gasParams.l1BaseFee + rs[i].L1BlobBaseFee = gasParams.l1BlobBaseFee + rs[i].L1Fee, rs[i].L1GasUsed = 
gasParams.costFunc(txs[i].RollupCostData()) + rs[i].FeeScalar = gasParams.feeScalar + rs[i].L1BaseFeeScalar = u32ptrTou64ptr(gasParams.l1BaseFeeScalar) + rs[i].L1BlobBaseFeeScalar = u32ptrTou64ptr(gasParams.l1BlobBaseFeeScalar) + } + } return nil } + +func u32ptrTou64ptr(a *uint32) *uint64 { + if a == nil { + return nil + } + b := uint64(*a) + return &b +}

Forward transactions to the sequencer if configured.

diff --git go-ethereum/eth/api_backend.go op-geth/eth/api_backend.go index 65adccd8518cd6faee0c0a0898d44602ba2ce8ec..d92984ae2122da5c7c7cbd08e44fdbc4e6a55b36 100644 --- go-ethereum/eth/api_backend.go +++ op-geth/eth/api_backend.go @@ -19,12 +19,14 @@ import ( "context" "errors" + "fmt" "math/big" "time"   "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/accounts" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/consensus" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/bloombits" @@ -37,6 +39,7 @@ "github.com/ethereum/go-ethereum/eth/gasprice" "github.com/ethereum/go-ethereum/eth/tracers" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/event" + "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/miner" "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rpc" @@ -46,6 +49,7 @@ // EthAPIBackend implements ethapi.Backend and tracers.Backend for full nodes type EthAPIBackend struct { extRPCEnabled bool allowUnprotectedTxs bool + disableTxPool bool eth *Ethereum gpo *gasprice.Oracle } @@ -190,10 +194,11 @@ func (b *EthAPIBackend) StateAndHeaderByNumber(ctx context.Context, number rpc.BlockNumber) (*state.StateDB, *types.Header, error) { // Pending state is only known by the miner if number == rpc.PendingBlockNumber { block, state := b.eth.miner.Pending() - if block == nil || state == nil { - return nil, nil, errors.New("pending state is not available") + if block != nil && state != nil { + return state, block.Header(), nil + } else { + number = rpc.LatestBlockNumber // fall back to latest state } - return state, block.Header(), nil } // Otherwise resolve the block number and return its state header, err := b.HeaderByNumber(ctx, number) @@ -201,7 +206,7 @@ if err != nil { return nil, nil, err } if header == nil { - return nil, nil, errors.New("header not found") + return nil, nil, 
fmt.Errorf("header %w", ethereum.NotFound) } stateDb, err := b.eth.BlockChain().StateAt(header.Root) if err != nil { @@ -220,7 +225,7 @@ if err != nil { return nil, nil, err } if header == nil { - return nil, nil, errors.New("header for hash not found") + return nil, nil, fmt.Errorf("header for hash %w", ethereum.NotFound) } if blockNrOrHash.RequireCanonical && b.eth.blockchain.GetCanonicalHash(header.Number.Uint64()) != hash { return nil, nil, errors.New("hash is not currently canonical") @@ -258,7 +263,7 @@ var context vm.BlockContext if blockCtx != nil { context = *blockCtx } else { - context = core.NewEVMBlockContext(header, b.eth.BlockChain(), nil) + context = core.NewEVMBlockContext(header, b.eth.BlockChain(), nil, b.eth.blockchain.Config(), state) } return vm.NewEVM(context, txContext, state, b.ChainConfig(), *vmConfig) } @@ -288,6 +293,29 @@ return b.eth.BlockChain().SubscribeLogsEvent(ch) }   func (b *EthAPIBackend) SendTx(ctx context.Context, signedTx *types.Transaction) error { + if b.ChainConfig().IsOptimism() && signedTx.Type() == types.BlobTxType { + return types.ErrTxTypeNotSupported + } + if b.eth.seqRPCService != nil { + data, err := signedTx.MarshalBinary() + if err != nil { + return err + } + if err := b.eth.seqRPCService.CallContext(ctx, nil, "eth_sendRawTransaction", hexutil.Encode(data)); err != nil { + return err + } + if b.disableTxPool { + return nil + } + // Retain tx in local tx pool after forwarding, for local RPC usage. 
+ if err := b.eth.txPool.Add([]*types.Transaction{signedTx}, true, false)[0]; err != nil { + log.Warn("successfully sent tx to sequencer, but failed to persist in local tx pool", "err", err, "tx", signedTx.Hash()) + } + return nil + } + if b.disableTxPool { + return nil + } return b.eth.txPool.Add([]*types.Transaction{signedTx}, true, false)[0] }   @@ -436,3 +464,11 @@ func (b *EthAPIBackend) StateAtTransaction(ctx context.Context, block *types.Block, txIndex int, reexec uint64) (*core.Message, vm.BlockContext, *state.StateDB, tracers.StateReleaseFunc, error) { return b.eth.stateAtTransaction(ctx, block, txIndex, reexec) } + +func (b *EthAPIBackend) HistoricalRPCService() *rpc.Client { + return b.eth.historicalRPCService +} + +func (b *EthAPIBackend) Genesis() *types.Block { + return b.eth.blockchain.Genesis() +}
diff --git go-ethereum/eth/backend.go op-geth/eth/backend.go index 0a0813aafac6a73f95cca62b8ca9f4587dae9377..aedbd01634cefd9d8877e94d59f7d451cd2deaf5 100644 --- go-ethereum/eth/backend.go +++ op-geth/eth/backend.go @@ -18,11 +18,13 @@ // Package eth implements the Ethereum protocol. package eth   import ( + "context" "errors" "fmt" "math/big" "runtime" "sync" + "time"   "github.com/ethereum/go-ethereum/accounts" "github.com/ethereum/go-ethereum/common" @@ -76,6 +78,9 @@ ethDialCandidates enode.Iterator snapDialCandidates enode.Iterator merger *consensus.Merger   + seqRPCService *rpc.Client + historicalRPCService *rpc.Client + // DB interfaces chainDb ethdb.Database // Block chain database   @@ -101,6 +106,8 @@ lock sync.RWMutex // Protects the variadic fields (e.g. gas price and etherbase)   shutdownTracker *shutdowncheck.ShutdownTracker // Tracks if and when the node has shutdown ungracefully + + nodeCloser func() error }   // New creates a new Ethereum object (including the @@ -171,13 +178,13 @@ bloomRequests: make(chan chan *bloombits.Retrieval), bloomIndexer: core.NewBloomIndexer(chainDb, params.BloomBitsBlocks, params.BloomConfirms), p2pServer: stack.Server(), shutdownTracker: shutdowncheck.NewShutdownTracker(chainDb), + nodeCloser: stack.Close, } bcVersion := rawdb.ReadDatabaseVersion(chainDb) - var dbVer = "<nil>" + dbVer := "<nil>" if bcVersion != nil { dbVer = fmt.Sprintf("%d", *bcVersion) } - log.Info("Initialising Ethereum protocol", "network", networkID, "dbversion", dbVer)   if !config.SkipBcVersionCheck { if bcVersion != nil && *bcVersion > core.BlockChainVersion { @@ -213,23 +220,50 @@ } if config.OverrideVerkle != nil { overrides.OverrideVerkle = config.OverrideVerkle } + if config.OverrideOptimismCanyon != nil { + overrides.OverrideOptimismCanyon = config.OverrideOptimismCanyon + } + if config.OverrideOptimismEcotone != nil { + overrides.OverrideOptimismEcotone = config.OverrideOptimismEcotone + } + if config.OverrideOptimismFjord != nil { + 
overrides.OverrideOptimismFjord = config.OverrideOptimismFjord + } + if config.OverrideOptimismInterop != nil { + overrides.OverrideOptimismInterop = config.OverrideOptimismInterop + } + overrides.ApplySuperchainUpgrades = config.ApplySuperchainUpgrades eth.blockchain, err = core.NewBlockChain(chainDb, cacheConfig, config.Genesis, &overrides, eth.engine, vmConfig, eth.shouldPreserve, &config.TransactionHistory) if err != nil { return nil, err } + if chainConfig := eth.blockchain.Config(); chainConfig.Optimism != nil { // config.Genesis.Config.ChainID cannot be used because it's based on CLI flags only, thus default to mainnet L1 + config.NetworkId = chainConfig.ChainID.Uint64() // optimism defaults eth network ID to chain ID + eth.networkID = config.NetworkId + } + log.Info("Initialising Ethereum protocol", "network", config.NetworkId, "dbversion", dbVer) + + if eth.blockchain.Config().Optimism != nil { // Optimism Bedrock depends on Merge functionality + eth.merger.FinalizePoS() + } + eth.bloomIndexer.Start(eth.blockchain)   if config.BlobPool.Datadir != "" { config.BlobPool.Datadir = stack.ResolvePath(config.BlobPool.Datadir) } - blobPool := blobpool.New(config.BlobPool, eth.blockchain)   if config.TxPool.Journal != "" { config.TxPool.Journal = stack.ResolvePath(config.TxPool.Journal) } legacyPool := legacypool.New(config.TxPool, eth.blockchain)   - eth.txPool, err = txpool.New(config.TxPool.PriceLimit, eth.blockchain, []txpool.SubPool{legacyPool, blobPool}) + txPools := []txpool.SubPool{legacyPool} + if !eth.BlockChain().Config().IsOptimism() { + blobPool := blobpool.New(config.BlobPool, eth.blockchain) + txPools = append(txPools, blobPool) + } + eth.txPool, err = txpool.New(config.TxPool.PriceLimit, eth.blockchain, txPools) if err != nil { return nil, err } @@ -245,6 +279,7 @@ Sync: config.SyncMode, BloomCache: uint64(cacheLimit), EventMux: eth.eventMux, RequiredBlocks: config.RequiredBlocks, + NoTxGossip: config.RollupDisableTxPoolGossip, }); err != nil { 
return nil, err } @@ -252,7 +287,7 @@ eth.miner = miner.New(eth, &config.Miner, eth.blockchain.Config(), eth.EventMux(), eth.engine, eth.isLocalBlock) eth.miner.SetExtra(makeExtraData(config.Miner.ExtraData))   - eth.APIBackend = &EthAPIBackend{stack.Config().ExtRPCEnabled(), stack.Config().AllowUnprotectedTxs, eth, nil} + eth.APIBackend = &EthAPIBackend{stack.Config().ExtRPCEnabled(), stack.Config().AllowUnprotectedTxs, config.RollupDisableTxPoolAdmission, eth, nil} if eth.APIBackend.allowUnprotectedTxs { log.Info("Unprotected transactions allowed") } @@ -273,6 +308,26 @@ if err != nil { return nil, err }   + if config.RollupSequencerHTTP != "" { + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + client, err := rpc.DialContext(ctx, config.RollupSequencerHTTP) + cancel() + if err != nil { + return nil, err + } + eth.seqRPCService = client + } + + if config.RollupHistoricalRPC != "" { + ctx, cancel := context.WithTimeout(context.Background(), config.RollupHistoricalRPCTimeout) + client, err := rpc.DialContext(ctx, config.RollupHistoricalRPC) + cancel() + if err != nil { + return nil, err + } + eth.historicalRPCService = client + } + // Start the RPC service eth.netRPCService = ethapi.NewNetAPI(eth.p2pServer, networkID)   @@ -291,7 +346,7 @@ func makeExtraData(extra []byte) []byte { if len(extra) == 0 { // create default extradata extra, _ = rlp.EncodeToBytes([]interface{}{ - uint(params.VersionMajor<<16 | params.VersionMinor<<8 | params.VersionPatch), + uint(params.OPVersionMajor<<16 | params.OPVersionMinor<<8 | params.OPVersionPatch), "geth", runtime.Version(), runtime.GOOS, @@ -541,6 +596,12 @@ s.txPool.Close() s.miner.Close() s.blockchain.Stop() s.engine.Close() + if s.seqRPCService != nil { + s.seqRPCService.Close() + } + if s.historicalRPCService != nil { + s.historicalRPCService.Close() + }   // Clean shutdown marker as the last thing before closing db s.shutdownTracker.Stop() @@ -550,3 +611,33 @@ s.eventMux.Stop()   return nil } + +// 
HandleRequiredProtocolVersion handles the protocol version signal. This implements opt-in halting, +// the protocol version data is already logged and metered when signaled through the Engine API. +func (s *Ethereum) HandleRequiredProtocolVersion(required params.ProtocolVersion) error { + var needLevel int + switch s.config.RollupHaltOnIncompatibleProtocolVersion { + case "major": + needLevel = 3 + case "minor": + needLevel = 2 + case "patch": + needLevel = 1 + default: + return nil // do not consider halting if not configured to + } + haveLevel := 0 + switch params.OPStackSupport.Compare(required) { + case params.OutdatedMajor: + haveLevel = 3 + case params.OutdatedMinor: + haveLevel = 2 + case params.OutdatedPatch: + haveLevel = 1 + } + if haveLevel >= needLevel { // halt if we opted in to do so at this granularity + log.Error("Opted to halt, unprepared for protocol change", "required", required, "local", params.OPStackSupport) + return s.nodeCloser() + } + return nil +}
diff --git go-ethereum/internal/ethapi/backend.go op-geth/internal/ethapi/backend.go index 5f408ba20ba57faf14254a11577f4855a61ad2bf..7c0e655d205c7e7d2faf92a5ef39b8a76d0439c8 100644 --- go-ethereum/internal/ethapi/backend.go +++ op-geth/internal/ethapi/backend.go @@ -86,6 +86,8 @@ SubscribeNewTxsEvent(chan<- core.NewTxsEvent) event.Subscription   ChainConfig() *params.ChainConfig Engine() consensus.Engine + HistoricalRPCService() *rpc.Client + Genesis() *types.Block   // This is copied from filters.Backend // eth/filters needs to be initialized from this backend type, so methods needed by
diff --git go-ethereum/eth/state_accessor.go op-geth/eth/state_accessor.go index 526361a2b8a6da8d3a5c7a0e56a37238f500cd1c..cd7198c64d2923b1643fda29df399830f7d250ab 100644 --- go-ethereum/eth/state_accessor.go +++ op-geth/eth/state_accessor.go @@ -242,7 +242,7 @@ for idx, tx := range block.Transactions() { // Assemble the transaction call message and return if the requested offset msg, _ := core.TransactionToMessage(tx, signer, block.BaseFee()) txContext := core.NewEVMTxContext(msg) - context := core.NewEVMBlockContext(block.Header(), eth.blockchain, nil) + context := core.NewEVMBlockContext(block.Header(), eth.blockchain, nil, eth.blockchain.Config(), statedb) if idx == txIndex { return msg, context, statedb, release, nil }

Format deposit and L1-cost data in transaction responses. Add debug_chainConfig API.

diff --git go-ethereum/internal/ethapi/api.go op-geth/internal/ethapi/api.go index 863849f4da6a3457fcfbe787c0f041265029523a..a0b64c86e3bdc5b65d47ddd23ea1dc0365dfe6b7 100644 --- go-ethereum/internal/ethapi/api.go +++ op-geth/internal/ethapi/api.go @@ -26,6 +26,7 @@ "strings" "time"   "github.com/davecgh/go-spew/spew" + "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/accounts" "github.com/ethereum/go-ethereum/accounts/keystore" "github.com/ethereum/go-ethereum/accounts/scwallet" @@ -647,6 +648,24 @@ // GetBalance returns the amount of wei for the given address in the state of the // given block number. The rpc.LatestBlockNumber and rpc.PendingBlockNumber meta // block numbers are also allowed. func (s *BlockChainAPI) GetBalance(ctx context.Context, address common.Address, blockNrOrHash rpc.BlockNumberOrHash) (*hexutil.Big, error) { + header, err := headerByNumberOrHash(ctx, s.b, blockNrOrHash) + if err != nil { + return nil, err + } + + if s.b.ChainConfig().IsOptimismPreBedrock(header.Number) { + if s.b.HistoricalRPCService() != nil { + var res hexutil.Big + err := s.b.HistoricalRPCService().CallContext(ctx, &res, "eth_getBalance", address, blockNrOrHash) + if err != nil { + return nil, fmt.Errorf("historical backend error: %w", err) + } + return &res, nil + } else { + return nil, rpc.ErrNoHistoricalFallback + } + } + state, _, err := s.b.StateAndHeaderByNumberOrHash(ctx, blockNrOrHash) if state == nil || err != nil { return nil, err @@ -687,6 +706,22 @@ }   // GetProof returns the Merkle-proof for a given account and optionally some storage keys. 
func (s *BlockChainAPI) GetProof(ctx context.Context, address common.Address, storageKeys []string, blockNrOrHash rpc.BlockNumberOrHash) (*AccountResult, error) { + header, err := headerByNumberOrHash(ctx, s.b, blockNrOrHash) + if err != nil { + return nil, err + } + if s.b.ChainConfig().IsOptimismPreBedrock(header.Number) { + if s.b.HistoricalRPCService() != nil { + var res AccountResult + err := s.b.HistoricalRPCService().CallContext(ctx, &res, "eth_getProof", address, storageKeys, blockNrOrHash) + if err != nil { + return nil, fmt.Errorf("historical backend error: %w", err) + } + return &res, nil + } else { + return nil, rpc.ErrNoHistoricalFallback + } + } var ( keys = make([]common.Hash, len(storageKeys)) keyLengths = make([]int, len(storageKeys)) @@ -790,7 +825,7 @@ func (s *BlockChainAPI) GetHeaderByNumber(ctx context.Context, number rpc.BlockNumber) (map[string]interface{}, error) { header, err := s.b.HeaderByNumber(ctx, number) if header != nil && err == nil { response := s.rpcMarshalHeader(ctx, header) - if number == rpc.PendingBlockNumber { + if number == rpc.PendingBlockNumber && s.b.ChainConfig().Optimism == nil { // don't remove info if optimism // Pending header need to nil out a few fields for _, field := range []string{"hash", "nonce", "miner"} { response[field] = nil @@ -821,7 +856,7 @@ func (s *BlockChainAPI) GetBlockByNumber(ctx context.Context, number rpc.BlockNumber, fullTx bool) (map[string]interface{}, error) { block, err := s.b.BlockByNumber(ctx, number) if block != nil && err == nil { response, err := s.rpcMarshalBlock(ctx, block, true, fullTx) - if err == nil && number == rpc.PendingBlockNumber { + if err == nil && number == rpc.PendingBlockNumber && s.b.ChainConfig().Optimism == nil { // don't remove info if optimism // Pending blocks need to nil out a few fields for _, field := range []string{"hash", "nonce", "miner"} { response[field] = nil @@ -892,10 +927,29 @@ }   // GetCode returns the code stored at the given address in the state 
for the given block number. func (s *BlockChainAPI) GetCode(ctx context.Context, address common.Address, blockNrOrHash rpc.BlockNumberOrHash) (hexutil.Bytes, error) { + header, err := headerByNumberOrHash(ctx, s.b, blockNrOrHash) + if err != nil { + return nil, err + } + + if s.b.ChainConfig().IsOptimismPreBedrock(header.Number) { + if s.b.HistoricalRPCService() != nil { + var res hexutil.Bytes + err := s.b.HistoricalRPCService().CallContext(ctx, &res, "eth_getCode", address, blockNrOrHash) + if err != nil { + return nil, fmt.Errorf("historical backend error: %w", err) + } + return res, nil + } else { + return nil, rpc.ErrNoHistoricalFallback + } + } + state, _, err := s.b.StateAndHeaderByNumberOrHash(ctx, blockNrOrHash) if state == nil || err != nil { return nil, err } + code := state.GetCode(address) return code, state.Error() } @@ -904,10 +958,29 @@ // GetStorageAt returns the storage from the state at the given address, key and // block number. The rpc.LatestBlockNumber and rpc.PendingBlockNumber meta block // numbers are also allowed. 
func (s *BlockChainAPI) GetStorageAt(ctx context.Context, address common.Address, hexKey string, blockNrOrHash rpc.BlockNumberOrHash) (hexutil.Bytes, error) { + header, err := headerByNumberOrHash(ctx, s.b, blockNrOrHash) + if err != nil { + return nil, err + } + + if s.b.ChainConfig().IsOptimismPreBedrock(header.Number) { + if s.b.HistoricalRPCService() != nil { + var res hexutil.Bytes + err := s.b.HistoricalRPCService().CallContext(ctx, &res, "eth_getStorageAt", address, hexKey, blockNrOrHash) + if err != nil { + return nil, fmt.Errorf("historical backend error: %w", err) + } + return res, nil + } else { + return nil, rpc.ErrNoHistoricalFallback + } + } + state, _, err := s.b.StateAndHeaderByNumberOrHash(ctx, blockNrOrHash) if state == nil || err != nil { return nil, err } + key, _, err := decodeHash(hexKey) if err != nil { return nil, fmt.Errorf("unable to decode storage key: %s", err) @@ -916,6 +989,18 @@ res := state.GetState(address, key) return res[:], state.Error() }   +// The HeaderByNumberOrHash method returns a nil error and nil header +// if the header is not found, but only for nonexistent block numbers. This is +// different from StateAndHeaderByNumberOrHash. To account for this discrepancy, +// headerByNumberOrHash will properly convert the error into an ethereum.NotFound. +func headerByNumberOrHash(ctx context.Context, b Backend, blockNrOrHash rpc.BlockNumberOrHash) (*types.Header, error) { + header, err := b.HeaderByNumberOrHash(ctx, blockNrOrHash) + if header == nil { + return nil, fmt.Errorf("header %w", ethereum.NotFound) + } + return header, err +} + // GetBlockReceipts returns the block receipts for the given block hash or number or tag. 
func (s *BlockChainAPI) GetBlockReceipts(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) ([]map[string]interface{}, error) { block, err := s.b.BlockByNumberOrHash(ctx, blockNrOrHash) @@ -938,7 +1023,7 @@ signer := types.MakeSigner(s.b.ChainConfig(), block.Number(), block.Time())   result := make([]map[string]interface{}, len(receipts)) for i, receipt := range receipts { - result[i] = marshalReceipt(receipt, block.Hash(), block.NumberU64(), signer, txs[i], i) + result[i] = marshalReceipt(receipt, block.Hash(), block.NumberU64(), signer, txs[i], i, s.b.ChainConfig()) }   return result, nil @@ -1093,7 +1178,7 @@ // this makes sure resources are cleaned up. defer cancel()   // Get a new instance of the EVM. - blockCtx := core.NewEVMBlockContext(header, NewChainContext(ctx, b), nil) + blockCtx := core.NewEVMBlockContext(header, NewChainContext(ctx, b), nil, b.ChainConfig(), state) if blockOverrides != nil { blockOverrides.Apply(&blockCtx) } @@ -1149,6 +1234,25 @@ if blockNrOrHash == nil { latest := rpc.BlockNumberOrHashWithNumber(rpc.LatestBlockNumber) blockNrOrHash = &latest } + + header, err := headerByNumberOrHash(ctx, s.b, *blockNrOrHash) + if err != nil { + return nil, err + } + + if s.b.ChainConfig().IsOptimismPreBedrock(header.Number) { + if s.b.HistoricalRPCService() != nil { + var res hexutil.Bytes + err := s.b.HistoricalRPCService().CallContext(ctx, &res, "eth_call", args, blockNrOrHash, overrides) + if err != nil { + return nil, fmt.Errorf("historical backend error: %w", err) + } + return res, nil + } else { + return nil, rpc.ErrNoHistoricalFallback + } + } + result, err := DoCall(ctx, s.b, args, *blockNrOrHash, overrides, blockOverrides, s.b.RPCEVMTimeout(), s.b.RPCGasCap()) if err != nil { return nil, err @@ -1207,6 +1311,25 @@ bNrOrHash := rpc.BlockNumberOrHashWithNumber(rpc.LatestBlockNumber) if blockNrOrHash != nil { bNrOrHash = *blockNrOrHash } + + header, err := headerByNumberOrHash(ctx, s.b, bNrOrHash) + if err != nil { + return 0, err + } + 
+ if s.b.ChainConfig().IsOptimismPreBedrock(header.Number) { + if s.b.HistoricalRPCService() != nil { + var res hexutil.Uint64 + err := s.b.HistoricalRPCService().CallContext(ctx, &res, "eth_estimateGas", args, blockNrOrHash) + if err != nil { + return 0, fmt.Errorf("historical backend error: %w", err) + } + return res, nil + } else { + return 0, rpc.ErrNoHistoricalFallback + } + } + return DoEstimateGas(ctx, s.b, args, bNrOrHash, overrides, s.b.RPCGasCap()) }   @@ -1251,7 +1374,7 @@ // RPCMarshalBlock converts the given block to the RPC output which depends on fullTx. If inclTx is true transactions are // returned. When fullTx is true the returned block contains full transaction details, otherwise it will only contain // transaction hashes. -func RPCMarshalBlock(block *types.Block, inclTx bool, fullTx bool, config *params.ChainConfig) map[string]interface{} { +func RPCMarshalBlock(ctx context.Context, block *types.Block, inclTx bool, fullTx bool, config *params.ChainConfig, backend Backend) (map[string]interface{}, error) { fields := RPCMarshalHeader(block.Header()) fields["size"] = hexutil.Uint64(block.Size())   @@ -1261,7 +1384,7 @@ return tx.Hash() } if fullTx { formatTx = func(idx int, tx *types.Transaction) interface{} { - return newRPCTransactionFromBlockIndex(block, uint64(idx), config) + return newRPCTransactionFromBlockIndex(ctx, block, uint64(idx), config, backend) } } txs := block.Transactions() @@ -1280,7 +1403,7 @@ fields["uncles"] = uncleHashes if block.Header().WithdrawalsHash != nil { fields["withdrawals"] = block.Withdrawals() } - return fields + return fields, nil }   // rpcMarshalHeader uses the generalized output filler, then adds the total difficulty field, which requires @@ -1294,7 +1417,10 @@ // rpcMarshalBlock uses the generalized output filler, then adds the total difficulty field, which requires // a `BlockchainAPI`. 
func (s *BlockChainAPI) rpcMarshalBlock(ctx context.Context, b *types.Block, inclTx bool, fullTx bool) (map[string]interface{}, error) { - fields := RPCMarshalBlock(b, inclTx, fullTx, s.b.ChainConfig()) + fields, err := RPCMarshalBlock(ctx, b, inclTx, fullTx, s.b.ChainConfig(), s.b) + if err != nil { + return nil, err + } if inclTx { fields["totalDifficulty"] = (*hexutil.Big)(s.b.GetTd(ctx, b.Hash())) } @@ -1325,11 +1451,18 @@ V *hexutil.Big `json:"v"` R *hexutil.Big `json:"r"` S *hexutil.Big `json:"s"` YParity *hexutil.Uint64 `json:"yParity,omitempty"` + + // deposit-tx only + SourceHash *common.Hash `json:"sourceHash,omitempty"` + Mint *hexutil.Big `json:"mint,omitempty"` + IsSystemTx *bool `json:"isSystemTx,omitempty"` + // deposit-tx post-Canyon only + DepositReceiptVersion *hexutil.Uint64 `json:"depositReceiptVersion,omitempty"` }   // newRPCTransaction returns a transaction that will serialize to the RPC // representation, with the given location metadata set (if available). -func newRPCTransaction(tx *types.Transaction, blockHash common.Hash, blockNumber uint64, blockTime uint64, index uint64, baseFee *big.Int, config *params.ChainConfig) *RPCTransaction { +func newRPCTransaction(tx *types.Transaction, blockHash common.Hash, blockNumber uint64, blockTime uint64, index uint64, baseFee *big.Int, config *params.ChainConfig, receipt *types.Receipt) *RPCTransaction { signer := types.MakeSigner(config, new(big.Int).SetUint64(blockNumber), blockTime) from, _ := types.Sender(signer, tx) v, r, s := tx.RawSignatureValues() @@ -1354,7 +1487,27 @@ result.TransactionIndex = (*hexutil.Uint64)(&index) }   switch tx.Type() { + case types.DepositTxType: + srcHash := tx.SourceHash() + isSystemTx := tx.IsSystemTx() + result.SourceHash = &srcHash + if isSystemTx { + // Only include IsSystemTx when true + result.IsSystemTx = &isSystemTx + } + result.Mint = (*hexutil.Big)(tx.Mint()) + if receipt != nil && receipt.DepositNonce != nil { + result.Nonce = 
hexutil.Uint64(*receipt.DepositNonce) + if receipt.DepositReceiptVersion != nil { + result.DepositReceiptVersion = new(hexutil.Uint64) + *result.DepositReceiptVersion = hexutil.Uint64(*receipt.DepositReceiptVersion) + } + } case types.LegacyTxType: + if v.Sign() == 0 && r.Sign() == 0 && s.Sign() == 0 { // pre-bedrock relayed tx does not have a signature + result.ChainID = (*hexutil.Big)(new(big.Int).Set(config.ChainID)) + break + } // if a legacy transaction has an EIP-155 chain id, include it explicitly if id := tx.ChainId(); id.Sign() != 0 { result.ChainID = (*hexutil.Big)(id) @@ -1423,20 +1576,36 @@ blockNumber = uint64(0) blockTime = uint64(0) ) if current != nil { - baseFee = eip1559.CalcBaseFee(config, current) + baseFee = eip1559.CalcBaseFee(config, current, current.Time+1) blockNumber = current.Number.Uint64() blockTime = current.Time } - return newRPCTransaction(tx, common.Hash{}, blockNumber, blockTime, 0, baseFee, config) + return newRPCTransaction(tx, common.Hash{}, blockNumber, blockTime, 0, baseFee, config, nil) }   // newRPCTransactionFromBlockIndex returns a transaction that will serialize to the RPC representation. 
-func newRPCTransactionFromBlockIndex(b *types.Block, index uint64, config *params.ChainConfig) *RPCTransaction { +func newRPCTransactionFromBlockIndex(ctx context.Context, b *types.Block, index uint64, config *params.ChainConfig, backend Backend) *RPCTransaction { txs := b.Transactions() if index >= uint64(len(txs)) { return nil } - return newRPCTransaction(txs[index], b.Hash(), b.NumberU64(), b.Time(), index, b.BaseFee(), config) + tx := txs[index] + rcpt := depositTxReceipt(ctx, b.Hash(), index, backend, tx) + return newRPCTransaction(tx, b.Hash(), b.NumberU64(), b.Time(), index, b.BaseFee(), config, rcpt) +} + +func depositTxReceipt(ctx context.Context, blockHash common.Hash, index uint64, backend Backend, tx *types.Transaction) *types.Receipt { + if tx.Type() != types.DepositTxType { + return nil + } + receipts, err := backend.GetReceipts(ctx, blockHash) + if err != nil { + return nil + } + if index >= uint64(len(receipts)) { + return nil + } + return receipts[index] }   // newRPCRawTransactionFromBlockIndex returns the bytes of a transaction given a block and a transaction index. @@ -1465,6 +1634,21 @@ bNrOrHash := rpc.BlockNumberOrHashWithNumber(rpc.LatestBlockNumber) if blockNrOrHash != nil { bNrOrHash = *blockNrOrHash } + + header, err := headerByNumberOrHash(ctx, s.b, bNrOrHash) + if err == nil && header != nil && s.b.ChainConfig().IsOptimismPreBedrock(header.Number) { + if s.b.HistoricalRPCService() != nil { + var res accessListResult + err := s.b.HistoricalRPCService().CallContext(ctx, &res, "eth_createAccessList", args, blockNrOrHash) + if err != nil { + return nil, fmt.Errorf("historical backend error: %w", err) + } + return &res, nil + } else { + return nil, rpc.ErrNoHistoricalFallback + } + } + acl, gasUsed, vmerr, err := AccessList(ctx, s.b, bNrOrHash, args) if err != nil { return nil, err @@ -1570,7 +1754,7 @@ // GetTransactionByBlockNumberAndIndex returns the transaction for the given block number and index. 
func (s *TransactionAPI) GetTransactionByBlockNumberAndIndex(ctx context.Context, blockNr rpc.BlockNumber, index hexutil.Uint) *RPCTransaction { if block, _ := s.b.BlockByNumber(ctx, blockNr); block != nil { - return newRPCTransactionFromBlockIndex(block, uint64(index), s.b.ChainConfig()) + return newRPCTransactionFromBlockIndex(ctx, block, uint64(index), s.b.ChainConfig(), s.b) } return nil } @@ -1578,7 +1762,7 @@ // GetTransactionByBlockHashAndIndex returns the transaction for the given block hash and index. func (s *TransactionAPI) GetTransactionByBlockHashAndIndex(ctx context.Context, blockHash common.Hash, index hexutil.Uint) *RPCTransaction { if block, _ := s.b.BlockByHash(ctx, blockHash); block != nil { - return newRPCTransactionFromBlockIndex(block, uint64(index), s.b.ChainConfig()) + return newRPCTransactionFromBlockIndex(ctx, block, uint64(index), s.b.ChainConfig(), s.b) } return nil } @@ -1610,10 +1794,29 @@ } return (*hexutil.Uint64)(&nonce), nil } // Resolve block number and use its state to ask for the nonce + header, err := headerByNumberOrHash(ctx, s.b, blockNrOrHash) + if err != nil { + return nil, err + } + + if s.b.ChainConfig().IsOptimismPreBedrock(header.Number) { + if s.b.HistoricalRPCService() != nil { + var res hexutil.Uint64 + err := s.b.HistoricalRPCService().CallContext(ctx, &res, "eth_getTransactionCount", address, blockNrOrHash) + if err != nil { + return nil, fmt.Errorf("historical backend error: %w", err) + } + return &res, nil + } else { + return nil, rpc.ErrNoHistoricalFallback + } + } + state, _, err := s.b.StateAndHeaderByNumberOrHash(ctx, blockNrOrHash) if state == nil || err != nil { return nil, err } + nonce := state.GetNonce(address) return (*hexutil.Uint64)(&nonce), state.Error() } @@ -1636,7 +1839,8 @@ header, err := s.b.HeaderByHash(ctx, blockHash) if err != nil { return nil, err } - return newRPCTransaction(tx, blockHash, blockNumber, header.Time, index, header.BaseFee, s.b.ChainConfig()), nil + rcpt := 
depositTxReceipt(ctx, blockHash, index, s.b, tx) + return newRPCTransaction(tx, blockHash, blockNumber, header.Time, index, header.BaseFee, s.b.ChainConfig(), rcpt), nil }   // GetRawTransactionByHash returns the bytes of the transaction for the given hash. @@ -1679,11 +1883,11 @@ receipt := receipts[index]   // Derive the sender. signer := types.MakeSigner(s.b.ChainConfig(), header.Number, header.Time) - return marshalReceipt(receipt, blockHash, blockNumber, signer, tx, int(index)), nil + return marshalReceipt(receipt, blockHash, blockNumber, signer, tx, int(index), s.b.ChainConfig()), nil }   // marshalReceipt marshals a transaction receipt into a JSON object. -func marshalReceipt(receipt *types.Receipt, blockHash common.Hash, blockNumber uint64, signer types.Signer, tx *types.Transaction, txIndex int) map[string]interface{} { +func marshalReceipt(receipt *types.Receipt, blockHash common.Hash, blockNumber uint64, signer types.Signer, tx *types.Transaction, txIndex int, chainConfig *params.ChainConfig) map[string]interface{} { from, _ := types.Sender(signer, tx)   fields := map[string]interface{}{ @@ -1700,6 +1904,32 @@ "logs": receipt.Logs, "logsBloom": receipt.Bloom, "type": hexutil.Uint(tx.Type()), "effectiveGasPrice": (*hexutil.Big)(receipt.EffectiveGasPrice), + } + + if chainConfig.Optimism != nil && !tx.IsDepositTx() { + fields["l1GasPrice"] = (*hexutil.Big)(receipt.L1GasPrice) + fields["l1GasUsed"] = (*hexutil.Big)(receipt.L1GasUsed) + fields["l1Fee"] = (*hexutil.Big)(receipt.L1Fee) + // Fields removed with Ecotone + if receipt.FeeScalar != nil { + fields["l1FeeScalar"] = receipt.FeeScalar.String() + } + // Fields added in Ecotone + if receipt.L1BlobBaseFee != nil { + fields["l1BlobBaseFee"] = (*hexutil.Big)(receipt.L1BlobBaseFee) + } + if receipt.L1BaseFeeScalar != nil { + fields["l1BaseFeeScalar"] = hexutil.Uint64(*receipt.L1BaseFeeScalar) + } + if receipt.L1BlobBaseFeeScalar != nil { + fields["l1BlobBaseFeeScalar"] = 
hexutil.Uint64(*receipt.L1BlobBaseFeeScalar) + } + } + if chainConfig.Optimism != nil && tx.IsDepositTx() && receipt.DepositNonce != nil { + fields["depositNonce"] = hexutil.Uint64(*receipt.DepositNonce) + if receipt.DepositReceiptVersion != nil { + fields["depositReceiptVersion"] = hexutil.Uint64(*receipt.DepositReceiptVersion) + } }   // Assign receipt status or post state. @@ -2104,6 +2334,10 @@ // SetHead rewinds the head of the blockchain to a previous block. func (api *DebugAPI) SetHead(number hexutil.Uint64) { api.b.SetHead(uint64(number)) +} + +func (api *DebugAPI) ChainConfig() *params.ChainConfig { + return api.b.ChainConfig() }   // NetAPI offers network related RPC methods
diff --git go-ethereum/rpc/errors.go op-geth/rpc/errors.go index 438aff218c2e306d322160d8be8dcac16dd0dfa2..67c523d11a36294e4448988dc4c906f38541ec04 100644 --- go-ethereum/rpc/errors.go +++ op-geth/rpc/errors.go @@ -73,6 +73,16 @@ errMsgResponseTooLarge = "response too large" errMsgBatchTooLarge = "batch too large" )   +var ErrNoHistoricalFallback = NoHistoricalFallbackError{} + +type NoHistoricalFallbackError struct{} + +func (e NoHistoricalFallbackError) ErrorCode() int { return -32801 } + +func (e NoHistoricalFallbackError) Error() string { + return "no historical RPC is available for this historical (pre-bedrock) execution request" +} + type methodNotFoundError struct{ method string }   func (e *methodNotFoundError) ErrorCode() int { return -32601 }

Forward pre-Bedrock tracing calls to the legacy node.

diff --git go-ethereum/eth/tracers/api.go op-geth/eth/tracers/api.go index 68331082052648ad30e8f70a20aee0488ec7b7eb..164626af67f8227d9a57a9bda231684ea32f2163 100644 --- go-ethereum/eth/tracers/api.go +++ op-geth/eth/tracers/api.go @@ -22,6 +22,7 @@ "context" "encoding/json" "errors" "fmt" + "math/big" "os" "runtime" "sync" @@ -67,8 +68,6 @@ // trace states exceed this limit. maximumPendingTraceStates = 128 )   -var errTxNotFound = errors.New("transaction not found") - // StateReleaseFunc is used to deallocate resources held by constructing a // historical state for tracing purposes. type StateReleaseFunc func() @@ -87,6 +86,7 @@ Engine() consensus.Engine ChainDb() ethdb.Database StateAtBlock(ctx context.Context, block *types.Block, reexec uint64, base *state.StateDB, readOnly bool, preferDisk bool) (*state.StateDB, StateReleaseFunc, error) StateAtTransaction(ctx context.Context, block *types.Block, txIndex int, reexec uint64) (*core.Message, vm.BlockContext, *state.StateDB, StateReleaseFunc, error) + HistoricalRPCService() *rpc.Client }   // API is the collection of tracing APIs exposed over the private debugging endpoint. @@ -208,6 +208,7 @@ // TraceChain returns the structured logs created during the execution of EVM // between two blocks (excluding start) and returns them as a JSON object. 
func (api *API) TraceChain(ctx context.Context, start, end rpc.BlockNumber, config *TraceConfig) (*rpc.Subscription, error) { // Fetch the block interval that we want to trace + // TODO: Need to implement a fallback for this from, err := api.blockByNumber(ctx, start) if err != nil { return nil, err @@ -266,7 +267,7 @@ // Fetch and execute the block trace taskCh for task := range taskCh { var ( signer = types.MakeSigner(api.backend.ChainConfig(), task.block.Number(), task.block.Time()) - blockCtx = core.NewEVMBlockContext(task.block.Header(), api.chainContext(ctx), nil) + blockCtx = core.NewEVMBlockContext(task.block.Header(), api.chainContext(ctx), nil, api.backend.ChainConfig(), task.statedb) ) // Trace all the transactions contained within for i, tx := range task.block.Transactions() { @@ -436,6 +437,20 @@ block, err := api.blockByNumber(ctx, number) if err != nil { return nil, err } + + if api.backend.ChainConfig().IsOptimismPreBedrock(block.Number()) { + if api.backend.HistoricalRPCService() != nil { + var histResult []*txTraceResult + err = api.backend.HistoricalRPCService().CallContext(ctx, &histResult, "debug_traceBlockByNumber", number, config) + if err != nil { + return nil, fmt.Errorf("historical backend error: %w", err) + } + return histResult, nil + } else { + return nil, rpc.ErrNoHistoricalFallback + } + } + return api.traceBlock(ctx, block, config) }   @@ -446,6 +461,20 @@ block, err := api.blockByHash(ctx, hash) if err != nil { return nil, err } + + if api.backend.ChainConfig().IsOptimismPreBedrock(block.Number()) { + if api.backend.HistoricalRPCService() != nil { + var histResult []*txTraceResult + err = api.backend.HistoricalRPCService().CallContext(ctx, &histResult, "debug_traceBlockByHash", hash, config) + if err != nil { + return nil, fmt.Errorf("historical backend error: %w", err) + } + return histResult, nil + } else { + return nil, rpc.ErrNoHistoricalFallback + } + } + return api.traceBlock(ctx, block, config) }   @@ -495,6 +524,7 @@ // 
IntermediateRoots executes a block (bad- or canon- or side-), and returns a list // of intermediate roots: the stateroot after each transaction. func (api *API) IntermediateRoots(ctx context.Context, hash common.Hash, config *TraceConfig) ([]common.Hash, error) { block, _ := api.blockByHash(ctx, hash) + // TODO: Cannot get intermediate roots for pre-bedrock block without daisy chain if block == nil { // Check in the bad blocks block = rawdb.ReadBadBlock(api.backend.ChainDb(), hash) @@ -523,7 +553,7 @@ var ( roots []common.Hash signer = types.MakeSigner(api.backend.ChainConfig(), block.Number(), block.Time()) chainConfig = api.backend.ChainConfig() - vmctx = core.NewEVMBlockContext(block.Header(), api.chainContext(ctx), nil) + vmctx = core.NewEVMBlockContext(block.Header(), api.chainContext(ctx), nil, chainConfig, statedb) deleteEmptyObjects = chainConfig.IsEIP158(block.Number()) ) for i, tx := range block.Transactions() { @@ -599,7 +629,7 @@ var ( txs = block.Transactions() blockHash = block.Hash() is158 = api.backend.ChainConfig().IsEIP158(block.Number()) - blockCtx = core.NewEVMBlockContext(block.Header(), api.chainContext(ctx), nil) + blockCtx = core.NewEVMBlockContext(block.Header(), api.chainContext(ctx), nil, api.backend.ChainConfig(), statedb) signer = types.MakeSigner(api.backend.ChainConfig(), block.Number(), block.Time()) results = make([]*txTraceResult, len(txs)) ) @@ -632,7 +662,6 @@ // Execute all the transaction contained within the block concurrently var ( txs = block.Transactions() blockHash = block.Hash() - blockCtx = core.NewEVMBlockContext(block.Header(), api.chainContext(ctx), nil) signer = types.MakeSigner(api.backend.ChainConfig(), block.Number(), block.Time()) results = make([]*txTraceResult, len(txs)) pend sync.WaitGroup @@ -648,6 +677,7 @@ go func() { defer pend.Done() // Fetch and execute the next transaction trace tasks for task := range jobs { + blockCtx := core.NewEVMBlockContext(block.Header(), api.chainContext(ctx), nil, 
api.backend.ChainConfig(), task.statedb) msg, _ := core.TransactionToMessage(txs[task.index], signer, block.BaseFee()) txctx := &Context{ BlockHash: blockHash, @@ -667,6 +697,7 @@ }   // Feed the transactions into the tracers and return var failed error + blockCtx := core.NewEVMBlockContext(block.Header(), api.chainContext(ctx), nil, api.backend.ChainConfig(), statedb) txloop: for i, tx := range txs { // Send the trace task over for execution @@ -744,7 +775,7 @@ var ( dumps []string signer = types.MakeSigner(api.backend.ChainConfig(), block.Number(), block.Time()) chainConfig = api.backend.ChainConfig() - vmctx = core.NewEVMBlockContext(block.Header(), api.chainContext(ctx), nil) + vmctx = core.NewEVMBlockContext(block.Header(), api.chainContext(ctx), nil, chainConfig, statedb) canon = true ) // Check if there are any overrides: the caller may wish to enable a future @@ -826,18 +857,29 @@ // TraceTransaction returns the structured logs created during the execution of EVM // and returns them as a JSON object. func (api *API) TraceTransaction(ctx context.Context, hash common.Hash, config *TraceConfig) (interface{}, error) { - found, _, blockHash, blockNumber, index, err := api.backend.GetTransaction(ctx, hash) + _, _, blockHash, blockNumber, index, err := api.backend.GetTransaction(ctx, hash) if err != nil { return nil, ethapi.NewTxIndexingError() } - // Only mined txes are supported - if !found { - return nil, errTxNotFound + + if api.backend.ChainConfig().IsOptimismPreBedrock(new(big.Int).SetUint64(blockNumber)) { + if api.backend.HistoricalRPCService() != nil { + var histResult json.RawMessage + err := api.backend.HistoricalRPCService().CallContext(ctx, &histResult, "debug_traceTransaction", hash, config) + if err != nil { + return nil, fmt.Errorf("historical backend error: %w", err) + } + return histResult, nil + } else { + return nil, rpc.ErrNoHistoricalFallback + } } + // It shouldn't happen in practice. 
if blockNumber == 0 { return nil, errors.New("genesis is not traceable") } + reexec := defaultTraceReexec if config != nil && config.Reexec != nil { reexec = *config.Reexec @@ -894,6 +936,11 @@ } if err != nil { return nil, err } + + if api.backend.ChainConfig().IsOptimismPreBedrock(block.Number()) { + return nil, errors.New("l2geth does not have a debug_traceCall method") + } + // try to recompute the state reexec := defaultTraceReexec if config != nil && config.Reexec != nil { @@ -910,7 +957,7 @@ return nil, err } defer release()   - vmctx := core.NewEVMBlockContext(block.Header(), api.chainContext(ctx), nil) + vmctx := core.NewEVMBlockContext(block.Header(), api.chainContext(ctx), nil, api.backend.ChainConfig(), statedb) // Apply the customization rules if required. if config != nil { if err := config.StateOverrides.Apply(statedb); err != nil {
diff --git go-ethereum/eth/tracers/api_test.go op-geth/eth/tracers/api_test.go index d8e4b9a4ef3d21c431b624efc589227d345d683b..9c3a423f6f8ff6b4b9f02d55b29cfaaa7e1e12bf 100644 --- go-ethereum/eth/tracers/api_test.go +++ op-geth/eth/tracers/api_test.go @@ -23,11 +23,14 @@ "encoding/json" "errors" "fmt" "math/big" + "net" + "net/http" "reflect" "sync/atomic" "testing" "time"   + "github.com/davecgh/go-spew/spew" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/consensus" @@ -41,8 +44,10 @@ "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/eth/tracers/logger" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/internal/ethapi" + "github.com/ethereum/go-ethereum/node" "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rpc" + "github.com/stretchr/testify/mock" "golang.org/x/exp/slices" )   @@ -51,6 +56,66 @@ errStateNotFound = errors.New("state not found") errBlockNotFound = errors.New("block not found") )   +type mockHistoricalBackend struct { + mock.Mock +} + +// mockHistoricalBackend does not have a TraceCall, because pre-bedrock there is no debug_traceCall available + +func (m *mockHistoricalBackend) TraceBlockByNumber(ctx context.Context, number rpc.BlockNumber, config *TraceConfig) ([]*txTraceResult, error) { + ret := m.Mock.MethodCalled("TraceBlockByNumber", number, config) + return ret[0].([]*txTraceResult), *ret[1].(*error) +} + +func (m *mockHistoricalBackend) ExpectTraceBlockByNumber(number rpc.BlockNumber, config *TraceConfig, out []*txTraceResult, err error) { + m.Mock.On("TraceBlockByNumber", number, config).Once().Return(out, &err) +} + +func (m *mockHistoricalBackend) TraceTransaction(ctx context.Context, hash common.Hash, config *TraceConfig) (interface{}, error) { + ret := m.Mock.MethodCalled("TraceTransaction", hash, config) + return ret[0], *ret[1].(*error) +} + +func (m *mockHistoricalBackend) 
ExpectTraceTransaction(hash common.Hash, config *TraceConfig, out interface{}, err error) { + jsonOut, _ := json.Marshal(out) + m.Mock.On("TraceTransaction", hash, config).Once().Return(json.RawMessage(jsonOut), &err) +} + +func newMockHistoricalBackend(t *testing.T, backend *mockHistoricalBackend) string { + s := rpc.NewServer() + err := node.RegisterApis([]rpc.API{ + { + Namespace: "debug", + Service: backend, + Public: true, + Authenticated: false, + }, + }, nil, s) + if err != nil { + t.Fatalf("error creating mock historical backend: %v", err) + } + + hdlr := node.NewHTTPHandlerStack(s, []string{"*"}, []string{"*"}, nil) + mux := http.NewServeMux() + mux.Handle("/", hdlr) + + listener, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatalf("error creating mock historical backend listener: %v", err) + } + + go func() { + httpS := &http.Server{Handler: mux} + httpS.Serve(listener) + + t.Cleanup(func() { + httpS.Shutdown(context.Background()) + }) + }() + + return fmt.Sprintf("http://%s", listener.Addr().String()) +} + type testBackend struct { chainConfig *params.ChainConfig engine consensus.Engine @@ -59,15 +124,28 @@ chain *core.BlockChain   refHook func() // Hook is invoked when the requested state is referenced relHook func() // Hook is invoked when the requested state is released + + historical *rpc.Client + mockHistorical *mockHistoricalBackend }   // testBackend creates a new test backend. OBS: After test is done, teardown must be // invoked in order to release associated resources. 
func newTestBackend(t *testing.T, n int, gspec *core.Genesis, generator func(i int, b *core.BlockGen)) *testBackend { + mock := new(mockHistoricalBackend) + historicalAddr := newMockHistoricalBackend(t, mock) + + historicalClient, err := rpc.Dial(historicalAddr) + if err != nil { + t.Fatalf("error making historical client: %v", err) + } + backend := &testBackend{ - chainConfig: gspec.Config, - engine: ethash.NewFaker(), - chaindb: rawdb.NewMemoryDatabase(), + chainConfig: gspec.Config, + engine: ethash.NewFaker(), + chaindb: rawdb.NewMemoryDatabase(), + historical: historicalClient, + mockHistorical: mock, } // Generate blocks for testing _, blocks, _ := core.GenerateChainWithGenesis(gspec, backend.engine, n, generator) @@ -172,7 +250,7 @@ signer := types.MakeSigner(b.chainConfig, block.Number(), block.Time()) for idx, tx := range block.Transactions() { msg, _ := core.TransactionToMessage(tx, signer, block.BaseFee()) txContext := core.NewEVMTxContext(msg) - context := core.NewEVMBlockContext(block.Header(), b.chain, nil) + context := core.NewEVMBlockContext(block.Header(), b.chain, nil, b.chainConfig, statedb) if idx == txIndex { return msg, context, statedb, release, nil } @@ -183,6 +261,10 @@ } statedb.Finalise(vmenv.ChainConfig().IsEIP158(block.Number())) } return nil, vm.BlockContext{}, nil, nil, fmt.Errorf("transaction index %d out of range for block %#x", txIndex, block.Hash()) +} + +func (b *testBackend) HistoricalRPCService() *rpc.Client { + return b.historical }   func TestTraceCall(t *testing.T) { @@ -450,11 +532,58 @@ StructLogs: []logger.StructLogRes{}, }) { t.Error("Transaction tracing result is different") } +} +func TestTraceTransactionHistorical(t *testing.T) { + t.Parallel()   - // Test non-existent transaction - _, err = api.TraceTransaction(context.Background(), common.Hash{42}, nil) - if !errors.Is(err, errTxNotFound) { - t.Fatalf("want %v, have %v", errTxNotFound, err) + // Initialize test accounts + accounts := newAccounts(2) + genesis := 
&core.Genesis{ + Config: params.OptimismTestConfig, + Alloc: core.GenesisAlloc{ + accounts[0].addr: {Balance: big.NewInt(params.Ether)}, + accounts[1].addr: {Balance: big.NewInt(params.Ether)}, + }, + } + target := common.Hash{} + signer := types.HomesteadSigner{} + backend := newTestBackend(t, 1, genesis, func(i int, b *core.BlockGen) { + // Transfer from account[0] to account[1] + // value: 1000 wei + // fee: 0 wei + tx, _ := types.SignTx(types.NewTransaction(uint64(i), accounts[1].addr, big.NewInt(1000), params.TxGas, b.BaseFee(), nil), signer, accounts[0].key) + b.AddTx(tx) + target = tx.Hash() + }) + defer backend.mockHistorical.AssertExpectations(t) + defer backend.chain.Stop() + backend.mockHistorical.ExpectTraceTransaction( + target, + nil, + logger.ExecutionResult{ + Gas: params.TxGas, + Failed: false, + ReturnValue: "", + StructLogs: []logger.StructLogRes{}, + }, + nil) + api := NewAPI(backend) + result, err := api.TraceTransaction(context.Background(), target, nil) + if err != nil { + t.Errorf("Failed to trace transaction %v", err) + } + var have *logger.ExecutionResult + spew.Dump(result) + if err := json.Unmarshal(result.(json.RawMessage), &have); err != nil { + t.Errorf("failed to unmarshal result %v", err) + } + if !reflect.DeepEqual(have, &logger.ExecutionResult{ + Gas: params.TxGas, + Failed: false, + ReturnValue: "", + StructLogs: []logger.StructLogRes{}, + }) { + t.Error("Transaction tracing result is different") } }   @@ -545,6 +674,59 @@ want := tc.want if string(have) != want { t.Errorf("test %d, result mismatch, have\n%v\n, want\n%v\n", i, string(have), want) } + } +} + +func TestTraceBlockHistorical(t *testing.T) { + t.Parallel() + + // Initialize test accounts + accounts := newAccounts(3) + genesis := &core.Genesis{ + Config: params.OptimismTestConfig, + Alloc: core.GenesisAlloc{ + accounts[0].addr: {Balance: big.NewInt(params.Ether)}, + accounts[1].addr: {Balance: big.NewInt(params.Ether)}, + accounts[2].addr: {Balance: 
big.NewInt(params.Ether)}, + }, + } + genBlocks := 10 + signer := types.HomesteadSigner{} + backend := newTestBackend(t, genBlocks, genesis, func(i int, b *core.BlockGen) { + // Transfer from account[0] to account[1] + // value: 1000 wei + // fee: 0 wei + tx, _ := types.SignTx(types.NewTransaction(uint64(i), accounts[1].addr, big.NewInt(1000), params.TxGas, b.BaseFee(), nil), signer, accounts[0].key) + b.AddTx(tx) + }) + defer backend.mockHistorical.AssertExpectations(t) + defer backend.chain.Stop() + api := NewAPI(backend) + + var expectErr error + var config *TraceConfig + blockNumber := rpc.BlockNumber(3) + want := `[{"txHash":"0x0000000000000000000000000000000000000000000000000000000000000000","result":{"failed":false,"gas":21000,"returnValue":"","structLogs":[]}}]` + var ret []*txTraceResult + _ = json.Unmarshal([]byte(want), &ret) + + backend.mockHistorical.ExpectTraceBlockByNumber(blockNumber, config, ret, nil) + + result, err := api.TraceBlockByNumber(context.Background(), blockNumber, config) + if expectErr != nil { + if err == nil { + t.Errorf("want error %v", expectErr) + } + if !reflect.DeepEqual(err, expectErr) { + t.Errorf("error mismatch, want %v, get %v", expectErr, err) + } + } + if err != nil { + t.Errorf("want no error, have %v", err) + } + have, _ := json.Marshal(result) + if string(have) != want { + t.Errorf("result mismatch, have\n%v\n, want\n%v\n", string(have), want) } }
diff --git go-ethereum/ethclient/ethclient_test.go op-geth/ethclient/ethclient_test.go index 0d2675f8d10d8d126c43b1063689bb08a561159f..dd643f0c18dbb3188e2ea672a1d4b13e207cb2f8 100644 --- go-ethereum/ethclient/ethclient_test.go +++ op-geth/ethclient/ethclient_test.go @@ -20,10 +20,18 @@ import ( "bytes" "context" "errors" + "fmt" "math/big" + "net" + "net/http" "reflect" "testing" "time" + + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/consensus" + "github.com/ethereum/go-ethereum/consensus/beacon" + "github.com/ethereum/go-ethereum/internal/ethapi"   "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/common" @@ -193,6 +201,21 @@ Timestamp: 9000, BaseFee: big.NewInt(params.InitialBaseFee), }   +var genesisForHistorical = &core.Genesis{ + Config: params.OptimismTestConfig, + Alloc: core.GenesisAlloc{testAddr: {Balance: testBalance}}, + ExtraData: []byte("test genesis"), + Timestamp: 9000, + BaseFee: big.NewInt(params.InitialBaseFee), +} + +var depositTx = types.NewTx(&types.DepositTx{ + Value: big.NewInt(12), + Gas: params.TxGas + 2000, + To: &common.Address{2}, + Data: make([]byte, 500), +}) + var testTx1 = types.MustSignNewTx(testKey, types.LatestSigner(genesis.Config), &types.LegacyTx{ Nonce: 0, Value: big.NewInt(12), @@ -209,9 +232,77 @@ Gas: params.TxGas, To: &common.Address{2}, })   -func newTestBackend(t *testing.T) (*node.Node, []*types.Block) { - // Generate test chain. 
- blocks := generateTestChain() +type mockHistoricalBackend struct{} + +func (m *mockHistoricalBackend) Call(ctx context.Context, args ethapi.TransactionArgs, blockNrOrHash rpc.BlockNumberOrHash, overrides *ethapi.StateOverride) (hexutil.Bytes, error) { + num, ok := blockNrOrHash.Number() + if ok && num == 1 { + return hexutil.Bytes("test"), nil + } + return nil, ethereum.NotFound +} + +func (m *mockHistoricalBackend) EstimateGas(ctx context.Context, args ethapi.TransactionArgs, blockNrOrHash *rpc.BlockNumberOrHash) (hexutil.Uint64, error) { + num, ok := blockNrOrHash.Number() + if ok && num == 1 { + return hexutil.Uint64(12345), nil + } + return 0, ethereum.NotFound +} + +func newMockHistoricalBackend(t *testing.T) string { + s := rpc.NewServer() + err := node.RegisterApis([]rpc.API{ + { + Namespace: "eth", + Service: new(mockHistoricalBackend), + Public: true, + Authenticated: false, + }, + }, nil, s) + if err != nil { + t.Fatalf("error creating mock historical backend: %v", err) + } + + hdlr := node.NewHTTPHandlerStack(s, []string{"*"}, []string{"*"}, nil) + mux := http.NewServeMux() + mux.Handle("/", hdlr) + + listener, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatalf("error creating mock historical backend listener: %v", err) + } + + go func() { + httpS := &http.Server{Handler: mux} + httpS.Serve(listener) + + t.Cleanup(func() { + httpS.Shutdown(context.Background()) + }) + }() + + return fmt.Sprintf("http://%s", listener.Addr().String()) +} + +func newTestBackend(t *testing.T, enableHistoricalState bool) (*node.Node, []*types.Block) { + histAddr := newMockHistoricalBackend(t) + + var consensusEngine consensus.Engine + var actualGenesis *core.Genesis + var chainLength int + if enableHistoricalState { + actualGenesis = genesisForHistorical + consensusEngine = beacon.New(ethash.NewFaker()) + chainLength = 10 + } else { + actualGenesis = genesis + consensusEngine = ethash.NewFaker() + chainLength = 2 + } + + // Generate test chain + blocks := 
generateTestChain(consensusEngine, actualGenesis, chainLength)   // Create node n, err := node.New(&node.Config{}) @@ -219,11 +310,18 @@ if err != nil { t.Fatalf("can't create new node: %v", err) } // Create Ethereum Service - config := &ethconfig.Config{Genesis: genesis} + config := &ethconfig.Config{Genesis: actualGenesis} + if enableHistoricalState { + config.RollupHistoricalRPC = histAddr + config.RollupHistoricalRPCTimeout = time.Second * 5 + } ethservice, err := eth.New(n, config) if err != nil { t.Fatalf("can't create new ethereum service: %v", err) } + if enableHistoricalState { // swap to the pre-bedrock consensus-engine that we used to generate the historical blocks + ethservice.BlockChain().Engine().(*beacon.Beacon).SwapInner(ethash.NewFaker()) + } // Import the test chain. if err := n.Start(); err != nil { t.Fatalf("can't start test node: %v", err) @@ -231,6 +329,7 @@ } if _, err := ethservice.BlockChain().InsertChain(blocks[1:]); err != nil { t.Fatalf("can't import test blocks: %v", err) } + // Ensure the tx indexing is fully generated for ; ; time.Sleep(time.Millisecond * 100) { progress, err := ethservice.BlockChain().TxIndexProgress() @@ -238,25 +337,44 @@ if err == nil && progress.Done() { break } } + + if enableHistoricalState { + // Now that we have a filled DB, swap the pre-Bedrock consensus to OpLegacy, + // which does not support re-processing of pre-bedrock data. + ethservice.Engine().(*beacon.Beacon).SwapInner(&beacon.OpLegacy{}) + } + return n, blocks }   -func generateTestChain() []*types.Block { +func generateTestChain(consensusEngine consensus.Engine, genesis *core.Genesis, length int) []*types.Block { generate := func(i int, g *core.BlockGen) { g.OffsetTime(5) g.SetExtra([]byte("test")) if i == 1 { // Test transactions are included in block #2. 
+ if genesis.Config.Optimism != nil && genesis.Config.IsBedrock(big.NewInt(1)) { + g.AddTx(depositTx) + } g.AddTx(testTx1) g.AddTx(testTx2) } } - _, blocks, _ := core.GenerateChainWithGenesis(genesis, ethash.NewFaker(), 2, generate) + _, blocks, _ := core.GenerateChainWithGenesis(genesis, consensusEngine, length, generate) return append([]*types.Block{genesis.ToBlock()}, blocks...) }   +func TestEthClientHistoricalBackend(t *testing.T) { + backend, _ := newTestBackend(t, true) + client := backend.Attach() + defer backend.Close() + defer client.Close() + + testHistoricalRPC(t, client) +} + func TestEthClient(t *testing.T) { - backend, chain := newTestBackend(t) + backend, chain := newTestBackend(t, false) client := backend.Attach() defer backend.Close() defer client.Close() @@ -293,6 +411,9 @@ func(t *testing.T) { testAtFunctions(t, client) }, }, "TransactionSender": { func(t *testing.T) { testTransactionSender(t, client) }, + }, + "EstimateGas": { + func(t *testing.T) { testEstimateGas(t, client) }, }, }   @@ -729,6 +850,54 @@ t.Fatal(err) } if sender2 != testAddr { t.Fatal("wrong sender:", sender2) + } +} + +func testEstimateGas(t *testing.T, client *rpc.Client) { + ec := NewClient(client) + + // EstimateGas + msg := ethereum.CallMsg{ + From: testAddr, + To: &common.Address{}, + Gas: 21000, + Value: big.NewInt(1), + } + gas, err := ec.EstimateGas(context.Background(), msg) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if gas != 21000 { + t.Fatalf("unexpected gas price: %v", gas) + } +} + +func testHistoricalRPC(t *testing.T, client *rpc.Client) { + ec := NewClient(client) + + // Estimate Gas RPC + msg := ethereum.CallMsg{ + From: testAddr, + To: &common.Address{}, + Gas: 21000, + Value: big.NewInt(1), + } + var res hexutil.Uint64 + err := client.CallContext(context.Background(), &res, "eth_estimateGas", toCallArg(msg), rpc.BlockNumberOrHashWithNumber(1)) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if res != 12345 { + 
t.Fatalf("invalid result: %d", res) + } + + // Call Contract RPC + histVal, err := ec.CallContract(context.Background(), msg, big.NewInt(1)) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if string(histVal) != "test" { + t.Fatalf("expected %s to equal test", string(histVal)) } }
diff --git go-ethereum/internal/ethapi/transaction_args_test.go op-geth/internal/ethapi/transaction_args_test.go index 1b1634b250317604dd22291aca69143177f3fbe6..f98683baca5290c91bb1ad1c1405f0e688c006c5 100644 --- go-ethereum/internal/ethapi/transaction_args_test.go +++ op-geth/internal/ethapi/transaction_args_test.go @@ -403,4 +403,6 @@ func (b *backendMock) SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent) event.Subscription { return nil }   -func (b *backendMock) Engine() consensus.Engine { return nil } +func (b *backendMock) Engine() consensus.Engine { return nil } +func (b *backendMock) HistoricalRPCService() *rpc.Client { return nil } +func (b *backendMock) Genesis() *types.Block { return nil }

Fix Debug API block marshaling to include deposits.

diff --git go-ethereum/eth/api_debug.go op-geth/eth/api_debug.go index 05010a3969c61befb62d4bc2f413ddd72609851f..7d98e1f98f47860d3443f608c2921f1a00e6bbc5 100644 --- go-ethereum/eth/api_debug.go +++ op-geth/eth/api_debug.go @@ -119,7 +119,10 @@ blockRlp = err.Error() // Hacky, but hey, it works } else { blockRlp = fmt.Sprintf("%#x", rlpBytes) } - blockJSON = ethapi.RPCMarshalBlock(block, true, true, api.eth.APIBackend.ChainConfig()) + var err error + if blockJSON, err = ethapi.RPCMarshalBlock(ctx, block, true, true, api.eth.APIBackend.ChainConfig(), api.eth.APIBackend); err != nil { + blockJSON = map[string]interface{}{"error": err.Error()} + } results = append(results, &BadBlockArgs{ Hash: block.Hash(), RLP: blockRlp,

Gas price suggestion adjustments to accommodate faster L2 blocks and lower fees.

diff --git go-ethereum/eth/gasprice/gasprice.go op-geth/eth/gasprice/gasprice.go index b71964981145fa58b7894a2be4637d8a7e2a6c9a..e8ef6c7459ecd92841f902d54f9bf124930053df 100644 --- go-ethereum/eth/gasprice/gasprice.go +++ op-geth/eth/gasprice/gasprice.go @@ -37,6 +37,8 @@ var ( DefaultMaxPrice = big.NewInt(500 * params.GWei) DefaultIgnorePrice = big.NewInt(2 * params.Wei) + + DefaultMinSuggestedPriorityFee = big.NewInt(1e6 * params.Wei) // 0.001 gwei, for Optimism fee suggestion )   type Config struct { @@ -47,6 +49,8 @@ MaxBlockHistory uint64 Default *big.Int `toml:",omitempty"` MaxPrice *big.Int `toml:",omitempty"` IgnorePrice *big.Int `toml:",omitempty"` + + MinSuggestedPriorityFee *big.Int `toml:",omitempty"` // for Optimism fee suggestion }   // OracleBackend includes all necessary background APIs for oracle. @@ -74,6 +78,8 @@ checkBlocks, percentile int maxHeaderHistory, maxBlockHistory uint64   historyCache *lru.Cache[cacheKey, processedFees] + + minSuggestedPriorityFee *big.Int // for Optimism fee suggestion }   // NewOracle returns a new gasprice oracle which can recommend suitable @@ -128,7 +134,7 @@ lastHead = ev.Block.Hash() } }()   - return &Oracle{ + r := &Oracle{ backend: backend, lastPrice: params.Default, maxPrice: maxPrice, @@ -139,6 +145,17 @@ maxHeaderHistory: maxHeaderHistory, maxBlockHistory: maxBlockHistory, historyCache: cache, } + + if backend.ChainConfig().IsOptimism() { + r.minSuggestedPriorityFee = params.MinSuggestedPriorityFee + if r.minSuggestedPriorityFee == nil || r.minSuggestedPriorityFee.Int64() <= 0 { + r.minSuggestedPriorityFee = DefaultMinSuggestedPriorityFee + log.Warn("Sanitizing invalid optimism gasprice oracle min priority fee suggestion", + "provided", params.MinSuggestedPriorityFee, + "updated", r.minSuggestedPriorityFee) + } + } + return r }   // SuggestTipCap returns a tip cap so that newly created transaction can have a @@ -168,6 +185,11 @@ oracle.cacheLock.RUnlock() if headHash == lastHead { return 
new(big.Int).Set(lastPrice), nil } + + if oracle.backend.ChainConfig().IsOptimism() { + return oracle.SuggestOptimismPriorityFee(ctx, head, headHash), nil + } + var ( sent, exp int number = head.Number.Uint64() @@ -274,3 +296,9 @@ case result <- results{prices, nil}: case <-quit: } } + +type bigIntArray []*big.Int + +func (s bigIntArray) Len() int { return len(s) } +func (s bigIntArray) Less(i, j int) bool { return s[i].Cmp(s[j]) < 0 } +func (s bigIntArray) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
diff --git go-ethereum/eth/gasprice/optimism-gasprice.go op-geth/eth/gasprice/optimism-gasprice.go new file mode 100644 index 0000000000000000000000000000000000000000..71cd021f637f92e4b6d99ec5fc337d27edf547e5 --- /dev/null +++ op-geth/eth/gasprice/optimism-gasprice.go @@ -0,0 +1,110 @@ +package gasprice + +import ( + "context" + "math/big" + "sort" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/rpc" +) + +// SuggestOptimismPriorityFee returns a max priority fee value that can be used such that newly +// created transactions have a very high chance to be included in the following blocks, using a +// simplified and more predictable algorithm appropriate for chains like Optimism with a single +// known block builder. +// +// In the typical case, which results whenever the last block had room for more transactions, this +// function returns a minimum suggested priority fee value. Otherwise it returns the higher of this +// minimum suggestion or 10% over the median effective priority fee from the last block. +// +// Rationale: For a chain such as Optimism where there is a single block builder whose behavior is +// known, we know priority fee (as long as it is non-zero) has no impact on the probability for tx +// inclusion as long as there is capacity for it in the block. In this case then, there's no reason +// to return any value higher than some fixed minimum. Blocks typically reach capacity only under +// extreme events such as airdrops, meaning predicting whether the next block is going to be at +// capacity is difficult *except* in the case where we're already experiencing the increased demand +// from such an event. We therefore expect whether the last known block is at capacity to be one of +// the best predictors of whether the next block is likely to be at capacity. 
(An even better +// predictor is to look at the state of the transaction pool, but we want an algorithm that works +// even if the txpool is private or unavailable.) +// +// In the event the next block may be at capacity, the algorithm should allow for average fees to +// rise in order to reach a market price that appropriately reflects demand. We accomplish this by +// returning a suggestion that is a significant amount (10%) higher than the median effective +// priority fee from the previous block. +func (oracle *Oracle) SuggestOptimismPriorityFee(ctx context.Context, h *types.Header, headHash common.Hash) *big.Int { + suggestion := new(big.Int).Set(oracle.minSuggestedPriorityFee) + + // find the maximum gas used by any of the transactions in the block to use as the capacity + // margin + receipts, err := oracle.backend.GetReceipts(ctx, headHash) + if receipts == nil || err != nil { + log.Error("failed to get block receipts", "err", err) + return suggestion + } + var maxTxGasUsed uint64 + for i := range receipts { + gu := receipts[i].GasUsed + if gu > maxTxGasUsed { + maxTxGasUsed = gu + } + } + + // sanity check the max gas used value + if maxTxGasUsed > h.GasLimit { + log.Error("found tx consuming more gas than the block limit", "gas", maxTxGasUsed) + return suggestion + } + + if h.GasUsed+maxTxGasUsed > h.GasLimit { + // A block is "at capacity" if, when it is built, there is a pending tx in the txpool that + // could not be included because the block's gas limit would be exceeded. Since we don't + // have access to the txpool, we instead adopt the following heuristic: consider a block as + // at capacity if the total gas consumed by its transactions is within max-tx-gas-used of + // the block limit, where max-tx-gas-used is the most gas used by any one transaction + // within the block. This heuristic is almost perfectly accurate when transactions always + // consume the same amount of gas, but becomes less accurate as tx gas consumption begins + // to vary. 
The typical error is we assume a block is at capacity when it was not because + // max-tx-gas-used will in most cases over-estimate the "capacity margin". But it's better + // to err on the side of returning a higher-than-needed suggestion than a lower-than-needed + // one in order to satisfy our desire for high chance of inclusion and rising fees under + // high demand. + block, err := oracle.backend.BlockByNumber(ctx, rpc.BlockNumber(h.Number.Int64())) + if block == nil || err != nil { + log.Error("failed to get last block", "err", err) + return suggestion + } + baseFee := block.BaseFee() + txs := block.Transactions() + if len(txs) == 0 { + log.Error("block was at capacity but doesn't have transactions") + return suggestion + } + tips := bigIntArray(make([]*big.Int, len(txs))) + for i := range txs { + tips[i] = txs[i].EffectiveGasTipValue(baseFee) + } + sort.Sort(tips) + median := tips[len(tips)/2] + newSuggestion := new(big.Int).Add(median, new(big.Int).Div(median, big.NewInt(10))) + // use the new suggestion only if it's bigger than the minimum + if newSuggestion.Cmp(suggestion) > 0 { + suggestion = newSuggestion + } + } + + // the suggestion should be capped by oracle.maxPrice + if suggestion.Cmp(oracle.maxPrice) > 0 { + suggestion.Set(oracle.maxPrice) + } + + oracle.cacheLock.Lock() + oracle.lastHead = headHash + oracle.lastPrice = suggestion + oracle.cacheLock.Unlock() + + return new(big.Int).Set(suggestion) +}

Upstream test of broken behavior: in Optimism, a zero signature is valid (pre-Bedrock, for deposit transactions), so the chain-ID recovery formula on the signature data must not be applied to it, or an arithmetic underflow occurs.

diff --git go-ethereum/internal/ethapi/testdata/eth_getBlockByNumber-tag-pending-fullTx.json op-geth/internal/ethapi/testdata/eth_getBlockByNumber-tag-pending-fullTx.json index 1d524d6eced19249ab7f1d7beeb04863087e81c1..7087932286ea7facdae1e56caf15e618b5307e71 100644 --- go-ethereum/internal/ethapi/testdata/eth_getBlockByNumber-tag-pending-fullTx.json +++ op-geth/internal/ethapi/testdata/eth_getBlockByNumber-tag-pending-fullTx.json @@ -30,7 +30,7 @@ "to": "0x0d3ab14bbad3d99f4203bd7a11acb94882050e7e", "transactionIndex": "0x0", "value": "0x6f", "type": "0x0", - "chainId": "0x7fffffffffffffee", + "chainId": "0x1", "v": "0x0", "r": "0x0", "s": "0x0"

Extend the utilities available in geth to better support external testing and tooling.

diff --git go-ethereum/accounts/abi/bind/backends/simulated.go op-geth/accounts/abi/bind/backends/simulated.go index dfd929695286568a72fcb10a8c671e7b00b8cf22..38ac9d60d091204326f9fd8ec932fff4f4721bc9 100644 --- go-ethereum/accounts/abi/bind/backends/simulated.go +++ op-geth/accounts/abi/bind/backends/simulated.go @@ -21,6 +21,7 @@ "context"   "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/eth/ethconfig" "github.com/ethereum/go-ethereum/ethclient/simulated" )   @@ -50,3 +51,11 @@ Backend: b, Client: b.Client(), } } + +func NewSimulatedBackendFromConfig(cfg ethconfig.Config) *SimulatedBackend { + b := simulated.NewBackendFromConfig(cfg) + return &SimulatedBackend{ + Backend: b, + Client: b.Client(), + } +}
diff --git go-ethereum/ethclient/simulated/backend.go op-geth/ethclient/simulated/backend.go index 0c2a0b453c1b2dff518a1d2576c71d176943e9e6..1df0a73150e51d0ba3a1058f5dbe537e3096d44e 100644 --- go-ethereum/ethclient/simulated/backend.go +++ op-geth/ethclient/simulated/backend.go @@ -17,6 +17,7 @@ package simulated   import ( + "errors" "time"   "github.com/ethereum/go-ethereum" @@ -62,7 +63,7 @@ // Backend is a simulated blockchain. You can use it to test your contracts or // other code that interacts with the Ethereum chain. type Backend struct { - eth *eth.Ethereum + node *node.Node beacon *catalyst.SimulatedBeacon client simClient } @@ -102,6 +103,27 @@ } return sim }   +func NewBackendFromConfig(conf ethconfig.Config) *Backend { + // Setup the node object + nodeConf := node.DefaultConfig + nodeConf.DataDir = "" + nodeConf.P2P = p2p.Config{NoDiscovery: true} + stack, err := node.New(&nodeConf) + if err != nil { + // This should never happen, if it does, please open an issue + panic(err) + } + + conf.SyncMode = downloader.FullSync + conf.TxPool.NoLocals = true + sim, err := newWithNode(stack, &conf, 0) + if err != nil { + // This should never happen, if it does, please open an issue + panic(err) + } + return sim +} + // newWithNode sets up a simulated backend on an existing node. The provided node // must not be started and will be started by this method. 
func newWithNode(stack *node.Node, conf *eth.Config, blockPeriod uint64) (*Backend, error) { @@ -129,7 +151,7 @@ if err := beacon.Fork(backend.BlockChain().GetCanonicalHash(0)); err != nil { return nil, err } return &Backend{ - eth: backend, + node: stack, beacon: beacon, client: simClient{ethclient.NewClient(stack.Attach())}, }, nil @@ -142,12 +164,16 @@ if n.client.Client != nil { n.client.Close() n.client = simClient{} } + var err error if n.beacon != nil { - err := n.beacon.Stop() + err = n.beacon.Stop() n.beacon = nil - return err + } + if n.node != nil { + err = errors.Join(err, n.node.Close()) + n.node = nil } - return nil + return err }   // Commit seals a block and moves the chain forward to a new empty block.

Extend Ledger hardware-wallet support to newer devices on macOS.

diff --git go-ethereum/accounts/usbwallet/hub.go op-geth/accounts/usbwallet/hub.go index e67942dbc1072ea952c204c1f30f3803dbf8e567..0682310867b2bd3c44788b158b8a4a716759c865 100644 --- go-ethereum/accounts/usbwallet/hub.go +++ op-geth/accounts/usbwallet/hub.go @@ -23,6 +23,8 @@ "sync" "sync/atomic" "time"   + "golang.org/x/exp/slices" + "github.com/ethereum/go-ethereum/accounts" "github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/log" @@ -48,7 +50,7 @@ type Hub struct { scheme string // Protocol scheme prefixing account and wallet URLs. vendorID uint16 // USB vendor identifier used for device discovery productIDs []uint16 // USB product identifiers used for device discovery - usageID uint16 // USB usage page identifier used for macOS device discovery + usageIDs []uint16 // USB usage page identifier used for macOS device discovery endpointID int // USB endpoint identifier used for non-macOS device discovery makeDriver func(log.Logger) driver // Factory method to construct a vendor specific driver   @@ -93,22 +95,22 @@ 0x1011, /* HID + WebUSB Ledger Nano S */ 0x4011, /* HID + WebUSB Ledger Nano X */ 0x5011, /* HID + WebUSB Ledger Nano S Plus */ 0x6011, /* HID + WebUSB Ledger Nano FTS */ - }, 0xffa0, 0, newLedgerDriver) + }, []uint16{0xffa0, 0}, 2, newLedgerDriver) }   // NewTrezorHubWithHID creates a new hardware wallet manager for Trezor devices. 
func NewTrezorHubWithHID() (*Hub, error) { - return newHub(TrezorScheme, 0x534c, []uint16{0x0001 /* Trezor HID */}, 0xff00, 0, newTrezorDriver) + return newHub(TrezorScheme, 0x534c, []uint16{0x0001 /* Trezor HID */}, []uint16{0xff00}, 0, newTrezorDriver) }   // NewTrezorHubWithWebUSB creates a new hardware wallet manager for Trezor devices with // firmware version > 1.8.0 func NewTrezorHubWithWebUSB() (*Hub, error) { - return newHub(TrezorScheme, 0x1209, []uint16{0x53c1 /* Trezor WebUSB */}, 0xffff /* No usage id on webusb, don't match unset (0) */, 0, newTrezorDriver) + return newHub(TrezorScheme, 0x1209, []uint16{0x53c1 /* Trezor WebUSB */}, []uint16{0xffff} /* No usage id on webusb, don't match unset (0) */, 0, newTrezorDriver) }   // newHub creates a new hardware wallet manager for generic USB devices. -func newHub(scheme string, vendorID uint16, productIDs []uint16, usageID uint16, endpointID int, makeDriver func(log.Logger) driver) (*Hub, error) { +func newHub(scheme string, vendorID uint16, productIDs []uint16, usageIDs []uint16, endpointID int, makeDriver func(log.Logger) driver) (*Hub, error) { if !usb.Supported() { return nil, errors.New("unsupported platform") } @@ -116,7 +118,7 @@ hub := &Hub{ scheme: scheme, vendorID: vendorID, productIDs: productIDs, - usageID: usageID, + usageIDs: usageIDs, endpointID: endpointID, makeDriver: makeDriver, quit: make(chan chan error), @@ -186,7 +188,9 @@ for _, info := range infos { for _, id := range hub.productIDs { // Windows and Macos use UsageID matching, Linux uses Interface matching - if info.ProductID == id && (info.UsagePage == hub.usageID || info.Interface == hub.endpointID) { + if info.ProductID == id && + info.Path != "" && + (slices.Contains(hub.usageIDs, info.UsagePage) || info.Interface == hub.endpointID) { devices = append(devices, info) break }

Additional or modified tests, not already captured by the diffs above.

diff --git go-ethereum/accounts/abi/packing_test.go op-geth/accounts/abi/packing_test.go index eae3b0df20562372b4e739ba0e72559c0d19bd96..9ed52a475eb900f5be20dfe9441ba25ac330defd 100644 --- go-ethereum/accounts/abi/packing_test.go +++ op-geth/accounts/abi/packing_test.go @@ -375,7 +375,7 @@ { def: `[{"type": "bytes"}]`, packed: "0000000000000000000000000000000000000000000000000000000000000020" + "0000000000000000000000000000000000000000000000000000000000000020" + - "0100000000000000000000000000000000000000000000000000000000000000", + "0100000000000000000000000000000000000000000000000000000000000000", //nolint:all unpacked: common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"), }, { @@ -476,7 +476,7 @@ { def: `[{"type": "int256[3]"}]`, packed: "0000000000000000000000000000000000000000000000000000000000000001" + "0000000000000000000000000000000000000000000000000000000000000002" + - "0000000000000000000000000000000000000000000000000000000000000003", + "0000000000000000000000000000000000000000000000000000000000000003", //nolint:all unpacked: [3]*big.Int{big.NewInt(1), big.NewInt(2), big.NewInt(3)}, }, // multi dimensional, if these pass, all types that don't require length prefix should pass @@ -536,7 +536,7 @@ { def: `[{"type": "uint8[][2]"}]`, packed: "0000000000000000000000000000000000000000000000000000000000000020" + "0000000000000000000000000000000000000000000000000000000000000040" + - "0000000000000000000000000000000000000000000000000000000000000080" + + "0000000000000000000000000000000000000000000000000000000000000080" + //nolint:all "0000000000000000000000000000000000000000000000000000000000000001" + "0000000000000000000000000000000000000000000000000000000000000001" + "0000000000000000000000000000000000000000000000000000000000000001" + @@ -801,7 +801,7 @@ "0000000000000000000000000000000000000000000000000000000000000002" + // len(array[0]) = 2 "0100000000000000000000000000000000000000000000000000000000000000" + // array[0][0] 
"0200000000000000000000000000000000000000000000000000000000000000" + // array[0][1] "0000000000000000000000000000000000000000000000000000000000000003" + // len(array[1]) = 3 - "0300000000000000000000000000000000000000000000000000000000000000" + // array[1][0] + "0300000000000000000000000000000000000000000000000000000000000000" + //nolint:all // array[1][0] "0400000000000000000000000000000000000000000000000000000000000000" + // array[1][1] "0500000000000000000000000000000000000000000000000000000000000000", // array[1][2] }, @@ -845,7 +845,7 @@ E [2][3][32]byte }{1, big.NewInt(1), big.NewInt(-1), true, [2][3][32]byte{{{1}, {2}, {3}}, {{3}, {4}, {5}}}}, packed: "0000000000000000000000000000000000000000000000000000000000000001" + // struct[a] "0000000000000000000000000000000000000000000000000000000000000001" + // struct[b] - "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + // struct[c] + "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + //nolint:all // struct[c] "0000000000000000000000000000000000000000000000000000000000000001" + // struct[d] "0100000000000000000000000000000000000000000000000000000000000000" + // struct[e] array[0][0] "0200000000000000000000000000000000000000000000000000000000000000" + // struct[e] array[0][1]
diff --git go-ethereum/consensus/misc/create2deployer_test.go op-geth/consensus/misc/create2deployer_test.go new file mode 100644 index 0000000000000000000000000000000000000000..fc0ef79fd7739d92431085d515481be8621e8fcb --- /dev/null +++ op-geth/consensus/misc/create2deployer_test.go @@ -0,0 +1,95 @@ +package misc + +import ( + "math/big" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/params" +) + +func TestEnsureCreate2Deployer(t *testing.T) { + canyonTime := uint64(1000) + var tests = []struct { + name string + override func(cfg *params.ChainConfig) + timestamp uint64 + codeExists bool + applied bool + }{ + { + name: "at hardfork", + timestamp: canyonTime, + applied: true, + }, + { + name: "another chain ID", + override: func(cfg *params.ChainConfig) { + cfg.ChainID = big.NewInt(params.OPMainnetChainID) + }, + timestamp: canyonTime, + applied: true, + }, + { + name: "code already exists", + timestamp: canyonTime, + codeExists: true, + applied: true, + }, + { + name: "pre canyon", + timestamp: canyonTime - 1, + applied: false, + }, + { + name: "post hardfork", + timestamp: canyonTime + 1, + applied: false, + }, + { + name: "canyon not configured", + override: func(cfg *params.ChainConfig) { + cfg.CanyonTime = nil + }, + timestamp: canyonTime, + applied: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cfg := params.ChainConfig{ + ChainID: big.NewInt(params.BaseMainnetChainID), + Optimism: &params.OptimismConfig{}, + CanyonTime: &canyonTime, + } + if tt.override != nil { + tt.override(&cfg) + } + state := &stateDb{ + codeExists: tt.codeExists, + } + EnsureCreate2Deployer(&cfg, tt.timestamp, state) + assert.Equal(t, tt.applied, state.codeSet) + }) + } +} + +type stateDb struct { + vm.StateDB + codeExists bool + codeSet bool +} + +func (s *stateDb) GetCodeSize(_ common.Address) int { + if s.codeExists { + 
return 1 + } + return 0 +} + +func (s *stateDb) SetCode(_ common.Address, _ []byte) { + s.codeSet = true +}
diff --git go-ethereum/consensus/misc/eip1559/eip1559_test.go op-geth/consensus/misc/eip1559/eip1559_test.go index b5afdf0fe5e56ca6ced6031be233c6aafb17dcdd..d34da8fd8bbb21a6a649384435c3926a6735083d 100644 --- go-ethereum/consensus/misc/eip1559/eip1559_test.go +++ op-geth/consensus/misc/eip1559/eip1559_test.go @@ -55,6 +55,19 @@ config.LondonBlock = big.NewInt(5) return config }   +func opConfig() *params.ChainConfig { + config := copyConfig(params.TestChainConfig) + config.LondonBlock = big.NewInt(5) + ct := uint64(10) + config.CanyonTime = &ct + config.Optimism = &params.OptimismConfig{ + EIP1559Elasticity: 6, + EIP1559Denominator: 50, + EIP1559DenominatorCanyon: 250, + } + return config +} + // TestBlockGasLimits tests the gasLimit checks for blocks both across // the EIP-1559 boundary and post-1559 blocks func TestBlockGasLimits(t *testing.T) { @@ -124,7 +137,40 @@ GasLimit: test.parentGasLimit, GasUsed: test.parentGasUsed, BaseFee: big.NewInt(test.parentBaseFee), } - if have, want := CalcBaseFee(config(), parent), big.NewInt(test.expectedBaseFee); have.Cmp(want) != 0 { + if have, want := CalcBaseFee(config(), parent, 0), big.NewInt(test.expectedBaseFee); have.Cmp(want) != 0 { + t.Errorf("test %d: have %d want %d, ", i, have, want) + } + } +} + +// TestCalcBaseFeeOptimism assumes all blocks are 1559-blocks but tests the Canyon activation +func TestCalcBaseFeeOptimism(t *testing.T) { + tests := []struct { + parentBaseFee int64 + parentGasLimit uint64 + parentGasUsed uint64 + expectedBaseFee int64 + postCanyon bool + }{ + {params.InitialBaseFee, 30_000_000, 5_000_000, params.InitialBaseFee, false}, // usage == target + {params.InitialBaseFee, 30_000_000, 4_000_000, 996000000, false}, // usage below target + {params.InitialBaseFee, 30_000_000, 10_000_000, 1020000000, false}, // usage above target + {params.InitialBaseFee, 30_000_000, 5_000_000, params.InitialBaseFee, true}, // usage == target + {params.InitialBaseFee, 30_000_000, 4_000_000, 999200000, true}, // 
usage below target + {params.InitialBaseFee, 30_000_000, 10_000_000, 1004000000, true}, // usage above target + } + for i, test := range tests { + parent := &types.Header{ + Number: common.Big32, + GasLimit: test.parentGasLimit, + GasUsed: test.parentGasUsed, + BaseFee: big.NewInt(test.parentBaseFee), + Time: 6, + } + if test.postCanyon { + parent.Time = 8 + } + if have, want := CalcBaseFee(opConfig(), parent, parent.Time+2), big.NewInt(test.expectedBaseFee); have.Cmp(want) != 0 { t.Errorf("test %d: have %d want %d, ", i, have, want) } }
diff --git go-ethereum/core/rawdb/accessors_chain_test.go op-geth/core/rawdb/accessors_chain_test.go index a7ceb72998a115653130a19932f443dfda3f7cc4..3340c7f15c8200a11b2831abb90dd4a052b2b6f8 100644 --- go-ethereum/core/rawdb/accessors_chain_test.go +++ op-geth/core/rawdb/accessors_chain_test.go @@ -27,10 +27,12 @@ "reflect" "testing"   "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rlp" + "github.com/stretchr/testify/require" "golang.org/x/crypto/sha3" )   @@ -347,6 +349,9 @@ // Create a live block since we need metadata to reconstruct the receipt tx1 := types.NewTransaction(1, common.HexToAddress("0x1"), big.NewInt(1), 1, big.NewInt(1), nil) tx2 := types.NewTransaction(2, common.HexToAddress("0x2"), big.NewInt(2), 2, big.NewInt(2), nil)   + header := &types.Header{ + Number: big.NewInt(0), + } body := &types.Body{Transactions: types.Transactions{tx1, tx2}}   // Create the two receipts to manage afterwards @@ -378,13 +383,16 @@ receipt2.Bloom = types.CreateBloom(types.Receipts{receipt2}) receipts := []*types.Receipt{receipt1, receipt2}   // Check that no receipt entries are in a pristine database - hash := common.BytesToHash([]byte{0x03, 0x14}) + hash := header.Hash() if rs := ReadReceipts(db, hash, 0, 0, params.TestChainConfig); len(rs) != 0 { t.Fatalf("non existent receipts returned: %v", rs) } // Insert the body that corresponds to the receipts WriteBody(db, hash, 0, body)   + // Insert the header that corresponds to the receipts + WriteHeader(db, header) + // Insert the receipt slice into the database and check presence WriteReceipts(db, hash, 0, receipts) if rs := ReadReceipts(db, hash, 0, 0, params.TestChainConfig); len(rs) == 0 { @@ -694,36 +702,56 @@ // Create a live block since we need metadata to reconstruct the receipt tx1 := types.NewTransaction(1, 
common.HexToAddress("0x1"), big.NewInt(1), 1, big.NewInt(1), nil) tx2 := types.NewTransaction(2, common.HexToAddress("0x2"), big.NewInt(2), 2, big.NewInt(2), nil) + tx3 := types.NewTransaction(3, common.HexToAddress("0x3"), big.NewInt(3), 3, big.NewInt(3), nil)   - body := &types.Body{Transactions: types.Transactions{tx1, tx2}} + body := &types.Body{Transactions: types.Transactions{tx1, tx2, tx3}}   - // Create the two receipts to manage afterwards - receipt1 := &types.Receipt{ + // Create the three receipts to manage afterwards + depositNonce := uint64(math.MaxUint64) + depositReceipt := types.Receipt{ Status: types.ReceiptStatusFailed, CumulativeGasUsed: 1, Logs: []*types.Log{ {Address: common.BytesToAddress([]byte{0x11})}, {Address: common.BytesToAddress([]byte{0x01, 0x11})}, }, - TxHash: tx1.Hash(), - ContractAddress: common.BytesToAddress([]byte{0x01, 0x11, 0x11}), - GasUsed: 111111, + TxHash: tx1.Hash(), + ContractAddress: common.BytesToAddress([]byte{0x01, 0x11, 0x11}), + GasUsed: 111111, + DepositNonce: &depositNonce, + DepositReceiptVersion: nil, } - receipt1.Bloom = types.CreateBloom(types.Receipts{receipt1}) + depositReceipt.Bloom = types.CreateBloom(types.Receipts{&depositReceipt})   - receipt2 := &types.Receipt{ - PostState: common.Hash{2}.Bytes(), + receiptVersion := types.CanyonDepositReceiptVersion + versionedDepositReceipt := types.Receipt{ + Status: types.ReceiptStatusFailed, CumulativeGasUsed: 2, Logs: []*types.Log{ {Address: common.BytesToAddress([]byte{0x22})}, - {Address: common.BytesToAddress([]byte{0x02, 0x22})}, + {Address: common.BytesToAddress([]byte{0x01, 0x11})}, + }, + TxHash: tx2.Hash(), + ContractAddress: common.BytesToAddress([]byte{0x02, 0x22, 0x22}), + GasUsed: 222222, + DepositNonce: &depositNonce, + DepositReceiptVersion: &receiptVersion, + } + versionedDepositReceipt.Bloom = types.CreateBloom(types.Receipts{&versionedDepositReceipt}) + + receipt := types.Receipt{ + PostState: common.Hash{3}.Bytes(), + CumulativeGasUsed: 3, + 
Logs: []*types.Log{ + {Address: common.BytesToAddress([]byte{0x33})}, + {Address: common.BytesToAddress([]byte{0x03, 0x33})}, }, - TxHash: tx2.Hash(), - ContractAddress: common.BytesToAddress([]byte{0x02, 0x22, 0x22}), - GasUsed: 222222, + TxHash: tx3.Hash(), + ContractAddress: common.BytesToAddress([]byte{0x03, 0x33, 0x33}), + GasUsed: 333333, } - receipt2.Bloom = types.CreateBloom(types.Receipts{receipt2}) - receipts := []*types.Receipt{receipt1, receipt2} + receipt.Bloom = types.CreateBloom(types.Receipts{&receipt}) + receipts := []*types.Receipt{&receipt, &depositReceipt, &versionedDepositReceipt}   hash := common.BytesToHash([]byte{0x03, 0x14}) // Check that no receipt entries are in a pristine database @@ -740,14 +768,13 @@ logs := ReadLogs(db, hash, 0) if len(logs) == 0 { t.Fatalf("no logs returned") } - if have, want := len(logs), 2; have != want { + if have, want := len(logs), 3; have != want { t.Fatalf("unexpected number of logs returned, have %d want %d", have, want) } - if have, want := len(logs[0]), 2; have != want { - t.Fatalf("unexpected number of logs[0] returned, have %d want %d", have, want) - } - if have, want := len(logs[1]), 2; have != want { - t.Fatalf("unexpected number of logs[1] returned, have %d want %d", have, want) + for i := range logs { + if have, want := len(logs[i]), 2; have != want { + t.Fatalf("unexpected number of logs[%d] returned, have %d want %d", i, have, want) + } }   for i, pr := range receipts { @@ -765,6 +792,36 @@ t.Fatalf("receipt #%d: receipt mismatch: have %s, want %s", i, hex.EncodeToString(rlpHave), hex.EncodeToString(rlpWant)) } } } +} + +func TestParseLegacyReceiptRLP(t *testing.T) { + // Create a gasUsed value greater than a uint64 can represent + gasUsed := big.NewInt(0) + gasUsed = gasUsed.SetUint64(math.MaxUint64) + gasUsed = gasUsed.Add(gasUsed, big.NewInt(math.MaxInt64)) + sanityCheck := (&big.Int{}).SetUint64(gasUsed.Uint64()) + require.NotEqual(t, gasUsed, sanityCheck) + receipt := 
types.LegacyOptimismStoredReceiptRLP{ + CumulativeGasUsed: 1, + Logs: []*types.LogForStorage{ + {Address: common.BytesToAddress([]byte{0x11})}, + {Address: common.BytesToAddress([]byte{0x01, 0x11})}, + }, + L1GasUsed: gasUsed, + L1GasPrice: gasUsed, + L1Fee: gasUsed, + FeeScalar: "6", + } + + data, err := rlp.EncodeToBytes(receipt) + require.NoError(t, err) + var result storedReceiptRLP + err = rlp.DecodeBytes(data, &result) + require.NoError(t, err) + require.Equal(t, receipt.L1GasUsed, result.L1GasUsed) + require.Equal(t, receipt.L1GasPrice, result.L1GasPrice) + require.Equal(t, receipt.L1Fee, result.L1Fee) + require.Equal(t, receipt.FeeScalar, result.FeeScalar) }   func TestDeriveLogFields(t *testing.T) {
diff --git go-ethereum/core/state_processor_test.go op-geth/core/state_processor_test.go index 7718c0cde483dcc447ed867f60e469bec688208e..77efaede5886e8cc3e92e78be5739e784fd099c0 100644 --- go-ethereum/core/state_processor_test.go +++ op-geth/core/state_processor_test.go @@ -379,7 +379,7 @@ Time: parent.Time() + 10, UncleHash: types.EmptyUncleHash, } if config.IsLondon(header.Number) { - header.BaseFee = eip1559.CalcBaseFee(config, parent.Header()) + header.BaseFee = eip1559.CalcBaseFee(config, parent.Header(), header.Time) } if config.IsShanghai(header.Number, header.Time) { header.WithdrawalsHash = &types.EmptyWithdrawalsHash
diff --git go-ethereum/core/superchain_test.go op-geth/core/superchain_test.go new file mode 100644 index 0000000000000000000000000000000000000000..4f7ea71ac5a773afb25ea8d67f0cade27f641cdc --- /dev/null +++ op-geth/core/superchain_test.go @@ -0,0 +1,53 @@ +package core + +import ( + "testing" + + "github.com/ethereum-optimism/superchain-registry/superchain" + + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/triedb" +) + +func TestOPStackGenesis(t *testing.T) { + for id := range superchain.OPChains { + _, err := LoadOPStackGenesis(id) + if err != nil { + t.Error(err) + } + } +} + +func TestRegistryChainConfigOverride(t *testing.T) { + db := rawdb.NewMemoryDatabase() + genesis, err := LoadOPStackGenesis(10) + if err != nil { + t.Fatal(err) + } + if genesis.Config.RegolithTime == nil { + t.Fatal("expected non-nil regolith time") + } + expectedRegolithTime := *genesis.Config.RegolithTime + genesis.Config.RegolithTime = nil + + // initialize the DB + tdb := triedb.NewDatabase(db, newDbConfig(rawdb.PathScheme)) + genesis.MustCommit(db, tdb) + bl := genesis.ToBlock() + rawdb.WriteCanonicalHash(db, bl.Hash(), 0) + rawdb.WriteBlock(db, bl) + + // create chain config, even with incomplete genesis input: the chain config should be corrected + chainConfig, _, err := SetupGenesisBlockWithOverride(db, tdb, genesis, &ChainOverrides{ + ApplySuperchainUpgrades: true, + }) + if err != nil { + t.Fatal(err) + } + // check if we have a corrected chain config + if chainConfig.RegolithTime == nil { + t.Fatal("expected regolith time to be corrected, but time is still nil") + } else if *chainConfig.RegolithTime != expectedRegolithTime { + t.Fatalf("expected regolith time to be %d, but got %d", expectedRegolithTime, *chainConfig.RegolithTime) + } +}
diff --git go-ethereum/core/types/receipt_test.go op-geth/core/types/receipt_test.go index a7b26444712fe8c669ea732fa423588a58497f32..1156f3e94f2d176d8eb1eb936ba3741527450a17 100644 --- go-ethereum/core/types/receipt_test.go +++ op-geth/core/types/receipt_test.go @@ -19,6 +19,7 @@ import ( "bytes" "encoding/json" + "fmt" "math" "math/big" "reflect" @@ -29,9 +30,25 @@ "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rlp" "github.com/holiman/uint256" "github.com/kylelemons/godebug/diff" + "github.com/stretchr/testify/require" )   var ( + bedrockGenesisTestConfig = func() *params.ChainConfig { + conf := *params.AllCliqueProtocolChanges // copy the config + conf.Clique = nil + conf.TerminalTotalDifficultyPassed = true + conf.BedrockBlock = big.NewInt(0) + conf.Optimism = &params.OptimismConfig{EIP1559Elasticity: 50, EIP1559Denominator: 10} + return &conf + }() + ecotoneTestConfig = func() *params.ChainConfig { + conf := *bedrockGenesisTestConfig // copy the config + time := uint64(0) + conf.EcotoneTime = &time + return &conf + }() + legacyReceipt = &Receipt{ Status: ReceiptStatusFailed, CumulativeGasUsed: 1, @@ -82,6 +99,63 @@ }, }, Type: DynamicFeeTxType, } + depositReceiptNoNonce = &Receipt{ + Status: ReceiptStatusFailed, + CumulativeGasUsed: 1, + Logs: []*Log{ + { + Address: common.BytesToAddress([]byte{0x11}), + Topics: []common.Hash{common.HexToHash("dead"), common.HexToHash("beef")}, + Data: []byte{0x01, 0x00, 0xff}, + }, + { + Address: common.BytesToAddress([]byte{0x01, 0x11}), + Topics: []common.Hash{common.HexToHash("dead"), common.HexToHash("beef")}, + Data: []byte{0x01, 0x00, 0xff}, + }, + }, + Type: DepositTxType, + } + nonce = uint64(1234) + depositReceiptWithNonce = &Receipt{ + Status: ReceiptStatusFailed, + CumulativeGasUsed: 1, + DepositNonce: &nonce, + DepositReceiptVersion: nil, + Logs: []*Log{ + { + Address: common.BytesToAddress([]byte{0x11}), + Topics: []common.Hash{common.HexToHash("dead"), common.HexToHash("beef")}, + 
Data: []byte{0x01, 0x00, 0xff}, + }, + { + Address: common.BytesToAddress([]byte{0x01, 0x11}), + Topics: []common.Hash{common.HexToHash("dead"), common.HexToHash("beef")}, + Data: []byte{0x01, 0x00, 0xff}, + }, + }, + Type: DepositTxType, + } + version = CanyonDepositReceiptVersion + depositReceiptWithNonceAndVersion = &Receipt{ + Status: ReceiptStatusFailed, + CumulativeGasUsed: 1, + DepositNonce: &nonce, + DepositReceiptVersion: &version, + Logs: []*Log{ + { + Address: common.BytesToAddress([]byte{0x11}), + Topics: []common.Hash{common.HexToHash("dead"), common.HexToHash("beef")}, + Data: []byte{0x01, 0x00, 0xff}, + }, + { + Address: common.BytesToAddress([]byte{0x01, 0x11}), + Topics: []common.Hash{common.HexToHash("dead"), common.HexToHash("beef")}, + Data: []byte{0x01, 0x00, 0xff}, + }, + }, + Type: DepositTxType, + }   // Create a few transactions to have receipts for to2 = common.HexToAddress("0x2") @@ -149,11 +223,23 @@ GasFeeCap: uint256.NewInt(1077), BlobFeeCap: uint256.NewInt(100077), BlobHashes: []common.Hash{{}, {}, {}}, }), + NewTx(&DepositTx{ + To: nil, // contract creation + Value: big.NewInt(6), + Gas: 50, + }), + NewTx(&DepositTx{ + To: nil, // contract creation + Value: big.NewInt(6), + Gas: 60, + }), } - - blockNumber = big.NewInt(1) - blockTime = uint64(2) - blockHash = common.BytesToHash([]byte{0x03, 0x14}) + depNonce1 = uint64(7) + depNonce2 = uint64(8) + blockNumber = big.NewInt(1) + blockTime = uint64(2) + blockHash = common.BytesToHash([]byte{0x03, 0x14}) + canyonDepositReceiptVersion = CanyonDepositReceiptVersion   // Create the corresponding receipts receipts = Receipts{ @@ -293,6 +379,78 @@ BlockHash: blockHash, BlockNumber: blockNumber, TransactionIndex: 6, }, + &Receipt{ + Type: DepositTxType, + PostState: common.Hash{5}.Bytes(), + CumulativeGasUsed: 50 + 28, + Logs: []*Log{ + { + Address: common.BytesToAddress([]byte{0x33}), + Topics: []common.Hash{common.HexToHash("dead"), common.HexToHash("beef")}, + // derived fields: + 
BlockNumber: blockNumber.Uint64(), + TxHash: txs[7].Hash(), + TxIndex: 7, + BlockHash: blockHash, + Index: 4, + }, + { + Address: common.BytesToAddress([]byte{0x03, 0x33}), + Topics: []common.Hash{common.HexToHash("dead"), common.HexToHash("beef")}, + // derived fields: + BlockNumber: blockNumber.Uint64(), + TxHash: txs[7].Hash(), + TxIndex: 7, + BlockHash: blockHash, + Index: 5, + }, + }, + TxHash: txs[7].Hash(), + ContractAddress: common.HexToAddress("0x3bb898b4bbe24f68a4e9be46cfe72d1787fd74f4"), + GasUsed: 50, + EffectiveGasPrice: big.NewInt(0), + BlockHash: blockHash, + BlockNumber: blockNumber, + TransactionIndex: 7, + DepositNonce: &depNonce1, + DepositReceiptVersion: nil, + }, + &Receipt{ + Type: DepositTxType, + PostState: common.Hash{5}.Bytes(), + CumulativeGasUsed: 60 + 50 + 28, + Logs: []*Log{ + { + Address: common.BytesToAddress([]byte{0x33}), + Topics: []common.Hash{common.HexToHash("dead"), common.HexToHash("beef")}, + // derived fields: + BlockNumber: blockNumber.Uint64(), + TxHash: txs[8].Hash(), + TxIndex: 8, + BlockHash: blockHash, + Index: 6, + }, + { + Address: common.BytesToAddress([]byte{0x03, 0x33}), + Topics: []common.Hash{common.HexToHash("dead"), common.HexToHash("beef")}, + // derived fields: + BlockNumber: blockNumber.Uint64(), + TxHash: txs[8].Hash(), + TxIndex: 8, + BlockHash: blockHash, + Index: 7, + }, + }, + TxHash: txs[8].Hash(), + ContractAddress: common.HexToAddress("0x117814af22cb83d8ad6e8489e9477d28265bc105"), + GasUsed: 60, + EffectiveGasPrice: big.NewInt(0), + BlockHash: blockHash, + BlockNumber: blockNumber, + TransactionIndex: 8, + DepositNonce: &depNonce2, + DepositReceiptVersion: &canyonDepositReceiptVersion, + }, } )   @@ -308,10 +466,10 @@ // Tests that receipt data can be correctly derived from the contextual infos func TestDeriveFields(t *testing.T) { // Re-derive receipts. 
- basefee := big.NewInt(1000) + baseFee := big.NewInt(1000) blobGasPrice := big.NewInt(920) derivedReceipts := clearComputedFieldsOnReceipts(receipts) - err := Receipts(derivedReceipts).DeriveFields(params.TestChainConfig, blockHash, blockNumber.Uint64(), blockTime, basefee, blobGasPrice, txs) + err := Receipts(derivedReceipts).DeriveFields(params.TestChainConfig, blockHash, blockNumber.Uint64(), blockTime, baseFee, blobGasPrice, txs) if err != nil { t.Fatalf("DeriveFields(...) = %v, want <nil>", err) } @@ -344,6 +502,18 @@ r := Receipt{} err = r.UnmarshalJSON(b) if err != nil { t.Fatal("error unmarshaling receipt from json:", err) + } + + // Make sure marshal/unmarshal doesn't affect receipt hash root computation by comparing + // the output of EncodeIndex + rsBefore := Receipts([]*Receipt{receipts[i]}) + rsAfter := Receipts([]*Receipt{&r}) + + encBefore, encAfter := bytes.Buffer{}, bytes.Buffer{} + rsBefore.EncodeIndex(0, &encBefore) + rsAfter.EncodeIndex(0, &encAfter) + if !bytes.Equal(encBefore.Bytes(), encAfter.Bytes()) { + t.Errorf("%v: EncodeIndex differs after JSON marshal/unmarshal", i) } } } @@ -527,3 +697,365 @@ l[i] = &cpy } return l } + +func getOptimismEcotoneTxReceipts(l1AttributesPayload []byte, l1GasPrice, l1BlobGasPrice, l1GasUsed, l1Fee *big.Int, baseFeeScalar, blobBaseFeeScalar *uint64) ([]*Transaction, []*Receipt) { + // Create a few transactions to have receipts for + txs := Transactions{ + NewTx(&DepositTx{ + To: nil, // contract creation + Value: big.NewInt(6), + Gas: 50, + Data: l1AttributesPayload, + }), + emptyTx, + } + + // Create the corresponding receipts + receipts := Receipts{ + &Receipt{ + Type: DepositTxType, + PostState: common.Hash{5}.Bytes(), + CumulativeGasUsed: 50 + 15, + Logs: []*Log{ + { + Address: common.BytesToAddress([]byte{0x33}), + // derived fields: + BlockNumber: blockNumber.Uint64(), + TxHash: txs[0].Hash(), + TxIndex: 0, + BlockHash: blockHash, + Index: 0, + }, + { + Address: common.BytesToAddress([]byte{0x03, 
0x33}), + // derived fields: + BlockNumber: blockNumber.Uint64(), + TxHash: txs[0].Hash(), + TxIndex: 0, + BlockHash: blockHash, + Index: 1, + }, + }, + TxHash: txs[0].Hash(), + ContractAddress: common.HexToAddress("0x3bb898b4bbe24f68a4e9be46cfe72d1787fd74f4"), + GasUsed: 65, + EffectiveGasPrice: big.NewInt(0), + BlockHash: blockHash, + BlockNumber: blockNumber, + TransactionIndex: 0, + DepositNonce: &depNonce1, + }, + &Receipt{ + Type: LegacyTxType, + EffectiveGasPrice: big.NewInt(0), + PostState: common.Hash{4}.Bytes(), + CumulativeGasUsed: 10, + Logs: []*Log{}, + // derived fields: + TxHash: txs[1].Hash(), + GasUsed: 18446744073709551561, + BlockHash: blockHash, + BlockNumber: blockNumber, + TransactionIndex: 1, + L1GasPrice: l1GasPrice, + L1BlobBaseFee: l1BlobGasPrice, + L1GasUsed: l1GasUsed, + L1Fee: l1Fee, + L1BaseFeeScalar: baseFeeScalar, + L1BlobBaseFeeScalar: blobBaseFeeScalar, + }, + } + return txs, receipts +} + +func getOptimismTxReceipts(l1AttributesPayload []byte, l1GasPrice, l1GasUsed, l1Fee *big.Int, feeScalar *big.Float) ([]*Transaction, []*Receipt) { + // Create a few transactions to have receipts for + txs := Transactions{ + NewTx(&DepositTx{ + To: nil, // contract creation + Value: big.NewInt(6), + Gas: 50, + Data: l1AttributesPayload, + }), + emptyTx, + } + + // Create the corresponding receipts + receipts := Receipts{ + &Receipt{ + Type: DepositTxType, + PostState: common.Hash{5}.Bytes(), + CumulativeGasUsed: 50 + 15, + Logs: []*Log{ + { + Address: common.BytesToAddress([]byte{0x33}), + // derived fields: + BlockNumber: blockNumber.Uint64(), + TxHash: txs[0].Hash(), + TxIndex: 0, + BlockHash: blockHash, + Index: 0, + }, + { + Address: common.BytesToAddress([]byte{0x03, 0x33}), + // derived fields: + BlockNumber: blockNumber.Uint64(), + TxHash: txs[0].Hash(), + TxIndex: 0, + BlockHash: blockHash, + Index: 1, + }, + }, + TxHash: txs[0].Hash(), + ContractAddress: common.HexToAddress("0x3bb898b4bbe24f68a4e9be46cfe72d1787fd74f4"), + GasUsed: 65, + 
EffectiveGasPrice: big.NewInt(0), + BlockHash: blockHash, + BlockNumber: blockNumber, + TransactionIndex: 0, + DepositNonce: &depNonce1, + }, + &Receipt{ + Type: LegacyTxType, + EffectiveGasPrice: big.NewInt(0), + PostState: common.Hash{4}.Bytes(), + CumulativeGasUsed: 10, + Logs: []*Log{}, + // derived fields: + TxHash: txs[1].Hash(), + GasUsed: 18446744073709551561, + BlockHash: blockHash, + BlockNumber: blockNumber, + TransactionIndex: 1, + L1GasPrice: l1GasPrice, + L1GasUsed: l1GasUsed, + L1Fee: l1Fee, + FeeScalar: feeScalar, + }, + } + return txs, receipts +} + +func TestDeriveOptimismBedrockTxReceipts(t *testing.T) { + // Bedrock style l1 attributes with L1Scalar=7_000_000 (becomes 7 after division), L1Overhead=50, L1BaseFee=1000*1e6 + payload := common.Hex2Bytes("015d8eb900000000000000000000000000000000000000000000000000000000000004d200000000000000000000000000000000000000000000000000000000000004d2000000000000000000000000000000000000000000000000000000003b9aca0000000000000000000000000000000000000000000000000000000000000004d200000000000000000000000000000000000000000000000000000000000004d200000000000000000000000000000000000000000000000000000000000004d2000000000000000000000000000000000000000000000000000000000000003200000000000000000000000000000000000000000000000000000000006acfc0015d8eb900000000000000000000000000000000000000000000000000000000000004d200000000000000000000000000000000000000000000000000000000000004d2000000000000000000000000000000000000000000000000000000003b9aca0000000000000000000000000000000000000000000000000000000000000004d200000000000000000000000000000000000000000000000000000000000004d200000000000000000000000000000000000000000000000000000000000004d2000000000000000000000000000000000000000000000000000000000000003200000000000000000000000000000000000000000000000000000000006acfc0") + // the parameters we use below are defined in rollup_test.go + l1GasPrice := baseFee + l1GasUsed := bedrockGas + feeScalar := big.NewFloat(float64(scalar.Uint64() / 1e6)) + 
l1Fee := bedrockFee + txs, receipts := getOptimismTxReceipts(payload, l1GasPrice, l1GasUsed, l1Fee, feeScalar) + + // Re-derive receipts. + baseFee := big.NewInt(1000) + derivedReceipts := clearComputedFieldsOnReceipts(receipts) + err := Receipts(derivedReceipts).DeriveFields(bedrockGenesisTestConfig, blockHash, blockNumber.Uint64(), 0, baseFee, nil, txs) + if err != nil { + t.Fatalf("DeriveFields(...) = %v, want <nil>", err) + } + checkBedrockReceipts(t, receipts, derivedReceipts) + + // Should get same result with the Ecotone config because it will assume this is "first ecotone block" + // if it sees the bedrock style L1 attributes. + err = Receipts(derivedReceipts).DeriveFields(ecotoneTestConfig, blockHash, blockNumber.Uint64(), 0, baseFee, nil, txs) + if err != nil { + t.Fatalf("DeriveFields(...) = %v, want <nil>", err) + } + checkBedrockReceipts(t, receipts, derivedReceipts) +} + +func TestDeriveOptimismEcotoneTxReceipts(t *testing.T) { + // Ecotone style l1 attributes with baseFeeScalar=2, blobBaseFeeScalar=3, baseFee=1000*1e6, blobBaseFee=10*1e6 + payload := common.Hex2Bytes("440a5e20000000020000000300000000000004d200000000000004d200000000000004d2000000000000000000000000000000000000000000000000000000003b9aca00000000000000000000000000000000000000000000000000000000000098968000000000000000000000000000000000000000000000000000000000000004d200000000000000000000000000000000000000000000000000000000000004d2") + // the parameters we use below are defined in rollup_test.go + baseFeeScalarUint64 := baseFeeScalar.Uint64() + blobBaseFeeScalarUint64 := blobBaseFeeScalar.Uint64() + txs, receipts := getOptimismEcotoneTxReceipts(payload, baseFee, blobBaseFee, ecotoneGas, ecotoneFee, &baseFeeScalarUint64, &blobBaseFeeScalarUint64) + + // Re-derive receipts. 
+ baseFee := big.NewInt(1000) + derivedReceipts := clearComputedFieldsOnReceipts(receipts) + // Should error out if we try to process this with a pre-Ecotone config + err := Receipts(derivedReceipts).DeriveFields(bedrockGenesisTestConfig, blockHash, blockNumber.Uint64(), 0, baseFee, nil, txs) + if err == nil { + t.Fatalf("expected error from deriving ecotone receipts with pre-ecotone config, got none") + } + + err = Receipts(derivedReceipts).DeriveFields(ecotoneTestConfig, blockHash, blockNumber.Uint64(), 0, baseFee, nil, txs) + if err != nil { + t.Fatalf("DeriveFields(...) = %v, want <nil>", err) + } + diffReceipts(t, receipts, derivedReceipts) +} + +func diffReceipts(t *testing.T, receipts, derivedReceipts []*Receipt) { + // Check diff of receipts against derivedReceipts. + r1, err := json.MarshalIndent(receipts, "", " ") + if err != nil { + t.Fatal("error marshaling input receipts:", err) + } + r2, err := json.MarshalIndent(derivedReceipts, "", " ") + if err != nil { + t.Fatal("error marshaling derived receipts:", err) + } + d := diff.Diff(string(r1), string(r2)) + if d != "" { + t.Fatal("receipts differ:", d) + } +} + +func checkBedrockReceipts(t *testing.T, receipts, derivedReceipts []*Receipt) { + diffReceipts(t, receipts, derivedReceipts) + + // Check that we preserved the invariant: l1Fee = l1GasPrice * l1GasUsed * l1FeeScalar + // but with more difficult int math... 
+ l2Rcpt := derivedReceipts[1] + l1GasCost := new(big.Int).Mul(l2Rcpt.L1GasPrice, l2Rcpt.L1GasUsed) + l1Fee := new(big.Float).Mul(new(big.Float).SetInt(l1GasCost), l2Rcpt.FeeScalar) + require.Equal(t, new(big.Float).SetInt(l2Rcpt.L1Fee), l1Fee) +} + +func TestBedrockDepositReceiptUnchanged(t *testing.T) { + expectedRlp := common.FromHex("7EF90156A003000000000000000000000000000000000000000000000000000000000000000AB9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000F0D7940000000000000000000000000000000000000033C001D7940000000000000000000000000000000000000333C002") + // Deposit receipt with no nonce + receipt := &Receipt{ + Type: DepositTxType, + PostState: common.Hash{3}.Bytes(), + CumulativeGasUsed: 10, + Logs: []*Log{ + {Address: common.BytesToAddress([]byte{0x33}), Data: []byte{1}, Topics: []common.Hash{}}, + {Address: common.BytesToAddress([]byte{0x03, 0x33}), Data: []byte{2}, Topics: []common.Hash{}}, + }, + TxHash: common.Hash{}, + ContractAddress: common.BytesToAddress([]byte{0x03, 0x33, 0x33}), + GasUsed: 4, + } + + rlp, err := receipt.MarshalBinary() + require.NoError(t, err) + require.Equal(t, expectedRlp, rlp) + + // Consensus values should be unchanged after reparsing + parsed := new(Receipt) + err = parsed.UnmarshalBinary(rlp) + require.NoError(t, err) + require.Equal(t, receipt.Status, parsed.Status) + require.Equal(t, receipt.CumulativeGasUsed, parsed.CumulativeGasUsed) + require.Equal(t, receipt.Bloom, parsed.Bloom) + require.EqualValues(t, receipt.Logs, parsed.Logs) + // And still shouldn't 
have a nonce + require.Nil(t, parsed.DepositNonce) + // ..or a deposit nonce + require.Nil(t, parsed.DepositReceiptVersion) +} + +// Regolith introduced an inconsistency in behavior between EncodeIndex and MarshalBinary for a +// deposit transaction receipt. TestReceiptEncodeIndexBugIsEnshrined makes sure this difference is +// preserved for backwards compatibility purposes, but also that there is no discrepancy for the +// post-Canyon encoding. +func TestReceiptEncodeIndexBugIsEnshrined(t *testing.T) { + // Check that a post-Regolith, pre-Canyon receipt produces the expected difference between + // EncodeIndex and MarshalBinary. + buf := new(bytes.Buffer) + receipts := Receipts{depositReceiptWithNonce} + receipts.EncodeIndex(0, buf) + indexBytes := buf.Bytes() + + regularBytes, _ := receipts[0].MarshalBinary() + + require.NotEqual(t, indexBytes, regularBytes) + + // Confirm the buggy encoding is as expected, which means it should encode as if it had no + // nonce specified (like that of a non-deposit receipt, whose encoding would differ only in the + // type byte). + buf.Reset() + tempReceipt := *depositReceiptWithNonce + tempReceipt.Type = eip1559Receipt.Type + buggyBytes, _ := tempReceipt.MarshalBinary() + + require.Equal(t, indexBytes[1:], buggyBytes[1:]) + + // check that the post-Canyon encoding has no differences between EncodeIndex and + // MarshalBinary. 
+ buf.Reset() + receipts = Receipts{depositReceiptWithNonceAndVersion} + receipts.EncodeIndex(0, buf) + indexBytes = buf.Bytes() + + regularBytes, _ = receipts[0].MarshalBinary() + + require.Equal(t, indexBytes, regularBytes) + + // Check that bumping the nonce post-canyon changes the hash + bumpedReceipt := *depositReceiptWithNonceAndVersion + bumpedNonce := nonce + 1 + bumpedReceipt.DepositNonce = &bumpedNonce + bumpedBytes, _ := bumpedReceipt.MarshalBinary() + require.NotEqual(t, regularBytes, bumpedBytes) +} + +func TestRoundTripReceipt(t *testing.T) { + tests := []struct { + name string + rcpt *Receipt + }{ + {name: "Legacy", rcpt: legacyReceipt}, + {name: "AccessList", rcpt: accessListReceipt}, + {name: "EIP1559", rcpt: eip1559Receipt}, + {name: "DepositNoNonce", rcpt: depositReceiptNoNonce}, + {name: "DepositWithNonce", rcpt: depositReceiptWithNonce}, + {name: "DepositWithNonceAndVersion", rcpt: depositReceiptWithNonceAndVersion}, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + data, err := test.rcpt.MarshalBinary() + require.NoError(t, err) + + d := &Receipt{} + err = d.UnmarshalBinary(data) + require.NoError(t, err) + require.Equal(t, test.rcpt, d) + require.Equal(t, test.rcpt.DepositNonce, d.DepositNonce) + require.Equal(t, test.rcpt.DepositReceiptVersion, d.DepositReceiptVersion) + }) + + t.Run(fmt.Sprintf("%sRejectExtraData", test.name), func(t *testing.T) { + data, err := test.rcpt.MarshalBinary() + require.NoError(t, err) + data = append(data, 1, 2, 3, 4) + d := &Receipt{} + err = d.UnmarshalBinary(data) + require.Error(t, err) + }) + } +} + +func TestRoundTripReceiptForStorage(t *testing.T) { + tests := []struct { + name string + rcpt *Receipt + }{ + {name: "Legacy", rcpt: legacyReceipt}, + {name: "AccessList", rcpt: accessListReceipt}, + {name: "EIP1559", rcpt: eip1559Receipt}, + {name: "DepositNoNonce", rcpt: depositReceiptNoNonce}, + {name: "DepositWithNonce", rcpt: depositReceiptWithNonce}, + {name: 
"DepositWithNonceAndVersion", rcpt: depositReceiptWithNonceAndVersion}, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + data, err := rlp.EncodeToBytes((*ReceiptForStorage)(test.rcpt)) + require.NoError(t, err) + + d := &ReceiptForStorage{} + err = rlp.DecodeBytes(data, d) + require.NoError(t, err) + // Only check the stored fields - the others are derived later + require.Equal(t, test.rcpt.Status, d.Status) + require.Equal(t, test.rcpt.CumulativeGasUsed, d.CumulativeGasUsed) + require.Equal(t, test.rcpt.Logs, d.Logs) + require.Equal(t, test.rcpt.DepositNonce, d.DepositNonce) + require.Equal(t, test.rcpt.DepositReceiptVersion, d.DepositReceiptVersion) + }) + } +}
diff --git go-ethereum/core/types/rollup_cost_test.go op-geth/core/types/rollup_cost_test.go new file mode 100644 index 0000000000000000000000000000000000000000..8d0f1e92185986c420aaeebce0bb46662f6e3ab0 --- /dev/null +++ op-geth/core/types/rollup_cost_test.go @@ -0,0 +1,407 @@ +package types + +import ( + "bytes" + "encoding/binary" + "encoding/hex" + "math/big" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/params" +) + +var ( + baseFee = big.NewInt(1000 * 1e6) + overhead = big.NewInt(50) + scalar = big.NewInt(7 * 1e6) + + blobBaseFee = big.NewInt(10 * 1e6) + baseFeeScalar = big.NewInt(2) + blobBaseFeeScalar = big.NewInt(3) + + // below are the expected cost func outcomes for the above parameter settings on the emptyTx + // which is defined in transaction_test.go + bedrockFee = big.NewInt(11326000000000) + regolithFee = big.NewInt(3710000000000) + ecotoneFee = big.NewInt(960900) // (480/16)*(2*16*1000 + 3*10) == 960900 + // the emptyTx is out of bounds for the linear regression so it uses the minimum size + fjordFee = big.NewInt(3203000) // 100_000_000 * (2 * 1000 * 1e6 * 16 + 3 * 10 * 1e6) / 1e12 + + bedrockGas = big.NewInt(1618) + regolithGas = big.NewInt(530) // 530 = 1618 - (16*68) + ecotoneGas = big.NewInt(480) + minimumFjordGas = big.NewInt(1600) // fastlz size of minimum txn, 100_000_000 * 16 / 1e6 +) + +func TestBedrockL1CostFunc(t *testing.T) { + costFunc0 := newL1CostFuncBedrockHelper(baseFee, overhead, scalar, false /*isRegolith*/) + costFunc1 := newL1CostFuncBedrockHelper(baseFee, overhead, scalar, true) + + c0, g0 := costFunc0(emptyTx.RollupCostData()) // pre-Regolith + c1, g1 := costFunc1(emptyTx.RollupCostData()) + + require.Equal(t, bedrockFee, c0) + require.Equal(t, bedrockGas, g0) // gas-used + + require.Equal(t, regolithFee, c1) + require.Equal(t, regolithGas, g1) +} + +func TestEcotoneL1CostFunc(t *testing.T) { + costFunc := newL1CostFuncEcotone(baseFee, 
blobBaseFee, baseFeeScalar, blobBaseFeeScalar) + + c0, g0 := costFunc(emptyTx.RollupCostData()) + + require.Equal(t, ecotoneGas, g0) + require.Equal(t, ecotoneFee, c0) +} + +func TestFjordL1CostFuncMinimumBounds(t *testing.T) { + costFunc := NewL1CostFuncFjord( + baseFee, + blobBaseFee, + baseFeeScalar, + blobBaseFeeScalar, + ) + + // Minimum size transactions: + // -42.5856 + 0.8365*110 = 49.4294 + // -42.5856 + 0.8365*150 = 82.8894 + // -42.5856 + 0.8365*170 = 99.6194 + for _, fastLzsize := range []uint64{100, 150, 170} { + c, g := costFunc(RollupCostData{ + FastLzSize: fastLzsize, + }) + + require.Equal(t, minimumFjordGas, g) + require.Equal(t, fjordFee, c) + } + + // Larger size transactions: + // -42.5856 + 0.8365*171 = 100.4559 + // -42.5856 + 0.8365*175 = 108.8019 + // -42.5856 + 0.8365*200 = 124.7144 + for _, fastLzsize := range []uint64{171, 175, 200} { + c, g := costFunc(RollupCostData{ + FastLzSize: fastLzsize, + }) + + require.Greater(t, g.Uint64(), minimumFjordGas.Uint64()) + require.Greater(t, c.Uint64(), fjordFee.Uint64()) + } +} + +// TestFjordL1CostSolidityParity tests that the cost function for the fjord upgrade matches a Solidity +// test to ensure the outputs are the same. 
+func TestFjordL1CostSolidityParity(t *testing.T) { + costFunc := NewL1CostFuncFjord( + big.NewInt(2*1e6), + big.NewInt(3*1e6), + big.NewInt(20), + big.NewInt(15), + ) + + c0, g0 := costFunc(RollupCostData{ + FastLzSize: 235, + }) + + require.Equal(t, big.NewInt(2463), g0) + require.Equal(t, big.NewInt(105484), c0) +} + +func TestExtractBedrockGasParams(t *testing.T) { + regolithTime := uint64(1) + config := &params.ChainConfig{ + Optimism: params.OptimismTestConfig.Optimism, + RegolithTime: &regolithTime, + } + + data := getBedrockL1Attributes(baseFee, overhead, scalar) + + gasparams, err := extractL1GasParams(config, regolithTime-1, data) + costFuncPreRegolith := gasparams.costFunc + require.NoError(t, err) + + // Function should continue to succeed even with extra data (that just gets ignored) since we + // have been testing the data size is at least the expected number of bytes instead of exactly + // the expected number of bytes. It's unclear if this flexibility was intentional, but since + // it's been in production we shouldn't change this behavior. + data = append(data, []byte{0xBE, 0xEE, 0xEE, 0xFF}...) // tack on garbage data + gasparams, err = extractL1GasParams(config, regolithTime, data) + costFuncRegolith := gasparams.costFunc + require.NoError(t, err) + + c, _ := costFuncPreRegolith(emptyTx.RollupCostData()) + require.Equal(t, bedrockFee, c) + + c, _ = costFuncRegolith(emptyTx.RollupCostData()) + require.Equal(t, regolithFee, c) + + // try to extract from data which has not enough params, should get error. 
+ data = data[:len(data)-4-32] + _, err = extractL1GasParams(config, regolithTime, data) + require.Error(t, err) +} + +func TestExtractEcotoneGasParams(t *testing.T) { + zeroTime := uint64(0) + // create a config where ecotone upgrade is active + config := &params.ChainConfig{ + Optimism: params.OptimismTestConfig.Optimism, + RegolithTime: &zeroTime, + EcotoneTime: &zeroTime, + } + require.True(t, config.IsOptimismEcotone(zeroTime)) + + data := getEcotoneL1Attributes( + baseFee, + blobBaseFee, + baseFeeScalar, + blobBaseFeeScalar, + ) + + gasparams, err := extractL1GasParams(config, zeroTime, data) + require.NoError(t, err) + costFunc := gasparams.costFunc + + c, g := costFunc(emptyTx.RollupCostData()) + + require.Equal(t, ecotoneGas, g) + require.Equal(t, ecotoneFee, c) + + // make sure wrong amont of data results in error + data = append(data, 0x00) // tack on garbage byte + _, err = extractL1GasParamsPostEcotone(data) + require.Error(t, err) +} + +func TestExtractFjordGasParams(t *testing.T) { + zeroTime := uint64(0) + // create a config where fjord is active + config := &params.ChainConfig{ + Optimism: params.OptimismTestConfig.Optimism, + RegolithTime: &zeroTime, + EcotoneTime: &zeroTime, + FjordTime: &zeroTime, + } + require.True(t, config.IsOptimismFjord(zeroTime)) + + data := getEcotoneL1Attributes( + baseFee, + blobBaseFee, + baseFeeScalar, + blobBaseFeeScalar, + ) + + gasparams, err := extractL1GasParams(config, zeroTime, data) + require.NoError(t, err) + costFunc := gasparams.costFunc + + c, g := costFunc(emptyTx.RollupCostData()) + + require.Equal(t, minimumFjordGas, g) + require.Equal(t, fjordFee, c) +} + +// make sure the first block of the ecotone upgrade is properly detected, and invokes the bedrock +// cost function appropriately +func TestFirstBlockEcotoneGasParams(t *testing.T) { + zeroTime := uint64(0) + // create a config where ecotone upgrade is active + config := &params.ChainConfig{ + Optimism: params.OptimismTestConfig.Optimism, + 
RegolithTime: &zeroTime, + EcotoneTime: &zeroTime, + } + require.True(t, config.IsOptimismEcotone(0)) + + data := getBedrockL1Attributes(baseFee, overhead, scalar) + + gasparams, err := extractL1GasParams(config, zeroTime, data) + require.NoError(t, err) + oldCostFunc := gasparams.costFunc + c, g := oldCostFunc(emptyTx.RollupCostData()) + require.Equal(t, regolithGas, g) + require.Equal(t, regolithFee, c) +} + +func getBedrockL1Attributes(baseFee, overhead, scalar *big.Int) []byte { + uint256 := make([]byte, 32) + ignored := big.NewInt(1234) + data := []byte{} + data = append(data, BedrockL1AttributesSelector...) + data = append(data, ignored.FillBytes(uint256)...) // arg 0 + data = append(data, ignored.FillBytes(uint256)...) // arg 1 + data = append(data, baseFee.FillBytes(uint256)...) // arg 2 + data = append(data, ignored.FillBytes(uint256)...) // arg 3 + data = append(data, ignored.FillBytes(uint256)...) // arg 4 + data = append(data, ignored.FillBytes(uint256)...) // arg 5 + data = append(data, overhead.FillBytes(uint256)...) // arg 6 + data = append(data, scalar.FillBytes(uint256)...) // arg 7 + return data +} + +func getEcotoneL1Attributes(baseFee, blobBaseFee, baseFeeScalar, blobBaseFeeScalar *big.Int) []byte { + ignored := big.NewInt(1234) + data := []byte{} + uint256Slice := make([]byte, 32) + uint64Slice := make([]byte, 8) + uint32Slice := make([]byte, 4) + data = append(data, EcotoneL1AttributesSelector...) + data = append(data, baseFeeScalar.FillBytes(uint32Slice)...) + data = append(data, blobBaseFeeScalar.FillBytes(uint32Slice)...) + data = append(data, ignored.FillBytes(uint64Slice)...) + data = append(data, ignored.FillBytes(uint64Slice)...) + data = append(data, ignored.FillBytes(uint64Slice)...) + data = append(data, baseFee.FillBytes(uint256Slice)...) + data = append(data, blobBaseFee.FillBytes(uint256Slice)...) + data = append(data, ignored.FillBytes(uint256Slice)...) + data = append(data, ignored.FillBytes(uint256Slice)...) 
+ return data +} + +type testStateGetter struct { + baseFee, blobBaseFee, overhead, scalar *big.Int + baseFeeScalar, blobBaseFeeScalar uint32 +} + +func (sg *testStateGetter) GetState(addr common.Address, slot common.Hash) common.Hash { + buf := common.Hash{} + switch slot { + case L1BaseFeeSlot: + sg.baseFee.FillBytes(buf[:]) + case OverheadSlot: + sg.overhead.FillBytes(buf[:]) + case ScalarSlot: + sg.scalar.FillBytes(buf[:]) + case L1BlobBaseFeeSlot: + sg.blobBaseFee.FillBytes(buf[:]) + case L1FeeScalarsSlot: + // fetch Ecotone fee sclars + offset := scalarSectionStart + binary.BigEndian.PutUint32(buf[offset:offset+4], sg.baseFeeScalar) + binary.BigEndian.PutUint32(buf[offset+4:offset+8], sg.blobBaseFeeScalar) + default: + panic("unknown slot") + } + return buf +} + +// TestNewL1CostFunc tests that the appropriate cost function is selected based on the +// configuration and statedb values. +func TestNewL1CostFunc(t *testing.T) { + time := uint64(10) + timeInFuture := uint64(20) + config := &params.ChainConfig{ + Optimism: params.OptimismTestConfig.Optimism, + } + statedb := &testStateGetter{ + baseFee: baseFee, + overhead: overhead, + scalar: scalar, + blobBaseFee: blobBaseFee, + baseFeeScalar: uint32(baseFeeScalar.Uint64()), + blobBaseFeeScalar: uint32(blobBaseFeeScalar.Uint64()), + } + + costFunc := NewL1CostFunc(config, statedb) + require.NotNil(t, costFunc) + + // empty cost data should result in nil fee + fee := costFunc(RollupCostData{}, time) + require.Nil(t, fee) + + // emptyTx fee w/ bedrock config should be the bedrock fee + fee = costFunc(emptyTx.RollupCostData(), time) + require.NotNil(t, fee) + require.Equal(t, bedrockFee, fee) + + // emptyTx fee w/ regolith config should be the regolith fee + config.RegolithTime = &time + costFunc = NewL1CostFunc(config, statedb) + require.NotNil(t, costFunc) + fee = costFunc(emptyTx.RollupCostData(), time) + require.NotNil(t, fee) + require.Equal(t, regolithFee, fee) + + // emptyTx fee w/ ecotone config should be 
the ecotone fee + config.EcotoneTime = &time + costFunc = NewL1CostFunc(config, statedb) + fee = costFunc(emptyTx.RollupCostData(), time) + require.NotNil(t, fee) + require.Equal(t, ecotoneFee, fee) + + // emptyTx fee w/ fjord config should be the fjord fee + config.FjordTime = &time + costFunc = NewL1CostFunc(config, statedb) + fee = costFunc(emptyTx.RollupCostData(), time) + require.NotNil(t, fee) + require.Equal(t, fjordFee, fee) + + // emptyTx fee w/ ecotone config, but simulate first ecotone block by blowing away the ecotone + // params. Should result in regolith fee. + config.FjordTime = &timeInFuture + statedb.baseFeeScalar = 0 + statedb.blobBaseFeeScalar = 0 + statedb.blobBaseFee = new(big.Int) + costFunc = NewL1CostFunc(config, statedb) + fee = costFunc(emptyTx.RollupCostData(), time) + require.NotNil(t, fee) + require.Equal(t, regolithFee, fee) + + // emptyTx fee w/ fjord config, but simulate first ecotone block by blowing away the ecotone + // params. Should result in regolith fee. 
+ config.EcotoneTime = &time + config.FjordTime = &time + statedb.baseFeeScalar = 0 + statedb.blobBaseFeeScalar = 0 + statedb.blobBaseFee = new(big.Int) + costFunc = NewL1CostFunc(config, statedb) + fee = costFunc(emptyTx.RollupCostData(), time) + require.NotNil(t, fee) + require.Equal(t, regolithFee, fee) +} + +func TestFlzCompressLen(t *testing.T) { + var ( + emptyTxBytes, _ = emptyTx.MarshalBinary() + contractCallTxStr = "02f901550a758302df1483be21b88304743f94f8" + + "0e51afb613d764fa61751affd3313c190a86bb870151bd62fd12adb8" + + "e41ef24f3f0000000000000000000000000000000000000000000000" + + "00000000000000006e000000000000000000000000af88d065e77c8c" + + "c2239327c5edb3a432268e5831000000000000000000000000000000" + + "000000000000000000000000000003c1e50000000000000000000000" + + "00000000000000000000000000000000000000000000000000000000" + + "000000000000000000000000000000000000000000000000a0000000" + + "00000000000000000000000000000000000000000000000000000000" + + "148c89ed219d02f1a5be012c689b4f5b731827bebe00000000000000" + + "0000000000c001a033fd89cb37c31b2cba46b6466e040c61fc9b2a36" + + "75a7f5f493ebd5ad77c497f8a07cdf65680e238392693019b4092f61" + + "0222e71b7cec06449cb922b93b6a12744e" + contractCallTx, _ = hex.DecodeString(contractCallTxStr) + ) + + testCases := []struct { + input []byte + expectedLen uint32 + }{ + // empty input + {[]byte{}, 0}, + // all 1 inputs + {bytes.Repeat([]byte{1}, 1000), 21}, + // all 0 inputs + {make([]byte, 1000), 21}, + // empty tx input + {emptyTxBytes, 31}, + // contract call tx: https://optimistic.etherscan.io/tx/0x8eb9dd4eb6d33f4dc25fb015919e4b1e9f7542f9b0322bf6622e268cd116b594 + {contractCallTx, 202}, + } + + for _, tc := range testCases { + output := FlzCompressLen(tc.input) + require.Equal(t, tc.expectedLen, output) + } +}
diff --git go-ethereum/core/types/transaction_marshalling_test.go op-geth/core/types/transaction_marshalling_test.go new file mode 100644 index 0000000000000000000000000000000000000000..4ab93a539d19bbcffa0293738b2e4dcc3ebe7ad2 --- /dev/null +++ op-geth/core/types/transaction_marshalling_test.go @@ -0,0 +1,103 @@ +package types + +import ( + "encoding/json" + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" +) + +func TestTransactionUnmarshalJsonDeposit(t *testing.T) { + tx := NewTx(&DepositTx{ + SourceHash: common.HexToHash("0x1234"), + IsSystemTransaction: true, + Mint: big.NewInt(34), + }) + json, err := tx.MarshalJSON() + require.NoError(t, err, "Failed to marshal tx JSON") + + got := &Transaction{} + err = got.UnmarshalJSON(json) + require.NoError(t, err, "Failed to unmarshal tx JSON") + require.Equal(t, tx.Hash(), got.Hash()) +} + +func TestTransactionUnmarshalJSON(t *testing.T) { + tests := []struct { + name string + json string + expectedError string + }{ + { + name: "No gas", + json: `{"type":"0x7e","nonce":null,"gasPrice":null,"maxPriorityFeePerGas":null,"maxFeePerGas":null,"value":"0x1","input":"0x616263646566","v":null,"r":null,"s":null,"to":null,"sourceHash":"0x0000000000000000000000000000000000000000000000000000000000000000","from":"0x0000000000000000000000000000000000000001","isSystemTx":false,"hash":"0xa4341f3db4363b7ca269a8538bd027b2f8784f84454ca917668642d5f6dffdf9"}`, + expectedError: "missing required field 'gas'", + }, + { + name: "No value", + json: `{"type":"0x7e","nonce":null,"gas": "0x1234", "gasPrice":null,"maxPriorityFeePerGas":null,"maxFeePerGas":null,"input":"0x616263646566","v":null,"r":null,"s":null,"to":null,"sourceHash":"0x0000000000000000000000000000000000000000000000000000000000000000","from":"0x0000000000000000000000000000000000000001","isSystemTx":false,"hash":"0xa4341f3db4363b7ca269a8538bd027b2f8784f84454ca917668642d5f6dffdf9"}`, + expectedError: "missing required field 
'value'", + }, + { + name: "No input", + json: `{"type":"0x7e","nonce":null,"gas": "0x1234", "gasPrice":null,"maxPriorityFeePerGas":null,"maxFeePerGas":null,"value":"0x1","v":null,"r":null,"s":null,"to":null,"sourceHash":"0x0000000000000000000000000000000000000000000000000000000000000000","from":"0x0000000000000000000000000000000000000001","isSystemTx":false,"hash":"0xa4341f3db4363b7ca269a8538bd027b2f8784f84454ca917668642d5f6dffdf9"}`, + expectedError: "missing required field 'input'", + }, + { + name: "No from", + json: `{"type":"0x7e","nonce":null,"gas": "0x1234", "gasPrice":null,"maxPriorityFeePerGas":null,"maxFeePerGas":null,"value":"0x1","input":"0x616263646566","v":null,"r":null,"s":null,"to":null,"sourceHash":"0x0000000000000000000000000000000000000000000000000000000000000000","isSystemTx":false,"hash":"0xa4341f3db4363b7ca269a8538bd027b2f8784f84454ca917668642d5f6dffdf9"}`, + expectedError: "missing required field 'from'", + }, + { + name: "No sourceHash", + json: `{"type":"0x7e","nonce":null,"gas": "0x1234", "gasPrice":null,"maxPriorityFeePerGas":null,"maxFeePerGas":null,"value":"0x1","input":"0x616263646566","v":null,"r":null,"s":null,"to":null,"from":"0x0000000000000000000000000000000000000001","isSystemTx":false,"hash":"0xa4341f3db4363b7ca269a8538bd027b2f8784f84454ca917668642d5f6dffdf9"}`, + expectedError: "missing required field 'sourceHash'", + }, + { + name: "No mint", + json: `{"type":"0x7e","nonce":null,"gas": "0x1234", "gasPrice":null,"maxPriorityFeePerGas":null,"maxFeePerGas":null,"value":"0x1","input":"0x616263646566","v":null,"r":null,"s":null,"to":null,"sourceHash":"0x0000000000000000000000000000000000000000000000000000000000000000","from":"0x0000000000000000000000000000000000000001","isSystemTx":false,"hash":"0xa4341f3db4363b7ca269a8538bd027b2f8784f84454ca917668642d5f6dffdf9"}`, + // Allowed + }, + { + name: "No IsSystemTx", + json: `{"type":"0x7e","nonce":null,"gas": "0x1234", 
"gasPrice":null,"maxPriorityFeePerGas":null,"maxFeePerGas":null,"value":"0x1","input":"0x616263646566","v":null,"r":null,"s":null,"to":null,"sourceHash":"0x0000000000000000000000000000000000000000000000000000000000000000","from":"0x0000000000000000000000000000000000000001","hash":"0xa4341f3db4363b7ca269a8538bd027b2f8784f84454ca917668642d5f6dffdf9"}`, + // Allowed + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + var parsedTx = &Transaction{} + err := json.Unmarshal([]byte(test.json), &parsedTx) + if test.expectedError == "" { + require.NoError(t, err) + } else { + require.ErrorContains(t, err, test.expectedError) + } + }) + } + + tests = []struct { + name string + json string + expectedError string + }{ + { + name: "Valid deposit sender", + json: `{"type":"0x7e","nonce":"0x1","gas": "0x1234", "gasPrice":null,"maxPriorityFeePerGas":null,"maxFeePerGas":null,"value":"0x1","input":"0x616263646566","v":null,"r":null,"s":null,"to":null,"sourceHash":"0x0000000000000000000000000000000000000000000000000000000000000000","from":"0x0000000000000000000000000000000000000001","hash":"0xa4341f3db4363b7ca269a8538bd027b2f8784f84454ca917668642d5f6dffdf9"}`, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + var parsedTx = &Transaction{} + err := json.Unmarshal([]byte(test.json), &parsedTx) + require.NoError(t, err) + + signer := NewLondonSigner(big.NewInt(123)) + sender, err := signer.Sender(parsedTx) + require.NoError(t, err) + require.Equal(t, common.HexToAddress("0x1"), sender) + }) + } +}
diff --git go-ethereum/core/vm/contracts_test.go op-geth/core/vm/contracts_test.go index fc30541d459604f6a47c9a0819d4704e8ad8ec92..38781545e1fdbc5df0998024a3c7573bb40c2c5a 100644 --- go-ethereum/core/vm/contracts_test.go +++ op-geth/core/vm/contracts_test.go @@ -58,6 +58,8 @@ common.BytesToAddress([]byte{8}): &bn256PairingIstanbul{}, common.BytesToAddress([]byte{9}): &blake2F{}, common.BytesToAddress([]byte{0x0a}): &kzgPointEvaluation{},   + common.BytesToAddress([]byte{0x01, 0x00}): &p256Verify{}, + common.BytesToAddress([]byte{0x0f, 0x0a}): &bls12381G1Add{}, common.BytesToAddress([]byte{0x0f, 0x0b}): &bls12381G1Mul{}, common.BytesToAddress([]byte{0x0f, 0x0c}): &bls12381G1MultiExp{}, @@ -395,3 +397,15 @@ NoBenchmark: false, } benchmarkPrecompiled("0f", testcase, b) } + +// Benchmarks the sample inputs from the P256VERIFY precompile. +func BenchmarkPrecompiledP256Verify(bench *testing.B) { + t := precompiledTest{ + Input: "4cee90eb86eaa050036147a12d49004b6b9c72bd725d39d4785011fe190f0b4da73bd4903f0ce3b639bbbf6e8e80d16931ff4bcf5993d58468e8fb19086e8cac36dbcd03009df8c59286b162af3bd7fcc0450c9aa81be5d10d312af6c66b1d604aebd3099c618202fcfe16ae7770b0c49ab5eadf74b754204a3bb6060e44eff37618b065f9832de4ca6ca971a7a1adc826d0f7c00181a5fb2ddf79ae00b4e10e", + Expected: "0000000000000000000000000000000000000000000000000000000000000001", + Name: "p256Verify", + } + benchmarkPrecompiled("100", t, bench) +} + +func TestPrecompiledP256Verify(t *testing.T) { testJson("p256Verify", "100", t) }
diff --git go-ethereum/core/vm/gas_table_test.go op-geth/core/vm/gas_table_test.go index 4a2545b6edfaf06795f7015e3b337eef73b4de83..1ee9d691dfd15382bfd7e6f887be449408156896 100644 --- go-ethereum/core/vm/gas_table_test.go +++ op-geth/core/vm/gas_table_test.go @@ -117,7 +117,7 @@ gasUsed uint64 minimumGas uint64 }{ // legacy create(0, 0, 0xc000) without 3860 used - {"0x61C00060006000f0" + "600052" + "60206000F3", false, 41237, 41237}, + {"0x61C00060006000f0" + "600052" + "60206000F3", false, 41237, 41237}, //nolint:all // legacy create(0, 0, 0xc000) _with_ 3860 {"0x61C00060006000f0" + "600052" + "60206000F3", true, 44309, 44309}, // create2(0, 0, 0xc001, 0) without 3860
diff --git go-ethereum/crypto/signify/signify_test.go op-geth/crypto/signify/signify_test.go index 9bac2c825f2c844bff70f7566bbe3d953a16f583..56195649dfb3b2ccfcc3e5239bae8bd33d28796a 100644 --- go-ethereum/crypto/signify/signify_test.go +++ op-geth/crypto/signify/signify_test.go @@ -48,7 +48,7 @@ if err = tmpFile.Close(); err != nil { t.Fatal(err) }   - err = SignFile(tmpFile.Name(), tmpFile.Name()+".sig", testSecKey, "clé", "croissants") + err = SignFile(tmpFile.Name(), tmpFile.Name()+".sig", testSecKey, "clé", "croissants") //nolint:all if err != nil { t.Fatal(err) }
diff --git go-ethereum/eth/catalyst/api_test.go op-geth/eth/catalyst/api_test.go index cc1258ca55bfcc38afcbeeef10add0727603734e..3e0f3df405745bb047e54d9a6211f500ff31a79c 100644 --- go-ethereum/eth/catalyst/api_test.go +++ op-geth/eth/catalyst/api_test.go @@ -28,6 +28,8 @@ "sync" "testing" "time"   + "github.com/stretchr/testify/require" + "github.com/ethereum/go-ethereum/beacon/engine" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" @@ -198,12 +200,10 @@ HeadBlockHash: blocks[8].Hash(), SafeBlockHash: common.Hash{}, FinalizedBlockHash: common.Hash{}, } - _, err := api.ForkchoiceUpdatedV1(fcState, &blockParams) + resp, err := api.ForkchoiceUpdatedV1(fcState, &blockParams) if err != nil { t.Fatalf("error preparing payload, err=%v", err) } - // give the payload some time to be built - time.Sleep(100 * time.Millisecond) payloadID := (&miner.BuildPayloadArgs{ Parent: fcState.HeadBlockHash, Timestamp: blockParams.Timestamp, @@ -212,6 +212,8 @@ Random: blockParams.Random, BeaconRoot: blockParams.BeaconRoot, Version: engine.PayloadV1, }).Id() + require.Equal(t, payloadID, *resp.PayloadID) + require.NoError(t, waitForApiPayloadToBuild(api, *resp.PayloadID)) execData, err := api.GetPayloadV1(payloadID) if err != nil { t.Fatalf("error getting payload, err=%v", err) @@ -435,6 +437,11 @@ }   // startEthService creates a full node instance for testing. 
func startEthService(t *testing.T, genesis *core.Genesis, blocks []*types.Block) (*node.Node, *eth.Ethereum) { + ethcfg := &ethconfig.Config{Genesis: genesis, SyncMode: downloader.FullSync, TrieTimeout: time.Minute, TrieDirtyCache: 256, TrieCleanCache: 256} + return startEthServiceWithConfigFn(t, blocks, ethcfg) +} + +func startEthServiceWithConfigFn(t *testing.T, blocks []*types.Block, ethcfg *ethconfig.Config) (*node.Node, *eth.Ethereum) { t.Helper()   n, err := node.New(&node.Config{ @@ -447,7 +454,7 @@ if err != nil { t.Fatal("can't create node:", err) }   - ethcfg := &ethconfig.Config{Genesis: genesis, SyncMode: downloader.FullSync, TrieTimeout: time.Minute, TrieDirtyCache: 256, TrieCleanCache: 256} + // default eth config is moved to startEthService ethservice, err := eth.New(n, ethcfg) if err != nil { t.Fatal("can't create eth service:", err) @@ -628,8 +635,7 @@ } if resp.PayloadStatus.Status != engine.VALID { t.Fatalf("error preparing payload, invalid status: %v", resp.PayloadStatus.Status) } - // give the payload some time to be built - time.Sleep(50 * time.Millisecond) + require.NoError(t, waitForApiPayloadToBuild(api, *resp.PayloadID)) if payload, err = api.GetPayloadV1(*resp.PayloadID); err != nil { t.Fatalf("can't get payload: %v", err) } @@ -677,6 +683,7 @@ payload, err := api.eth.Miner().BuildPayload(args) if err != nil { return nil, err } + waitForPayloadToBuild(payload) return payload.ResolveFull().ExecutionPayload, nil }   @@ -915,6 +922,7 @@ payload, err := api.eth.Miner().BuildPayload(args) if err != nil { t.Fatalf("error preparing payload, err=%v", err) } + waitForPayloadToBuild(payload) data := *payload.Resolve().ExecutionPayload // We need to recompute the blockhash, since the miner computes a wrong (correct) blockhash txs, _ := decodeTransactions(data.Transactions) @@ -1076,6 +1084,8 @@ Withdrawals: blockParams.Withdrawals, BeaconRoot: blockParams.BeaconRoot, Version: engine.PayloadV2, }).Id() + require.Equal(t, payloadID, *resp.PayloadID) + 
require.NoError(t, waitForApiPayloadToBuild(api, payloadID)) execData, err := api.GetPayloadV2(payloadID) if err != nil { t.Fatalf("error getting payload, err=%v", err) @@ -1110,7 +1120,7 @@ }, }, } fcState.HeadBlockHash = execData.ExecutionPayload.BlockHash - _, err = api.ForkchoiceUpdatedV2(fcState, &blockParams) + resp, err = api.ForkchoiceUpdatedV2(fcState, &blockParams) if err != nil { t.Fatalf("error preparing payload, err=%v", err) } @@ -1125,6 +1135,8 @@ Withdrawals: blockParams.Withdrawals, BeaconRoot: blockParams.BeaconRoot, Version: engine.PayloadV2, }).Id() + require.Equal(t, payloadID, *resp.PayloadID) + require.NoError(t, waitForApiPayloadToBuild(api, payloadID)) execData, err = api.GetPayloadV2(payloadID) if err != nil { t.Fatalf("error getting payload, err=%v", err) @@ -1265,6 +1277,8 @@ Parent: fcState.HeadBlockHash, Timestamp: test.blockParams.Timestamp, FeeRecipient: test.blockParams.SuggestedFeeRecipient, Random: test.blockParams.Random, + BeaconRoot: test.blockParams.BeaconRoot, + Withdrawals: test.blockParams.Withdrawals, Version: payloadVersion, }).Id() execData, err := api.GetPayloadV2(payloadID) @@ -1622,6 +1636,8 @@ Withdrawals: blockParams.Withdrawals, BeaconRoot: blockParams.BeaconRoot, Version: engine.PayloadV3, }).Id() + require.Equal(t, payloadID, *resp.PayloadID) + require.NoError(t, waitForApiPayloadToBuild(api, *resp.PayloadID)) execData, err := api.GetPayloadV3(payloadID) if err != nil { t.Fatalf("error getting payload, err=%v", err) @@ -1659,6 +1675,14 @@ } if root := db.GetState(params.BeaconRootsStorageAddress, rootIdx); root != *blockParams.BeaconRoot { t.Fatalf("incorrect root stored: want %s, got %s", *blockParams.BeaconRoot, root) } +} + +func waitForPayloadToBuild(payload *miner.Payload) { + payload.WaitFull() +} + +func waitForApiPayloadToBuild(api *ConsensusAPI, id engine.PayloadID) error { + return api.localBlocks.waitFull(id) }   // TestGetClientVersion verifies the expected version info is returned.
diff --git go-ethereum/eth/catalyst/superchain_test.go op-geth/eth/catalyst/superchain_test.go new file mode 100644 index 0000000000000000000000000000000000000000..4fd46a4c1257c38998878071ef618a71849e6d6e --- /dev/null +++ op-geth/eth/catalyst/superchain_test.go @@ -0,0 +1,120 @@ +package catalyst + +import ( + "testing" + "time" + + "github.com/ethereum/go-ethereum/eth/downloader" + "github.com/ethereum/go-ethereum/eth/ethconfig" + "github.com/ethereum/go-ethereum/node" + "github.com/ethereum/go-ethereum/params" +) + +func TestSignalSuperchainV1(t *testing.T) { + genesis, preMergeBlocks := generateMergeChain(2, false) + n, ethservice := startEthService(t, genesis, preMergeBlocks) + defer n.Close() + api := NewConsensusAPI(ethservice) + t.Run("matching", func(t *testing.T) { + out, err := api.SignalSuperchainV1(&SuperchainSignal{ + Recommended: params.OPStackSupport, + Required: params.OPStackSupport, + }) + if err != nil { + t.Fatalf("failed to process signal: %v", err) + } + if out != params.OPStackSupport { + t.Fatalf("expected %s but got %s", params.OPStackSupport, out) + } + }) + t.Run("null_arg", func(t *testing.T) { + out, err := api.SignalSuperchainV1(nil) + if err != nil { + t.Fatalf("failed to process signal: %v", err) + } + if out != params.OPStackSupport { + t.Fatalf("expected %s but got %s", params.OPStackSupport, out) + } + }) +} + +func TestSignalSuperchainV1Halt(t *testing.T) { + // Note: depending on the params.OPStackSupport some types of bumps are not possible with active prerelease + testCases := []struct { + cfg string + bump string + halt bool + }{ + {"none", "major", false}, + {"major", "major", true}, + {"minor", "major", true}, + {"patch", "major", true}, + {"major", "minor", false}, + {"minor", "minor", true}, + {"patch", "minor", true}, + {"major", "patch", false}, + {"minor", "patch", false}, + {"patch", "patch", true}, + } + for _, tc := range testCases { + tc := tc + t.Run(tc.cfg+"_"+tc.bump, func(t *testing.T) { + genesis, 
preMergeBlocks := generateMergeChain(2, false) + ethcfg := &ethconfig.Config{Genesis: genesis, SyncMode: downloader.FullSync, TrieTimeout: time.Minute, TrieDirtyCache: 256, TrieCleanCache: 256} + ethcfg.RollupHaltOnIncompatibleProtocolVersion = tc.cfg // opt-in to halting (or not) + n, ethservice := startEthServiceWithConfigFn(t, preMergeBlocks, ethcfg) + defer n.Close() // close at the end, regardless of any prior (failed) closing + api := NewConsensusAPI(ethservice) + _, build, major, minor, patch, preRelease := params.OPStackSupport.Parse() + if preRelease != 0 { // transform back from prerelease, so we can do a clean version bump + if patch != 0 { + patch -= 1 + } else if minor != 0 { + // can't patch-bump e.g. v3.1.0-1, the prerelease forces a minor bump: + // v3.0.999 is lower than the prerelease, v3.1.1-1 is a prerelease of v3.1.1. + if tc.bump == "patch" { + t.Skip() + } + minor -= 1 + } else if major != 0 { + if tc.bump == "minor" || tc.bump == "patch" { // can't minor-bump or patch-bump + t.Skip() + } + major -= 1 + } + preRelease = 0 + } + majorSignal, minorSignal, patchSignal := major, minor, patch + switch tc.bump { + case "major": + majorSignal += 1 + case "minor": + minorSignal += 1 + case "patch": + patchSignal += 1 + } + out, err := api.SignalSuperchainV1(&SuperchainSignal{ + Recommended: params.OPStackSupport, // required version change should be enough + Required: params.ProtocolVersionV0{Build: build, Major: majorSignal, Minor: minorSignal, Patch: patchSignal, PreRelease: preRelease}.Encode(), + }) + if err != nil { + t.Fatalf("failed to process signal: %v", err) + } + if out != params.OPStackSupport { + t.Fatalf("expected %s but got %s", params.OPStackSupport, out) + } + closeErr := n.Close() + if !tc.halt { + // assert no halt by closing, and not getting any error + if closeErr != nil { + t.Fatalf("expected not to have closed already, but just closed without error") + } + } else { + // assert halt by closing again, and seeing if we are not 
stopped already + if closeErr != node.ErrNodeStopped { + t.Fatalf("expected to have already closed and get a ErrNodeStopped error, but got %v", closeErr) + } + } + }) + } +}
diff --git go-ethereum/eth/downloader/downloader_test.go op-geth/eth/downloader/downloader_test.go index 2468e1a9809e247c1dd877a86e8fce37c2f08800..097888f0245fac34537ab755f5e9fbc35bfb14cf 100644 --- go-ethereum/eth/downloader/downloader_test.go +++ op-geth/eth/downloader/downloader_test.go @@ -81,7 +81,7 @@ freezer: freezer, chain: chain, peers: make(map[string]*downloadTesterPeer), } - tester.downloader = New(db, new(event.TypeMux), tester.chain, nil, tester.dropPeer, success) + tester.downloader = New(db, new(event.TypeMux), tester.chain, nil, tester.dropPeer, success, 0) return tester }
diff --git go-ethereum/eth/downloader/receiptreference_test.go op-geth/eth/downloader/receiptreference_test.go new file mode 100644 index 0000000000000000000000000000000000000000..85bc4e1aaa77aa18aafadf64bc41f0147486a455 --- /dev/null +++ op-geth/eth/downloader/receiptreference_test.go @@ -0,0 +1,108 @@ +package downloader + +import ( + "testing" + + "github.com/ethereum/go-ethereum/core/types" + "github.com/stretchr/testify/assert" +) + +// makeCorrection is a helper function to create a slice of receipts and a slice of corrected receipts +func makeCorrection(bn uint64, cid uint64, ns []uint64, ty []uint8) (types.Receipts, types.Receipts) { + receipts := make(types.Receipts, len(ns)) + correctedReceipts := make(types.Receipts, len(ns)) + transactions := make(types.Transactions, len(ns)) + for i := range ns { + receipts[i] = &types.Receipt{Type: ty[i], DepositNonce: &ns[i]} + correctedReceipts[i] = &types.Receipt{Type: ty[i], DepositNonce: &ns[i]} + transactions[i] = types.NewTx(&types.DepositTx{}) + } + + correctedReceipts = correctReceipts(correctedReceipts, transactions, bn, cid) + + return receipts, correctedReceipts +} + +func TestCorrectReceipts(t *testing.T) { + type testcase struct { + blockNum uint64 + chainID uint64 + nonces []uint64 + txTypes []uint8 + validate func(types.Receipts, types.Receipts) + } + + // Tests use the real reference data, so block numbers and chainIDs are selected for different test cases + testcases := []testcase{ + // Test case 1: No receipts + { + blockNum: 6825767, + chainID: 420, + nonces: []uint64{}, + txTypes: []uint8{}, + validate: func(receipts types.Receipts, correctedReceipts types.Receipts) { + assert.Empty(t, correctedReceipts) + }, + }, + // Test case 2: No deposits + { + blockNum: 6825767, + chainID: 420, + nonces: []uint64{1, 2, 3}, + txTypes: []uint8{1, 1, 1}, + validate: func(receipts types.Receipts, correctedReceipts types.Receipts) { + assert.Equal(t, receipts, correctedReceipts) + }, + }, + // Test case 3: all 
deposits with no correction + { + blockNum: 8835769, + chainID: 420, + nonces: []uint64{78756, 78757, 78758, 78759, 78760, 78761, 78762, 78763, 78764}, + txTypes: []uint8{126, 126, 126, 126, 126, 126, 126, 126, 126}, + validate: func(receipts types.Receipts, correctedReceipts types.Receipts) { + assert.Equal(t, receipts, correctedReceipts) + }, + }, + // Test case 4: all deposits with a correction + { + blockNum: 8835769, + chainID: 420, + nonces: []uint64{78756, 78757, 78758, 12345, 78760, 78761, 78762, 78763, 78764}, + txTypes: []uint8{126, 126, 126, 126, 126, 126, 126, 126, 126}, + validate: func(receipts types.Receipts, correctedReceipts types.Receipts) { + assert.NotEqual(t, receipts[3], correctedReceipts[3]) + for i := range receipts { + if i != 3 { + assert.Equal(t, receipts[i], correctedReceipts[i]) + } + } + }, + }, + // Test case 5: deposits with several corrections and non-deposits + { + blockNum: 8835769, + chainID: 420, + nonces: []uint64{0, 1, 2, 78759, 78760, 78761, 6, 78763, 78764, 9, 10, 11}, + txTypes: []uint8{126, 126, 126, 126, 126, 126, 126, 126, 126, 1, 1, 1}, + validate: func(receipts types.Receipts, correctedReceipts types.Receipts) { + // indexes 0, 1, 2, 6 were modified + // indexes 9, 10, 11 were added too, but they are not user deposits + assert.NotEqual(t, receipts[0], correctedReceipts[0]) + assert.NotEqual(t, receipts[1], correctedReceipts[1]) + assert.NotEqual(t, receipts[2], correctedReceipts[2]) + assert.NotEqual(t, receipts[6], correctedReceipts[6]) + for i := range receipts { + if i != 0 && i != 1 && i != 2 && i != 6 { + assert.Equal(t, receipts[i], correctedReceipts[i]) + } + } + }, + }, + } + + for _, tc := range testcases { + receipts, correctedReceipts := makeCorrection(tc.blockNum, tc.chainID, tc.nonces, tc.txTypes) + tc.validate(receipts, correctedReceipts) + } +}
diff --git go-ethereum/eth/gasprice/optimism-gasprice_test.go op-geth/eth/gasprice/optimism-gasprice_test.go new file mode 100644 index 0000000000000000000000000000000000000000..dd0140839d91e45be16d06faae9b714ad53265fa --- /dev/null +++ op-geth/eth/gasprice/optimism-gasprice_test.go @@ -0,0 +1,142 @@ +// Copyright 2020 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. 
+ +package gasprice + +import ( + "context" + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/event" + "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/rpc" + "github.com/ethereum/go-ethereum/trie" +) + +const ( + blockGasLimit = params.TxGas * 3 +) + +type testTxData struct { + priorityFee int64 + gasLimit uint64 +} + +type opTestBackend struct { + block *types.Block + receipts []*types.Receipt +} + +func (b *opTestBackend) HeaderByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Header, error) { + panic("not implemented") +} + +func (b *opTestBackend) BlockByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Block, error) { + return b.block, nil +} + +func (b *opTestBackend) GetReceipts(ctx context.Context, hash common.Hash) (types.Receipts, error) { + return b.receipts, nil +} + +func (b *opTestBackend) PendingBlockAndReceipts() (*types.Block, types.Receipts) { + panic("not implemented") +} + +func (b *opTestBackend) ChainConfig() *params.ChainConfig { + return params.OptimismTestConfig +} + +func (b *opTestBackend) SubscribeChainHeadEvent(ch chan<- core.ChainHeadEvent) event.Subscription { + return nil +} + +func newOpTestBackend(t *testing.T, txs []testTxData) *opTestBackend { + var ( + key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") + signer = types.LatestSigner(params.TestChainConfig) + ) + // only the most recent block is considered for optimism priority fee suggestions, so this is + // where we add the test transactions + ts := []*types.Transaction{} + rs := []*types.Receipt{} + header := types.Header{} + header.GasLimit = blockGasLimit + var nonce uint64 + for _, tx := range txs { + txdata := &types.DynamicFeeTx{ + ChainID: params.TestChainConfig.ChainID, + Nonce: nonce, + To: 
&common.Address{}, + Gas: params.TxGas, + GasFeeCap: big.NewInt(100 * params.GWei), + GasTipCap: big.NewInt(tx.priorityFee), + Data: []byte{}, + } + t := types.MustSignNewTx(key, signer, txdata) + ts = append(ts, t) + r := types.Receipt{} + r.GasUsed = tx.gasLimit + header.GasUsed += r.GasUsed + rs = append(rs, &r) + nonce++ + } + hasher := trie.NewStackTrie(nil) + b := types.NewBlock(&header, ts, nil, nil, hasher) + return &opTestBackend{block: b, receipts: rs} +} + +func TestSuggestOptimismPriorityFee(t *testing.T) { + minSuggestion := new(big.Int).SetUint64(1e8 * params.Wei) + var cases = []struct { + txdata []testTxData + want *big.Int + }{ + { + // block well under capacity, expect min priority fee suggestion + txdata: []testTxData{testTxData{params.GWei, 21000}}, + want: minSuggestion, + }, + { + // 2 txs, still under capacity, expect min priority fee suggestion + txdata: []testTxData{testTxData{params.GWei, 21000}, testTxData{params.GWei, 21000}}, + want: minSuggestion, + }, + { + // 2 txs w same priority fee (1 gwei), but second tx puts it right over capacity + txdata: []testTxData{testTxData{params.GWei, 21000}, testTxData{params.GWei, 21001}}, + want: big.NewInt(1100000000), // 10 percent over 1 gwei, the median + }, + { + // 3 txs, full block. return 10% over the median tx (10 gwei * 10% == 11 gwei) + txdata: []testTxData{testTxData{10 * params.GWei, 21000}, testTxData{1 * params.GWei, 21000}, testTxData{100 * params.GWei, 21000}}, + want: big.NewInt(11 * params.GWei), + }, + } + for i, c := range cases { + backend := newOpTestBackend(t, c.txdata) + oracle := NewOracle(backend, Config{MinSuggestedPriorityFee: minSuggestion}) + got := oracle.SuggestOptimismPriorityFee(context.Background(), backend.block.Header(), backend.block.Hash()) + if got.Cmp(c.want) != 0 { + t.Errorf("Gas price mismatch for test case %d: want %d, got %d", i, c.want, got) + } + } +}
diff --git go-ethereum/eth/tracers/internal/tracetest/flat_calltrace_test.go op-geth/eth/tracers/internal/tracetest/flat_calltrace_test.go index abee48891767d5434f65e6a1d64ed22e031a1153..b4e596ead16f1be145b35aa74d4df387cbf2673d 100644 --- go-ethereum/eth/tracers/internal/tracetest/flat_calltrace_test.go +++ op-geth/eth/tracers/internal/tracetest/flat_calltrace_test.go @@ -16,7 +16,6 @@ "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" - "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/tests"   // Force-load the native, to trigger registration @@ -82,7 +81,7 @@ return fmt.Errorf("failed to parse testcase: %v", err) } // Configure a blockchain with the given prestate tx := new(types.Transaction) - if err := rlp.DecodeBytes(common.FromHex(test.Input), tx); err != nil { + if err := tx.UnmarshalBinary(common.FromHex(test.Input)); err != nil { return fmt.Errorf("failed to parse testcase input: %v", err) } signer := types.MakeSigner(test.Genesis.Config, new(big.Int).SetUint64(uint64(test.Context.Number)), uint64(test.Context.Time))
diff --git go-ethereum/internal/ethapi/api_test.go op-geth/internal/ethapi/api_test.go index a6f7405eb3637aa180406ffdac374e62c0b12c28..313ce1c10e8662808eca54db4748d85157bda782 100644 --- go-ethereum/internal/ethapi/api_test.go +++ op-geth/internal/ethapi/api_test.go @@ -31,6 +31,10 @@ "reflect" "testing" "time"   + "github.com/holiman/uint256" + "github.com/stretchr/testify/require" + "golang.org/x/exp/slices" + "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/accounts" "github.com/ethereum/go-ethereum/accounts/keystore" @@ -52,11 +56,166 @@ "github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/internal/blocktest" "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rpc" - "github.com/holiman/uint256" - "github.com/stretchr/testify/require" - "golang.org/x/exp/slices" )   +func TestNewRPCTransactionDepositTx(t *testing.T) { + tx := types.NewTx(&types.DepositTx{ + SourceHash: common.HexToHash("0x1234"), + IsSystemTransaction: true, + Mint: big.NewInt(34), + }) + nonce := uint64(7) + receipt := &types.Receipt{ + DepositNonce: &nonce, + } + got := newRPCTransaction(tx, common.Hash{}, uint64(12), uint64(1234), uint64(1), big.NewInt(0), &params.ChainConfig{}, receipt) + // Should provide zero values for unused fields that are required in other transactions + require.Equal(t, got.GasPrice, (*hexutil.Big)(big.NewInt(0)), "newRPCTransaction().GasPrice = %v, want 0x0", got.GasPrice) + require.Equal(t, got.V, (*hexutil.Big)(big.NewInt(0)), "newRPCTransaction().V = %v, want 0x0", got.V) + require.Equal(t, got.R, (*hexutil.Big)(big.NewInt(0)), "newRPCTransaction().R = %v, want 0x0", got.R) + require.Equal(t, got.S, (*hexutil.Big)(big.NewInt(0)), "newRPCTransaction().S = %v, want 0x0", got.S) + + // Should include deposit tx specific fields + require.Equal(t, *got.SourceHash, tx.SourceHash(), "newRPCTransaction().SourceHash = %v, want %v", got.SourceHash, tx.SourceHash()) + require.Equal(t, *got.IsSystemTx, 
tx.IsSystemTx(), "newRPCTransaction().IsSystemTx = %v, want %v", got.IsSystemTx, tx.IsSystemTx()) + require.Equal(t, got.Mint, (*hexutil.Big)(tx.Mint()), "newRPCTransaction().Mint = %v, want %v", got.Mint, tx.Mint()) + require.Equal(t, got.Nonce, (hexutil.Uint64)(nonce), "newRPCTransaction().Nonce = %v, want %v", got.Nonce, nonce) +} + +func TestRPCTransactionDepositTxWithVersion(t *testing.T) { + tx := types.NewTx(&types.DepositTx{ + SourceHash: common.HexToHash("0x1234"), + IsSystemTransaction: true, + Mint: big.NewInt(34), + }) + nonce := uint64(7) + version := types.CanyonDepositReceiptVersion + receipt := &types.Receipt{ + DepositNonce: &nonce, + DepositReceiptVersion: &version, + } + got := newRPCTransaction(tx, common.Hash{}, uint64(12), uint64(1234), uint64(1), big.NewInt(0), &params.ChainConfig{}, receipt) + // Should provide zero values for unused fields that are required in other transactions + require.Equal(t, got.GasPrice, (*hexutil.Big)(big.NewInt(0)), "newRPCTransaction().GasPrice = %v, want 0x0", got.GasPrice) + require.Equal(t, got.V, (*hexutil.Big)(big.NewInt(0)), "newRPCTransaction().V = %v, want 0x0", got.V) + require.Equal(t, got.R, (*hexutil.Big)(big.NewInt(0)), "newRPCTransaction().R = %v, want 0x0", got.R) + require.Equal(t, got.S, (*hexutil.Big)(big.NewInt(0)), "newRPCTransaction().S = %v, want 0x0", got.S) + + // Should include versioned deposit tx specific fields + require.Equal(t, *got.SourceHash, tx.SourceHash(), "newRPCTransaction().SourceHash = %v, want %v", got.SourceHash, tx.SourceHash()) + require.Equal(t, *got.IsSystemTx, tx.IsSystemTx(), "newRPCTransaction().IsSystemTx = %v, want %v", got.IsSystemTx, tx.IsSystemTx()) + require.Equal(t, got.Mint, (*hexutil.Big)(tx.Mint()), "newRPCTransaction().Mint = %v, want %v", got.Mint, tx.Mint()) + require.Equal(t, got.Nonce, (hexutil.Uint64)(nonce), "newRPCTransaction().Nonce = %v, want %v", got.Nonce, nonce) + require.Equal(t, *got.DepositReceiptVersion, (hexutil.Uint64(version)), 
"newRPCTransaction().DepositReceiptVersion = %v, want %v", *got.DepositReceiptVersion, version) + + // Make sure json marshal/unmarshal of the rpc tx preserves the receipt version + b, err := json.Marshal(got) + require.NoError(t, err, "marshalling failed: %w", err) + parsed := make(map[string]interface{}) + err = json.Unmarshal(b, &parsed) + require.NoError(t, err, "unmarshalling failed: %w", err) + require.Equal(t, "0x1", parsed["depositReceiptVersion"]) +} + +func TestNewRPCTransactionOmitIsSystemTxFalse(t *testing.T) { + tx := types.NewTx(&types.DepositTx{ + IsSystemTransaction: false, + }) + got := newRPCTransaction(tx, common.Hash{}, uint64(12), uint64(1234), uint64(1), big.NewInt(0), &params.ChainConfig{}, nil) + + require.Nil(t, got.IsSystemTx, "should omit IsSystemTx when false") +} + +func TestUnmarshalRpcDepositTx(t *testing.T) { + version := hexutil.Uint64(types.CanyonDepositReceiptVersion) + tests := []struct { + name string + modifier func(tx *RPCTransaction) + valid bool + }{ + { + name: "Unmodified", + modifier: func(tx *RPCTransaction) {}, + valid: true, + }, + { + name: "Zero Values", + modifier: func(tx *RPCTransaction) { + tx.V = (*hexutil.Big)(common.Big0) + tx.R = (*hexutil.Big)(common.Big0) + tx.S = (*hexutil.Big)(common.Big0) + tx.GasPrice = (*hexutil.Big)(common.Big0) + }, + valid: true, + }, + { + name: "Nil Values", + modifier: func(tx *RPCTransaction) { + tx.V = nil + tx.R = nil + tx.S = nil + tx.GasPrice = nil + }, + valid: true, + }, + { + name: "Non-Zero GasPrice", + modifier: func(tx *RPCTransaction) { + tx.GasPrice = (*hexutil.Big)(big.NewInt(43)) + }, + valid: false, + }, + { + name: "Non-Zero V", + modifier: func(tx *RPCTransaction) { + tx.V = (*hexutil.Big)(big.NewInt(43)) + }, + valid: false, + }, + { + name: "Non-Zero R", + modifier: func(tx *RPCTransaction) { + tx.R = (*hexutil.Big)(big.NewInt(43)) + }, + valid: false, + }, + { + name: "Non-Zero S", + modifier: func(tx *RPCTransaction) { + tx.S = (*hexutil.Big)(big.NewInt(43)) 
+ }, + valid: false, + }, + { + name: "Non-nil deposit receipt version", + modifier: func(tx *RPCTransaction) { + tx.DepositReceiptVersion = &version + }, + valid: true, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + tx := types.NewTx(&types.DepositTx{ + SourceHash: common.HexToHash("0x1234"), + IsSystemTransaction: true, + Mint: big.NewInt(34), + }) + rpcTx := newRPCTransaction(tx, common.Hash{}, uint64(12), uint64(1234), uint64(1), big.NewInt(0), &params.ChainConfig{}, nil) + test.modifier(rpcTx) + json, err := json.Marshal(rpcTx) + require.NoError(t, err, "marshalling failed: %w", err) + parsed := &types.Transaction{} + err = parsed.UnmarshalJSON(json) + if test.valid { + require.NoError(t, err, "unmarshal failed: %w", err) + } else { + require.Error(t, err, "unmarshal should have failed but did not") + } + }) + } +} + func testTransactionMarshal(t *testing.T, tests []txData, config *params.ChainConfig) { t.Parallel() var ( @@ -80,7 +239,7 @@ t.Fatalf("test %d: stx changed, want %x have %x", i, want, have) }   // rpcTransaction - rpcTx := newRPCTransaction(tx, common.Hash{}, 0, 0, 0, nil, config) + rpcTx := newRPCTransaction(tx, common.Hash{}, 0, 0, 0, nil, config, nil) if data, err := json.Marshal(rpcTx); err != nil { t.Fatalf("test %d: marshalling failed; %v", i, err) } else if err = tx2.UnmarshalJSON(data); err != nil { @@ -567,7 +726,7 @@ if vmConfig == nil { vmConfig = b.chain.GetVMConfig() } txContext := core.NewEVMTxContext(msg) - context := core.NewEVMBlockContext(header, b.chain, nil) + context := core.NewEVMBlockContext(header, b.chain, nil, b.ChainConfig(), state) if blockContext != nil { context = *blockContext } @@ -620,6 +779,12 @@ panic("implement me") } func (b testBackend) BloomStatus() (uint64, uint64) { panic("implement me") } func (b testBackend) ServiceFilter(ctx context.Context, session *bloombits.MatcherSession) { + panic("implement me") +} +func (b testBackend) HistoricalRPCService() *rpc.Client { + 
panic("implement me") +} +func (b testBackend) Genesis() *types.Block { panic("implement me") }   @@ -843,7 +1008,7 @@ From: &accounts[0].addr, To: &accounts[1].addr, Value: (*hexutil.Big)(big.NewInt(1000)), }, - expectErr: errors.New("header not found"), + expectErr: ethereum.NotFound, }, // transfer on the latest block { @@ -1466,7 +1631,7 @@ "to": "0x0000000000000000000000000000000000000011", "transactionIndex": "0x1", "value": "0x6f", "type": "0x0", - "chainId": "0x7fffffffffffffee", + "chainId": "0x1", "v": "0x0", "r": "0x0", "s": "0x0" @@ -1504,7 +1669,7 @@ "to": "0x0000000000000000000000000000000000000011", "transactionIndex": "0x3", "value": "0x6f", "type": "0x0", - "chainId": "0x7fffffffffffffee", + "chainId": "0x1", "v": "0x0", "r": "0x0", "s": "0x0" @@ -1517,7 +1682,11 @@ }, }   for i, tc := range testSuite { - resp := RPCMarshalBlock(block, tc.inclTx, tc.fullTx, params.MainnetChainConfig) + resp, err := RPCMarshalBlock(context.Background(), block, tc.inclTx, tc.fullTx, params.MainnetChainConfig, testBackend{}) + if err != nil { + t.Errorf("test %d: got error %v", i, err) + continue + } out, err := json.Marshal(resp) if err != nil { t.Errorf("test %d: json marshal error: %v", i, err)
diff --git go-ethereum/params/config_test.go op-geth/params/config_test.go index bf8ce2fc5e247242615ff478a455785f9f07f88b..fc76c604a8163f77f8da9522f40a00d315de64a7 100644 --- go-ethereum/params/config_test.go +++ op-geth/params/config_test.go @@ -137,3 +137,23 @@ if r := c.Rules(big.NewInt(0), true, stamp); !r.IsShanghai { t.Errorf("expected %v to be shanghai", stamp) } } + +func TestConfigRulesRegolith(t *testing.T) { + c := &ChainConfig{ + RegolithTime: newUint64(500), + LondonBlock: new(big.Int), + Optimism: &OptimismConfig{}, + } + var stamp uint64 + if r := c.Rules(big.NewInt(0), true, stamp); r.IsOptimismRegolith { + t.Errorf("expected %v to not be regolith", stamp) + } + stamp = 500 + if r := c.Rules(big.NewInt(0), true, stamp); !r.IsOptimismRegolith { + t.Errorf("expected %v to be regolith", stamp) + } + stamp = math.MaxInt64 + if r := c.Rules(big.NewInt(0), true, stamp); !r.IsOptimismRegolith { + t.Errorf("expected %v to be regolith", stamp) + } +}
diff --git go-ethereum/params/superchain_test.go op-geth/params/superchain_test.go new file mode 100644 index 0000000000000000000000000000000000000000..abb72bbd351f4c0a99ea8b78deac222a816a9ccc --- /dev/null +++ op-geth/params/superchain_test.go @@ -0,0 +1,172 @@ +package params + +import ( + "fmt" + "testing" +) + +type HumanProtocolVersion struct { + VersionType uint8 + Major, Minor, Patch uint32 + Prerelease uint32 + Build [8]byte +} + +type ComparisonCase struct { + A, B HumanProtocolVersion + Cmp ProtocolVersionComparison +} + +func TestProtocolVersion_Compare(t *testing.T) { + testCases := []ComparisonCase{ + { + A: HumanProtocolVersion{0, 2, 1, 1, 1, [8]byte{}}, + B: HumanProtocolVersion{0, 1, 2, 2, 2, [8]byte{}}, + Cmp: AheadMajor, + }, + { + A: HumanProtocolVersion{0, 1, 2, 1, 1, [8]byte{}}, + B: HumanProtocolVersion{0, 1, 1, 2, 2, [8]byte{}}, + Cmp: AheadMinor, + }, + { + A: HumanProtocolVersion{0, 1, 1, 2, 1, [8]byte{}}, + B: HumanProtocolVersion{0, 1, 1, 1, 2, [8]byte{}}, + Cmp: AheadPatch, + }, + { + A: HumanProtocolVersion{0, 1, 1, 1, 2, [8]byte{}}, + B: HumanProtocolVersion{0, 1, 1, 1, 1, [8]byte{}}, + Cmp: AheadPrerelease, + }, + { + A: HumanProtocolVersion{0, 1, 2, 3, 4, [8]byte{}}, + B: HumanProtocolVersion{0, 1, 2, 3, 4, [8]byte{}}, + Cmp: Matching, + }, + { + A: HumanProtocolVersion{0, 3, 2, 1, 5, [8]byte{3}}, + B: HumanProtocolVersion{1, 1, 2, 3, 3, [8]byte{6}}, + Cmp: DiffVersionType, + }, + { + A: HumanProtocolVersion{0, 3, 2, 1, 5, [8]byte{3}}, + B: HumanProtocolVersion{0, 1, 2, 3, 3, [8]byte{6}}, + Cmp: DiffBuild, + }, + { + A: HumanProtocolVersion{0, 0, 0, 0, 0, [8]byte{}}, + B: HumanProtocolVersion{0, 1, 3, 3, 3, [8]byte{3}}, + Cmp: EmptyVersion, + }, + { + A: HumanProtocolVersion{0, 4, 0, 0, 0, [8]byte{}}, + B: HumanProtocolVersion{0, 4, 0, 0, 1, [8]byte{}}, + Cmp: AheadMajor, + }, + { + A: HumanProtocolVersion{0, 4, 1, 0, 0, [8]byte{}}, + B: HumanProtocolVersion{0, 4, 1, 0, 1, [8]byte{}}, + Cmp: AheadMinor, + }, + { + A: 
HumanProtocolVersion{0, 4, 0, 1, 0, [8]byte{}}, + B: HumanProtocolVersion{0, 4, 0, 1, 1, [8]byte{}}, + Cmp: AheadPatch, + }, + { + A: HumanProtocolVersion{0, 4, 0, 0, 2, [8]byte{}}, + B: HumanProtocolVersion{0, 4, 0, 0, 1, [8]byte{}}, + Cmp: AheadPrerelease, + }, + { + A: HumanProtocolVersion{0, 4, 1, 0, 1, [8]byte{}}, + B: HumanProtocolVersion{0, 4, 0, 0, 0, [8]byte{}}, + Cmp: AheadPatch, + }, + { + A: HumanProtocolVersion{0, 4, 0, 1, 1, [8]byte{}}, + B: HumanProtocolVersion{0, 4, 0, 0, 0, [8]byte{}}, + Cmp: AheadPrerelease, + }, + { + A: HumanProtocolVersion{0, 4, 1, 1, 1, [8]byte{}}, + B: HumanProtocolVersion{0, 4, 0, 0, 0, [8]byte{}}, + Cmp: AheadMinor, + }, + { + A: HumanProtocolVersion{0, 4, 0, 2, 1, [8]byte{}}, + B: HumanProtocolVersion{0, 4, 0, 0, 0, [8]byte{}}, + Cmp: AheadPatch, + }, + { + A: HumanProtocolVersion{0, 5, 0, 1, 1, [8]byte{}}, + B: HumanProtocolVersion{0, 4, 0, 0, 0, [8]byte{}}, + Cmp: AheadMajor, + }, + { + A: HumanProtocolVersion{0, 1, 0, 0, 1, [8]byte{}}, + B: HumanProtocolVersion{0, 0, 9, 0, 0, [8]byte{}}, + Cmp: AheadMinor, + }, + { + A: HumanProtocolVersion{0, 0, 1, 0, 1, [8]byte{}}, + B: HumanProtocolVersion{0, 0, 0, 9, 0, [8]byte{}}, + Cmp: AheadPatch, + }, + { + A: HumanProtocolVersion{0, 1, ^uint32(0), 0, 1, [8]byte{}}, + B: HumanProtocolVersion{0, 0, 1, 0, 0, [8]byte{}}, + Cmp: InvalidVersion, + }, + } + for i, tc := range testCases { + tc := tc // not a parallel sub-test, but better than a flake + t.Run(fmt.Sprintf("case_%d", i), func(t *testing.T) { + a := ProtocolVersionV0{tc.A.Build, tc.A.Major, tc.A.Minor, tc.A.Patch, tc.A.Prerelease}.Encode() + a[0] = tc.A.VersionType + b := ProtocolVersionV0{tc.B.Build, tc.B.Major, tc.B.Minor, tc.B.Patch, tc.B.Prerelease}.Encode() + b[0] = tc.B.VersionType + cmp := a.Compare(b) + if cmp != tc.Cmp { + t.Fatalf("expected %d but got %d", tc.Cmp, cmp) + } + switch tc.Cmp { + case AheadMajor, AheadMinor, AheadPatch, AheadPrerelease: + inv := b.Compare(a) + if inv != -tc.Cmp { + t.Fatalf("expected 
inverse when reversing the comparison, %d but got %d", -tc.Cmp, inv) + } + case DiffVersionType, DiffBuild, EmptyVersion, Matching: + inv := b.Compare(a) + if inv != tc.Cmp { + t.Fatalf("expected comparison reversed to hold the same, expected %d but got %d", tc.Cmp, inv) + } + } + }) + } +} +func TestProtocolVersion_String(t *testing.T) { + testCases := []struct { + version ProtocolVersion + expected string + }{ + {ProtocolVersionV0{[8]byte{}, 0, 0, 0, 0}.Encode(), "v0.0.0"}, + {ProtocolVersionV0{[8]byte{}, 0, 0, 0, 1}.Encode(), "v0.0.0-1"}, + {ProtocolVersionV0{[8]byte{}, 0, 0, 1, 0}.Encode(), "v0.0.1"}, + {ProtocolVersionV0{[8]byte{}, 4, 3, 2, 1}.Encode(), "v4.3.2-1"}, + {ProtocolVersionV0{[8]byte{}, 0, 100, 2, 0}.Encode(), "v0.100.2"}, + {ProtocolVersionV0{[8]byte{'O', 'P', '-', 'm', 'o', 'd'}, 42, 0, 2, 1}.Encode(), "v42.0.2-1+OP-mod"}, + {ProtocolVersionV0{[8]byte{'b', 'e', 't', 'a', '.', '1', '2', '3'}, 1, 0, 0, 0}.Encode(), "v1.0.0+beta.123"}, + {ProtocolVersionV0{[8]byte{'a', 'b', 1}, 42, 0, 2, 0}.Encode(), "v42.0.2+0x6162010000000000"}, // do not render invalid alpha numeric + {ProtocolVersionV0{[8]byte{1, 2, 3, 4, 5, 6, 7, 8}, 42, 0, 2, 0}.Encode(), "v42.0.2+0x0102030405060708"}, + } + for _, tc := range testCases { + t.Run(tc.expected, func(t *testing.T) { + got := tc.version.String() + if got != tc.expected { + t.Fatalf("got %q but expected %q", got, tc.expected) + } + }) + } +}
diff --git go-ethereum/rpc/client_test.go op-geth/rpc/client_test.go index ac02ad33cf6a7a4c389c72ec54e96bdb92bc5bf0..8870ed1cb687638d1f29a268ac75c5faae6190eb 100644 --- go-ethereum/rpc/client_test.go +++ op-geth/rpc/client_test.go @@ -585,7 +585,7 @@ var ( srv = NewServer() httpsrv = httptest.NewServer(srv.WebsocketHandler(nil)) - wsURL = "ws:" + strings.TrimPrefix(httpsrv.URL, "http:") + wsURL = "ws:" + strings.TrimPrefix(httpsrv.URL, "http:") //nolint:all ) defer srv.Stop() defer httpsrv.Close()
diff --git go-ethereum/tests/state_test.go op-geth/tests/state_test.go index 1d749d8bcf52b4fd55f1b63d3b4c86359cef38c6..5562942627c6b32294a2291ab6d8404f08ce97ff 100644 --- go-ethereum/tests/state_test.go +++ op-geth/tests/state_test.go @@ -284,7 +284,7 @@ }   // Prepare the EVM. txContext := core.NewEVMTxContext(msg) - context := core.NewEVMBlockContext(block.Header(), nil, &t.json.Env.Coinbase) + context := core.NewEVMBlockContext(block.Header(), nil, &t.json.Env.Coinbase, config, state.StateDB) context.GetHash = vmTestBlockHash context.BaseFee = baseFee evm := vm.NewEVM(context, txContext, state.StateDB, config, vmconfig)
diff --git go-ethereum/beacon/engine/gen_epe.go op-geth/beacon/engine/gen_epe.go index e69f9a5951a28deeaa9ad51f1b2d9e8733b2feca..3529bbd66141dcf34643f03ee03c5dbe2a67f1d5 100644 --- go-ethereum/beacon/engine/gen_epe.go +++ op-geth/beacon/engine/gen_epe.go @@ -7,6 +7,7 @@ "encoding/json" "errors" "math/big"   + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" )   @@ -15,26 +16,29 @@ // MarshalJSON marshals as JSON. func (e ExecutionPayloadEnvelope) MarshalJSON() ([]byte, error) { type ExecutionPayloadEnvelope struct { - ExecutionPayload *ExecutableData `json:"executionPayload" gencodec:"required"` - BlockValue *hexutil.Big `json:"blockValue" gencodec:"required"` - BlobsBundle *BlobsBundleV1 `json:"blobsBundle"` - Override bool `json:"shouldOverrideBuilder"` + ExecutionPayload *ExecutableData `json:"executionPayload" gencodec:"required"` + BlockValue *hexutil.Big `json:"blockValue" gencodec:"required"` + BlobsBundle *BlobsBundleV1 `json:"blobsBundle"` + Override bool `json:"shouldOverrideBuilder"` + ParentBeaconBlockRoot *common.Hash `json:"parentBeaconBlockRoot,omitempty"` } var enc ExecutionPayloadEnvelope enc.ExecutionPayload = e.ExecutionPayload enc.BlockValue = (*hexutil.Big)(e.BlockValue) enc.BlobsBundle = e.BlobsBundle enc.Override = e.Override + enc.ParentBeaconBlockRoot = e.ParentBeaconBlockRoot return json.Marshal(&enc) }   // UnmarshalJSON unmarshals from JSON. 
func (e *ExecutionPayloadEnvelope) UnmarshalJSON(input []byte) error { type ExecutionPayloadEnvelope struct { - ExecutionPayload *ExecutableData `json:"executionPayload" gencodec:"required"` - BlockValue *hexutil.Big `json:"blockValue" gencodec:"required"` - BlobsBundle *BlobsBundleV1 `json:"blobsBundle"` - Override *bool `json:"shouldOverrideBuilder"` + ExecutionPayload *ExecutableData `json:"executionPayload" gencodec:"required"` + BlockValue *hexutil.Big `json:"blockValue" gencodec:"required"` + BlobsBundle *BlobsBundleV1 `json:"blobsBundle"` + Override *bool `json:"shouldOverrideBuilder"` + ParentBeaconBlockRoot *common.Hash `json:"parentBeaconBlockRoot,omitempty"` } var dec ExecutionPayloadEnvelope if err := json.Unmarshal(input, &dec); err != nil { @@ -53,6 +57,9 @@ e.BlobsBundle = dec.BlobsBundle } if dec.Override != nil { e.Override = *dec.Override + } + if dec.ParentBeaconBlockRoot != nil { + e.ParentBeaconBlockRoot = dec.ParentBeaconBlockRoot } return nil }
diff --git go-ethereum/cmd/devp2p/internal/ethtest/snap.go op-geth/cmd/devp2p/internal/ethtest/snap.go index 64e063358545ee8fd2cb0fbf9d8c6ae2ead74f0c..83844b1e9d6a09737db340a70f48242294e8dbd3 100644 --- go-ethereum/cmd/devp2p/internal/ethtest/snap.go +++ op-geth/cmd/devp2p/internal/ethtest/snap.go @@ -470,13 +470,13 @@ { desc: `Here we request state roots as code hashes. The server should deliver an empty response with no items.`, nBytes: 10000, hashes: []common.Hash{genesisRoot, headRoot}, - expHashes: 0, + expHashes: 1, // 32-byte keys are detected as code, even if not code (like genesis hash), in legacy lookups. }, { desc: `Here we request the genesis state root (which is not an existing code hash) two times. The server should deliver an empty response with no items.`, nBytes: 10000, hashes: []common.Hash{genesisRoot, genesisRoot}, - expHashes: 0, + expHashes: 2, // 32-byte keys are detected as code, even if not code (like genesis hash), in legacy lookups. }, // Empties {
diff --git go-ethereum/cmd/era/main.go op-geth/cmd/era/main.go index e27d8ccec60540cf4e7cc041a84a3b19abbf65f7..d913958a0ec70a92443d134f788ff4479dd2323a 100644 --- go-ethereum/cmd/era/main.go +++ op-geth/cmd/era/main.go @@ -28,6 +28,7 @@ "time"   "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/eth" "github.com/ethereum/go-ethereum/internal/era" "github.com/ethereum/go-ethereum/internal/ethapi" "github.com/ethereum/go-ethereum/internal/flags" @@ -121,7 +122,11 @@ if err != nil { return fmt.Errorf("error reading block %d: %w", num, err) } // Convert block to JSON and print. - val := ethapi.RPCMarshalBlock(block, ctx.Bool(txsFlag.Name), ctx.Bool(txsFlag.Name), params.MainnetChainConfig) + // TODO: without a proper Eth API Backend, this tool isn't expected to function correctly + val, err := ethapi.RPCMarshalBlock(ctx.Context, block, ctx.Bool(txsFlag.Name), ctx.Bool(txsFlag.Name), params.MainnetChainConfig, &eth.EthAPIBackend{}) + if err != nil { + return fmt.Errorf("error marshaling block: %w", err) + } b, err := json.MarshalIndent(val, "", " ") if err != nil { return fmt.Errorf("error marshaling json: %w", err)
diff --git go-ethereum/cmd/evm/internal/t8ntool/transition.go op-geth/cmd/evm/internal/t8ntool/transition.go index 7802d49651999b0d97951342ef5896e64651b7f7..1059586f3969da9c3f019b374172aa004300564b 100644 --- go-ethereum/cmd/evm/internal/t8ntool/transition.go +++ op-geth/cmd/evm/internal/t8ntool/transition.go @@ -210,7 +210,7 @@ Number: new(big.Int).SetUint64(env.Number - 1), BaseFee: env.ParentBaseFee, GasUsed: env.ParentGasUsed, GasLimit: env.ParentGasLimit, - }) + }, env.Timestamp) return nil }
diff --git go-ethereum/consensus/misc/create2deployer.go op-geth/consensus/misc/create2deployer.go new file mode 100644 index 0000000000000000000000000000000000000000..27c87a4a8c45b1e859719e8d4f8e5775ff3da0ca --- /dev/null +++ op-geth/consensus/misc/create2deployer.go @@ -0,0 +1,37 @@ +package misc + +import ( + "github.com/ethereum-optimism/superchain-registry/superchain" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/params" +) + +// The original create2deployer contract could not be deployed to Base mainnet at +// the canonical address of 0x13b0D85CcB8bf860b6b79AF3029fCA081AE9beF2 due to +// an accidental nonce increment from a deposit transaction. See +// https://github.com/pcaversaccio/create2deployer/issues/128 for context. This +// file applies the contract code to the canonical address manually in the Canyon +// hardfork. + +var create2DeployerAddress = common.HexToAddress("0x13b0D85CcB8bf860b6b79AF3029fCA081AE9beF2") +var create2DeployerCodeHash = common.HexToHash("0xb0550b5b431e30d38000efb7107aaa0ade03d48a7198a140edda9d27134468b2") +var create2DeployerCode []byte + +func init() { + code, err := superchain.LoadContractBytecode(superchain.Hash(create2DeployerCodeHash)) + if err != nil { + panic(err) + } + create2DeployerCode = code +} + +func EnsureCreate2Deployer(c *params.ChainConfig, timestamp uint64, db vm.StateDB) { + if !c.IsOptimism() || c.CanyonTime == nil || *c.CanyonTime != timestamp { + return + } + log.Info("Setting Create2Deployer code", "address", create2DeployerAddress, "codeHash", create2DeployerCodeHash) + db.SetCode(create2DeployerAddress, create2DeployerCode) +}
diff --git go-ethereum/core/chain_makers.go op-geth/core/chain_makers.go index 733030fd1c9eb8fff850a87babe2a71b491a7ba5..9da48c447af77c02c59d4b8f43708a7ffa5e532e 100644 --- go-ethereum/core/chain_makers.go +++ op-geth/core/chain_makers.go @@ -98,7 +98,7 @@ // block. func (b *BlockGen) SetParentBeaconRoot(root common.Hash) { b.header.ParentBeaconRoot = &root var ( - blockContext = NewEVMBlockContext(b.header, b.cm, &b.header.Coinbase) + blockContext = NewEVMBlockContext(b.header, b.cm, &b.header.Coinbase, b.cm.config, b.statedb) vmenv = vm.NewEVM(blockContext, vm.TxContext{}, b.statedb, b.cm.config, vm.Config{}) ) ProcessBeaconBlockRoot(root, vmenv, b.statedb) @@ -230,7 +230,7 @@ // The gas limit and price should be derived from the parent h.GasLimit = parent.GasLimit if b.cm.config.IsLondon(h.Number) { - h.BaseFee = eip1559.CalcBaseFee(b.cm.config, parent) + h.BaseFee = eip1559.CalcBaseFee(b.cm.config, parent, h.Time) if !b.cm.config.IsLondon(parent.Number) { parentGasLimit := parent.GasLimit * b.cm.config.ElasticityMultiplier() h.GasLimit = CalcGasLimit(parentGasLimit, parentGasLimit) @@ -320,7 +320,7 @@ // Set the difficulty for clique block. The chain maker doesn't have access // to a chain, so the difficulty will be left unset (nil). Set it here to the // correct value. if b.header.Difficulty == nil { - if config.TerminalTotalDifficulty == nil { + if config.TerminalTotalDifficulty == nil && !config.IsOptimismBedrock(b.header.Number) { // Clique chain b.header.Difficulty = big.NewInt(2) } else { @@ -430,7 +430,7 @@ Time: time, }   if cm.config.IsLondon(header.Number) { - header.BaseFee = eip1559.CalcBaseFee(cm.config, parent.Header()) + header.BaseFee = eip1559.CalcBaseFee(cm.config, parent.Header(), header.Time) if !cm.config.IsLondon(parent.Number()) { parentGasLimit := parent.GasLimit() * cm.config.ElasticityMultiplier() header.GasLimit = CalcGasLimit(parentGasLimit, parentGasLimit)
diff --git go-ethereum/core/state/statedb.go op-geth/core/state/statedb.go index ebd2143882ac953a3930fc124ef2c8a959b09069..9fc529b86b3be8dbdaf4509c97f12540fc835a53 100644 --- go-ethereum/core/state/statedb.go +++ op-geth/core/state/statedb.go @@ -1386,6 +1386,12 @@ } return ret }   +// OpenStorageTrie opens the storage trie for the storage root of the provided address. +func (s *StateDB) OpenStorageTrie(addr common.Address) (Trie, error) { + storageRoot := s.GetStorageRoot(addr) + return s.db.OpenStorageTrie(s.originalRoot, addr, storageRoot, s.trie) +} + // copySet returns a deep-copied set. func copySet[k comparable](set map[k][]byte) map[k][]byte { copied := make(map[k][]byte, len(set))
diff --git go-ethereum/core/vm/contracts.go op-geth/core/vm/contracts.go index 33a867654e71ec9883f54d1daa19b892da3c9ee2..cd819f9e7b6848011cd8ab427b646a81031c14c5 100644 --- go-ethereum/core/vm/contracts.go +++ op-geth/core/vm/contracts.go @@ -30,6 +30,7 @@ "github.com/ethereum/go-ethereum/crypto/blake2b" "github.com/ethereum/go-ethereum/crypto/bls12381" "github.com/ethereum/go-ethereum/crypto/bn256" "github.com/ethereum/go-ethereum/crypto/kzg4844" + "github.com/ethereum/go-ethereum/crypto/secp256r1" "github.com/ethereum/go-ethereum/params" "golang.org/x/crypto/ripemd160" ) @@ -107,6 +108,22 @@ common.BytesToAddress([]byte{9}): &blake2F{}, common.BytesToAddress([]byte{0x0a}): &kzgPointEvaluation{}, }   +// PrecompiledContractsFjord contains the default set of pre-compiled Ethereum +// contracts used in the Fjord release. +var PrecompiledContractsFjord = map[common.Address]PrecompiledContract{ + common.BytesToAddress([]byte{1}): &ecrecover{}, + common.BytesToAddress([]byte{2}): &sha256hash{}, + common.BytesToAddress([]byte{3}): &ripemd160hash{}, + common.BytesToAddress([]byte{4}): &dataCopy{}, + common.BytesToAddress([]byte{5}): &bigModExp{eip2565: true}, + common.BytesToAddress([]byte{6}): &bn256AddIstanbul{}, + common.BytesToAddress([]byte{7}): &bn256ScalarMulIstanbul{}, + common.BytesToAddress([]byte{8}): &bn256PairingIstanbul{}, + common.BytesToAddress([]byte{9}): &blake2F{}, + common.BytesToAddress([]byte{0x0a}): &kzgPointEvaluation{}, + common.BytesToAddress([]byte{0x01, 0x00}): &p256Verify{}, +} + // PrecompiledContractsBLS contains the set of pre-compiled Ethereum // contracts specified in EIP-2537. These are exported for testing purposes. 
var PrecompiledContractsBLS = map[common.Address]PrecompiledContract{ @@ -122,6 +139,7 @@ common.BytesToAddress([]byte{18}): &bls12381MapG2{}, }   var ( + PrecompiledAddressesFjord []common.Address PrecompiledAddressesCancun []common.Address PrecompiledAddressesBerlin []common.Address PrecompiledAddressesIstanbul []common.Address @@ -145,11 +163,16 @@ } for k := range PrecompiledContractsCancun { PrecompiledAddressesCancun = append(PrecompiledAddressesCancun, k) } + for k := range PrecompiledContractsFjord { + PrecompiledAddressesFjord = append(PrecompiledAddressesFjord, k) + } }   // ActivePrecompiles returns the precompiles enabled with the current configuration. func ActivePrecompiles(rules params.Rules) []common.Address { switch { + case rules.IsOptimismFjord: + return PrecompiledAddressesFjord case rules.IsCancun: return PrecompiledAddressesCancun case rules.IsBerlin: @@ -1134,3 +1157,37 @@ h[0] = blobCommitmentVersionKZG   return h } + +// P256VERIFY (secp256r1 signature verification) +// implemented as a native contract +type p256Verify struct{} + +// RequiredGas returns the gas required to execute the precompiled contract +func (c *p256Verify) RequiredGas(input []byte) uint64 { + return params.P256VerifyGas +} + +// Run executes the precompiled contract with given 160 bytes of param, returning the output and the used gas +func (c *p256Verify) Run(input []byte) ([]byte, error) { + // Required input length is 160 bytes + const p256VerifyInputLength = 160 + // Check the input length + if len(input) != p256VerifyInputLength { + // Input length is invalid + return nil, nil + } + + // Extract the hash, r, s, x, y from the input + hash := input[0:32] + r, s := new(big.Int).SetBytes(input[32:64]), new(big.Int).SetBytes(input[64:96]) + x, y := new(big.Int).SetBytes(input[96:128]), new(big.Int).SetBytes(input[128:160]) + + // Verify the secp256r1 signature + if secp256r1.Verify(hash, r, s, x, y) { + // Signature is valid + return common.LeftPadBytes([]byte{1}, 32), 
nil + } else { + // Signature is invalid + return nil, nil + } +}
diff --git go-ethereum/core/vm/interpreter.go op-geth/core/vm/interpreter.go index 1968289f4eaa370d3857a6dee1533a9a2d6db9aa..a65c7edd4570b86c0219959c3573a7b9d17984b2 100644 --- go-ethereum/core/vm/interpreter.go +++ op-geth/core/vm/interpreter.go @@ -21,14 +21,19 @@ "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/params" ) + +// PrecompileOverrides is a function that can be used to override the default precompiled contracts +type PrecompileOverrides func(params.Rules, PrecompiledContract, common.Address) (PrecompiledContract, bool)   // Config are the configuration options for the Interpreter type Config struct { - Tracer EVMLogger // Opcode logger - NoBaseFee bool // Forces the EIP-1559 baseFee to 0 (needed for 0 price calls) - EnablePreimageRecording bool // Enables recording of SHA3/keccak preimages - ExtraEips []int // Additional EIPS that are to be enabled + Tracer EVMLogger // Opcode logger + NoBaseFee bool // Forces the EIP-1559 baseFee to 0 (needed for 0 price calls) + EnablePreimageRecording bool // Enables recording of SHA3/keccak preimages + ExtraEips []int // Additional EIPS that are to be enabled + OptimismPrecompileOverrides PrecompileOverrides // Precompile overrides for Optimism }   // ScopeContext contains the things that are per-call, such as stack and memory,
diff --git go-ethereum/crypto/secp256r1/publickey.go op-geth/crypto/secp256r1/publickey.go new file mode 100644 index 0000000000000000000000000000000000000000..885a3e1a624b85310225913c44d294e402a50fcd --- /dev/null +++ op-geth/crypto/secp256r1/publickey.go @@ -0,0 +1,21 @@ +package secp256r1 + +import ( + "crypto/ecdsa" + "crypto/elliptic" + "math/big" +) + +// newPublicKey builds an ECDSA public key on the P-256 curve from the given coordinates, returning nil if they are not a valid curve point +func newPublicKey(x, y *big.Int) *ecdsa.PublicKey { + // Check if the given coordinates are valid + if x == nil || y == nil || !elliptic.P256().IsOnCurve(x, y) { + return nil + } + + return &ecdsa.PublicKey{ + Curve: elliptic.P256(), + X: x, + Y: y, + } +}
diff --git go-ethereum/crypto/secp256r1/verifier.go op-geth/crypto/secp256r1/verifier.go new file mode 100644 index 0000000000000000000000000000000000000000..ffb4839d8d560a5892175c737bff136d73ee6e12 --- /dev/null +++ op-geth/crypto/secp256r1/verifier.go @@ -0,0 +1,22 @@ +package secp256r1 + +import ( + "crypto/ecdsa" + "math/big" +) + +// Verify verifies the given signature (r, s) for the given hash and public key (x, y). +// It returns true if the signature is valid, false otherwise. +func Verify(hash []byte, r, s, x, y *big.Int) bool { + // Create the public key format + publicKey := newPublicKey(x, y) + + // Reject coordinates that did not form a valid public key + if publicKey == nil { + return false + } + + // Verify the signature with the public key, + // then return true if it's valid, false otherwise + return ecdsa.Verify(publicKey, hash, r, s) +}
diff --git go-ethereum/eth/catalyst/queue.go op-geth/eth/catalyst/queue.go index 634dc1b2e6c2520f02e74f38ba66cba649685c86..d42904843b25fa8bb54a1739a9ff68f6d017f90f 100644 --- go-ethereum/eth/catalyst/queue.go +++ op-geth/eth/catalyst/queue.go @@ -17,6 +17,7 @@ package catalyst   import ( + "errors" "sync"   "github.com/ethereum/go-ethereum/beacon/engine" @@ -89,6 +90,24 @@ return item.payload.ResolveFull() } } return nil +} + +// waitFull waits until the first full payload has been built for the specified payload id +// The method returns immediately if the payload is unknown. +func (q *payloadQueue) waitFull(id engine.PayloadID) error { + q.lock.RLock() + defer q.lock.RUnlock() + + for _, item := range q.payloads { + if item == nil { + return errors.New("unknown payload") + } + if item.id == id { + item.payload.WaitFull() + return nil + } + } + return errors.New("unknown payload") }   // has checks if a particular payload is already tracked.
diff --git go-ethereum/eth/catalyst/simulated_beacon.go op-geth/eth/catalyst/simulated_beacon.go index f1c5689e1d286880cef5c8b4950dc28282d136a1..0c277ce0d6f8b4012657024c696e67d6424ae05a 100644 --- go-ethereum/eth/catalyst/simulated_beacon.go +++ op-geth/eth/catalyst/simulated_beacon.go @@ -169,6 +169,9 @@ if fcResponse == engine.STATUS_SYNCING { return errors.New("chain rewind prevented invocation of payload creation") }   + if err := c.engineAPI.localBlocks.waitFull(*fcResponse.PayloadID); err != nil { + return err + } envelope, err := c.engineAPI.getPayload(*fcResponse.PayloadID, true) if err != nil { return err
diff --git go-ethereum/eth/gasestimator/gasestimator.go op-geth/eth/gasestimator/gasestimator.go index f07f98956e3c8a483f14e693569f096837d899a0..a6c9d635d8d87d1b5abedf9fb99c32d236ddcb68 100644 --- go-ethereum/eth/gasestimator/gasestimator.go +++ op-geth/eth/gasestimator/gasestimator.go @@ -208,7 +208,7 @@ func run(ctx context.Context, call *core.Message, opts *Options) (*core.ExecutionResult, error) { // Assemble the call and the call context var ( msgContext = core.NewEVMTxContext(call) - evmContext = core.NewEVMBlockContext(opts.Header, opts.Chain, nil) + evmContext = core.NewEVMBlockContext(opts.Header, opts.Chain, nil, opts.Config, opts.State)   dirtyState = opts.State.Copy() evm = vm.NewEVM(evmContext, msgContext, dirtyState, opts.Config, vm.Config{NoBaseFee: true})
diff --git go-ethereum/eth/gasprice/feehistory.go op-geth/eth/gasprice/feehistory.go index d657eb6d996b6f7eb7eecfb6bab2f4a3b63d8970..354b1bc1824b0cd2e79238d49e1012a43e814c9d 100644 --- go-ethereum/eth/gasprice/feehistory.go +++ op-geth/eth/gasprice/feehistory.go @@ -83,7 +83,7 @@ if bf.results.baseFee = bf.header.BaseFee; bf.results.baseFee == nil { bf.results.baseFee = new(big.Int) } if chainconfig.IsLondon(big.NewInt(int64(bf.blockNumber + 1))) { - bf.results.nextBaseFee = eip1559.CalcBaseFee(chainconfig, bf.header) + bf.results.nextBaseFee = eip1559.CalcBaseFee(chainconfig, bf.header, bf.header.Time+1) } else { bf.results.nextBaseFee = new(big.Int) }
diff --git go-ethereum/eth/tracers/native/call.go op-geth/eth/tracers/native/call.go index be9b58a4cd3c384e823eea19b52a3de6cb9cf366..eafaf157f60d3801a055382f2637ffd2f1429724 100644 --- go-ethereum/eth/tracers/native/call.go +++ op-geth/eth/tracers/native/call.go @@ -262,6 +262,10 @@ if len(t.callstack) != 1 { return nil, errors.New("incorrect number of top-level calls") }   + if t.callstack[0].Type == vm.STOP { + t.callstack[0].Error = "failed deposit transaction" + } + res, err := json.Marshal(t.callstack[0]) if err != nil { return nil, err
diff --git go-ethereum/eth/tracers/native/call_flat.go op-geth/eth/tracers/native/call_flat.go index 266ab9900146ca413600888e0238e4c5a3102b77..93f36f84a057e1e225d9df1e0bf146995a1c8c08 100644 --- go-ethereum/eth/tracers/native/call_flat.go +++ op-geth/eth/tracers/native/call_flat.go @@ -215,6 +215,10 @@ if len(t.tracer.callstack) < 1 { return nil, errors.New("invalid number of calls") }   + if t.tracer.callstack[0].Type == vm.STOP { + t.tracer.callstack[0].Error = "failed deposit transaction" + } + flat, err := flatFromNested(&t.tracer.callstack[0], []int{}, t.config.ConvertParityErrors, t.ctx) if err != nil { return nil, err @@ -249,7 +253,7 @@ case vm.CREATE, vm.CREATE2: frame = newFlatCreate(input) case vm.SELFDESTRUCT: frame = newFlatSelfdestruct(input) - case vm.CALL, vm.STATICCALL, vm.CALLCODE, vm.DELEGATECALL: + case vm.CALL, vm.STATICCALL, vm.CALLCODE, vm.DELEGATECALL, vm.STOP: frame = newFlatCall(input) default: return nil, fmt.Errorf("unrecognized call frame type: %s", input.Type)
diff --git go-ethereum/graphql/graphql.go op-geth/graphql/graphql.go index bac86476b10532037855f92d8196e350700e9440..c2807131d8bf83dd182ffefb63d05d8ea73a9593 100644 --- go-ethereum/graphql/graphql.go +++ op-geth/graphql/graphql.go @@ -773,7 +773,7 @@ if !chaincfg.IsLondon(new(big.Int).Add(header.Number, common.Big1)) { return nil, nil } } - nextBaseFee := eip1559.CalcBaseFee(chaincfg, header) + nextBaseFee := eip1559.CalcBaseFee(chaincfg, header, header.Time+1) return (*hexutil.Big)(nextBaseFee), nil }
diff --git go-ethereum/internal/build/env.go op-geth/internal/build/env.go index 0854fba2498d2f490ca5de1e73c1c9f2c58932a3..6a621495ba5441adbc045f1210f1f75c98a969b0 100644 --- go-ethereum/internal/build/env.go +++ op-geth/internal/build/env.go @@ -102,6 +102,10 @@ func LocalEnv() Environment { env := applyEnvFlags(Environment{Name: "local", Repo: "ethereum/go-ethereum"})   head := readGitFile("HEAD") + if info, err := os.Stat(".git/objects"); err == nil && info.IsDir() && env.Tag == "" { + env.Tag = firstLine(RunGit("tag", "-l", "--points-at", "HEAD")) + } + if fields := strings.Fields(head); len(fields) == 2 { head = fields[1] } else { @@ -111,6 +115,7 @@ // Additional check required to verify, that file contains commit hash commitRe, _ := regexp.Compile("^([0-9a-f]{40})$") if commit := commitRe.FindString(head); commit != "" && env.Commit == "" { env.Commit = commit + env.Date = getDate(env.Commit) } return env } @@ -122,9 +127,6 @@ if env.Branch == "" { if head != "HEAD" { env.Branch = strings.TrimPrefix(head, "refs/heads/") } - } - if info, err := os.Stat(".git/objects"); err == nil && info.IsDir() && env.Tag == "" { - env.Tag = firstLine(RunGit("tag", "-l", "--points-at", "HEAD")) } return env }
diff --git go-ethereum/internal/testlog/testlog.go op-geth/internal/testlog/testlog.go index 037b7ee9c1206f56203745a224d36abbff3f08f8..3cdbea6e0537659c5fb8b01967565641fb4fd5fc 100644 --- go-ethereum/internal/testlog/testlog.go +++ op-geth/internal/testlog/testlog.go @@ -98,6 +98,10 @@ h: &bh, } }   +func (l *logger) Handler() slog.Handler { + return l.l.Handler() +} + func (l *logger) Write(level slog.Level, msg string, ctx ...interface{}) {}   func (l *logger) Enabled(ctx context.Context, level slog.Level) bool {
diff --git go-ethereum/log/handler.go op-geth/log/handler.go index 7459aad8913b29a491cfa45d1b6ab524dc9c7617..256b57f2d0b2e46e289a3c3d97896113b0672634 100644 --- go-ethereum/log/handler.go +++ op-geth/log/handler.go @@ -117,6 +117,7 @@ // JSONHandler returns a handler which prints records in JSON format. func JSONHandler(wr io.Writer) slog.Handler { return slog.NewJSONHandler(wr, &slog.HandlerOptions{ ReplaceAttr: builtinReplaceJSON, + Level: &leveler{levelMaxVerbosity}, }) }
diff --git go-ethereum/log/logger.go op-geth/log/logger.go index 75e3643044889295b9076d8c20173b55d4fa7ef4..c28bbde56840e40cf18ca2e364fcbdec8d23847d 100644 --- go-ethereum/log/logger.go +++ op-geth/log/logger.go @@ -137,6 +137,9 @@ Write(level slog.Level, msg string, attrs ...any)   // Enabled reports whether l emits log records at the given context and level. Enabled(ctx context.Context, level slog.Level) bool + + // Handler returns the underlying handler of the inner logger. + Handler() slog.Handler }   type logger struct { @@ -148,6 +151,10 @@ func NewLogger(h slog.Handler) Logger { return &logger{ slog.New(h), } +} + +func (l *logger) Handler() slog.Handler { + return l.inner.Handler() }   // write logs a message at the specified level:
diff --git go-ethereum/node/node.go op-geth/node/node.go index dfa83d58c7260fee12076b11f5d045cd699b2333..4dc856c3456d478bd54419a8598275197dedf202 100644 --- go-ethereum/node/node.go +++ op-geth/node/node.go @@ -680,7 +680,7 @@ // HTTPEndpoint returns the URL of the HTTP server. Note that this URL does not // contain the JSON-RPC path prefix set by HTTPPathPrefix. func (n *Node) HTTPEndpoint() string { - return "http://" + n.http.listenAddr() + return "http://" + n.http.listenAddr() //nolint:all }   // WSEndpoint returns the current JSON-RPC over WebSocket endpoint.
diff --git go-ethereum/rpc/http.go op-geth/rpc/http.go index dd376b1ecd59834fa99f0c3329fc146229b366b1..d79ac462d146c6924c06d5e3e9e6a2d75b97da59 100644 --- go-ethereum/rpc/http.go +++ op-geth/rpc/http.go @@ -33,8 +33,9 @@ "time" )   const ( - defaultBodyLimit = 5 * 1024 * 1024 - contentType = "application/json" + maxRequestContentLength = 1024 * 1024 * 32 + defaultBodyLimit