Skip to content

Commit

Permalink
Elderberry blockhash adjustments (#3424) (#3448)
Browse files Browse the repository at this point in the history
  • Loading branch information
tclemos authored Mar 14, 2024
1 parent e355073 commit 1d510de
Show file tree
Hide file tree
Showing 24 changed files with 239 additions and 171 deletions.
81 changes: 5 additions & 76 deletions db/migrations/state/0017.sql
Original file line number Diff line number Diff line change
@@ -1,80 +1,9 @@
-- +migrate Up
CREATE TABLE state.blob_inner
(
blob_inner_num BIGINT PRIMARY KEY,
data BYTEA,
block_num BIGINT NOT NULL REFERENCES state.block (block_num) ON DELETE CASCADE
);
ALTER TABLE state.receipt
ADD COLUMN IF NOT EXISTS im_state_root BYTEA;

ALTER TABLE state.virtual_batch
ADD COLUMN IF NOT EXISTS blob_inner_num BIGINT, -- REFERENCES state.blob_inner (blob_inner_num),
ADD COLUMN IF NOT EXISTS prev_l1_it_root VARCHAR,
ADD COLUMN IF NOT EXISTS prev_l1_it_index BIGINT;

ALTER TABLE IF EXISTS state.proof RENAME TO batch_proof;

ALTER TABLE state.batch_proof
ADD COLUMN IF NOT EXISTS blob_inner_num BIGINT; -- NOT NULL REFERENCES state.blob_inner (blob_inner_num) ON DELETE CASCADE;

CREATE TABLE state.blob_inner_proof
(
blob_inner_num BIGINT NOT NULL, -- REFERENCES state.blob_inner (blob_inner_num) ON DELETE CASCADE,
proof_id VARCHAR,
proof VARCHAR,
input_prover VARCHAR,
prover VARCHAR,
prover_id VARCHAR,
created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
updated_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
generating_since TIMESTAMP WITH TIME ZONE,
PRIMARY KEY (blob_inner_num)
);

CREATE TABLE state.blob_outer_proof
(
blob_outer_num BIGINT NOT NULL, -- REFERENCES state.blob_inner (blob_inner_num) ON DELETE CASCADE,
blob_outer_num_final BIGINT NOT NULL, -- REFERENCES state.blob_inner (blob_inner_num) ON DELETE CASCADE,
proof_id VARCHAR,
proof VARCHAR,
input_prover VARCHAR,
prover VARCHAR,
prover_id VARCHAR,
created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
updated_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
generating_since TIMESTAMP WITH TIME ZONE,
PRIMARY KEY (blob_outer_num, blob_outer_num_final)
);
UPDATE state.receipt SET im_state_root = post_state WHERE block_num >= (SELECT MIN(block_num) FROM state.l2block WHERE batch_num >= (SELECT from_batch_num FROM state.fork_id WHERE fork_id = 7));

-- +migrate Down
ALTER TABLE state.virtual_batch
DROP COLUMN IF EXISTS blob_inner_num,
DROP COLUMN IF EXISTS prev_l1_it_root,
DROP COLUMN IF EXISTS prev_l1_it_index;

DROP TABLE state.blob_outer_proof;

DROP TABLE state.blob_inner_proof;

DROP TABLE state.batch_proof;

DROP TABLE state.blob_inner;

CREATE TABLE state.proof
(
batch_num BIGINT NOT NULL REFERENCES state.batch (batch_num) ON DELETE CASCADE,
batch_num_final BIGINT NOT NULL REFERENCES state.batch (batch_num) ON DELETE CASCADE,
proof_id VARCHAR,
proof VARCHAR,
input_prover VARCHAR,
prover VARCHAR,
prover_id VARCHAR,
created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
updated_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
generating_since TIMESTAMP WITH TIME ZONE,
PRIMARY KEY (batch_num, batch_num_final)
);

ALTER TABLE state.virtual_batch
DROP COLUMN IF EXISTS blob_inner_num,
DROP COLUMN IF EXISTS prev_l1_it_root,
DROP COLUMN IF EXISTS prev_l1_it_index;
ALTER TABLE state.receipt
DROP COLUMN IF EXISTS im_state_root;
81 changes: 81 additions & 0 deletions db/migrations/state/0018.sql
Original file line number Diff line number Diff line change
@@ -0,0 +1,81 @@
-- +migrate Up

-- Migration 0018: blob-support schema for the Elderberry fork.
-- Introduces the blob_inner table, links virtual batches to blobs, and
-- splits the single proof table into batch / blob-inner / blob-outer proofs.

-- Raw inner-blob payload, keyed by its sequential number and tied to the
-- L1 block it was observed in (cascades away if that block is reorged out).
CREATE TABLE state.blob_inner
(
blob_inner_num BIGINT PRIMARY KEY,
data BYTEA,
block_num BIGINT NOT NULL REFERENCES state.block (block_num) ON DELETE CASCADE
);

-- Link each virtual batch to its blob and previous L1 info-tree position.
-- NOTE(review): the FK to state.blob_inner is deliberately left commented out
-- in the column definition below — presumably because virtual batches can be
-- inserted before their blob row exists; confirm with the sequencer flow.
ALTER TABLE state.virtual_batch
ADD COLUMN IF NOT EXISTS blob_inner_num BIGINT, -- REFERENCES state.blob_inner (blob_inner_num),
ADD COLUMN IF NOT EXISTS prev_l1_it_root VARCHAR,
ADD COLUMN IF NOT EXISTS prev_l1_it_index BIGINT;

-- The old generic proof table becomes the batch-level proof table.
ALTER TABLE IF EXISTS state.proof RENAME TO batch_proof;

-- Batch proofs now also reference the blob they aggregate into.
-- NOTE(review): FK left commented out here as well — see note above; confirm.
ALTER TABLE state.batch_proof
ADD COLUMN IF NOT EXISTS blob_inner_num BIGINT; -- NOT NULL REFERENCES state.blob_inner (blob_inner_num) ON DELETE CASCADE;

-- Proof covering a single inner blob (one row per blob_inner_num).
CREATE TABLE state.blob_inner_proof
(
blob_inner_num BIGINT NOT NULL, -- REFERENCES state.blob_inner (blob_inner_num) ON DELETE CASCADE,
proof_id VARCHAR,
proof VARCHAR,
input_prover VARCHAR,
prover VARCHAR,
prover_id VARCHAR,
created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
updated_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
generating_since TIMESTAMP WITH TIME ZONE,
PRIMARY KEY (blob_inner_num)
);

-- Aggregated proof covering a contiguous range of blobs
-- [blob_outer_num, blob_outer_num_final].
CREATE TABLE state.blob_outer_proof
(
blob_outer_num BIGINT NOT NULL, -- REFERENCES state.blob_inner (blob_inner_num) ON DELETE CASCADE,
blob_outer_num_final BIGINT NOT NULL, -- REFERENCES state.blob_inner (blob_inner_num) ON DELETE CASCADE,
proof_id VARCHAR,
proof VARCHAR,
input_prover VARCHAR,
prover VARCHAR,
prover_id VARCHAR,
created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
updated_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
generating_since TIMESTAMP WITH TIME ZONE,
PRIMARY KEY (blob_outer_num, blob_outer_num_final)
);

-- +migrate Down

-- Revert migration 0018 in reverse dependency order: first detach the
-- virtual_batch columns, then drop the proof tables, then blob_inner,
-- and finally recreate the pre-0018 proof table.
--
-- Fix: the original Down section repeated the identical
-- "ALTER TABLE state.virtual_batch DROP COLUMN ..." statement a second
-- time at the end (a leftover from copying migration 0017's Down section);
-- the duplicate was a no-op thanks to IF EXISTS and has been removed.
ALTER TABLE state.virtual_batch
DROP COLUMN IF EXISTS blob_inner_num,
DROP COLUMN IF EXISTS prev_l1_it_root,
DROP COLUMN IF EXISTS prev_l1_it_index;

DROP TABLE state.blob_outer_proof;

DROP TABLE state.blob_inner_proof;

-- batch_proof is the renamed pre-0018 proof table; it is dropped outright
-- and an empty state.proof is recreated below, so proof rows are NOT
-- preserved across a down-migration.
DROP TABLE state.batch_proof;

DROP TABLE state.blob_inner;

-- Recreate the pre-0018 proof table (one proof per batch range).
CREATE TABLE state.proof
(
batch_num BIGINT NOT NULL REFERENCES state.batch (batch_num) ON DELETE CASCADE,
batch_num_final BIGINT NOT NULL REFERENCES state.batch (batch_num) ON DELETE CASCADE,
proof_id VARCHAR,
proof VARCHAR,
input_prover VARCHAR,
prover VARCHAR,
prover_id VARCHAR,
created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
updated_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
generating_since TIMESTAMP WITH TIME ZONE,
PRIMARY KEY (batch_num, batch_num_final)
);

Original file line number Diff line number Diff line change
Expand Up @@ -7,9 +7,9 @@ import (
"github.com/stretchr/testify/assert"
)

type migrationTest0017 struct{}
type migrationTest0018 struct{}

func (m migrationTest0017) InsertData(db *sql.DB) error {
func (m migrationTest0018) InsertData(db *sql.DB) error {
const insertBatch1 = `
INSERT INTO state.batch (batch_num, global_exit_root, local_exit_root, acc_input_hash, state_root, timestamp, coinbase, raw_txs_data, forced_batch_num, wip)
VALUES (1,'0x0001', '0x0001', '0x0001', '0x0001', now(), '0x0001', null, null, true)`
Expand Down Expand Up @@ -45,7 +45,7 @@ func (m migrationTest0017) InsertData(db *sql.DB) error {
return nil
}

func (m migrationTest0017) RunAssertsAfterMigrationUp(t *testing.T, db *sql.DB) {
func (m migrationTest0018) RunAssertsAfterMigrationUp(t *testing.T, db *sql.DB) {
assertTableNotExists(t, db, "state", "proof")

assertTableExists(t, db, "state", "blob_inner")
Expand Down Expand Up @@ -77,7 +77,7 @@ func (m migrationTest0017) RunAssertsAfterMigrationUp(t *testing.T, db *sql.DB)
assert.NoError(t, err)
}

func (m migrationTest0017) RunAssertsAfterMigrationDown(t *testing.T, db *sql.DB) {
func (m migrationTest0018) RunAssertsAfterMigrationDown(t *testing.T, db *sql.DB) {
var result int

assertTableExists(t, db, "state", "proof")
Expand Down Expand Up @@ -110,6 +110,6 @@ func (m migrationTest0017) RunAssertsAfterMigrationDown(t *testing.T, db *sql.DB
assert.Equal(t, 0, result)
}

func TestMigration0017(t *testing.T) {
runMigrationTest(t, 17, migrationTest0017{})
func TestMigration0018(t *testing.T) {
runMigrationTest(t, 18, migrationTest0018{})
}
2 changes: 1 addition & 1 deletion jsonrpc/endpoints_eth_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -3311,7 +3311,7 @@ func TestGetTransactionReceipt(t *testing.T) {
receipt.Bloom = ethTypes.CreateBloom(ethTypes.Receipts{receipt})

rpcReceipt := types.Receipt{
Root: stateRoot,
Root: &stateRoot,
CumulativeGasUsed: types.ArgUint64(receipt.CumulativeGasUsed),
LogsBloom: receipt.Bloom,
Logs: receipt.Logs,
Expand Down
2 changes: 1 addition & 1 deletion jsonrpc/endpoints_zkevm_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -2260,7 +2260,7 @@ func TestGetTransactionReceiptByL2Hash(t *testing.T) {
receipt.Bloom = ethTypes.CreateBloom(ethTypes.Receipts{receipt})

rpcReceipt := types.Receipt{
Root: stateRoot,
Root: &stateRoot,
CumulativeGasUsed: types.ArgUint64(receipt.CumulativeGasUsed),
LogsBloom: receipt.Bloom,
Logs: receipt.Logs,
Expand Down
8 changes: 6 additions & 2 deletions jsonrpc/types/types.go
Original file line number Diff line number Diff line change
Expand Up @@ -601,7 +601,7 @@ func NewTransaction(

// Receipt structure
type Receipt struct {
Root common.Hash `json:"root"`
Root *common.Hash `json:"root,omitempty"`
CumulativeGasUsed ArgUint64 `json:"cumulativeGasUsed"`
LogsBloom types.Bloom `json:"logsBloom"`
Logs []*types.Log `json:"logs"`
Expand Down Expand Up @@ -643,7 +643,6 @@ func NewReceipt(tx types.Transaction, r *types.Receipt, l2Hash *common.Hash) (Re
return Receipt{}, err
}
receipt := Receipt{
Root: common.BytesToHash(r.PostState),
CumulativeGasUsed: ArgUint64(r.CumulativeGasUsed),
LogsBloom: r.Bloom,
Logs: logs,
Expand All @@ -659,6 +658,11 @@ func NewReceipt(tx types.Transaction, r *types.Receipt, l2Hash *common.Hash) (Re
Type: ArgUint64(r.Type),
TxL2Hash: l2Hash,
}
if common.BytesToHash(r.PostState).String() != state.ZeroHash.String() {
root := common.BytesToHash(r.PostState)
receipt.Root = &root
}

if r.EffectiveGasPrice != nil {
egp := ArgBig(*r.EffectiveGasPrice)
receipt.EffectiveGasPrice = &egp
Expand Down
2 changes: 2 additions & 0 deletions proto/src/proto/executor/v1/executor.proto
Original file line number Diff line number Diff line change
Expand Up @@ -872,4 +872,6 @@ enum ExecutorError {
EXECUTOR_ERROR_INVALID_DATA_STREAM = 115;
// EXECUTOR_ERROR_INVALID_UPDATE_MERKLE_TREE indicates that the provided update merkle tree is invalid, e.g. because the executor is configured not to write to database
EXECUTOR_ERROR_INVALID_UPDATE_MERKLE_TREE = 116;
// EXECUTOR_ERROR_SM_MAIN_INVALID_TX_STATUS_ERROR indicates that a TX has an invalid status-error combination
EXECUTOR_ERROR_SM_MAIN_INVALID_TX_STATUS_ERROR = 117;
}
1 change: 1 addition & 0 deletions state/convertersV2.go
Original file line number Diff line number Diff line change
Expand Up @@ -127,6 +127,7 @@ func (s *State) convertToProcessTransactionResponseV2(responses []*executor.Proc
result.ReturnValue = response.ReturnValue
result.GasLeft = response.GasLeft
result.GasUsed = response.GasUsed
result.CumulativeGasUsed = response.CumulativeGasUsed
result.GasRefunded = response.GasRefunded
result.RomError = executor.RomErr(response.Error)
result.CreateAddress = common.HexToAddress(response.CreateAddress)
Expand Down
6 changes: 6 additions & 0 deletions state/datastream.go
Original file line number Diff line number Diff line change
Expand Up @@ -120,6 +120,7 @@ func (b DSL2BlockStart) Decode(data []byte) DSL2BlockStart {
// DSL2Transaction represents a data stream L2 transaction
type DSL2Transaction struct {
L2BlockNumber uint64 // Not included in the encoded data
ImStateRoot common.Hash // Not included in the encoded data
EffectiveGasPricePercentage uint8 // 1 byte
IsValid uint8 // 1 byte
StateRoot common.Hash // 32 bytes
Expand Down Expand Up @@ -557,6 +558,9 @@ func GenerateDataStreamerFile(ctx context.Context, streamServer *datastreamer.St
}

for _, tx := range l2Block.Txs {
// < ETROG => IM State root is retrieved from the system SC (using cache is available)
// = ETROG => IM State root is retrieved from the receipt.post_state => Do nothing
// > ETROG => IM State root is retrieved from the receipt.im_state_root
if l2Block.ForkID < FORKID_ETROG {
// Populate intermediate state root with information from the system SC (or cache if available)
if imStateRoots == nil || (*imStateRoots)[blockStart.L2BlockNumber] == nil {
Expand All @@ -569,6 +573,8 @@ func GenerateDataStreamerFile(ctx context.Context, streamServer *datastreamer.St
} else {
tx.StateRoot = common.BytesToHash((*imStateRoots)[blockStart.L2BlockNumber])
}
} else if l2Block.ForkID > FORKID_ETROG {
tx.StateRoot = tx.ImStateRoot
}

_, err = streamServer.AddStreamEntry(EntryTypeL2Tx, tx.Encode())
Expand Down
2 changes: 1 addition & 1 deletion state/genesis.go
Original file line number Diff line number Diff line change
Expand Up @@ -204,7 +204,7 @@ func (s *State) SetGenesis(ctx context.Context, block Block, genesis Genesis, m
storeTxsEGPData := []StoreTxEGPData{}
txsL2Hash := []common.Hash{}

err = s.AddL2Block(ctx, batch.BatchNumber, l2Block, receipts, txsL2Hash, storeTxsEGPData, dbTx)
err = s.AddL2Block(ctx, batch.BatchNumber, l2Block, receipts, txsL2Hash, storeTxsEGPData, []common.Hash{}, dbTx)
if err != nil {
return common.Hash{}, err
}
Expand Down
36 changes: 22 additions & 14 deletions state/helper.go
Original file line number Diff line number Diff line change
Expand Up @@ -279,19 +279,23 @@ func DecodeTx(encodedTx string) (*types.Transaction, error) {
}

// GenerateReceipt generates a receipt from a processed transaction
func GenerateReceipt(blockNumber *big.Int, processedTx *ProcessTransactionResponse, txIndex uint) *types.Receipt {
func GenerateReceipt(blockNumber *big.Int, processedTx *ProcessTransactionResponse, txIndex uint, forkID uint64) *types.Receipt {
receipt := &types.Receipt{
Type: uint8(processedTx.Type),
PostState: processedTx.StateRoot.Bytes(),
CumulativeGasUsed: processedTx.GasUsed,
BlockNumber: blockNumber,
GasUsed: processedTx.GasUsed,
TxHash: processedTx.Tx.Hash(),
TransactionIndex: txIndex,
ContractAddress: processedTx.CreateAddress,
Logs: processedTx.Logs,
Type: uint8(processedTx.Type),
BlockNumber: blockNumber,
GasUsed: processedTx.GasUsed,
TxHash: processedTx.Tx.Hash(),
TransactionIndex: txIndex,
ContractAddress: processedTx.CreateAddress,
Logs: processedTx.Logs,
}
if forkID <= FORKID_ETROG {
receipt.PostState = processedTx.StateRoot.Bytes()
receipt.CumulativeGasUsed = processedTx.GasUsed
} else {
receipt.PostState = ZeroHash.Bytes()
receipt.CumulativeGasUsed = processedTx.CumulativeGasUsed
}

if processedTx.EffectiveGasPrice != "" {
effectiveGasPrice, ok := big.NewInt(0).SetString(processedTx.EffectiveGasPrice, 0)
if !ok {
Expand All @@ -309,10 +313,14 @@ func GenerateReceipt(blockNumber *big.Int, processedTx *ProcessTransactionRespon
for i := 0; i < len(receipt.Logs); i++ {
receipt.Logs[i].TxHash = processedTx.Tx.Hash()
}
if processedTx.RomError == nil {
receipt.Status = types.ReceiptStatusSuccessful
if forkID <= FORKID_ETROG {
if processedTx.RomError == nil {
receipt.Status = types.ReceiptStatusSuccessful
} else {
receipt.Status = types.ReceiptStatusFailed
}
} else {
receipt.Status = types.ReceiptStatusFailed
receipt.Status = uint64(processedTx.Status)
}

return receipt
Expand Down
4 changes: 2 additions & 2 deletions state/interfaces.go
Original file line number Diff line number Diff line change
Expand Up @@ -72,7 +72,7 @@ type storage interface {
GetL2BlockTransactionCountByHash(ctx context.Context, blockHash common.Hash, dbTx pgx.Tx) (uint64, error)
GetL2BlockTransactionCountByNumber(ctx context.Context, blockNumber uint64, dbTx pgx.Tx) (uint64, error)
GetTransactionEGPLogByHash(ctx context.Context, transactionHash common.Hash, dbTx pgx.Tx) (*EffectiveGasPriceLog, error)
AddL2Block(ctx context.Context, batchNumber uint64, l2Block *L2Block, receipts []*types.Receipt, txsL2Hash []common.Hash, txsEGPData []StoreTxEGPData, dbTx pgx.Tx) error
AddL2Block(ctx context.Context, batchNumber uint64, l2Block *L2Block, receipts []*types.Receipt, txsL2Hash []common.Hash, txsEGPData []StoreTxEGPData, imStateRoots []common.Hash, dbTx pgx.Tx) error
GetLastVirtualizedL2BlockNumber(ctx context.Context, dbTx pgx.Tx) (uint64, error)
GetLastConsolidatedL2BlockNumber(ctx context.Context, dbTx pgx.Tx) (uint64, error)
GetLastVerifiedL2BlockNumberUntilL1Block(ctx context.Context, l1FinalizedBlockNumber uint64, dbTx pgx.Tx) (uint64, error)
Expand All @@ -93,7 +93,7 @@ type storage interface {
IsL2BlockConsolidated(ctx context.Context, blockNumber uint64, dbTx pgx.Tx) (bool, error)
IsL2BlockVirtualized(ctx context.Context, blockNumber uint64, dbTx pgx.Tx) (bool, error)
GetLogs(ctx context.Context, fromBlock uint64, toBlock uint64, addresses []common.Address, topics [][]common.Hash, blockHash *common.Hash, since *time.Time, dbTx pgx.Tx) ([]*types.Log, error)
AddReceipt(ctx context.Context, receipt *types.Receipt, dbTx pgx.Tx) error
AddReceipt(ctx context.Context, receipt *types.Receipt, imStateRoot common.Hash, dbTx pgx.Tx) error
AddLog(ctx context.Context, l *types.Log, dbTx pgx.Tx) error
GetExitRootByGlobalExitRoot(ctx context.Context, ger common.Hash, dbTx pgx.Tx) (*GlobalExitRoot, error)
AddSequence(ctx context.Context, sequence Sequence, dbTx pgx.Tx) error
Expand Down
Loading

0 comments on commit 1d510de

Please sign in to comment.