From cd13fdef45c46558779d017d8eeecf517b7b0183 Mon Sep 17 00:00:00 2001 From: Nikolay Kurtov Date: Tue, 26 Sep 2023 15:25:35 +0200 Subject: [PATCH 1/6] refactor(state-sync): Rename module --- chain/chain/src/chain.rs | 4 ++-- chain/chain/src/resharding.rs | 2 +- chain/chain/src/store.rs | 2 +- chain/chain/src/store_validator.rs | 2 +- chain/chain/src/store_validator/validate.rs | 2 +- chain/client/src/debug.rs | 4 ++-- chain/client/src/sync/state.rs | 4 ++-- chain/client/src/sync_jobs_actor.rs | 2 +- chain/client/src/view_client.rs | 4 ++-- chain/network/src/network_protocol/mod.rs | 2 +- chain/network/src/testonly/fake_client.rs | 2 +- core/primitives/src/lib.rs | 2 +- core/primitives/src/{syncing.rs => state_sync.rs} | 2 +- integration-tests/src/tests/client/process_blocks.rs | 2 +- integration-tests/src/tests/client/state_dump.rs | 4 ++-- integration-tests/src/tests/nearcore/sync_state_nodes.rs | 2 +- nearcore/src/state_sync.rs | 2 +- tools/mock-node/src/setup.rs | 2 +- tools/state-viewer/src/apply_chunk.rs | 2 +- tools/state-viewer/src/scan_db.rs | 2 +- tools/state-viewer/src/state_parts.rs | 2 +- 21 files changed, 26 insertions(+), 26 deletions(-) rename core/primitives/src/{syncing.rs => state_sync.rs} (99%) diff --git a/chain/chain/src/chain.rs b/chain/chain/src/chain.rs index dc8df9d3f36..b38dc32a976 100644 --- a/chain/chain/src/chain.rs +++ b/chain/chain/src/chain.rs @@ -56,11 +56,11 @@ use near_primitives::sharding::{ ShardChunkHeader, ShardInfo, ShardProof, StateSyncInfo, }; use near_primitives::state_part::PartId; -use near_primitives::static_clock::StaticClock; -use near_primitives::syncing::{ +use near_primitives::state_sync::{ get_num_state_parts, ReceiptProofResponse, RootProof, ShardStateSyncResponseHeader, ShardStateSyncResponseHeaderV1, ShardStateSyncResponseHeaderV2, StateHeaderKey, StatePartKey, }; +use near_primitives::static_clock::StaticClock; use near_primitives::transaction::{ExecutionOutcomeWithIdAndProof, SignedTransaction}; use 
near_primitives::types::chunk_extra::ChunkExtra; use near_primitives::types::{ diff --git a/chain/chain/src/resharding.rs b/chain/chain/src/resharding.rs index 83092ad27d3..f5f49e6c379 100644 --- a/chain/chain/src/resharding.rs +++ b/chain/chain/src/resharding.rs @@ -13,7 +13,7 @@ use near_primitives::hash::CryptoHash; use near_primitives::shard_layout::{account_id_to_shard_uid, ShardLayout}; use near_primitives::state::FlatStateValue; use near_primitives::state_part::PartId; -use near_primitives::syncing::get_num_state_parts; +use near_primitives::state_sync::get_num_state_parts; use near_primitives::types::chunk_extra::ChunkExtra; use near_primitives::types::{AccountId, ShardId, StateRoot}; use near_store::flat::{ diff --git a/chain/chain/src/store.rs b/chain/chain/src/store.rs index 7f2d51d9757..2ba08539af4 100644 --- a/chain/chain/src/store.rs +++ b/chain/chain/src/store.rs @@ -23,7 +23,7 @@ use near_primitives::sharding::{ ChunkHash, EncodedShardChunk, PartialEncodedChunk, ReceiptProof, ShardChunk, ShardChunkHeader, StateSyncInfo, }; -use near_primitives::syncing::{ +use near_primitives::state_sync::{ get_num_state_parts, ReceiptProofResponse, ShardStateSyncResponseHeader, StateHeaderKey, StatePartKey, StateSyncDumpProgress, }; diff --git a/chain/chain/src/store_validator.rs b/chain/chain/src/store_validator.rs index 07fd1407581..46182754409 100644 --- a/chain/chain/src/store_validator.rs +++ b/chain/chain/src/store_validator.rs @@ -17,7 +17,7 @@ use near_primitives::epoch_manager::epoch_info::EpochInfo; use near_primitives::epoch_manager::AGGREGATOR_KEY; use near_primitives::hash::CryptoHash; use near_primitives::sharding::{ChunkHash, ShardChunk, StateSyncInfo}; -use near_primitives::syncing::{ShardStateSyncResponseHeader, StateHeaderKey, StatePartKey}; +use near_primitives::state_sync::{ShardStateSyncResponseHeader, StateHeaderKey, StatePartKey}; use near_primitives::transaction::ExecutionOutcomeWithProof; use near_primitives::types::chunk_extra::ChunkExtra; 
use near_primitives::types::{AccountId, BlockHeight, EpochId}; diff --git a/chain/chain/src/store_validator/validate.rs b/chain/chain/src/store_validator/validate.rs index 0f2e2d8184e..140d5660eca 100644 --- a/chain/chain/src/store_validator/validate.rs +++ b/chain/chain/src/store_validator/validate.rs @@ -6,7 +6,7 @@ use near_primitives::epoch_manager::epoch_info::EpochInfo; use near_primitives::hash::CryptoHash; use near_primitives::shard_layout::{get_block_shard_uid, ShardUId}; use near_primitives::sharding::{ChunkHash, ShardChunk, StateSyncInfo}; -use near_primitives::syncing::{ +use near_primitives::state_sync::{ get_num_state_parts, ShardStateSyncResponseHeader, StateHeaderKey, StatePartKey, }; use near_primitives::transaction::{ExecutionOutcomeWithProof, SignedTransaction}; diff --git a/chain/client/src/debug.rs b/chain/client/src/debug.rs index 87a86ae3149..79b2276da8a 100644 --- a/chain/client/src/debug.rs +++ b/chain/client/src/debug.rs @@ -18,11 +18,11 @@ use near_client_primitives::{ use near_epoch_manager::EpochManagerAdapter; use near_o11y::{handler_debug_span, log_assert, OpenTelemetrySpanExt, WithSpanContext}; use near_performance_metrics_macros::perf; -use near_primitives::syncing::get_num_state_parts; +use near_primitives::state_sync::get_num_state_parts; use near_primitives::types::{AccountId, BlockHeight, ShardId, ValidatorInfoIdentifier}; use near_primitives::{ hash::CryptoHash, - syncing::{ShardStateSyncResponseHeader, StateHeaderKey}, + state_sync::{ShardStateSyncResponseHeader, StateHeaderKey}, types::EpochId, views::ValidatorInfo, }; diff --git a/chain/client/src/sync/state.rs b/chain/client/src/sync/state.rs index b7a0e73c9ec..e838baf9125 100644 --- a/chain/client/src/sync/state.rs +++ b/chain/client/src/sync/state.rs @@ -46,8 +46,8 @@ use near_network::types::{ use near_primitives::hash::CryptoHash; use near_primitives::shard_layout::ShardUId; use near_primitives::state_part::PartId; +use near_primitives::state_sync::{get_num_state_parts, 
ShardStateSyncResponse}; use near_primitives::static_clock::StaticClock; -use near_primitives::syncing::{get_num_state_parts, ShardStateSyncResponse}; use near_primitives::types::{AccountId, EpochHeight, EpochId, ShardId, StateRoot}; use rand::seq::SliceRandom; use rand::{thread_rng, Rng}; @@ -1400,7 +1400,7 @@ mod test { use near_epoch_manager::EpochManagerAdapter; use near_network::test_utils::MockPeerManagerAdapter; use near_primitives::{ - syncing::{ShardStateSyncResponseHeader, ShardStateSyncResponseV2}, + state_sync::{ShardStateSyncResponseHeader, ShardStateSyncResponseV2}, test_utils::TestBlockBuilder, types::EpochId, }; diff --git a/chain/client/src/sync_jobs_actor.rs b/chain/client/src/sync_jobs_actor.rs index 8d7866c9bf9..f2b0a45a844 100644 --- a/chain/client/src/sync_jobs_actor.rs +++ b/chain/client/src/sync_jobs_actor.rs @@ -9,7 +9,7 @@ use near_chain::Chain; use near_o11y::{handler_debug_span, OpenTelemetrySpanExt, WithSpanContext, WithSpanContextExt}; use near_performance_metrics_macros::perf; use near_primitives::state_part::PartId; -use near_primitives::syncing::StatePartKey; +use near_primitives::state_sync::StatePartKey; use near_primitives::types::ShardId; use near_store::DBCol; diff --git a/chain/client/src/view_client.rs b/chain/client/src/view_client.rs index fbef4f43428..5903c798587 100644 --- a/chain/client/src/view_client.rs +++ b/chain/client/src/view_client.rs @@ -42,11 +42,11 @@ use near_primitives::merkle::{merklize, PartialMerkleTree}; use near_primitives::network::AnnounceAccount; use near_primitives::receipt::Receipt; use near_primitives::sharding::ShardChunk; -use near_primitives::static_clock::StaticClock; -use near_primitives::syncing::{ +use near_primitives::state_sync::{ ShardStateSyncResponse, ShardStateSyncResponseHeader, ShardStateSyncResponseV1, ShardStateSyncResponseV2, }; +use near_primitives::static_clock::StaticClock; use near_primitives::types::{ AccountId, BlockHeight, BlockId, BlockReference, EpochReference, Finality, 
MaybeBlockId, ShardId, SyncCheckpoint, TransactionOrReceiptId, ValidatorInfoIdentifier, diff --git a/chain/network/src/network_protocol/mod.rs b/chain/network/src/network_protocol/mod.rs index 03a46de6af7..824adddd904 100644 --- a/chain/network/src/network_protocol/mod.rs +++ b/chain/network/src/network_protocol/mod.rs @@ -35,7 +35,7 @@ use near_primitives::network::{AnnounceAccount, PeerId}; use near_primitives::sharding::{ ChunkHash, PartialEncodedChunk, PartialEncodedChunkPart, ReceiptProof, ShardChunkHeader, }; -use near_primitives::syncing::{ShardStateSyncResponse, ShardStateSyncResponseV1}; +use near_primitives::state_sync::{ShardStateSyncResponse, ShardStateSyncResponseV1}; use near_primitives::transaction::SignedTransaction; use near_primitives::types::AccountId; use near_primitives::types::{BlockHeight, ShardId}; diff --git a/chain/network/src/testonly/fake_client.rs b/chain/network/src/testonly/fake_client.rs index 625ea4c6352..5d2b0d00c96 100644 --- a/chain/network/src/testonly/fake_client.rs +++ b/chain/network/src/testonly/fake_client.rs @@ -9,7 +9,7 @@ use near_primitives::challenge::Challenge; use near_primitives::hash::CryptoHash; use near_primitives::network::{AnnounceAccount, PeerId}; use near_primitives::sharding::{ChunkHash, PartialEncodedChunkPart}; -use near_primitives::syncing::{ShardStateSyncResponse, ShardStateSyncResponseV2}; +use near_primitives::state_sync::{ShardStateSyncResponse, ShardStateSyncResponseV2}; use near_primitives::transaction::SignedTransaction; use near_primitives::types::{AccountId, EpochId, ShardId}; use near_primitives::views::FinalExecutionOutcomeView; diff --git a/core/primitives/src/lib.rs b/core/primitives/src/lib.rs index ebf2bcaf792..b7fca043fa0 100644 --- a/core/primitives/src/lib.rs +++ b/core/primitives/src/lib.rs @@ -23,8 +23,8 @@ pub mod signable_message; pub mod state; pub mod state_part; pub mod state_record; +pub mod state_sync; pub mod static_clock; -pub mod syncing; pub mod telemetry; pub mod 
test_utils; pub mod transaction; diff --git a/core/primitives/src/syncing.rs b/core/primitives/src/state_sync.rs similarity index 99% rename from core/primitives/src/syncing.rs rename to core/primitives/src/state_sync.rs index 7e2bf846c97..03ff371f128 100644 --- a/core/primitives/src/syncing.rs +++ b/core/primitives/src/state_sync.rs @@ -248,7 +248,7 @@ pub enum StateSyncDumpProgress { #[cfg(test)] mod tests { - use crate::syncing::{get_num_state_parts, STATE_PART_MEMORY_LIMIT}; + use crate::state_sync::{get_num_state_parts, STATE_PART_MEMORY_LIMIT}; #[test] fn test_get_num_state_parts() { diff --git a/integration-tests/src/tests/client/process_blocks.rs b/integration-tests/src/tests/client/process_blocks.rs index 83690f8994f..73fd0ff64ff 100644 --- a/integration-tests/src/tests/client/process_blocks.rs +++ b/integration-tests/src/tests/client/process_blocks.rs @@ -59,7 +59,7 @@ use near_primitives::sharding::{ ShardChunkHeaderV3, }; use near_primitives::state_part::PartId; -use near_primitives::syncing::{get_num_state_parts, StatePartKey}; +use near_primitives::state_sync::{get_num_state_parts, StatePartKey}; use near_primitives::test_utils::create_test_signer; use near_primitives::test_utils::TestBlockBuilder; use near_primitives::transaction::{ diff --git a/integration-tests/src/tests/client/state_dump.rs b/integration-tests/src/tests/client/state_dump.rs index 1f8d6374fc6..1e558156173 100644 --- a/integration-tests/src/tests/client/state_dump.rs +++ b/integration-tests/src/tests/client/state_dump.rs @@ -16,8 +16,8 @@ use near_primitives::block::Tip; use near_primitives::shard_layout::ShardUId; use near_primitives::state::FlatStateValue; use near_primitives::state_part::PartId; -use near_primitives::syncing::get_num_state_parts; -use near_primitives::syncing::StatePartKey; +use near_primitives::state_sync::get_num_state_parts; +use near_primitives::state_sync::StatePartKey; use near_primitives::transaction::SignedTransaction; use 
near_primitives::types::BlockHeight; use near_primitives::views::{QueryRequest, QueryResponseKind}; diff --git a/integration-tests/src/tests/nearcore/sync_state_nodes.rs b/integration-tests/src/tests/nearcore/sync_state_nodes.rs index 93765b4268e..9d3a2bfa942 100644 --- a/integration-tests/src/tests/nearcore/sync_state_nodes.rs +++ b/integration-tests/src/tests/nearcore/sync_state_nodes.rs @@ -17,7 +17,7 @@ use near_o11y::testonly::{init_integration_logger, init_test_logger}; use near_o11y::WithSpanContextExt; use near_primitives::shard_layout::ShardUId; use near_primitives::state_part::PartId; -use near_primitives::syncing::{get_num_state_parts, StatePartKey}; +use near_primitives::state_sync::{get_num_state_parts, StatePartKey}; use near_primitives::transaction::SignedTransaction; use near_primitives::utils::MaybeValidated; use near_primitives_core::types::ShardId; diff --git a/nearcore/src/state_sync.rs b/nearcore/src/state_sync.rs index 0e05a2a2f84..b3596b0aa8e 100644 --- a/nearcore/src/state_sync.rs +++ b/nearcore/src/state_sync.rs @@ -13,7 +13,7 @@ use near_epoch_manager::shard_tracker::ShardTracker; use near_epoch_manager::EpochManagerAdapter; use near_primitives::hash::CryptoHash; use near_primitives::state_part::PartId; -use near_primitives::syncing::{get_num_state_parts, StatePartKey, StateSyncDumpProgress}; +use near_primitives::state_sync::{get_num_state_parts, StatePartKey, StateSyncDumpProgress}; use near_primitives::types::{AccountId, EpochHeight, EpochId, ShardId, StateRoot}; use near_store::DBCol; use rand::{thread_rng, Rng}; diff --git a/tools/mock-node/src/setup.rs b/tools/mock-node/src/setup.rs index 6f2dca38ebe..acacde0e935 100644 --- a/tools/mock-node/src/setup.rs +++ b/tools/mock-node/src/setup.rs @@ -13,7 +13,7 @@ use near_network::tcp; use near_network::types::PeerInfo; use near_primitives::network::PeerId; use near_primitives::state_part::PartId; -use near_primitives::syncing::get_num_state_parts; +use 
near_primitives::state_sync::get_num_state_parts; use near_primitives::types::{BlockHeight, ShardId}; use near_store::test_utils::create_test_store; use nearcore::{NearConfig, NightshadeRuntime}; diff --git a/tools/state-viewer/src/apply_chunk.rs b/tools/state-viewer/src/apply_chunk.rs index 16dbedb36a5..7ecb175a25f 100644 --- a/tools/state-viewer/src/apply_chunk.rs +++ b/tools/state-viewer/src/apply_chunk.rs @@ -10,7 +10,7 @@ use near_primitives::merkle::combine_hash; use near_primitives::receipt::Receipt; use near_primitives::shard_layout; use near_primitives::sharding::{ChunkHash, ReceiptProof}; -use near_primitives::syncing::ReceiptProofResponse; +use near_primitives::state_sync::ReceiptProofResponse; use near_primitives::types::{BlockHeight, ShardId}; use near_primitives_core::hash::hash; use near_primitives_core::types::Gas; diff --git a/tools/state-viewer/src/scan_db.rs b/tools/state-viewer/src/scan_db.rs index f7c1b747c26..bd96193c106 100644 --- a/tools/state-viewer/src/scan_db.rs +++ b/tools/state-viewer/src/scan_db.rs @@ -9,7 +9,7 @@ use near_primitives::receipt::Receipt; use near_primitives::shard_layout::{get_block_shard_uid_rev, ShardUId}; use near_primitives::sharding::{ChunkHash, ReceiptProof, ShardChunk, StateSyncInfo}; use near_primitives::state::FlatStateValue; -use near_primitives::syncing::{ +use near_primitives::state_sync::{ ShardStateSyncResponseHeader, StateHeaderKey, StatePartKey, StateSyncDumpProgress, }; use near_primitives::transaction::{ExecutionOutcomeWithProof, SignedTransaction}; diff --git a/tools/state-viewer/src/state_parts.rs b/tools/state-viewer/src/state_parts.rs index c3b83371784..26860af4de7 100644 --- a/tools/state-viewer/src/state_parts.rs +++ b/tools/state-viewer/src/state_parts.rs @@ -12,7 +12,7 @@ use near_primitives::challenge::PartialState; use near_primitives::epoch_manager::epoch_info::EpochInfo; use near_primitives::state_part::PartId; use near_primitives::state_record::StateRecord; -use 
near_primitives::syncing::get_num_state_parts; +use near_primitives::state_sync::get_num_state_parts; use near_primitives::types::{EpochId, StateRoot}; use near_primitives_core::hash::CryptoHash; use near_primitives_core::types::{BlockHeight, EpochHeight, ShardId}; From b2f497776119dc568c4f299eb32fe653c2ff9f16 Mon Sep 17 00:00:00 2001 From: Jakob Meier Date: Tue, 26 Sep 2023 16:13:20 +0200 Subject: [PATCH 2/6] chore: bump crossbeam-channel to 0.5.8 (#9592) Versions 0.5.1 - 0.5.7 were yanked due to a race condition. See [changelog](/~https://github.com/crossbeam-rs/crossbeam/blob/master/crossbeam-channel/CHANGELOG.md#version-058) and [crates.io](https://crates.io/crates/crossbeam-channel/versions) and [PR with the fix](/~https://github.com/crossbeam-rs/crossbeam/pull/972). --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 418e8a4956e..8347c7b40a8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1531,9 +1531,9 @@ dependencies = [ [[package]] name = "crossbeam-channel" -version = "0.5.4" +version = "0.5.8" source = "registry+/~https://github.com/rust-lang/crates.io-index" -checksum = "5aaa7bd5fb665c6864b5f963dd9097905c54125909c7aa94c9e18507cdbe6c53" +checksum = "a33c2bf77f2df06183c3aa30d1e96c0695a313d4f9c453cc3762a6db39f99200" dependencies = [ "cfg-if 1.0.0", "crossbeam-utils", diff --git a/Cargo.toml b/Cargo.toml index 0b95cc0e4ba..a39e262ca51 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -125,7 +125,7 @@ conqueue = "0.4.0" cpu-time = "1.0" criterion = { version = "0.3.5", default_features = false, features = ["html_reports", "cargo_bench_support"] } crossbeam = "0.8" -crossbeam-channel = "0.5" +crossbeam-channel = "0.5.8" crossbeam-queue = "0.3.8" csv = "1.2.1" curve25519-dalek = "3" From d6df8ba3d790f17d17ae517be628e19d5c519b46 Mon Sep 17 00:00:00 2001 From: Shreyan Gupta Date: Tue, 26 Sep 2023 12:13:46 -0700 Subject: [PATCH 3/6] [cleanup] Remove redundant pattern of manually creating 
store, epoch manager, runtime in integration tests (#9595) We had a common pattern of creating a bunch of manual stores, epoch managers, and nightshade runtime for the test environment. This PR brings the default functionality to the test environment builder. --- Cargo.lock | 1 + chain/client/Cargo.toml | 1 + chain/client/src/test_utils.rs | 43 +++- integration-tests/src/tests/client/mod.rs | 2 + .../src/tests/client/process_blocks.rs | 226 +++++------------- .../src/tests/client/state_dump.rs | 111 +++------ .../{nearcore => client}/state_snapshot.rs | 42 +--- .../{nearcore => client}/sync_state_nodes.rs | 54 +---- integration-tests/src/tests/client/utils.rs | 16 +- integration-tests/src/tests/nearcore/mod.rs | 2 - 10 files changed, 162 insertions(+), 336 deletions(-) rename integration-tests/src/tests/{nearcore => client}/state_snapshot.rs (86%) rename integration-tests/src/tests/{nearcore => client}/sync_state_nodes.rs (94%) diff --git a/Cargo.lock b/Cargo.lock index 8347c7b40a8..ee5e9d588cb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3613,6 +3613,7 @@ dependencies = [ "serde_json", "strum", "sysinfo", + "tempfile", "thiserror", "tokio", "tracing", diff --git a/chain/client/Cargo.toml b/chain/client/Cargo.toml index 3842074071a..9ebe33f88d9 100644 --- a/chain/client/Cargo.toml +++ b/chain/client/Cargo.toml @@ -33,6 +33,7 @@ serde.workspace = true serde_json.workspace = true strum.workspace = true sysinfo.workspace = true +tempfile.workspace = true thiserror.workspace = true tokio.workspace = true tracing.workspace = true diff --git a/chain/client/src/test_utils.rs b/chain/client/src/test_utils.rs index 998992a505a..387292bca03 100644 --- a/chain/client/src/test_utils.rs +++ b/chain/client/src/test_utils.rs @@ -2,10 +2,12 @@ // code so we're in the clear. 
#![allow(clippy::arc_with_non_send_sync)] +use itertools::Itertools; use std::cmp::max; use std::collections::{HashMap, HashSet}; use std::mem::swap; use std::ops::DerefMut; +use std::path::PathBuf; use std::sync::{Arc, Mutex, RwLock}; use std::time::{Duration, Instant}; @@ -83,7 +85,7 @@ use near_primitives::views::{ AccountView, FinalExecutionOutcomeView, QueryRequest, QueryResponseKind, StateItem, }; use near_store::test_utils::create_test_store; -use near_store::Store; +use near_store::{NodeStorage, Store}; use near_telemetry::TelemetryActor; use crate::adapter::{ @@ -1361,6 +1363,7 @@ pub struct TestEnvBuilder { chain_genesis: ChainGenesis, clients: Vec, validators: Vec, + home_dirs: Option>, stores: Option>, epoch_managers: Option>, shard_trackers: Option>, @@ -1386,6 +1389,7 @@ impl TestEnvBuilder { chain_genesis, clients, validators, + home_dirs: None, stores: None, epoch_managers: None, shard_trackers: None, @@ -1443,6 +1447,19 @@ impl TestEnvBuilder { self.validators(Self::make_accounts(num)) } + fn ensure_home_dirs(mut self) -> Self { + if self.home_dirs.is_none() { + let home_dirs = (0..self.clients.len()) + .map(|_| { + let temp_dir = tempfile::tempdir().unwrap(); + temp_dir.into_path() + }) + .collect_vec(); + self.home_dirs = Some(home_dirs) + } + self + } + /// Overrides the stores that are used to create epoch managers and runtimes. pub fn stores(mut self, stores: Vec) -> Self { assert_eq!(stores.len(), self.clients.len()); @@ -1453,6 +1470,23 @@ impl TestEnvBuilder { self } + pub fn real_stores(self) -> Self { + let ret = self.ensure_home_dirs(); + let stores = ret + .home_dirs + .as_ref() + .unwrap() + .iter() + .map(|home_dir| { + NodeStorage::opener(home_dir.as_path(), false, &Default::default(), None) + .open() + .unwrap() + .get_hot_store() + }) + .collect_vec(); + ret.stores(stores) + } + /// Internal impl to make sure the stores are initialized. 
fn ensure_stores(self) -> Self { if self.stores.is_some() { @@ -1561,8 +1595,11 @@ impl TestEnvBuilder { /// Visible for extension methods in integration-tests. pub fn internal_ensure_epoch_managers_for_nightshade_runtime( self, - ) -> (Self, Vec, Vec>) { + ) -> (Self, Vec, Vec, Vec>) { let builder = self.ensure_epoch_managers(); + let default_home_dirs = + (0..builder.clients.len()).map(|_| PathBuf::from("../../../..")).collect_vec(); + let home_dirs = builder.home_dirs.clone().unwrap_or(default_home_dirs); let stores = builder.stores.clone().unwrap(); let epoch_managers = builder .epoch_managers @@ -1576,7 +1613,7 @@ impl TestEnvBuilder { EpochManagerKind::Handle(handle) => handle, }) .collect(); - (builder, stores, epoch_managers) + (builder, home_dirs, stores, epoch_managers) } /// Specifies custom ShardTracker for each client. This allows us to diff --git a/integration-tests/src/tests/client/mod.rs b/integration-tests/src/tests/client/mod.rs index 17a65afa890..c97f2c15c0a 100644 --- a/integration-tests/src/tests/client/mod.rs +++ b/integration-tests/src/tests/client/mod.rs @@ -12,5 +12,7 @@ mod runtimes; mod sandbox; mod sharding_upgrade; mod state_dump; +mod state_snapshot; +mod sync_state_nodes; mod undo_block; mod utils; diff --git a/integration-tests/src/tests/client/process_blocks.rs b/integration-tests/src/tests/client/process_blocks.rs index 73fd0ff64ff..c3e143cf774 100644 --- a/integration-tests/src/tests/client/process_blocks.rs +++ b/integration-tests/src/tests/client/process_blocks.rs @@ -1,5 +1,4 @@ use std::collections::{HashSet, VecDeque}; -use std::path::Path; use std::str::FromStr; use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::{Arc, RwLock}; @@ -7,23 +6,17 @@ use std::sync::{Arc, RwLock}; use actix::System; use assert_matches::assert_matches; use futures::{future, FutureExt}; -use near_async::messaging::{IntoSender, Sender}; -use near_chain::test_utils::ValidatorSchedule; -use 
near_chunks::test_utils::MockClientAdapterForShardsManager; - -use near_epoch_manager::shard_tracker::{ShardTracker, TrackedConfig}; -use near_epoch_manager::{EpochManager, EpochManagerHandle}; -use near_primitives::config::{ActionCosts, ExtCosts}; -use near_primitives::num_rational::{Ratio, Rational32}; - use near_actix_test_utils::run_actix; +use near_async::messaging::IntoSender; use near_chain::chain::ApplyStatePartsRequest; +use near_chain::test_utils::ValidatorSchedule; use near_chain::types::{LatestKnown, RuntimeAdapter}; use near_chain::validate::validate_chunk_with_chunk_extra; use near_chain::{ Block, BlockProcessingArtifact, ChainGenesis, ChainStore, ChainStoreAccess, Error, Provenance, }; -use near_chain_configs::{ClientConfig, Genesis, DEFAULT_GC_NUM_EPOCHS_TO_KEEP}; +use near_chain_configs::{Genesis, DEFAULT_GC_NUM_EPOCHS_TO_KEEP}; +use near_chunks::test_utils::MockClientAdapterForShardsManager; use near_chunks::{ChunkStatus, ShardsManager}; use near_client::test_utils::{ create_chunk_on_height, setup_client_with_synchronous_shards_manager, setup_mock, @@ -70,22 +63,23 @@ use near_primitives::trie_key::TrieKey; use near_primitives::types::validator_stake::ValidatorStake; use near_primitives::types::{AccountId, BlockHeight, EpochId, NumBlocks, ProtocolVersion}; use near_primitives::utils::to_timestamp; -use near_primitives::validator_signer::{InMemoryValidatorSigner, ValidatorSigner}; +use near_primitives::validator_signer::ValidatorSigner; use near_primitives::version::PROTOCOL_VERSION; use near_primitives::views::{ BlockHeaderView, FinalExecutionStatus, QueryRequest, QueryResponseKind, }; +use near_primitives_core::config::{ActionCosts, ExtCosts}; +use near_primitives_core::num_rational::{Ratio, Rational32}; use near_primitives_core::types::ShardId; use near_store::cold_storage::{update_cold_db, update_cold_head}; -use near_store::genesis::initialize_genesis_state; use near_store::metadata::DbKind; use near_store::metadata::DB_VERSION; use 
near_store::test_utils::create_test_node_storage_with_cold; use near_store::test_utils::create_test_store; +use near_store::NodeStorage; use near_store::{get, DBCol, TrieChanges}; -use near_store::{NodeStorage, Store}; use nearcore::config::{GenesisExt, TESTING_INIT_BALANCE, TESTING_INIT_STAKE}; -use nearcore::{NightshadeRuntime, NEAR_BASE}; +use nearcore::NEAR_BASE; use rand::prelude::StdRng; use rand::{Rng, SeedableRng}; @@ -1811,32 +1805,11 @@ fn test_process_block_after_state_sync() { let mut chain_genesis = ChainGenesis::test(); chain_genesis.epoch_length = epoch_length; - let num_clients = 1; - let env_objects = (0..num_clients).map(|_|{ - let tmp_dir = tempfile::tempdir().unwrap(); - // Use default StoreConfig rather than NodeStorage::test_opener so we’re using the - // same configuration as in production. - let store= NodeStorage::opener(&tmp_dir.path(), false, &Default::default(), None) - .open() - .unwrap() - .get_hot_store(); - initialize_genesis_state(store.clone(), &genesis, Some(tmp_dir.path())); - let epoch_manager = EpochManager::new_arc_handle(store.clone(), &genesis.config); - let runtime = - NightshadeRuntime::test(tmp_dir.path(), store.clone(), &genesis.config, epoch_manager.clone()) - as Arc; - (tmp_dir, store, epoch_manager, runtime) - }).collect::, Arc)>>(); - - let stores = env_objects.iter().map(|x| x.1.clone()).collect::>(); - let epoch_managers = env_objects.iter().map(|x| x.2.clone()).collect::>(); - let runtimes = env_objects.iter().map(|x| x.3.clone()).collect::>(); - let mut env = TestEnv::builder(chain_genesis) - .clients_count(env_objects.len()) - .stores(stores) - .epoch_managers(epoch_managers) - .runtimes(runtimes) + .clients_count(1) + .real_stores() + .real_epoch_managers(&genesis.config) + .nightshade_runtimes(&genesis) .use_state_snapshots() .build(); @@ -2182,35 +2155,15 @@ fn test_invalid_block_root() { fn test_incorrect_validator_key_produce_block() { let genesis = Genesis::test(vec!["test0".parse().unwrap(), 
"test1".parse().unwrap()], 2); let chain_genesis = ChainGenesis::new(&genesis); - let store = create_test_store(); - let home_dir = Path::new("../../../.."); - initialize_genesis_state(store.clone(), &genesis, Some(home_dir)); - let epoch_manager = EpochManager::new_arc_handle(store.clone(), &genesis.config); - let shard_tracker = ShardTracker::new(TrackedConfig::new_empty(), epoch_manager.clone()); - let runtime = - nearcore::NightshadeRuntime::test(home_dir, store, &genesis.config, epoch_manager.clone()); - let signer = Arc::new(InMemoryValidatorSigner::from_seed( - "test0".parse().unwrap(), - KeyType::ED25519, - "seed", - )); - let mut config = ClientConfig::test(true, 10, 20, 2, false, true, true, true); - config.epoch_length = chain_genesis.epoch_length; - let mut client = Client::new( - config, - chain_genesis, - epoch_manager, - shard_tracker, - runtime, - Arc::new(MockPeerManagerAdapter::default()).into(), - Sender::noop(), - Some(signer), - false, - TEST_SEED, - None, - ) - .unwrap(); - let res = client.produce_block(1); + + let mut env = TestEnv::builder(chain_genesis) + .clients_count(1) + .real_epoch_managers(&genesis.config) + .nightshade_runtimes(&genesis) + .track_all_shards() + .build(); + + let res = env.clients[0].produce_block(1); assert_matches!(res, Ok(None)); } @@ -2569,31 +2522,11 @@ fn test_catchup_gas_price_change() { genesis.config.gas_limit = 1000000000000; let chain_genesis = ChainGenesis::new(&genesis); - let env_objects = (0..2).map(|_|{ - let tmp_dir = tempfile::tempdir().unwrap(); - // Use default StoreConfig rather than NodeStorage::test_opener so we’re using the - // same configuration as in production. 
- let store= NodeStorage::opener(&tmp_dir.path(), false, &Default::default(), None) - .open() - .unwrap() - .get_hot_store(); - initialize_genesis_state(store.clone(), &genesis, Some(tmp_dir.path())); - let epoch_manager = EpochManager::new_arc_handle(store.clone(), &genesis.config); - let runtime = - NightshadeRuntime::test(tmp_dir.path(), store.clone(), &genesis.config, epoch_manager.clone()) - as Arc; - (tmp_dir, store, epoch_manager, runtime) - }).collect::, Arc)>>(); - - let stores = env_objects.iter().map(|x| x.1.clone()).collect::>(); - let epoch_managers = env_objects.iter().map(|x| x.2.clone()).collect::>(); - let runtimes = env_objects.iter().map(|x| x.3.clone()).collect::>(); - let mut env = TestEnv::builder(chain_genesis) - .clients_count(env_objects.len()) - .stores(stores) - .epoch_managers(epoch_managers) - .runtimes(runtimes) + .clients_count(2) + .real_stores() + .real_epoch_managers(&genesis.config) + .nightshade_runtimes(&genesis) .use_state_snapshots() .build(); @@ -3611,7 +3544,7 @@ mod contract_precompilation_tests { use super::*; use near_primitives::test_utils::MockEpochInfoProvider; use near_primitives::views::ViewApplyState; - use near_store::{Store, StoreCompiledContractCache, TrieUpdate}; + use near_store::{StoreCompiledContractCache, TrieUpdate}; use near_vm_runner::logic::CompiledContractCache; use near_vm_runner::{get_contract_cache_key, ContractCode}; use node_runtime::state_viewer::TrieViewer; @@ -3648,31 +3581,11 @@ mod contract_precompilation_tests { Genesis::test(vec!["test0".parse().unwrap(), "test1".parse().unwrap()], 1); genesis.config.epoch_length = EPOCH_LENGTH; - let env_objects = (0..num_clients).map(|_|{ - let tmp_dir = tempfile::tempdir().unwrap(); - // Use default StoreConfig rather than NodeStorage::test_opener so we’re using the - // same configuration as in production. 
- let store= NodeStorage::opener(&tmp_dir.path(), false, &Default::default(), None) - .open() - .unwrap() - .get_hot_store(); - initialize_genesis_state(store.clone(), &genesis, Some(tmp_dir.path())); - let epoch_manager = EpochManager::new_arc_handle(store.clone(), &genesis.config); - let runtime = - NightshadeRuntime::test(tmp_dir.path(), store.clone(), &genesis.config, epoch_manager.clone()) - as Arc; - (tmp_dir, store, epoch_manager, runtime) - }).collect::, Arc)>>(); - - let stores = env_objects.iter().map(|x| x.1.clone()).collect::>(); - let epoch_managers = env_objects.iter().map(|x| x.2.clone()).collect::>(); - let runtimes = env_objects.iter().map(|x| x.3.clone()).collect::>(); - let mut env = TestEnv::builder(ChainGenesis::test()) - .clients_count(env_objects.len()) - .stores(stores.clone()) - .epoch_managers(epoch_managers) - .runtimes(runtimes) + .clients_count(num_clients) + .real_stores() + .real_epoch_managers(&genesis.config) + .nightshade_runtimes(&genesis) .use_state_snapshots() .build(); @@ -3692,8 +3605,11 @@ mod contract_precompilation_tests { state_sync_on_height(&mut env, height - 1); // Check existence of contract in both caches. - let mut caches: Vec = - stores.iter().map(StoreCompiledContractCache::new).collect(); + let mut caches: Vec = env + .clients + .iter() + .map(|client| StoreCompiledContractCache::new(client.chain.store().store())) + .collect(); let contract_code = ContractCode::new(wasm_code.clone(), None); let epoch_id = env.clients[0] .chain @@ -3761,31 +3677,11 @@ mod contract_precompilation_tests { Genesis::test(vec!["test0".parse().unwrap(), "test1".parse().unwrap()], 1); genesis.config.epoch_length = EPOCH_LENGTH; - let env_objects = (0..num_clients).map(|_|{ - let tmp_dir = tempfile::tempdir().unwrap(); - // Use default StoreConfig rather than NodeStorage::test_opener so we’re using the - // same configuration as in production. 
- let store= NodeStorage::opener(&tmp_dir.path(), false, &Default::default(), None) - .open() - .unwrap() - .get_hot_store(); - initialize_genesis_state(store.clone(), &genesis, Some(tmp_dir.path())); - let epoch_manager = EpochManager::new_arc_handle(store.clone(), &genesis.config); - let runtime = - NightshadeRuntime::test(tmp_dir.path(), store.clone(), &genesis.config, epoch_manager.clone()) - as Arc; - (tmp_dir, store, epoch_manager, runtime) - }).collect::, Arc)>>(); - - let stores = env_objects.iter().map(|x| x.1.clone()).collect::>(); - let epoch_managers = env_objects.iter().map(|x| x.2.clone()).collect::>(); - let runtimes = env_objects.iter().map(|x| x.3.clone()).collect::>(); - let mut env = TestEnv::builder(ChainGenesis::test()) - .clients_count(env_objects.len()) - .stores(stores.clone()) - .epoch_managers(epoch_managers) - .runtimes(runtimes) + .clients_count(num_clients) + .real_stores() + .real_epoch_managers(&genesis.config) + .nightshade_runtimes(&genesis) .use_state_snapshots() .build(); @@ -3817,8 +3713,11 @@ mod contract_precompilation_tests { // Perform state sync for the second client on the last produced height. state_sync_on_height(&mut env, height - 1); - let caches: Vec = - stores.iter().map(StoreCompiledContractCache::new).collect(); + let caches: Vec = env + .clients + .iter() + .map(|client| StoreCompiledContractCache::new(client.chain.store().store())) + .collect(); let epoch_id = env.clients[0] .chain .get_block_by_height(height - 1) @@ -3856,31 +3755,11 @@ mod contract_precompilation_tests { ); genesis.config.epoch_length = EPOCH_LENGTH; - let env_objects = (0..num_clients).map(|_|{ - let tmp_dir = tempfile::tempdir().unwrap(); - // Use default StoreConfig rather than NodeStorage::test_opener so we’re using the - // same configuration as in production. 
- let store= NodeStorage::opener(&tmp_dir.path(), false, &Default::default(), None) - .open() - .unwrap() - .get_hot_store(); - initialize_genesis_state(store.clone(), &genesis, Some(tmp_dir.path())); - let epoch_manager = EpochManager::new_arc_handle(store.clone(), &genesis.config); - let runtime = - NightshadeRuntime::test(tmp_dir.path(), store.clone(), &genesis.config, epoch_manager.clone()) - as Arc; - (tmp_dir, store, epoch_manager, runtime) - }).collect::, Arc)>>(); - - let stores = env_objects.iter().map(|x| x.1.clone()).collect::>(); - let epoch_managers = env_objects.iter().map(|x| x.2.clone()).collect::>(); - let runtimes = env_objects.iter().map(|x| x.3.clone()).collect::>(); - let mut env = TestEnv::builder(ChainGenesis::test()) - .clients_count(env_objects.len()) - .stores(stores.clone()) - .epoch_managers(epoch_managers) - .runtimes(runtimes) + .clients_count(num_clients) + .real_stores() + .real_epoch_managers(&genesis.config) + .nightshade_runtimes(&genesis) .use_state_snapshots() .build(); @@ -3916,8 +3795,11 @@ mod contract_precompilation_tests { // Perform state sync for the second client. 
state_sync_on_height(&mut env, height - 1); - let caches: Vec = - stores.iter().map(StoreCompiledContractCache::new).collect(); + let caches: Vec = env + .clients + .iter() + .map(|client| StoreCompiledContractCache::new(client.chain.store().store())) + .collect(); let epoch_id = env.clients[0] .chain diff --git a/integration-tests/src/tests/client/state_dump.rs b/integration-tests/src/tests/client/state_dump.rs index 1e558156173..53e1a3e55b7 100644 --- a/integration-tests/src/tests/client/state_dump.rs +++ b/integration-tests/src/tests/client/state_dump.rs @@ -1,15 +1,14 @@ +use super::utils::TestEnvNightshadeSetupExt; use assert_matches::assert_matches; use borsh::BorshSerialize; use near_chain::near_chain_primitives::error::QueryError; -use near_chain::types::RuntimeAdapter; -use near_chain::{ChainGenesis, Provenance}; +use near_chain::{ChainGenesis, ChainStoreAccess, Provenance}; use near_chain_configs::ExternalStorageLocation::Filesystem; use near_chain_configs::{DumpConfig, Genesis}; use near_client::sync::external::external_storage_location; use near_client::test_utils::TestEnv; use near_client::ProcessTxResponse; use near_crypto::{InMemorySigner, KeyType, Signer}; -use near_epoch_manager::{EpochManager, EpochManagerAdapter, EpochManagerHandle}; use near_network::test_utils::wait_or_timeout; use near_o11y::testonly::init_test_logger; use near_primitives::block::Tip; @@ -22,12 +21,11 @@ use near_primitives::transaction::SignedTransaction; use near_primitives::types::BlockHeight; use near_primitives::views::{QueryRequest, QueryResponseKind}; use near_store::flat::store_helper; -use near_store::genesis::initialize_genesis_state; use near_store::DBCol; -use near_store::{NodeStorage, Store}; +use near_store::Store; use nearcore::config::GenesisExt; use nearcore::state_sync::spawn_state_sync_dump; -use nearcore::{NightshadeRuntime, NEAR_BASE}; +use nearcore::NEAR_BASE; use std::ops::ControlFlow; use std::sync::Arc; use std::time::Duration; @@ -42,38 +40,18 @@ fn 
test_state_dump() { genesis.config.epoch_length = 25; near_actix_test_utils::run_actix(async { - let num_clients = 1; - let env_objects = (0..num_clients).map(|_|{ - let tmp_dir = tempfile::tempdir().unwrap(); - // Use default StoreConfig rather than NodeStorage::test_opener so we’re using the - // same configuration as in production. - let store = NodeStorage::opener(&tmp_dir.path(), false, &Default::default(), None) - .open() - .unwrap() - .get_hot_store(); - initialize_genesis_state(store.clone(), &genesis, Some(tmp_dir.path())); - let epoch_manager = EpochManager::new_arc_handle(store.clone(), &genesis.config); - let runtime = - NightshadeRuntime::test(tmp_dir.path(), store.clone(), &genesis.config, epoch_manager.clone()) - as Arc; - (tmp_dir, store, epoch_manager, runtime) - }).collect::, Arc)>>(); - - let stores = env_objects.iter().map(|x| x.1.clone()).collect::>(); - let epoch_managers = env_objects.iter().map(|x| x.2.clone()).collect::>(); - let runtimes = env_objects.iter().map(|x| x.3.clone()).collect::>(); - - let mut env = TestEnv::builder(ChainGenesis::test()) - .clients_count(env_objects.len()) - .stores(stores.clone()) - .epoch_managers(epoch_managers.clone()) - .runtimes(runtimes.clone()) + let chain_genesis = ChainGenesis::new(&genesis); + let mut env = TestEnv::builder(chain_genesis.clone()) + .clients_count(1) + .real_stores() + .real_epoch_managers(&genesis.config) + .nightshade_runtimes(&genesis) .use_state_snapshots() .build(); - let chain_genesis = ChainGenesis::new(&genesis); - let chain = &env.clients[0].chain; + let epoch_manager = env.clients[0].epoch_manager.clone(); + let runtime = env.clients[0].runtime_adapter.clone(); let shard_tracker = chain.shard_tracker.clone(); let mut config = env.clients[0].config.clone(); let root_dir = tempfile::Builder::new().prefix("state_dump").tempdir().unwrap(); @@ -87,9 +65,9 @@ fn test_state_dump() { let _state_sync_dump_handle = spawn_state_sync_dump( &config, chain_genesis, - 
epoch_managers[0].clone(), - shard_tracker.clone(), - runtimes[0].clone(), + epoch_manager.clone(), + shard_tracker, + runtime, Some("test0".parse().unwrap()), ) .unwrap(); @@ -101,13 +79,13 @@ fn test_state_dump() { } let head = &env.clients[0].chain.head().unwrap(); let epoch_id = head.clone().epoch_id; - let epoch_info = epoch_managers[0].get_epoch_info(&epoch_id).unwrap(); + let epoch_info = epoch_manager.get_epoch_info(&epoch_id).unwrap(); let epoch_height = epoch_info.epoch_height(); wait_or_timeout(100, 10000, || async { let mut all_parts_present = true; - let num_shards = epoch_managers[0].num_shards(&epoch_id).unwrap(); + let num_shards = epoch_manager.num_shards(&epoch_id).unwrap(); assert_ne!(num_shards, 0); for shard_id in 0..num_shards { @@ -163,31 +141,11 @@ fn run_state_sync_with_dumped_parts( genesis.config.epoch_length = epoch_length; let chain_genesis = ChainGenesis::new(&genesis); let num_clients = 2; - let env_objects = (0..num_clients).map(|_|{ - let tmp_dir = tempfile::tempdir().unwrap(); - // Use default StoreConfig rather than NodeStorage::test_opener so we’re using the - // same configuration as in production. 
- let store = NodeStorage::opener(&tmp_dir.path(), false, &Default::default(), None) - .open() - .unwrap() - .get_hot_store(); - initialize_genesis_state(store.clone(), &genesis, Some(tmp_dir.path())); - let epoch_manager = EpochManager::new_arc_handle(store.clone(), &genesis.config); - let runtime = - NightshadeRuntime::test(tmp_dir.path(), store.clone(), &genesis.config, epoch_manager.clone()) - as Arc; - (tmp_dir, store, epoch_manager, runtime) - }).collect::, Arc)>>(); - - let stores = env_objects.iter().map(|x| x.1.clone()).collect::>(); - let epoch_managers = env_objects.iter().map(|x| x.2.clone()).collect::>(); - let runtimes = env_objects.iter().map(|x| x.3.clone()).collect::>(); - - let mut env = TestEnv::builder(ChainGenesis::test()) - .clients_count(env_objects.len()) - .stores(stores.clone()) - .epoch_managers(epoch_managers.clone()) - .runtimes(runtimes.clone()) + let mut env = TestEnv::builder(chain_genesis.clone()) + .clients_count(num_clients) + .real_stores() + .real_epoch_managers(&genesis.config) + .nightshade_runtimes(&genesis) .use_state_snapshots() .build(); @@ -197,6 +155,8 @@ fn run_state_sync_with_dumped_parts( let mut blocks = vec![]; let chain = &env.clients[0].chain; + let epoch_manager = env.clients[0].epoch_manager.clone(); + let runtime = env.clients[0].runtime_adapter.clone(); let shard_tracker = chain.shard_tracker.clone(); let mut config = env.clients[0].config.clone(); let root_dir = tempfile::Builder::new().prefix("state_dump").tempdir().unwrap(); @@ -209,9 +169,9 @@ fn run_state_sync_with_dumped_parts( let _state_sync_dump_handle = spawn_state_sync_dump( &config, chain_genesis, - epoch_managers[0].clone(), - shard_tracker.clone(), - runtimes[0].clone(), + epoch_manager.clone(), + shard_tracker, + runtime, Some("test0".parse().unwrap()), ) .unwrap(); @@ -279,7 +239,7 @@ fn run_state_sync_with_dumped_parts( } let epoch_id = final_block_header.epoch_id().clone(); - let epoch_info = 
epoch_managers[0].get_epoch_info(&epoch_id).unwrap(); + let epoch_info = epoch_manager.get_epoch_info(&epoch_id).unwrap(); let epoch_height = epoch_info.epoch_height(); let sync_block_height = (epoch_length * epoch_height + 1) as usize; @@ -301,7 +261,7 @@ fn run_state_sync_with_dumped_parts( wait_or_timeout(100, 10000, || async { let mut all_parts_present = true; - let num_shards = epoch_managers[0].num_shards(&epoch_id).unwrap(); + let num_shards = epoch_manager.num_shards(&epoch_id).unwrap(); assert_ne!(num_shards, 0); for shard_id in 0..num_shards { @@ -377,8 +337,10 @@ fn run_state_sync_with_dumped_parts( // Check that inlined flat state values remain inlined. { - let (num_inlined_before, num_ref_before) = count_flat_state_value_kinds(&stores[0]); - let (num_inlined_after, num_ref_after) = count_flat_state_value_kinds(&stores[1]); + let store0 = env.clients[0].chain.store().store(); + let store1 = env.clients[1].chain.store().store(); + let (num_inlined_before, num_ref_before) = count_flat_state_value_kinds(store0); + let (num_inlined_after, num_ref_after) = count_flat_state_value_kinds(store1); // Nothing new created, number of flat state values should be identical. assert_eq!(num_inlined_before, num_inlined_after); assert_eq!(num_ref_before, num_ref_after); @@ -394,9 +356,10 @@ fn run_state_sync_with_dumped_parts( // Check that inlined flat state values remain inlined. { - let (num_inlined_before, _num_ref_before) = - count_flat_state_value_kinds(&stores[0]); - let (num_inlined_after, _num_ref_after) = count_flat_state_value_kinds(&stores[1]); + let store0 = env.clients[0].chain.store().store(); + let store1 = env.clients[1].chain.store().store(); + let (num_inlined_before, _num_ref_before) = count_flat_state_value_kinds(store0); + let (num_inlined_after, _num_ref_after) = count_flat_state_value_kinds(store1); // Created a new entry, but inlined values should stay inlined.
assert!(num_inlined_before >= num_inlined_after); assert!(num_inlined_after > 0); diff --git a/integration-tests/src/tests/nearcore/state_snapshot.rs b/integration-tests/src/tests/client/state_snapshot.rs similarity index 86% rename from integration-tests/src/tests/nearcore/state_snapshot.rs rename to integration-tests/src/tests/client/state_snapshot.rs index 4c28524a25b..8ab3b1b5665 100644 --- a/integration-tests/src/tests/nearcore/state_snapshot.rs +++ b/integration-tests/src/tests/client/state_snapshot.rs @@ -1,26 +1,24 @@ -use near_chain::types::RuntimeAdapter; -use near_chain::{ChainGenesis, Provenance}; +use near_chain::{ChainGenesis, ChainStoreAccess, Provenance}; use near_chain_configs::Genesis; use near_client::test_utils::TestEnv; use near_client::ProcessTxResponse; use near_crypto::{InMemorySigner, KeyType, Signer}; -use near_epoch_manager::{EpochManager, EpochManagerHandle}; use near_o11y::testonly::init_test_logger; use near_primitives::block::Block; use near_primitives::hash::CryptoHash; use near_primitives::shard_layout::ShardUId; use near_primitives::transaction::SignedTransaction; use near_store::flat::FlatStorageManager; -use near_store::genesis::initialize_genesis_state; use near_store::{ config::TrieCacheConfig, test_utils::create_test_store, Mode, ShardTries, StateSnapshotConfig, StoreConfig, TrieConfig, }; use near_store::{NodeStorage, Store}; use nearcore::config::GenesisExt; -use nearcore::{NightshadeRuntime, NEAR_BASE}; +use nearcore::NEAR_BASE; use std::path::PathBuf; -use std::sync::Arc; + +use crate::tests::client::utils::TestEnvNightshadeSetupExt; struct StateSnaptshotTestEnv { home_dir: PathBuf, @@ -192,32 +190,11 @@ fn delete_content_at_path(path: &str) -> std::io::Result<()> { fn test_make_state_snapshot() { init_test_logger(); let genesis = Genesis::test(vec!["test0".parse().unwrap()], 1); - let num_clients = 1; - let env_objects = (0..num_clients).map(|_|{ - let tmp_dir = tempfile::tempdir().unwrap(); - // Use default StoreConfig 
rather than NodeStorage::test_opener so we’re using the - // same configuration as in production. - let store = NodeStorage::opener(&tmp_dir.path(), false, &Default::default(), None) - .open() - .unwrap() - .get_hot_store(); - initialize_genesis_state(store.clone(), &genesis, Some(tmp_dir.path())); - let epoch_manager = EpochManager::new_arc_handle(store.clone(), &genesis.config); - let runtime = - NightshadeRuntime::test(tmp_dir.path(), store.clone(), &genesis.config, epoch_manager.clone()) - as Arc; - (tmp_dir, store, epoch_manager, runtime) - }).collect::, Arc)>>(); - - let stores = env_objects.iter().map(|x| x.1.clone()).collect::>(); - let epoch_managers = env_objects.iter().map(|x| x.2.clone()).collect::>(); - let runtimes = env_objects.iter().map(|x| x.3.clone()).collect::>(); - let mut env = TestEnv::builder(ChainGenesis::test()) - .clients_count(env_objects.len()) - .stores(stores.clone()) - .epoch_managers(epoch_managers) - .runtimes(runtimes.clone()) + .clients_count(1) + .real_stores() + .real_epoch_managers(&genesis.config) + .nightshade_runtimes(&genesis) .use_state_snapshots() .build(); @@ -227,7 +204,8 @@ fn test_make_state_snapshot() { let mut blocks = vec![]; - let state_snapshot_test_env = set_up_test_env_for_state_snapshots(&stores[0]); + let store = env.clients[0].chain.store().store(); + let state_snapshot_test_env = set_up_test_env_for_state_snapshots(store); for i in 1..=5 { let new_account_id = format!("test_account_{i}"); diff --git a/integration-tests/src/tests/nearcore/sync_state_nodes.rs b/integration-tests/src/tests/client/sync_state_nodes.rs similarity index 94% rename from integration-tests/src/tests/nearcore/sync_state_nodes.rs rename to integration-tests/src/tests/client/sync_state_nodes.rs index 9d3a2bfa942..a5f345f545b 100644 --- a/integration-tests/src/tests/nearcore/sync_state_nodes.rs +++ b/integration-tests/src/tests/client/sync_state_nodes.rs @@ -1,16 +1,15 @@ use crate::test_helpers::heavy_test; +use 
crate::tests::client::utils::TestEnvNightshadeSetupExt; use actix::{Actor, System}; use futures::{future, FutureExt}; use near_actix_test_utils::run_actix; use near_chain::chain::ApplyStatePartsRequest; -use near_chain::types::RuntimeAdapter; use near_chain::{ChainGenesis, Provenance}; use near_chain_configs::ExternalStorageLocation::Filesystem; use near_chain_configs::{DumpConfig, ExternalStorageConfig, Genesis, SyncConfig}; use near_client::test_utils::TestEnv; use near_client::{GetBlock, ProcessTxResponse}; use near_crypto::{InMemorySigner, KeyType}; -use near_epoch_manager::{EpochManager, EpochManagerHandle}; use near_network::tcp; use near_network::test_utils::{convert_boot_nodes, wait_or_timeout, WaitOrTimeoutActor}; use near_o11y::testonly::{init_integration_logger, init_test_logger}; @@ -21,9 +20,8 @@ use near_primitives::state_sync::{get_num_state_parts, StatePartKey}; use near_primitives::transaction::SignedTransaction; use near_primitives::utils::MaybeValidated; use near_primitives_core::types::ShardId; -use near_store::genesis::initialize_genesis_state; -use near_store::{DBCol, NodeStorage, Store}; -use nearcore::{config::GenesisExt, load_test_config, start_with_config, NightshadeRuntime}; +use near_store::DBCol; +use nearcore::{config::GenesisExt, load_test_config, start_with_config}; use std::ops::ControlFlow; use std::sync::{Arc, RwLock}; use std::time::Duration; @@ -560,47 +558,11 @@ fn test_dump_epoch_missing_chunk_in_last_block() { let mut genesis = Genesis::test(vec!["test0".parse().unwrap(), "test1".parse().unwrap()], 1); genesis.config.epoch_length = epoch_length; - let chain_genesis = ChainGenesis::new(&genesis); - - let num_clients = 2; - let env_objects = - (0..num_clients) - .map(|_| { - let tmp_dir = tempfile::tempdir().unwrap(); - // Use default StoreConfig rather than NodeStorage::test_opener so we’re using the - // same configuration as in production. 
- let store = - NodeStorage::opener(&tmp_dir.path(), false, &Default::default(), None) - .open() - .unwrap() - .get_hot_store(); - initialize_genesis_state(store.clone(), &genesis, Some(tmp_dir.path())); - let epoch_manager = - EpochManager::new_arc_handle(store.clone(), &genesis.config); - let runtime = NightshadeRuntime::test( - tmp_dir.path(), - store.clone(), - &genesis.config, - epoch_manager.clone(), - ) as Arc; - (tmp_dir, store, epoch_manager, runtime) - }) - .collect::, - Arc, - )>>(); - - let stores = env_objects.iter().map(|x| x.1.clone()).collect(); - let epoch_managers = env_objects.iter().map(|x| x.2.clone()).collect(); - let runtimes = env_objects.iter().map(|x| x.3.clone()).collect(); - - let mut env = TestEnv::builder(chain_genesis) - .clients_count(num_clients) - .stores(stores) - .epoch_managers(epoch_managers) - .runtimes(runtimes) + let mut env = TestEnv::builder(ChainGenesis::new(&genesis)) + .clients_count(2) + .real_stores() + .real_epoch_managers(&genesis.config) + .nightshade_runtimes(&genesis) .use_state_snapshots() .build(); diff --git a/integration-tests/src/tests/client/utils.rs b/integration-tests/src/tests/client/utils.rs index 85a7fe94b0a..f3693b49716 100644 --- a/integration-tests/src/tests/client/utils.rs +++ b/integration-tests/src/tests/client/utils.rs @@ -4,7 +4,7 @@ use near_client::test_utils::TestEnvBuilder; use near_primitives::runtime::config_store::RuntimeConfigStore; use near_store::genesis::initialize_genesis_state; use nearcore::NightshadeRuntime; -use std::{path::Path, sync::Arc}; +use std::sync::Arc; pub trait TestEnvNightshadeSetupExt { fn nightshade_runtimes(self, genesis: &Genesis) -> Self; @@ -17,16 +17,17 @@ pub trait TestEnvNightshadeSetupExt { impl TestEnvNightshadeSetupExt for TestEnvBuilder { fn nightshade_runtimes(self, genesis: &Genesis) -> Self { - let (builder, stores, epoch_managers) = + let (builder, home_dirs, stores, epoch_managers) = self.internal_ensure_epoch_managers_for_nightshade_runtime(); let 
runtimes = stores .into_iter() + .zip(home_dirs) .zip(epoch_managers) - .map(|(store, epoch_manager)| { + .map(|((store, home_dir), epoch_manager)| { // TODO: It's not ideal to initialize genesis state with the nightshade runtime here for tests // Tests that don't use nightshade runtime have genesis initialized in kv_runtime. // We should instead try to do this while configuring store. - let home_dir = Path::new("../../../.."); + let home_dir = home_dir.as_path(); initialize_genesis_state(store.clone(), genesis, Some(home_dir)); NightshadeRuntime::test(home_dir, store, &genesis.config, epoch_manager) as Arc @@ -40,18 +41,19 @@ impl TestEnvNightshadeSetupExt for TestEnvBuilder { genesis: &Genesis, runtime_configs: Vec, ) -> Self { - let (builder, stores, epoch_managers) = + let (builder, home_dirs, stores, epoch_managers) = self.internal_ensure_epoch_managers_for_nightshade_runtime(); assert_eq!(runtime_configs.len(), epoch_managers.len()); let runtimes = stores .into_iter() + .zip(home_dirs) .zip(epoch_managers) .zip(runtime_configs) - .map(|((store, epoch_manager), runtime_config)| { + .map(|(((store, home_dir), epoch_manager), runtime_config)| { // TODO: It's not ideal to initialize genesis state with the nightshade runtime here for tests // Tests that don't use nightshade runtime have genesis initialized in kv_runtime. // We should instead try to do this while configuring store. 
- let home_dir = Path::new("../../../.."); + let home_dir = home_dir.as_path(); initialize_genesis_state(store.clone(), genesis, Some(home_dir)); NightshadeRuntime::test_with_runtime_config_store( home_dir, diff --git a/integration-tests/src/tests/nearcore/mod.rs b/integration-tests/src/tests/nearcore/mod.rs index 80d5811916e..f0195acdc75 100644 --- a/integration-tests/src/tests/nearcore/mod.rs +++ b/integration-tests/src/tests/nearcore/mod.rs @@ -3,7 +3,5 @@ mod rpc_error_structs; mod rpc_nodes; mod run_nodes; mod stake_nodes; -mod state_snapshot; mod sync_nodes; -mod sync_state_nodes; mod track_shards; From c307a64e90f124654006e0375d17ac679ecb5242 Mon Sep 17 00:00:00 2001 From: Jakob Meier Date: Fri, 29 Sep 2023 12:53:04 +0200 Subject: [PATCH 4/6] fix: remove nightly changes from parameters.snap (#9612) The point of this file is to quickly see the current parameters. Showing the nightly changes is not in this spirit IMO. --- core/primitives/res/runtime_configs/parameters.snap | 2 +- core/primitives/src/runtime/config_store.rs | 5 ++++- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/core/primitives/res/runtime_configs/parameters.snap b/core/primitives/res/runtime_configs/parameters.snap index 1a4ae83f1e9..cbc4d5225fb 100644 --- a/core/primitives/res/runtime_configs/parameters.snap +++ b/core/primitives/res/runtime_configs/parameters.snap @@ -166,7 +166,7 @@ account_id_validity_rules_version 1 disable_9393_fix false flat_storage_reads true implicit_account_creation true -fix_contract_loading_cost true +fix_contract_loading_cost false math_extension true ed25519_verify true alt_bn128 true diff --git a/core/primitives/src/runtime/config_store.rs b/core/primitives/src/runtime/config_store.rs index ef97741f182..80b39faf278 100644 --- a/core/primitives/src/runtime/config_store.rs +++ b/core/primitives/src/runtime/config_store.rs @@ -316,6 +316,7 @@ mod tests { #[cfg(not(feature = "calimero_zero_storage"))] fn test_json_unchanged() { use 
crate::views::RuntimeConfigView; + use near_primitives_core::version::PROTOCOL_VERSION; let store = RuntimeConfigStore::new(None); let mut any_failure = false; @@ -332,7 +333,9 @@ mod tests { // Store the latest values of parameters in a human-readable snapshot. { let mut params: ParameterTable = BASE_CONFIG.parse().unwrap(); - for (_, diff_bytes) in CONFIG_DIFFS { + for (_, diff_bytes) in + CONFIG_DIFFS.iter().filter(|(version, _)| *version <= PROTOCOL_VERSION) + { params.apply_diff(diff_bytes.parse().unwrap()).unwrap(); } insta::with_settings!({ From fc46b88ea431d96ad9fa5a6d884aff986360cbff Mon Sep 17 00:00:00 2001 From: Jakob Meier Date: Fri, 29 Sep 2023 16:43:48 +0200 Subject: [PATCH 5/6] chore: bump rusqlite version (#9615) `cargo-deny advisories` complains about an old libsqlite-sys version. We depend on it through rusqlite, which is only used in the `estimator-warehouse`, a CI /&debug script. So it's not super important but still worth updating. --- Cargo.lock | 17 ++++++++--------- Cargo.toml | 2 +- 2 files changed, 9 insertions(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ee5e9d588cb..5507234f024 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2473,11 +2473,11 @@ dependencies = [ [[package]] name = "hashlink" -version = "0.7.0" +version = "0.8.2" source = "registry+/~https://github.com/rust-lang/crates.io-index" -checksum = "7249a3129cbc1ffccd74857f81464a323a152173cdb134e0fd81bc803b29facf" +checksum = "0761a1b9491c4f2e3d66aa0f62d0fba0af9a0e2852e4d48ea506632a4b56e6aa" dependencies = [ - "hashbrown 0.11.2", + "hashbrown 0.13.2", ] [[package]] @@ -3001,9 +3001,9 @@ dependencies = [ [[package]] name = "libsqlite3-sys" -version = "0.24.2" +version = "0.26.0" source = "registry+/~https://github.com/rust-lang/crates.io-index" -checksum = "898745e570c7d0453cc1fbc4a701eb6c662ed54e8fec8b7d14be137ebeeb9d14" +checksum = "afc22eff61b133b115c6e8c74e818c628d6d5e7a502afea6f64dee076dd94326" dependencies = [ "cc", "pkg-config", @@ -5944,17 +5944,16 @@ 
dependencies = [ [[package]] name = "rusqlite" -version = "0.27.0" +version = "0.29.0" source = "registry+/~https://github.com/rust-lang/crates.io-index" -checksum = "85127183a999f7db96d1a976a309eebbfb6ea3b0b400ddd8340190129de6eb7a" +checksum = "549b9d036d571d42e6e85d1c1425e2ac83491075078ca9a15be021c56b1641f2" dependencies = [ - "bitflags 1.3.2", + "bitflags 2.3.1", "chrono", "fallible-iterator", "fallible-streaming-iterator", "hashlink", "libsqlite3-sys", - "memchr", "smallvec", ] diff --git a/Cargo.toml b/Cargo.toml index a39e262ca51..12ca60fd67f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -273,7 +273,7 @@ rkyv = "0.7.31" rlimit = "0.7" rocksdb = { version = "0.21.0", default-features = false, features = ["snappy", "lz4", "zstd", "zlib", "jemalloc"] } runtime-tester = { path = "test-utils/runtime-tester" } -rusqlite = { version = "0.27.0", features = ["bundled", "chrono", "functions"] } +rusqlite = { version = "0.29.0", features = ["bundled", "chrono", "functions"] } rustc-demangle = "0.1" rust-s3 = { version = "0.32.3", features = ["blocking"] } rustix = "0.37" From df173e23da7737e6eb978d2ae01c2d8743da408d Mon Sep 17 00:00:00 2001 From: Shreyan Gupta Date: Fri, 29 Sep 2023 12:32:54 -0700 Subject: [PATCH 6/6] [refactor] Break chain/client/src/test_utils.rs into module (#9611) Breaking a large test_utils.rs file into multiple parts. Additionally shifting location of `TestEnvNightshadeSetupExt` to nearcore so that it can be used in other modules, specifically `tools/state-viewer/src/state_dump.rs`. Note that we can't have `TestEnvNightshadeSetupExt` in `chain/client/src/test_utils` as it depends on NightshadeRuntime which is a part of `nearcore` module. `nearcore` module depends on `chain` and would create a cyclic dependency. 
--- chain/client/src/test_utils.rs | 2499 ----------------- chain/client/src/test_utils/block_stats.rs | 107 + chain/client/src/test_utils/client.rs | 271 ++ chain/client/src/test_utils/mod.rs | 13 + .../src/test_utils/peer_manager_mock.rs | 39 + chain/client/src/test_utils/setup.rs | 1099 ++++++++ chain/client/src/test_utils/test_env.rs | 516 ++++ .../client/src/test_utils/test_env_builder.rs | 531 ++++ .../src/tests/client/benchmarks.rs | 2 +- .../src/tests/client/challenges.rs | 3 +- .../src/tests/client/cold_storage.rs | 3 +- .../src/tests/client/epoch_sync.rs | 2 +- .../access_key_nonce_for_implicit_accounts.rs | 3 +- .../account_id_in_function_call_permission.rs | 3 +- .../client/features/adversarial_behaviors.rs | 3 +- .../client/features/chunk_nodes_cache.rs | 2 +- .../tests/client/features/delegate_action.rs | 2 +- .../features/fix_contract_loading_cost.rs | 2 +- .../client/features/fix_storage_usage.rs | 2 +- .../src/tests/client/features/flat_storage.rs | 2 +- .../features/increase_deployment_cost.rs | 3 +- .../features/increase_storage_compute_cost.rs | 3 +- .../limit_contract_functions_number.rs | 2 +- .../features/lower_storage_key_limit.rs | 2 +- .../src/tests/client/features/nearvm.rs | 2 +- ...restore_receipts_after_fix_apply_chunks.rs | 2 +- .../client/features/zero_balance_account.rs | 4 +- .../src/tests/client/flat_storage.rs | 3 +- integration-tests/src/tests/client/mod.rs | 1 - .../src/tests/client/process_blocks.rs | 3 +- .../src/tests/client/runtimes.rs | 3 +- integration-tests/src/tests/client/sandbox.rs | 2 +- .../src/tests/client/sharding_upgrade.rs | 14 +- .../src/tests/client/state_dump.rs | 2 +- .../src/tests/client/state_snapshot.rs | 3 +- .../src/tests/client/sync_state_nodes.rs | 2 +- .../src/tests/client/undo_block.rs | 3 +- nearcore/src/lib.rs | 1 + .../utils.rs => nearcore/src/test_utils.rs | 3 +- 39 files changed, 2612 insertions(+), 2550 deletions(-) delete mode 100644 chain/client/src/test_utils.rs create mode 100644 
chain/client/src/test_utils/block_stats.rs create mode 100644 chain/client/src/test_utils/client.rs create mode 100644 chain/client/src/test_utils/mod.rs create mode 100644 chain/client/src/test_utils/peer_manager_mock.rs create mode 100644 chain/client/src/test_utils/setup.rs create mode 100644 chain/client/src/test_utils/test_env.rs create mode 100644 chain/client/src/test_utils/test_env_builder.rs rename integration-tests/src/tests/client/utils.rs => nearcore/src/test_utils.rs (98%) diff --git a/chain/client/src/test_utils.rs b/chain/client/src/test_utils.rs deleted file mode 100644 index 387292bca03..00000000000 --- a/chain/client/src/test_utils.rs +++ /dev/null @@ -1,2499 +0,0 @@ -// FIXME(nagisa): Is there a good reason we're triggering this? Luckily though this is just test -// code so we're in the clear. -#![allow(clippy::arc_with_non_send_sync)] - -use itertools::Itertools; -use std::cmp::max; -use std::collections::{HashMap, HashSet}; -use std::mem::swap; -use std::ops::DerefMut; -use std::path::PathBuf; -use std::sync::{Arc, Mutex, RwLock}; -use std::time::{Duration, Instant}; - -use actix::{Actor, Addr, AsyncContext, Context}; -use actix_rt::{Arbiter, System}; -use chrono::DateTime; -use futures::{future, FutureExt}; -use near_async::actix::AddrWithAutoSpanContextExt; -use near_async::messaging::{CanSend, IntoSender, LateBoundSender, Sender}; -use near_async::time; -use near_chain::resharding::StateSplitRequest; -use near_chunks::shards_manager_actor::start_shards_manager; -use near_chunks::ShardsManager; -use near_epoch_manager::shard_tracker::{ShardTracker, TrackedConfig}; -use near_epoch_manager::{EpochManager, EpochManagerAdapter, EpochManagerHandle}; -use near_network::shards_manager::ShardsManagerRequestFromNetwork; -use near_primitives::errors::InvalidTxError; -use near_primitives::test_utils::create_test_signer; -use num_rational::Ratio; -use once_cell::sync::OnceCell; -use rand::{thread_rng, Rng}; -use tracing::info; - -use 
crate::{start_view_client, Client, ClientActor, SyncStatus, ViewClientActor}; -use chrono::Utc; -use near_chain::chain::{do_apply_chunks, BlockCatchUpRequest}; -use near_chain::state_snapshot_actor::MakeSnapshotCallback; -use near_chain::test_utils::{ - wait_for_all_blocks_in_processing, wait_for_block_in_processing, KeyValueRuntime, - MockEpochManager, ValidatorSchedule, -}; -use near_chain::types::{ChainConfig, RuntimeAdapter}; -use near_chain::{Chain, ChainGenesis, ChainStoreAccess, DoomslugThresholdMode, Provenance}; -use near_chain_configs::{ClientConfig, GenesisConfig}; -use near_chunks::adapter::ShardsManagerRequestFromClient; -use near_chunks::client::ShardsManagerResponse; -use near_chunks::test_utils::{MockClientAdapterForShardsManager, SynchronousShardsManagerAdapter}; -use near_client_primitives::types::Error; -use near_crypto::{InMemorySigner, KeyType, PublicKey, Signer}; -use near_network::test_utils::MockPeerManagerAdapter; -use near_network::types::{ - AccountOrPeerIdOrHash, HighestHeightPeerInfo, PartialEncodedChunkRequestMsg, - PartialEncodedChunkResponseMsg, PeerInfo, PeerType, -}; -use near_network::types::{BlockInfo, PeerChainInfo}; -use near_network::types::{ - ConnectedPeerInfo, FullPeerInfo, NetworkRequests, NetworkResponses, PeerManagerAdapter, -}; -use near_network::types::{ - NetworkInfo, PeerManagerMessageRequest, PeerManagerMessageResponse, SetChainInfo, -}; -use near_o11y::testonly::TracingCapture; -use near_o11y::WithSpanContextExt; -use near_primitives::action::delegate::{DelegateAction, NonDelegateAction, SignedDelegateAction}; -use near_primitives::block::{ApprovalInner, Block, GenesisId}; -use near_primitives::epoch_manager::RngSeed; -use near_primitives::hash::{hash, CryptoHash}; -use near_primitives::merkle::{merklize, MerklePath, PartialMerkleTree}; -use near_primitives::network::PeerId; -use near_primitives::receipt::Receipt; -use near_primitives::runtime::config::RuntimeConfig; -use near_primitives::shard_layout::ShardUId; 
-use near_primitives::sharding::{EncodedShardChunk, PartialEncodedChunk, ReedSolomonWrapper}; -use near_primitives::static_clock::StaticClock; -use near_primitives::transaction::{Action, FunctionCallAction, SignedTransaction}; - -use near_primitives::types::{ - AccountId, Balance, BlockHeight, BlockHeightDelta, EpochId, NumBlocks, NumSeats, NumShards, - ShardId, -}; -use near_primitives::utils::MaybeValidated; -use near_primitives::validator_signer::ValidatorSigner; -use near_primitives::version::{ProtocolVersion, PROTOCOL_VERSION}; -use near_primitives::views::{ - AccountView, FinalExecutionOutcomeView, QueryRequest, QueryResponseKind, StateItem, -}; -use near_store::test_utils::create_test_store; -use near_store::{NodeStorage, Store}; -use near_telemetry::TelemetryActor; - -use crate::adapter::{ - AnnounceAccountRequest, BlockApproval, BlockHeadersRequest, BlockHeadersResponse, BlockRequest, - BlockResponse, ProcessTxResponse, SetNetworkInfo, StateRequestHeader, StateRequestPart, -}; - -pub struct PeerManagerMock { - handle: Box< - dyn FnMut( - PeerManagerMessageRequest, - &mut actix::Context, - ) -> PeerManagerMessageResponse, - >, -} - -impl PeerManagerMock { - fn new( - f: impl 'static - + FnMut( - PeerManagerMessageRequest, - &mut actix::Context, - ) -> PeerManagerMessageResponse, - ) -> Self { - Self { handle: Box::new(f) } - } -} - -impl actix::Actor for PeerManagerMock { - type Context = actix::Context; -} - -impl actix::Handler for PeerManagerMock { - type Result = PeerManagerMessageResponse; - fn handle(&mut self, msg: PeerManagerMessageRequest, ctx: &mut Self::Context) -> Self::Result { - (self.handle)(msg, ctx) - } -} - -impl actix::Handler for PeerManagerMock { - type Result = (); - fn handle(&mut self, _msg: SetChainInfo, _ctx: &mut Self::Context) {} -} - -/// min block production time in milliseconds -pub const MIN_BLOCK_PROD_TIME: Duration = Duration::from_millis(100); -/// max block production time in milliseconds -pub const MAX_BLOCK_PROD_TIME: 
Duration = Duration::from_millis(200); - -const TEST_SEED: RngSeed = [3; 32]; - -impl Client { - /// Unlike Client::start_process_block, which returns before the block finishes processing - /// This function waits until the block is processed. - /// `should_produce_chunk`: Normally, if a block is accepted, client will try to produce - /// chunks for the next block if it is the chunk producer. - /// If `should_produce_chunk` is set to false, client will skip the - /// chunk production. This is useful in tests that need to tweak - /// the produced chunk content. - fn process_block_sync_with_produce_chunk_options( - &mut self, - block: MaybeValidated, - provenance: Provenance, - should_produce_chunk: bool, - ) -> Result, near_chain::Error> { - self.start_process_block(block, provenance, Arc::new(|_| {}))?; - wait_for_all_blocks_in_processing(&mut self.chain); - let (accepted_blocks, errors) = - self.postprocess_ready_blocks(Arc::new(|_| {}), should_produce_chunk); - assert!(errors.is_empty(), "unexpected errors when processing blocks: {errors:#?}"); - Ok(accepted_blocks) - } - - pub fn process_block_test( - &mut self, - block: MaybeValidated, - provenance: Provenance, - ) -> Result, near_chain::Error> { - self.process_block_sync_with_produce_chunk_options(block, provenance, true) - } - - pub fn process_block_test_no_produce_chunk( - &mut self, - block: MaybeValidated, - provenance: Provenance, - ) -> Result, near_chain::Error> { - self.process_block_sync_with_produce_chunk_options(block, provenance, false) - } - - /// This function finishes processing all blocks that started being processed. - pub fn finish_blocks_in_processing(&mut self) -> Vec { - let mut accepted_blocks = vec![]; - while wait_for_all_blocks_in_processing(&mut self.chain) { - accepted_blocks.extend(self.postprocess_ready_blocks(Arc::new(|_| {}), true).0); - } - accepted_blocks - } - - /// This function finishes processing block with hash `hash`, if the processing of that block - /// has started. 
- pub fn finish_block_in_processing(&mut self, hash: &CryptoHash) -> Vec { - if let Ok(()) = wait_for_block_in_processing(&mut self.chain, hash) { - let (accepted_blocks, _) = self.postprocess_ready_blocks(Arc::new(|_| {}), true); - return accepted_blocks; - } - vec![] - } -} - -/// Sets up ClientActor and ViewClientActor viewing the same store/runtime. -pub fn setup( - vs: ValidatorSchedule, - epoch_length: BlockHeightDelta, - account_id: AccountId, - skip_sync_wait: bool, - min_block_prod_time: u64, - max_block_prod_time: u64, - enable_doomslug: bool, - archive: bool, - epoch_sync_enabled: bool, - state_sync_enabled: bool, - network_adapter: PeerManagerAdapter, - transaction_validity_period: NumBlocks, - genesis_time: DateTime, - ctx: &Context, -) -> (Block, ClientActor, Addr, ShardsManagerAdapterForTest) { - let store = create_test_store(); - let num_validator_seats = vs.all_block_producers().count() as NumSeats; - let epoch_manager = MockEpochManager::new_with_validators(store.clone(), vs, epoch_length); - let shard_tracker = ShardTracker::new_empty(epoch_manager.clone()); - let runtime = KeyValueRuntime::new_with_no_gc(store.clone(), epoch_manager.as_ref(), archive); - let chain_genesis = ChainGenesis { - time: genesis_time, - height: 0, - gas_limit: 1_000_000, - min_gas_price: 100, - max_gas_price: 1_000_000_000, - total_supply: 3_000_000_000_000_000_000_000_000_000_000_000, - gas_price_adjustment_rate: Ratio::from_integer(0), - transaction_validity_period, - epoch_length, - protocol_version: PROTOCOL_VERSION, - }; - let doomslug_threshold_mode = if enable_doomslug { - DoomslugThresholdMode::TwoThirds - } else { - DoomslugThresholdMode::NoApprovals - }; - let chain = Chain::new( - epoch_manager.clone(), - shard_tracker.clone(), - runtime.clone(), - &chain_genesis, - doomslug_threshold_mode, - ChainConfig { - save_trie_changes: true, - background_migration_threads: 1, - state_snapshot_every_n_blocks: None, - }, - None, - ) - .unwrap(); - let genesis_block = 
chain.get_block(&chain.genesis().hash().clone()).unwrap(); - - let signer = Arc::new(create_test_signer(account_id.as_str())); - let telemetry = TelemetryActor::default().start(); - let config = ClientConfig::test( - skip_sync_wait, - min_block_prod_time, - max_block_prod_time, - num_validator_seats, - archive, - true, - epoch_sync_enabled, - state_sync_enabled, - ); - - let adv = crate::adversarial::Controls::default(); - - let view_client_addr = start_view_client( - Some(signer.validator_id().clone()), - chain_genesis.clone(), - epoch_manager.clone(), - shard_tracker.clone(), - runtime.clone(), - network_adapter.clone(), - config.clone(), - adv.clone(), - ); - - let (shards_manager_addr, _) = start_shards_manager( - epoch_manager.clone(), - shard_tracker.clone(), - network_adapter.clone().into_sender(), - ctx.address().with_auto_span_context().into_sender(), - Some(account_id), - store, - config.chunk_request_retry_period, - ); - let shards_manager_adapter = Arc::new(shards_manager_addr); - - let client = Client::new( - config.clone(), - chain_genesis, - epoch_manager, - shard_tracker, - runtime, - network_adapter.clone(), - shards_manager_adapter.as_sender(), - Some(signer.clone()), - enable_doomslug, - TEST_SEED, - None, - ) - .unwrap(); - let client_actor = ClientActor::new( - client, - ctx.address(), - config, - PeerId::new(PublicKey::empty(KeyType::ED25519)), - network_adapter, - Some(signer), - telemetry, - ctx, - None, - adv, - None, - ) - .unwrap(); - (genesis_block, client_actor, view_client_addr, shards_manager_adapter.into()) -} - -pub fn setup_only_view( - vs: ValidatorSchedule, - epoch_length: BlockHeightDelta, - account_id: AccountId, - skip_sync_wait: bool, - min_block_prod_time: u64, - max_block_prod_time: u64, - enable_doomslug: bool, - archive: bool, - epoch_sync_enabled: bool, - state_sync_enabled: bool, - network_adapter: PeerManagerAdapter, - transaction_validity_period: NumBlocks, - genesis_time: DateTime, -) -> Addr { - let store = 
create_test_store(); - let num_validator_seats = vs.all_block_producers().count() as NumSeats; - let epoch_manager = MockEpochManager::new_with_validators(store.clone(), vs, epoch_length); - let shard_tracker = ShardTracker::new_empty(epoch_manager.clone()); - let runtime = KeyValueRuntime::new_with_no_gc(store, epoch_manager.as_ref(), archive); - let chain_genesis = ChainGenesis { - time: genesis_time, - height: 0, - gas_limit: 1_000_000, - min_gas_price: 100, - max_gas_price: 1_000_000_000, - total_supply: 3_000_000_000_000_000_000_000_000_000_000_000, - gas_price_adjustment_rate: Ratio::from_integer(0), - transaction_validity_period, - epoch_length, - protocol_version: PROTOCOL_VERSION, - }; - - let doomslug_threshold_mode = if enable_doomslug { - DoomslugThresholdMode::TwoThirds - } else { - DoomslugThresholdMode::NoApprovals - }; - Chain::new( - epoch_manager.clone(), - shard_tracker.clone(), - runtime.clone(), - &chain_genesis, - doomslug_threshold_mode, - ChainConfig { - save_trie_changes: true, - background_migration_threads: 1, - state_snapshot_every_n_blocks: None, - }, - None, - ) - .unwrap(); - - let signer = Arc::new(create_test_signer(account_id.as_str())); - TelemetryActor::default().start(); - let config = ClientConfig::test( - skip_sync_wait, - min_block_prod_time, - max_block_prod_time, - num_validator_seats, - archive, - true, - epoch_sync_enabled, - state_sync_enabled, - ); - - let adv = crate::adversarial::Controls::default(); - - start_view_client( - Some(signer.validator_id().clone()), - chain_genesis, - epoch_manager, - shard_tracker, - runtime, - network_adapter, - config, - adv, - ) -} - -/// Sets up ClientActor and ViewClientActor with mock PeerManager. 
-pub fn setup_mock( - validators: Vec, - account_id: AccountId, - skip_sync_wait: bool, - enable_doomslug: bool, - peer_manager_mock: Box< - dyn FnMut( - &PeerManagerMessageRequest, - &mut Context, - Addr, - ) -> PeerManagerMessageResponse, - >, -) -> ActorHandlesForTesting { - setup_mock_with_validity_period_and_no_epoch_sync( - validators, - account_id, - skip_sync_wait, - enable_doomslug, - peer_manager_mock, - 100, - ) -} - -pub fn setup_mock_with_validity_period_and_no_epoch_sync( - validators: Vec, - account_id: AccountId, - skip_sync_wait: bool, - enable_doomslug: bool, - mut peermanager_mock: Box< - dyn FnMut( - &PeerManagerMessageRequest, - &mut Context, - Addr, - ) -> PeerManagerMessageResponse, - >, - transaction_validity_period: NumBlocks, -) -> ActorHandlesForTesting { - let network_adapter = Arc::new(LateBoundSender::default()); - let mut vca: Option> = None; - let mut sma: Option = None; - let client_addr = ClientActor::create(|ctx: &mut Context| { - let vs = ValidatorSchedule::new().block_producers_per_epoch(vec![validators]); - let (_, client, view_client_addr, shards_manager_adapter) = setup( - vs, - 10, - account_id, - skip_sync_wait, - MIN_BLOCK_PROD_TIME.as_millis() as u64, - MAX_BLOCK_PROD_TIME.as_millis() as u64, - enable_doomslug, - false, - false, - true, - network_adapter.clone().into(), - transaction_validity_period, - StaticClock::utc(), - ctx, - ); - vca = Some(view_client_addr); - sma = Some(shards_manager_adapter); - client - }); - let client_addr1 = client_addr.clone(); - - let network_actor = - PeerManagerMock::new(move |msg, ctx| peermanager_mock(&msg, ctx, client_addr1.clone())) - .start(); - - network_adapter.bind(network_actor); - - ActorHandlesForTesting { - client_actor: client_addr, - view_client_actor: vca.unwrap(), - shards_manager_adapter: sma.unwrap(), - } -} - -pub struct BlockStats { - hash2depth: HashMap, - num_blocks: u64, - max_chain_length: u64, - last_check: Instant, - max_divergence: u64, - last_hash: Option, - 
parent: HashMap, -} - -impl BlockStats { - fn new() -> BlockStats { - BlockStats { - hash2depth: HashMap::new(), - num_blocks: 0, - max_chain_length: 0, - last_check: StaticClock::instant(), - max_divergence: 0, - last_hash: None, - parent: HashMap::new(), - } - } - - fn calculate_distance(&mut self, mut lhs: CryptoHash, mut rhs: CryptoHash) -> u64 { - let mut dlhs = *self.hash2depth.get(&lhs).unwrap(); - let mut drhs = *self.hash2depth.get(&rhs).unwrap(); - - let mut result: u64 = 0; - while dlhs > drhs { - lhs = *self.parent.get(&lhs).unwrap(); - dlhs -= 1; - result += 1; - } - while dlhs < drhs { - rhs = *self.parent.get(&rhs).unwrap(); - drhs -= 1; - result += 1; - } - while lhs != rhs { - lhs = *self.parent.get(&lhs).unwrap(); - rhs = *self.parent.get(&rhs).unwrap(); - result += 2; - } - result - } - - fn add_block(&mut self, block: &Block) { - if self.hash2depth.contains_key(block.hash()) { - return; - } - let prev_height = self.hash2depth.get(block.header().prev_hash()).map(|v| *v).unwrap_or(0); - self.hash2depth.insert(*block.hash(), prev_height + 1); - self.num_blocks += 1; - self.max_chain_length = max(self.max_chain_length, prev_height + 1); - self.parent.insert(*block.hash(), *block.header().prev_hash()); - - if let Some(last_hash2) = self.last_hash { - self.max_divergence = - max(self.max_divergence, self.calculate_distance(last_hash2, *block.hash())); - } - - self.last_hash = Some(*block.hash()); - } - - pub fn check_stats(&mut self, force: bool) { - let now = StaticClock::instant(); - let diff = now.duration_since(self.last_check); - if !force && diff.lt(&Duration::from_secs(60)) { - return; - } - self.last_check = now; - let cur_ratio = (self.num_blocks as f64) / (max(1, self.max_chain_length) as f64); - info!( - "Block stats: ratio: {:.2}, num_blocks: {} max_chain_length: {} max_divergence: {}", - cur_ratio, self.num_blocks, self.max_chain_length, self.max_divergence - ); - } - - pub fn check_block_ratio(&mut self, min_ratio: Option, max_ratio: 
Option) { - let cur_ratio = (self.num_blocks as f64) / (max(1, self.max_chain_length) as f64); - if let Some(min_ratio2) = min_ratio { - if cur_ratio < min_ratio2 { - panic!( - "ratio of blocks to longest chain is too low got: {:.2} expected: {:.2}", - cur_ratio, min_ratio2 - ); - } - } - if let Some(max_ratio2) = max_ratio { - if cur_ratio > max_ratio2 { - panic!( - "ratio of blocks to longest chain is too high got: {:.2} expected: {:.2}", - cur_ratio, max_ratio2 - ); - } - } - } -} - -#[derive(Clone)] -pub struct ActorHandlesForTesting { - pub client_actor: Addr, - pub view_client_actor: Addr, - pub shards_manager_adapter: ShardsManagerAdapterForTest, -} - -fn send_chunks( - connectors: &[ActorHandlesForTesting], - recipients: I, - target: T, - drop_chunks: bool, - send_to: F, -) where - T: Eq, - I: Iterator, - F: Fn(&ShardsManagerAdapterForTest), -{ - for (i, name) in recipients { - if name == target { - if !drop_chunks || !thread_rng().gen_ratio(1, 5) { - send_to(&connectors[i].shards_manager_adapter); - } - } - } -} - -/// Setup multiple clients talking to each other via a mock network. -/// -/// # Arguments -/// -/// `vs` - the set of validators and how they are assigned to shards in different epochs. -/// -/// `key_pairs` - keys for `validators` -/// -/// `skip_sync_wait` -/// -/// `block_prod_time` - Minimum block production time, assuming there is enough approvals. The -/// maximum block production time depends on the value of `tamper_with_fg`, and is -/// equal to `block_prod_time` if `tamper_with_fg` is `true`, otherwise it is -/// `block_prod_time * 2` -/// -/// `drop_chunks` - if set to true, 10% of all the chunk messages / requests will be dropped -/// -/// `tamper_with_fg` - if set to true, will split the heights into groups of 100. 
For some groups -/// all the approvals will be dropped (thus completely disabling the finality gadget -/// and introducing severe forkfulness if `block_prod_time` is sufficiently small), -/// for some groups will keep all the approvals (and test the fg invariants), and -/// for some will drop 50% of the approvals. -/// This was designed to tamper with the finality gadget when we -/// had it, unclear if has much effect today. Must be disabled if doomslug is -/// enabled (see below), because doomslug will stall if approvals are not delivered. -/// -/// `epoch_length` - approximate length of the epoch as measured -/// by the block heights difference of it's last and first block. -/// -/// `enable_doomslug` - If false, blocks will be created when at least one approval is present, without -/// waiting for 2/3. This allows for more forkfulness. `cross_shard_tx` has modes -/// both with enabled doomslug (to test "production" setting) and with disabled -/// doomslug (to test higher forkfullness) -/// -/// `network_mock` - the callback that is called for each message sent. The `mock` is called before -/// the default processing. `mock` returns `(response, perform_default)`. If -/// `perform_default` is false, then the message is not processed or broadcasted -/// further and `response` is returned to the requester immediately. Otherwise -/// the default action is performed, that might (and likely will) overwrite the -/// `response` before it is sent back to the requester. 
-pub fn setup_mock_all_validators( - vs: ValidatorSchedule, - key_pairs: Vec, - skip_sync_wait: bool, - block_prod_time: u64, - drop_chunks: bool, - tamper_with_fg: bool, - epoch_length: BlockHeightDelta, - enable_doomslug: bool, - archive: Vec, - epoch_sync_enabled: Vec, - check_block_stats: bool, - peer_manager_mock: Box< - dyn FnMut( - // Peer validators - &[ActorHandlesForTesting], - // Validator that sends the message - AccountId, - // The message itself - &PeerManagerMessageRequest, - ) -> (PeerManagerMessageResponse, /* perform default */ bool), - >, -) -> (Block, Vec, Arc>) { - let peer_manager_mock = Arc::new(RwLock::new(peer_manager_mock)); - let validators = vs.all_validators().cloned().collect::>(); - let key_pairs = key_pairs; - - let addresses: Vec<_> = (0..key_pairs.len()).map(|i| hash(vec![i as u8].as_ref())).collect(); - let genesis_time = StaticClock::utc(); - let mut ret = vec![]; - - let connectors: Arc>> = Default::default(); - - let announced_accounts = Arc::new(RwLock::new(HashSet::new())); - let genesis_block = Arc::new(RwLock::new(None)); - - let last_height = Arc::new(RwLock::new(vec![0; key_pairs.len()])); - let largest_endorsed_height = Arc::new(RwLock::new(vec![0u64; key_pairs.len()])); - let largest_skipped_height = Arc::new(RwLock::new(vec![0u64; key_pairs.len()])); - let hash_to_height = Arc::new(RwLock::new(HashMap::new())); - let block_stats = Arc::new(RwLock::new(BlockStats::new())); - - for (index, account_id) in validators.clone().into_iter().enumerate() { - let vs = vs.clone(); - let block_stats1 = block_stats.clone(); - let mut view_client_addr_slot = None; - let mut shards_manager_adapter_slot = None; - let validators_clone2 = validators.clone(); - let genesis_block1 = genesis_block.clone(); - let key_pairs = key_pairs.clone(); - let key_pairs1 = key_pairs.clone(); - let addresses = addresses.clone(); - let connectors1 = connectors.clone(); - let network_mock1 = peer_manager_mock.clone(); - let announced_accounts1 = 
announced_accounts.clone(); - let last_height1 = last_height.clone(); - let last_height2 = last_height.clone(); - let largest_endorsed_height1 = largest_endorsed_height.clone(); - let largest_skipped_height1 = largest_skipped_height.clone(); - let hash_to_height1 = hash_to_height.clone(); - let archive1 = archive.clone(); - let epoch_sync_enabled1 = epoch_sync_enabled.clone(); - let client_addr = ClientActor::create(|ctx| { - let client_addr = ctx.address(); - let _account_id = account_id.clone(); - let pm = PeerManagerMock::new(move |msg, _ctx| { - // Note: this `.wait` will block until all `ClientActors` are created. - let connectors1 = connectors1.wait(); - let mut guard = network_mock1.write().unwrap(); - let (resp, perform_default) = - guard.deref_mut()(connectors1.as_slice(), account_id.clone(), &msg); - drop(guard); - - if perform_default { - let my_ord = validators_clone2.iter().position(|it| it == &account_id).unwrap(); - let my_key_pair = key_pairs[my_ord].clone(); - let my_address = addresses[my_ord]; - - { - let last_height2 = last_height2.read().unwrap(); - let peers: Vec<_> = key_pairs1 - .iter() - .take(connectors1.len()) - .enumerate() - .map(|(i, peer_info)| ConnectedPeerInfo { - full_peer_info: FullPeerInfo { - peer_info: peer_info.clone(), - chain_info: PeerChainInfo { - genesis_id: GenesisId { - chain_id: "unittest".to_string(), - hash: Default::default(), - }, - // TODO: add the correct hash here - last_block: Some(BlockInfo { - height: last_height2[i], - hash: CryptoHash::default(), - }), - tracked_shards: vec![], - archival: true, - }, - }, - received_bytes_per_sec: 0, - sent_bytes_per_sec: 0, - last_time_peer_requested: near_async::time::Instant::now(), - last_time_received_message: near_async::time::Instant::now(), - connection_established_time: near_async::time::Instant::now(), - peer_type: PeerType::Outbound, - nonce: 3, - }) - .collect(); - let peers2 = peers - .iter() - .filter_map(|it| it.full_peer_info.clone().into()) - .collect(); - 
let info = NetworkInfo { - connected_peers: peers, - tier1_connections: vec![], - num_connected_peers: key_pairs1.len(), - peer_max_count: key_pairs1.len() as u32, - highest_height_peers: peers2, - sent_bytes_per_sec: 0, - received_bytes_per_sec: 0, - known_producers: vec![], - tier1_accounts_keys: vec![], - tier1_accounts_data: vec![], - }; - client_addr.do_send(SetNetworkInfo(info).with_span_context()); - } - - match msg.as_network_requests_ref() { - NetworkRequests::Block { block } => { - if check_block_stats { - let block_stats2 = &mut *block_stats1.write().unwrap(); - block_stats2.add_block(block); - block_stats2.check_stats(false); - } - - for actor_handles in connectors1 { - actor_handles.client_actor.do_send( - BlockResponse { - block: block.clone(), - peer_id: PeerInfo::random().id, - was_requested: false, - } - .with_span_context(), - ); - } - - let mut last_height1 = last_height1.write().unwrap(); - - let my_height = &mut last_height1[my_ord]; - - *my_height = max(*my_height, block.header().height()); - - hash_to_height1 - .write() - .unwrap() - .insert(*block.header().hash(), block.header().height()); - } - NetworkRequests::PartialEncodedChunkRequest { target, request, .. 
} => { - send_chunks( - connectors1, - validators_clone2.iter().map(|s| Some(s.clone())).enumerate(), - target.account_id.as_ref().map(|s| s.clone()), - drop_chunks, - |c| { - c.send(ShardsManagerRequestFromNetwork::ProcessPartialEncodedChunkRequest { partial_encoded_chunk_request: request.clone(), route_back: my_address }); - }, - ); - } - NetworkRequests::PartialEncodedChunkResponse { route_back, response } => { - send_chunks( - connectors1, - addresses.iter().enumerate(), - route_back, - drop_chunks, - |c| { - c.send(ShardsManagerRequestFromNetwork::ProcessPartialEncodedChunkResponse { partial_encoded_chunk_response: response.clone(), received_time: Instant::now() }); - }, - ); - } - NetworkRequests::PartialEncodedChunkMessage { - account_id, - partial_encoded_chunk, - } => { - send_chunks( - connectors1, - validators_clone2.iter().cloned().enumerate(), - account_id.clone(), - drop_chunks, - |c| { - c.send(ShardsManagerRequestFromNetwork::ProcessPartialEncodedChunk(partial_encoded_chunk.clone().into())); - }, - ); - } - NetworkRequests::PartialEncodedChunkForward { account_id, forward } => { - send_chunks( - connectors1, - validators_clone2.iter().cloned().enumerate(), - account_id.clone(), - drop_chunks, - |c| { - c.send(ShardsManagerRequestFromNetwork::ProcessPartialEncodedChunkForward(forward.clone())); - } - ); - } - NetworkRequests::BlockRequest { hash, peer_id } => { - for (i, peer_info) in key_pairs.iter().enumerate() { - let peer_id = peer_id.clone(); - if peer_info.id == peer_id { - let me = connectors1[my_ord].client_actor.clone(); - actix::spawn( - connectors1[i] - .view_client_actor - .send(BlockRequest(*hash).with_span_context()) - .then(move |response| { - let response = response.unwrap(); - match response { - Some(block) => { - me.do_send( - BlockResponse { - block: *block, - peer_id, - was_requested: true, - } - .with_span_context(), - ); - } - None => {} - } - future::ready(()) - }), - ); - } - } - } - NetworkRequests::BlockHeadersRequest { 
hashes, peer_id } => { - for (i, peer_info) in key_pairs.iter().enumerate() { - let peer_id = peer_id.clone(); - if peer_info.id == peer_id { - let me = connectors1[my_ord].client_actor.clone(); - actix::spawn( - connectors1[i] - .view_client_actor - .send( - BlockHeadersRequest(hashes.clone()) - .with_span_context(), - ) - .then(move |response| { - let response = response.unwrap(); - match response { - Some(headers) => { - me.do_send( - BlockHeadersResponse(headers, peer_id) - .with_span_context(), - ); - } - None => {} - } - future::ready(()) - }), - ); - } - } - } - NetworkRequests::StateRequestHeader { - shard_id, - sync_hash, - target: target_account_id, - } => { - let target_account_id = match target_account_id { - AccountOrPeerIdOrHash::AccountId(x) => x, - _ => panic!(), - }; - for (i, name) in validators_clone2.iter().enumerate() { - if name == target_account_id { - let me = connectors1[my_ord].client_actor.clone(); - actix::spawn( - connectors1[i] - .view_client_actor - .send( - StateRequestHeader { - shard_id: *shard_id, - sync_hash: *sync_hash, - } - .with_span_context(), - ) - .then(move |response| { - let response = response.unwrap(); - match response { - Some(response) => { - me.do_send(response.with_span_context()); - } - None => {} - } - future::ready(()) - }), - ); - } - } - } - NetworkRequests::StateRequestPart { - shard_id, - sync_hash, - part_id, - target: target_account_id, - } => { - let target_account_id = match target_account_id { - AccountOrPeerIdOrHash::AccountId(x) => x, - _ => panic!(), - }; - for (i, name) in validators_clone2.iter().enumerate() { - if name == target_account_id { - let me = connectors1[my_ord].client_actor.clone(); - actix::spawn( - connectors1[i] - .view_client_actor - .send( - StateRequestPart { - shard_id: *shard_id, - sync_hash: *sync_hash, - part_id: *part_id, - } - .with_span_context(), - ) - .then(move |response| { - let response = response.unwrap(); - match response { - Some(response) => { - 
me.do_send(response.with_span_context()); - } - None => {} - } - future::ready(()) - }), - ); - } - } - } - NetworkRequests::AnnounceAccount(announce_account) => { - let mut aa = announced_accounts1.write().unwrap(); - let key = ( - announce_account.account_id.clone(), - announce_account.epoch_id.clone(), - ); - if aa.get(&key).is_none() { - aa.insert(key); - for actor_handles in connectors1 { - actor_handles.view_client_actor.do_send( - AnnounceAccountRequest(vec![( - announce_account.clone(), - None, - )]) - .with_span_context(), - ) - } - } - } - NetworkRequests::Approval { approval_message } => { - let height_mod = approval_message.approval.target_height % 300; - - let do_propagate = if tamper_with_fg { - if height_mod < 100 { - false - } else if height_mod < 200 { - let mut rng = rand::thread_rng(); - rng.gen() - } else { - true - } - } else { - true - }; - - let approval = approval_message.approval.clone(); - - if do_propagate { - for (i, name) in validators_clone2.iter().enumerate() { - if name == &approval_message.target { - connectors1[i].client_actor.do_send( - BlockApproval(approval.clone(), my_key_pair.id.clone()) - .with_span_context(), - ); - } - } - } - - // Verify doomslug invariant - match approval.inner { - ApprovalInner::Endorsement(parent_hash) => { - assert!( - approval.target_height - > largest_skipped_height1.read().unwrap()[my_ord] - ); - largest_endorsed_height1.write().unwrap()[my_ord] = - approval.target_height; - - if let Some(prev_height) = - hash_to_height1.read().unwrap().get(&parent_hash) - { - assert_eq!(prev_height + 1, approval.target_height); - } - } - ApprovalInner::Skip(prev_height) => { - largest_skipped_height1.write().unwrap()[my_ord] = - approval.target_height; - let e = largest_endorsed_height1.read().unwrap()[my_ord]; - // `e` is the *target* height of the last endorsement. `prev_height` - // is allowed to be anything >= to the source height, which is e-1. 
- assert!( - prev_height + 1 >= e, - "New: {}->{}, Old: {}->{}", - prev_height, - approval.target_height, - e - 1, - e - ); - } - }; - } - NetworkRequests::ForwardTx(_, _) - | NetworkRequests::BanPeer { .. } - | NetworkRequests::TxStatus(_, _, _) - | NetworkRequests::Challenge(_) => {} - }; - } - resp - }) - .start(); - let (block, client, view_client_addr, shards_manager_adapter) = setup( - vs, - epoch_length, - _account_id, - skip_sync_wait, - block_prod_time, - block_prod_time * 3, - enable_doomslug, - archive1[index], - epoch_sync_enabled1[index], - true, - Arc::new(pm).into(), - 10000, - genesis_time, - ctx, - ); - view_client_addr_slot = Some(view_client_addr); - shards_manager_adapter_slot = Some(shards_manager_adapter); - *genesis_block1.write().unwrap() = Some(block); - client - }); - ret.push(ActorHandlesForTesting { - client_actor: client_addr, - view_client_actor: view_client_addr_slot.unwrap(), - shards_manager_adapter: shards_manager_adapter_slot.unwrap(), - }); - } - hash_to_height.write().unwrap().insert(CryptoHash::default(), 0); - hash_to_height - .write() - .unwrap() - .insert(*genesis_block.read().unwrap().as_ref().unwrap().header().clone().hash(), 0); - connectors.set(ret.clone()).ok().unwrap(); - let value = genesis_block.read().unwrap(); - (value.clone().unwrap(), ret, block_stats) -} - -/// Sets up ClientActor and ViewClientActor without network. 
-pub fn setup_no_network( - validators: Vec, - account_id: AccountId, - skip_sync_wait: bool, - enable_doomslug: bool, -) -> ActorHandlesForTesting { - setup_no_network_with_validity_period_and_no_epoch_sync( - validators, - account_id, - skip_sync_wait, - 100, - enable_doomslug, - ) -} - -pub fn setup_no_network_with_validity_period_and_no_epoch_sync( - validators: Vec, - account_id: AccountId, - skip_sync_wait: bool, - transaction_validity_period: NumBlocks, - enable_doomslug: bool, -) -> ActorHandlesForTesting { - setup_mock_with_validity_period_and_no_epoch_sync( - validators, - account_id, - skip_sync_wait, - enable_doomslug, - Box::new(|_, _, _| { - PeerManagerMessageResponse::NetworkResponses(NetworkResponses::NoResponse) - }), - transaction_validity_period, - ) -} - -pub fn setup_client_with_runtime( - num_validator_seats: NumSeats, - account_id: Option, - enable_doomslug: bool, - network_adapter: PeerManagerAdapter, - shards_manager_adapter: ShardsManagerAdapterForTest, - chain_genesis: ChainGenesis, - epoch_manager: Arc, - shard_tracker: ShardTracker, - runtime: Arc, - rng_seed: RngSeed, - archive: bool, - save_trie_changes: bool, - make_state_snapshot_callback: Option, -) -> Client { - let validator_signer = - account_id.map(|x| Arc::new(create_test_signer(x.as_str())) as Arc); - let mut config = ClientConfig::test( - true, - 10, - 20, - num_validator_seats, - archive, - save_trie_changes, - true, - true, - ); - config.epoch_length = chain_genesis.epoch_length; - let mut client = Client::new( - config, - chain_genesis, - epoch_manager, - shard_tracker, - runtime, - network_adapter, - shards_manager_adapter.client, - validator_signer, - enable_doomslug, - rng_seed, - make_state_snapshot_callback, - ) - .unwrap(); - client.sync_status = SyncStatus::NoSync; - client -} - -pub fn setup_client( - store: Store, - vs: ValidatorSchedule, - account_id: Option, - enable_doomslug: bool, - network_adapter: PeerManagerAdapter, - shards_manager_adapter: 
ShardsManagerAdapterForTest, - chain_genesis: ChainGenesis, - rng_seed: RngSeed, - archive: bool, - save_trie_changes: bool, -) -> Client { - let num_validator_seats = vs.all_block_producers().count() as NumSeats; - let epoch_manager = - MockEpochManager::new_with_validators(store.clone(), vs, chain_genesis.epoch_length); - let shard_tracker = ShardTracker::new_empty(epoch_manager.clone()); - let runtime = KeyValueRuntime::new(store, epoch_manager.as_ref()); - setup_client_with_runtime( - num_validator_seats, - account_id, - enable_doomslug, - network_adapter, - shards_manager_adapter, - chain_genesis, - epoch_manager, - shard_tracker, - runtime, - rng_seed, - archive, - save_trie_changes, - None, - ) -} - -pub fn setup_synchronous_shards_manager( - account_id: Option, - client_adapter: Sender, - network_adapter: PeerManagerAdapter, - epoch_manager: Arc, - shard_tracker: ShardTracker, - runtime: Arc, - chain_genesis: &ChainGenesis, -) -> ShardsManagerAdapterForTest { - // Initialize the chain, to make sure that if the store is empty, we write the genesis - // into the store, and as a short cut to get the parameters needed to instantiate - // ShardsManager. This way we don't have to wait to construct the Client first. - // TODO(#8324): This should just be refactored so that we can construct Chain first - // before anything else. 
- let chain = Chain::new( - epoch_manager.clone(), - shard_tracker.clone(), - runtime, - chain_genesis, - DoomslugThresholdMode::TwoThirds, // irrelevant - ChainConfig { - save_trie_changes: true, - background_migration_threads: 1, - state_snapshot_every_n_blocks: None, - }, // irrelevant - None, - ) - .unwrap(); - let chain_head = chain.head().unwrap(); - let chain_header_head = chain.header_head().unwrap(); - let shards_manager = ShardsManager::new( - time::Clock::real(), - account_id, - epoch_manager, - shard_tracker, - network_adapter.request_sender, - client_adapter, - chain.store().new_read_only_chunks_store(), - chain_head, - chain_header_head, - ); - Arc::new(SynchronousShardsManagerAdapter::new(shards_manager)).into() -} - -pub fn setup_client_with_synchronous_shards_manager( - store: Store, - vs: ValidatorSchedule, - account_id: Option, - enable_doomslug: bool, - network_adapter: PeerManagerAdapter, - client_adapter: Sender, - chain_genesis: ChainGenesis, - rng_seed: RngSeed, - archive: bool, - save_trie_changes: bool, -) -> Client { - let num_validator_seats = vs.all_block_producers().count() as NumSeats; - let epoch_manager = - MockEpochManager::new_with_validators(store.clone(), vs, chain_genesis.epoch_length); - let shard_tracker = ShardTracker::new_empty(epoch_manager.clone()); - let runtime = KeyValueRuntime::new(store, epoch_manager.as_ref()); - let shards_manager_adapter = setup_synchronous_shards_manager( - account_id.clone(), - client_adapter, - network_adapter.clone(), - epoch_manager.clone(), - shard_tracker.clone(), - runtime.clone(), - &chain_genesis, - ); - setup_client_with_runtime( - num_validator_seats, - account_id, - enable_doomslug, - network_adapter, - shards_manager_adapter, - chain_genesis, - epoch_manager, - shard_tracker, - runtime, - rng_seed, - archive, - save_trie_changes, - None, - ) -} - -/// A combined trait bound for both the client side and network side of the ShardsManager API. 
-#[derive(Clone, derive_more::AsRef)] -pub struct ShardsManagerAdapterForTest { - pub client: Sender, - pub network: Sender, -} - -impl + CanSend> - From> for ShardsManagerAdapterForTest -{ - fn from(arc: Arc) -> Self { - Self { client: arc.as_sender(), network: arc.as_sender() } - } -} - -/// An environment for writing integration tests with multiple clients. -/// This environment can simulate near nodes without network and it can be configured to use different runtimes. -pub struct TestEnv { - pub chain_genesis: ChainGenesis, - pub validators: Vec, - pub network_adapters: Vec>, - pub client_adapters: Vec>, - pub shards_manager_adapters: Vec, - pub clients: Vec, - account_to_client_index: HashMap, - paused_blocks: Arc>>>>, - // random seed to be inject in each client according to AccountId - // if not set, a default constant TEST_SEED will be injected - seeds: HashMap, - archive: bool, - save_trie_changes: bool, -} - -#[derive(derive_more::From, Clone)] -enum EpochManagerKind { - Mock(Arc), - Handle(Arc), -} - -impl EpochManagerKind { - pub fn into_adapter(self) -> Arc { - match self { - Self::Mock(mock) => mock, - Self::Handle(handle) => handle, - } - } -} - -/// A builder for the TestEnv structure. -pub struct TestEnvBuilder { - chain_genesis: ChainGenesis, - clients: Vec, - validators: Vec, - home_dirs: Option>, - stores: Option>, - epoch_managers: Option>, - shard_trackers: Option>, - runtimes: Option>>, - network_adapters: Option>>, - num_shards: Option, - // random seed to be inject in each client according to AccountId - // if not set, a default constant TEST_SEED will be injected - seeds: HashMap, - archive: bool, - save_trie_changes: bool, - add_state_snapshots: bool, -} - -/// Builder for the [`TestEnv`] structure. -impl TestEnvBuilder { - /// Constructs a new builder. 
- fn new(chain_genesis: ChainGenesis) -> Self { - let clients = Self::make_accounts(1); - let validators = clients.clone(); - let seeds: HashMap = HashMap::with_capacity(1); - Self { - chain_genesis, - clients, - validators, - home_dirs: None, - stores: None, - epoch_managers: None, - shard_trackers: None, - runtimes: None, - network_adapters: None, - num_shards: None, - seeds, - archive: false, - save_trie_changes: true, - add_state_snapshots: false, - } - } - - /// Sets list of client [`AccountId`]s to the one provided. Panics if the - /// vector is empty. - pub fn clients(mut self, clients: Vec) -> Self { - assert!(!clients.is_empty()); - assert!(self.stores.is_none(), "Cannot set clients after stores"); - assert!(self.epoch_managers.is_none(), "Cannot set clients after epoch_managers"); - assert!(self.shard_trackers.is_none(), "Cannot set clients after shard_trackers"); - assert!(self.runtimes.is_none(), "Cannot set clients after runtimes"); - assert!(self.network_adapters.is_none(), "Cannot set clients after network_adapters"); - self.clients = clients; - self - } - - /// Sets random seed for each client according to the provided HashMap. - pub fn clients_random_seeds(mut self, seeds: HashMap) -> Self { - self.seeds = seeds; - self - } - - /// Sets number of clients to given one. To get [`AccountId`] used by the - /// validator associated with the client the [`TestEnv::get_client_id`] - /// method can be used. Tests should not rely on any particular format of - /// account identifiers used by the builder. Panics if `num` is zero. - pub fn clients_count(self, num: usize) -> Self { - self.clients(Self::make_accounts(num)) - } - - /// Sets list of validator [`AccountId`]s to the one provided. Panics if - /// the vector is empty. 
- pub fn validators(mut self, validators: Vec) -> Self { - assert!(!validators.is_empty()); - assert!(self.epoch_managers.is_none(), "Cannot set validators after epoch_managers"); - self.validators = validators; - self - } - - /// Sets number of validator seats to given one. To get [`AccountId`] used - /// in the test environment the `validators` field of the built [`TestEnv`] - /// object can be used. Tests should not rely on any particular format of - /// account identifiers used by the builder. Panics if `num` is zero. - pub fn validator_seats(self, num: usize) -> Self { - self.validators(Self::make_accounts(num)) - } - - fn ensure_home_dirs(mut self) -> Self { - if self.home_dirs.is_none() { - let home_dirs = (0..self.clients.len()) - .map(|_| { - let temp_dir = tempfile::tempdir().unwrap(); - temp_dir.into_path() - }) - .collect_vec(); - self.home_dirs = Some(home_dirs) - } - self - } - - /// Overrides the stores that are used to create epoch managers and runtimes. - pub fn stores(mut self, stores: Vec) -> Self { - assert_eq!(stores.len(), self.clients.len()); - assert!(self.stores.is_none(), "Cannot override twice"); - assert!(self.epoch_managers.is_none(), "Cannot override store after epoch_managers"); - assert!(self.runtimes.is_none(), "Cannot override store after runtimes"); - self.stores = Some(stores); - self - } - - pub fn real_stores(self) -> Self { - let ret = self.ensure_home_dirs(); - let stores = ret - .home_dirs - .as_ref() - .unwrap() - .iter() - .map(|home_dir| { - NodeStorage::opener(home_dir.as_path(), false, &Default::default(), None) - .open() - .unwrap() - .get_hot_store() - }) - .collect_vec(); - ret.stores(stores) - } - - /// Internal impl to make sure the stores are initialized. - fn ensure_stores(self) -> Self { - if self.stores.is_some() { - self - } else { - let num_clients = self.clients.len(); - self.stores((0..num_clients).map(|_| create_test_store()).collect()) - } - } - - /// Specifies custom MockEpochManager for each client. 
This allows us to - /// construct [`TestEnv`] with a custom implementation. - /// - /// The vector must have the same number of elements as they are clients - /// (one by default). If that does not hold, [`Self::build`] method will - /// panic. - pub fn mock_epoch_managers(mut self, epoch_managers: Vec>) -> Self { - assert_eq!(epoch_managers.len(), self.clients.len()); - assert!(self.epoch_managers.is_none(), "Cannot override twice"); - assert!( - self.num_shards.is_none(), - "Cannot set both num_shards and epoch_managers at the same time" - ); - assert!( - self.shard_trackers.is_none(), - "Cannot override epoch_managers after shard_trackers" - ); - assert!(self.runtimes.is_none(), "Cannot override epoch_managers after runtimes"); - self.epoch_managers = - Some(epoch_managers.into_iter().map(|epoch_manager| epoch_manager.into()).collect()); - self - } - - /// Specifies custom EpochManagerHandle for each client. This allows us to - /// construct [`TestEnv`] with a custom implementation. - /// - /// The vector must have the same number of elements as they are clients - /// (one by default). If that does not hold, [`Self::build`] method will - /// panic. - pub fn epoch_managers(mut self, epoch_managers: Vec>) -> Self { - assert_eq!(epoch_managers.len(), self.clients.len()); - assert!(self.epoch_managers.is_none(), "Cannot override twice"); - assert!( - self.num_shards.is_none(), - "Cannot set both num_shards and epoch_managers at the same time" - ); - assert!( - self.shard_trackers.is_none(), - "Cannot override epoch_managers after shard_trackers" - ); - assert!(self.runtimes.is_none(), "Cannot override epoch_managers after runtimes"); - self.epoch_managers = - Some(epoch_managers.into_iter().map(|epoch_manager| epoch_manager.into()).collect()); - self - } - - /// Constructs real EpochManager implementations for each instance. 
- pub fn real_epoch_managers(self, genesis_config: &GenesisConfig) -> Self { - assert!( - self.num_shards.is_none(), - "Cannot set both num_shards and epoch_managers at the same time" - ); - let ret = self.ensure_stores(); - let epoch_managers = (0..ret.clients.len()) - .map(|i| { - EpochManager::new_arc_handle( - ret.stores.as_ref().unwrap()[i].clone(), - genesis_config, - ) - }) - .collect(); - ret.epoch_managers(epoch_managers) - } - - /// Internal impl to make sure EpochManagers are initialized. - fn ensure_epoch_managers(self) -> Self { - let mut ret = self.ensure_stores(); - if ret.epoch_managers.is_some() { - ret - } else { - let epoch_managers: Vec = (0..ret.clients.len()) - .map(|i| { - let vs = ValidatorSchedule::new_with_shards(ret.num_shards.unwrap_or(1)) - .block_producers_per_epoch(vec![ret.validators.clone()]); - MockEpochManager::new_with_validators( - ret.stores.as_ref().unwrap()[i].clone(), - vs, - ret.chain_genesis.epoch_length, - ) - .into() - }) - .collect(); - assert!( - ret.shard_trackers.is_none(), - "Cannot override shard_trackers without overriding epoch_managers" - ); - assert!( - ret.runtimes.is_none(), - "Cannot override runtimes without overriding epoch_managers" - ); - ret.epoch_managers = Some(epoch_managers); - ret - } - } - - /// Visible for extension methods in integration-tests. 
- pub fn internal_ensure_epoch_managers_for_nightshade_runtime( - self, - ) -> (Self, Vec, Vec, Vec>) { - let builder = self.ensure_epoch_managers(); - let default_home_dirs = - (0..builder.clients.len()).map(|_| PathBuf::from("../../../..")).collect_vec(); - let home_dirs = builder.home_dirs.clone().unwrap_or(default_home_dirs); - let stores = builder.stores.clone().unwrap(); - let epoch_managers = builder - .epoch_managers - .clone() - .unwrap() - .into_iter() - .map(|kind| match kind { - EpochManagerKind::Mock(_) => { - panic!("NightshadeRuntime can only be instantiated with EpochManagerHandle") - } - EpochManagerKind::Handle(handle) => handle, - }) - .collect(); - (builder, home_dirs, stores, epoch_managers) - } - - /// Specifies custom ShardTracker for each client. This allows us to - /// construct [`TestEnv`] with a custom implementation. - pub fn shard_trackers(mut self, shard_trackers: Vec) -> Self { - assert_eq!(shard_trackers.len(), self.clients.len()); - assert!(self.shard_trackers.is_none(), "Cannot override twice"); - self.shard_trackers = Some(shard_trackers); - self - } - - /// Constructs ShardTracker that tracks all shards for each instance. - /// - /// Note that in order to track *NO* shards, just don't override shard_trackers. - pub fn track_all_shards(self) -> Self { - let ret = self.ensure_epoch_managers(); - let shard_trackers = ret - .epoch_managers - .as_ref() - .unwrap() - .iter() - .map(|epoch_manager| { - ShardTracker::new(TrackedConfig::AllShards, epoch_manager.clone().into_adapter()) - }) - .collect(); - ret.shard_trackers(shard_trackers) - } - - /// Internal impl to make sure ShardTrackers are initialized. 
- fn ensure_shard_trackers(self) -> Self { - let ret = self.ensure_epoch_managers(); - if ret.shard_trackers.is_some() { - ret - } else { - let shard_trackers = ret - .epoch_managers - .as_ref() - .unwrap() - .iter() - .map(|epoch_manager| { - ShardTracker::new( - TrackedConfig::new_empty(), - epoch_manager.clone().into_adapter(), - ) - }) - .collect(); - ret.shard_trackers(shard_trackers) - } - } - - /// Specifies custom RuntimeAdapter for each client. This allows us to - /// construct [`TestEnv`] with a custom implementation. - pub fn runtimes(mut self, runtimes: Vec>) -> Self { - assert_eq!(runtimes.len(), self.clients.len()); - assert!(self.runtimes.is_none(), "Cannot override twice"); - self.runtimes = Some(runtimes); - self - } - - /// Internal impl to make sure runtimes are initialized. - fn ensure_runtimes(self) -> Self { - let ret = self.ensure_epoch_managers(); - if ret.runtimes.is_some() { - ret - } else { - let runtimes = (0..ret.clients.len()) - .map(|i| { - let epoch_manager = match &ret.epoch_managers.as_ref().unwrap()[i] { - EpochManagerKind::Mock(mock) => mock.as_ref(), - EpochManagerKind::Handle(_) => { - panic!( - "Can only default construct KeyValueRuntime with MockEpochManager" - ) - } - }; - KeyValueRuntime::new(ret.stores.as_ref().unwrap()[i].clone(), epoch_manager) - as Arc - }) - .collect(); - ret.runtimes(runtimes) - } - } - - /// Specifies custom network adaptors for each client. - /// - /// The vector must have the same number of elements as they are clients - /// (one by default). If that does not hold, [`Self::build`] method will - /// panic. - pub fn network_adapters(mut self, adapters: Vec>) -> Self { - self.network_adapters = Some(adapters); - self - } - - /// Internal impl to make sure network adapters are initialized. 
- fn ensure_network_adapters(self) -> Self { - if self.network_adapters.is_some() { - self - } else { - let num_clients = self.clients.len(); - self.network_adapters((0..num_clients).map(|_| Arc::new(Default::default())).collect()) - } - } - - pub fn num_shards(mut self, num_shards: NumShards) -> Self { - assert!( - self.epoch_managers.is_none(), - "Cannot set both num_shards and epoch_managers at the same time" - ); - self.num_shards = Some(num_shards); - self - } - - pub fn archive(mut self, archive: bool) -> Self { - self.archive = archive; - self - } - - pub fn save_trie_changes(mut self, save_trie_changes: bool) -> Self { - self.save_trie_changes = save_trie_changes; - self - } - - /// Constructs new `TestEnv` structure. - /// - /// If no clients were configured (either through count or vector) one - /// client is created. Similarly, if no validator seats were configured, - /// one seat is configured. - /// - /// Panics if `runtime_adapters` or `network_adapters` methods were used and - /// the length of the vectors passed to them did not equal number of - /// configured clients. 
- pub fn build(self) -> TestEnv { - self.ensure_shard_trackers().ensure_runtimes().ensure_network_adapters().build_impl() - } - - fn build_impl(self) -> TestEnv { - let chain_genesis = self.chain_genesis; - let clients = self.clients.clone(); - let num_clients = clients.len(); - let validators = self.validators; - let num_validators = validators.len(); - let seeds = self.seeds; - let epoch_managers = self.epoch_managers.unwrap(); - let shard_trackers = self.shard_trackers.unwrap(); - let runtimes = self.runtimes.unwrap(); - let network_adapters = self.network_adapters.unwrap(); - let client_adapters = (0..num_clients) - .map(|_| Arc::new(MockClientAdapterForShardsManager::default())) - .collect::>(); - let shards_manager_adapters = (0..num_clients) - .map(|i| { - let epoch_manager = epoch_managers[i].clone(); - let shard_tracker = shard_trackers[i].clone(); - let runtime = runtimes[i].clone(); - let network_adapter = network_adapters[i].clone(); - let client_adapter = client_adapters[i].clone(); - setup_synchronous_shards_manager( - Some(clients[i].clone()), - client_adapter.as_sender(), - network_adapter.into(), - epoch_manager.into_adapter(), - shard_tracker, - runtime, - &chain_genesis, - ) - }) - .collect::>(); - let clients = (0..num_clients) - .map(|i| { - let account_id = clients[i].clone(); - let network_adapter = network_adapters[i].clone(); - let shards_manager_adapter = shards_manager_adapters[i].clone(); - let epoch_manager = epoch_managers[i].clone(); - let shard_tracker = shard_trackers[i].clone(); - let runtime = runtimes[i].clone(); - let rng_seed = match seeds.get(&account_id) { - Some(seed) => *seed, - None => TEST_SEED, - }; - let make_state_snapshot_callback : Option = if self.add_state_snapshots { - let runtime = runtime.clone(); - let snapshot : MakeSnapshotCallback = Arc::new(move |prev_block_hash, shard_uids, block| { - tracing::info!(target: "state_snapshot", ?prev_block_hash, "make_snapshot_callback"); - 
runtime.get_tries().make_state_snapshot(&prev_block_hash, &shard_uids, &block).unwrap(); - }); - Some(snapshot) - } else { - None - }; - setup_client_with_runtime( - u64::try_from(num_validators).unwrap(), - Some(account_id), - false, - network_adapter.into(), - shards_manager_adapter, - chain_genesis.clone(), - epoch_manager.into_adapter(), - shard_tracker, - runtime, - rng_seed, - self.archive, - self.save_trie_changes, - make_state_snapshot_callback, - ) - }) - .collect(); - - TestEnv { - chain_genesis, - validators, - network_adapters, - client_adapters, - shards_manager_adapters, - clients, - account_to_client_index: self - .clients - .into_iter() - .enumerate() - .map(|(index, client)| (client, index)) - .collect(), - paused_blocks: Default::default(), - seeds, - archive: self.archive, - save_trie_changes: self.save_trie_changes, - } - } - - fn make_accounts(count: usize) -> Vec { - (0..count).map(|i| format!("test{}", i).parse().unwrap()).collect() - } - - pub fn use_state_snapshots(mut self) -> Self { - self.add_state_snapshots = true; - self - } -} - -impl TestEnv { - pub fn builder(chain_genesis: ChainGenesis) -> TestEnvBuilder { - TestEnvBuilder::new(chain_genesis) - } - - /// Process a given block in the client with index `id`. - /// Simulate the block processing logic in `Client`, i.e, it would run catchup and then process accepted blocks and possibly produce chunks. - pub fn process_block(&mut self, id: usize, block: Block, provenance: Provenance) { - self.clients[id].process_block_test(MaybeValidated::from(block), provenance).unwrap(); - } - - /// Produces block by given client, which may kick off chunk production. - /// This means that transactions added before this call will be included in the next block produced by this validator. 
- pub fn produce_block(&mut self, id: usize, height: BlockHeight) { - let block = self.clients[id].produce_block(height).unwrap(); - self.process_block(id, block.unwrap(), Provenance::PRODUCED); - } - - /// Pause processing of the given block, which means that the background - /// thread which applies the chunks on the block will get blocked until - /// `resume_block_processing` is called. - /// - /// Note that you must call `resume_block_processing` at some later point to - /// unstuck the block. - /// - /// Implementation is rather crude and just hijacks our logging - /// infrastructure. Hopefully this is good enough, but, if it isn't, we can - /// add something more robust. - pub fn pause_block_processing(&mut self, capture: &mut TracingCapture, block: &CryptoHash) { - let paused_blocks = Arc::clone(&self.paused_blocks); - paused_blocks.lock().unwrap().insert(*block, Arc::new(OnceCell::new())); - capture.set_callback(move |msg| { - if msg.starts_with("do_apply_chunks") { - let cell = paused_blocks.lock().unwrap().iter().find_map(|(block_hash, cell)| { - if msg.contains(&format!("block_hash={block_hash}")) { - Some(Arc::clone(cell)) - } else { - None - } - }); - if let Some(cell) = cell { - cell.wait(); - } - } - }); - } - - /// See `pause_block_processing`. 
- pub fn resume_block_processing(&mut self, block: &CryptoHash) { - let mut paused_blocks = self.paused_blocks.lock().unwrap(); - let cell = paused_blocks.remove(block).unwrap(); - let _ = cell.set(()); - } - - pub fn client(&mut self, account_id: &AccountId) -> &mut Client { - &mut self.clients[self.account_to_client_index[account_id]] - } - - pub fn shards_manager(&self, account: &AccountId) -> &ShardsManagerAdapterForTest { - &self.shards_manager_adapters[self.account_to_client_index[account]] - } - - pub fn process_partial_encoded_chunks(&mut self) { - let network_adapters = self.network_adapters.clone(); - - let mut keep_going = true; - while keep_going { - for network_adapter in network_adapters.iter() { - keep_going = false; - // process partial encoded chunks - while let Some(request) = network_adapter.pop() { - // if there are any requests in any of the adapters reset - // keep going to true as processing of any message may - // trigger more messages to be processed in other clients - // it's a bit sad and it would be much nicer if all messages - // were forwarded to a single queue - // TODO would be nicer to first handle all PECs and then all PECFs - keep_going = true; - match request { - PeerManagerMessageRequest::NetworkRequests( - NetworkRequests::PartialEncodedChunkMessage { - account_id, - partial_encoded_chunk, - }, - ) => { - let partial_encoded_chunk = - PartialEncodedChunk::from(partial_encoded_chunk); - let message = - ShardsManagerRequestFromNetwork::ProcessPartialEncodedChunk( - partial_encoded_chunk, - ); - self.shards_manager(&account_id).send(message); - } - PeerManagerMessageRequest::NetworkRequests( - NetworkRequests::PartialEncodedChunkForward { account_id, forward }, - ) => { - let message = - ShardsManagerRequestFromNetwork::ProcessPartialEncodedChunkForward( - forward, - ); - self.shards_manager(&account_id).send(message); - } - _ => { - tracing::debug!(target: "test", ?request, "skipping unsupported request type"); - } - } - } - } - 
} - } - - /// Process all PartialEncodedChunkRequests in the network queue for a client - /// `id`: id for the client - pub fn process_partial_encoded_chunks_requests(&mut self, id: usize) { - while let Some(request) = self.network_adapters[id].pop() { - self.process_partial_encoded_chunk_request(id, request); - } - } - - /// Send the PartialEncodedChunkRequest to the target client, get response and process the response - pub fn process_partial_encoded_chunk_request( - &mut self, - id: usize, - request: PeerManagerMessageRequest, - ) { - if let PeerManagerMessageRequest::NetworkRequests( - NetworkRequests::PartialEncodedChunkRequest { target, request, .. }, - ) = request - { - let target_id = self.account_to_client_index[&target.account_id.unwrap()]; - let response = self.get_partial_encoded_chunk_response(target_id, request); - if let Some(response) = response { - self.shards_manager_adapters[id].send( - ShardsManagerRequestFromNetwork::ProcessPartialEncodedChunkResponse { - partial_encoded_chunk_response: response, - received_time: Instant::now(), - }, - ); - } - } else { - panic!("The request is not a PartialEncodedChunk request {:?}", request); - } - } - - pub fn get_partial_encoded_chunk_response( - &mut self, - id: usize, - request: PartialEncodedChunkRequestMsg, - ) -> Option { - self.shards_manager_adapters[id].send( - ShardsManagerRequestFromNetwork::ProcessPartialEncodedChunkRequest { - partial_encoded_chunk_request: request.clone(), - route_back: CryptoHash::default(), - }, - ); - let response = self.network_adapters[id].pop_most_recent(); - match response { - Some(PeerManagerMessageRequest::NetworkRequests( - NetworkRequests::PartialEncodedChunkResponse { route_back: _, response }, - )) => return Some(response), - Some(response) => { - self.network_adapters[id].put_back_most_recent(response); - } - None => {} - } - - panic!( - "Failed to process PartialEncodedChunkRequest from shards manager {}: {:?}", - id, request - ); - } - - pub fn 
process_shards_manager_responses(&mut self, id: usize) -> bool { - let mut any_processed = false; - while let Some(msg) = self.client_adapters[id].pop() { - match msg { - ShardsManagerResponse::ChunkCompleted { partial_chunk, shard_chunk } => { - self.clients[id].on_chunk_completed( - partial_chunk, - shard_chunk, - Arc::new(|_| {}), - ); - } - ShardsManagerResponse::InvalidChunk(encoded_chunk) => { - self.clients[id].on_invalid_chunk(encoded_chunk); - } - ShardsManagerResponse::ChunkHeaderReadyForInclusion { - chunk_header, - chunk_producer, - } => { - self.clients[id] - .on_chunk_header_ready_for_inclusion(chunk_header, chunk_producer); - } - } - any_processed = true; - } - any_processed - } - - pub fn process_shards_manager_responses_and_finish_processing_blocks(&mut self, idx: usize) { - let _span = - tracing::debug_span!(target: "test", "process_shards_manager", client=idx).entered(); - - loop { - self.process_shards_manager_responses(idx); - if self.clients[idx].finish_blocks_in_processing().is_empty() { - return; - } - } - } - - pub fn send_money(&mut self, id: usize) -> ProcessTxResponse { - let account_id = self.get_client_id(0); - let signer = - InMemorySigner::from_seed(account_id.clone(), KeyType::ED25519, account_id.as_ref()); - let tx = SignedTransaction::send_money( - 1, - account_id.clone(), - account_id.clone(), - &signer, - 100, - self.clients[id].chain.head().unwrap().last_block_hash, - ); - self.clients[id].process_tx(tx, false, false) - } - - /// This function will actually bump to the latest protocol version instead of the provided one. - /// See /~https://github.com/near/nearcore/issues/8590 for details. 
- pub fn upgrade_protocol(&mut self, protocol_version: ProtocolVersion) { - assert_eq!(self.clients.len(), 1, "at the moment, this support only a single client"); - - let tip = self.clients[0].chain.head().unwrap(); - let epoch_id = self.clients[0] - .epoch_manager - .get_epoch_id_from_prev_block(&tip.last_block_hash) - .unwrap(); - let block_producer = - self.clients[0].epoch_manager.get_block_producer(&epoch_id, tip.height).unwrap(); - - let mut block = self.clients[0].produce_block(tip.height + 1).unwrap().unwrap(); - eprintln!("Producing block with version {protocol_version}"); - block.mut_header().set_latest_protocol_version(protocol_version); - block.mut_header().resign(&create_test_signer(block_producer.as_str())); - - let _ = self.clients[0] - .process_block_test_no_produce_chunk(block.into(), Provenance::NONE) - .unwrap(); - - for i in 0..self.clients[0].chain.epoch_length * 2 { - self.produce_block(0, tip.height + i + 2); - } - } - - pub fn query_account(&mut self, account_id: AccountId) -> AccountView { - let head = self.clients[0].chain.head().unwrap(); - let last_block = self.clients[0].chain.get_block(&head.last_block_hash).unwrap(); - let last_chunk_header = &last_block.chunks()[0]; - let response = self.clients[0] - .runtime_adapter - .query( - ShardUId::single_shard(), - &last_chunk_header.prev_state_root(), - last_block.header().height(), - last_block.header().raw_timestamp(), - last_block.header().prev_hash(), - last_block.header().hash(), - last_block.header().epoch_id(), - &QueryRequest::ViewAccount { account_id }, - ) - .unwrap(); - match response.kind { - QueryResponseKind::ViewAccount(account_view) => account_view, - _ => panic!("Wrong return value"), - } - } - - pub fn query_state(&mut self, account_id: AccountId) -> Vec { - let head = self.clients[0].chain.head().unwrap(); - let last_block = self.clients[0].chain.get_block(&head.last_block_hash).unwrap(); - let last_chunk_header = &last_block.chunks()[0]; - let response = self.clients[0] - 
.runtime_adapter - .query( - ShardUId::single_shard(), - &last_chunk_header.prev_state_root(), - last_block.header().height(), - last_block.header().raw_timestamp(), - last_block.header().prev_hash(), - last_block.header().hash(), - last_block.header().epoch_id(), - &QueryRequest::ViewState { - account_id, - prefix: vec![].into(), - include_proof: false, - }, - ) - .unwrap(); - match response.kind { - QueryResponseKind::ViewState(view_state_result) => view_state_result.values, - _ => panic!("Wrong return value"), - } - } - - pub fn query_balance(&mut self, account_id: AccountId) -> Balance { - self.query_account(account_id).amount - } - - /// Restarts client at given index. Note that the new client reuses runtime - /// adapter of old client. - /// TODO (#8269): create new `KeyValueRuntime` for new client. Currently it - /// doesn't work because `KeyValueRuntime` misses info about new epochs in - /// memory caches. - /// Though, it seems that it is not necessary for current use cases. - pub fn restart(&mut self, idx: usize) { - let account_id = self.get_client_id(idx).clone(); - let rng_seed = match self.seeds.get(&account_id) { - Some(seed) => *seed, - None => TEST_SEED, - }; - let vs = ValidatorSchedule::new().block_producers_per_epoch(vec![self.validators.clone()]); - let num_validator_seats = vs.all_block_producers().count() as NumSeats; - self.clients[idx] = setup_client_with_runtime( - num_validator_seats, - Some(self.get_client_id(idx).clone()), - false, - self.network_adapters[idx].clone().into(), - self.shards_manager_adapters[idx].clone(), - self.chain_genesis.clone(), - self.clients[idx].epoch_manager.clone(), - self.clients[idx].shard_tracker.clone(), - self.clients[idx].runtime_adapter.clone(), - rng_seed, - self.archive, - self.save_trie_changes, - None, - ) - } - - /// Returns an [`AccountId`] used by a client at given index. More - /// specifically, returns validator id of the client’s validator signer. 
- pub fn get_client_id(&self, idx: usize) -> &AccountId { - self.clients[idx].validator_signer.as_ref().unwrap().validator_id() - } - - pub fn get_runtime_config(&self, idx: usize, epoch_id: EpochId) -> RuntimeConfig { - self.clients[idx].runtime_adapter.get_protocol_config(&epoch_id).unwrap().runtime_config - } - - /// Create and sign transaction ready for execution. - pub fn tx_from_actions( - &mut self, - actions: Vec, - signer: &InMemorySigner, - receiver: AccountId, - ) -> SignedTransaction { - let tip = self.clients[0].chain.head().unwrap(); - SignedTransaction::from_actions( - tip.height + 1, - signer.account_id.clone(), - receiver, - signer, - actions, - tip.last_block_hash, - ) - } - - /// Wrap actions in a delegate action, put it in a transaction, sign. - pub fn meta_tx_from_actions( - &mut self, - actions: Vec, - sender: AccountId, - relayer: AccountId, - receiver_id: AccountId, - ) -> SignedTransaction { - let inner_signer = InMemorySigner::from_seed(sender.clone(), KeyType::ED25519, &sender); - let relayer_signer = InMemorySigner::from_seed(relayer.clone(), KeyType::ED25519, &relayer); - let tip = self.clients[0].chain.head().unwrap(); - let user_nonce = tip.height + 1; - let relayer_nonce = tip.height + 1; - let delegate_action = DelegateAction { - sender_id: inner_signer.account_id.clone(), - receiver_id, - actions: actions - .into_iter() - .map(|action| NonDelegateAction::try_from(action).unwrap()) - .collect(), - nonce: user_nonce, - max_block_height: tip.height + 100, - public_key: inner_signer.public_key(), - }; - let signature = inner_signer.sign(delegate_action.get_nep461_hash().as_bytes()); - let signed_delegate_action = SignedDelegateAction { delegate_action, signature }; - SignedTransaction::from_actions( - relayer_nonce, - relayer, - sender, - &relayer_signer, - vec![Action::Delegate(Box::new(signed_delegate_action))], - tip.last_block_hash, - ) - } - - /// Process a tx and its receipts, then return the execution outcome. 
- pub fn execute_tx( - &mut self, - tx: SignedTransaction, - ) -> Result { - let tx_hash = tx.get_hash(); - let response = self.clients[0].process_tx(tx, false, false); - // Check if the transaction got rejected - match response { - ProcessTxResponse::NoResponse - | ProcessTxResponse::RequestRouted - | ProcessTxResponse::ValidTx => (), - ProcessTxResponse::InvalidTx(e) => return Err(e), - ProcessTxResponse::DoesNotTrackShard => panic!("test setup is buggy"), - } - let max_iters = 100; - let tip = self.clients[0].chain.head().unwrap(); - for i in 0..max_iters { - let block = self.clients[0].produce_block(tip.height + i + 1).unwrap().unwrap(); - self.process_block(0, block.clone(), Provenance::PRODUCED); - if let Ok(outcome) = self.clients[0].chain.get_final_transaction_result(&tx_hash) { - return Ok(outcome); - } - } - panic!("No transaction outcome found after {max_iters} blocks.") - } - - /// Execute a function call transaction that calls main on the `TestEnv`. - /// - /// This function assumes that account has been deployed and that - /// `InMemorySigner::from_seed` produces a valid signer that has it's key - /// deployed already. 
- pub fn call_main(&mut self, account: &AccountId) -> FinalExecutionOutcomeView { - let signer = InMemorySigner::from_seed(account.clone(), KeyType::ED25519, account.as_str()); - let actions = vec![Action::FunctionCall(Box::new(FunctionCallAction { - method_name: "main".to_string(), - args: vec![], - gas: 3 * 10u64.pow(14), - deposit: 0, - }))]; - let tx = self.tx_from_actions(actions, &signer, signer.account_id.clone()); - self.execute_tx(tx).unwrap() - } -} - -impl Drop for TestEnv { - fn drop(&mut self) { - let paused_blocks = self.paused_blocks.lock().unwrap(); - for cell in paused_blocks.values() { - let _ = cell.set(()); - } - if !paused_blocks.is_empty() && !std::thread::panicking() { - panic!("some blocks are still paused, did you call `resume_block_processing`?") - } - } -} - -pub fn create_chunk_on_height_for_shard( - client: &mut Client, - next_height: BlockHeight, - shard_id: ShardId, -) -> (EncodedShardChunk, Vec, Vec) { - let last_block_hash = client.chain.head().unwrap().last_block_hash; - let last_block = client.chain.get_block(&last_block_hash).unwrap(); - client - .produce_chunk( - last_block_hash, - &client.epoch_manager.get_epoch_id_from_prev_block(&last_block_hash).unwrap(), - Chain::get_prev_chunk_header(client.epoch_manager.as_ref(), &last_block, shard_id) - .unwrap(), - next_height, - shard_id, - ) - .unwrap() - .unwrap() -} - -pub fn create_chunk_on_height( - client: &mut Client, - next_height: BlockHeight, -) -> (EncodedShardChunk, Vec, Vec) { - create_chunk_on_height_for_shard(client, next_height, 0) -} - -pub fn create_chunk_with_transactions( - client: &mut Client, - transactions: Vec, -) -> (EncodedShardChunk, Vec, Vec, Block) { - create_chunk(client, Some(transactions), None) -} - -/// Create a chunk with specified transactions and possibly a new state root. -/// Useful for writing tests with challenges. 
-pub fn create_chunk( - client: &mut Client, - replace_transactions: Option>, - replace_tx_root: Option, -) -> (EncodedShardChunk, Vec, Vec, Block) { - let last_block = client.chain.get_block_by_height(client.chain.head().unwrap().height).unwrap(); - let next_height = last_block.header().height() + 1; - let (mut chunk, mut merkle_paths, receipts) = client - .produce_chunk( - *last_block.hash(), - last_block.header().epoch_id(), - last_block.chunks()[0].clone(), - next_height, - 0, - ) - .unwrap() - .unwrap(); - let should_replace = replace_transactions.is_some() || replace_tx_root.is_some(); - let transactions = replace_transactions.unwrap_or_else(Vec::new); - let tx_root = match replace_tx_root { - Some(root) => root, - None => merklize(&transactions).0, - }; - // reconstruct the chunk with changes (if any) - if should_replace { - // The best way it to decode chunk, replace transactions and then recreate encoded chunk. - let total_parts = client.chain.epoch_manager.num_total_parts(); - let data_parts = client.chain.epoch_manager.num_data_parts(); - let decoded_chunk = chunk.decode_chunk(data_parts).unwrap(); - let parity_parts = total_parts - data_parts; - let mut rs = ReedSolomonWrapper::new(data_parts, parity_parts); - - let signer = client.validator_signer.as_ref().unwrap().clone(); - let header = chunk.cloned_header(); - let (mut encoded_chunk, mut new_merkle_paths) = EncodedShardChunk::new( - *header.prev_block_hash(), - header.prev_state_root(), - header.prev_outcome_root(), - header.height_created(), - header.shard_id(), - &mut rs, - header.prev_gas_used(), - header.gas_limit(), - header.prev_balance_burnt(), - tx_root, - header.prev_validator_proposals().collect(), - transactions, - decoded_chunk.prev_outgoing_receipts(), - header.prev_outgoing_receipts_root(), - &*signer, - PROTOCOL_VERSION, - ) - .unwrap(); - swap(&mut chunk, &mut encoded_chunk); - swap(&mut merkle_paths, &mut new_merkle_paths); - } - match &mut chunk { - EncodedShardChunk::V1(chunk) => 
{ - chunk.header.height_included = next_height; - } - EncodedShardChunk::V2(chunk) => { - *chunk.header.height_included_mut() = next_height; - } - } - let block_merkle_tree = client.chain.store().get_block_merkle_tree(last_block.hash()).unwrap(); - let mut block_merkle_tree = PartialMerkleTree::clone(&block_merkle_tree); - block_merkle_tree.insert(*last_block.hash()); - let block = Block::produce( - PROTOCOL_VERSION, - PROTOCOL_VERSION, - last_block.header(), - next_height, - last_block.header().block_ordinal() + 1, - vec![chunk.cloned_header()], - last_block.header().epoch_id().clone(), - last_block.header().next_epoch_id().clone(), - None, - vec![], - Ratio::new(0, 1), - 0, - 100, - None, - vec![], - vec![], - &*client.validator_signer.as_ref().unwrap().clone(), - *last_block.header().next_bp_hash(), - block_merkle_tree.root(), - None, - ); - (chunk, merkle_paths, receipts, block) -} - -/// Keep running catchup until there is no more catchup work that can be done -/// Note that this function does not necessarily mean that all blocks are caught up. -/// It's possible that some blocks that need to be caught up are still being processed -/// and the catchup process can't catch up on these blocks yet. 
-pub fn run_catchup( - client: &mut Client, - highest_height_peers: &[HighestHeightPeerInfo], -) -> Result<(), Error> { - let f = |_| {}; - let block_messages = Arc::new(RwLock::new(vec![])); - let block_inside_messages = block_messages.clone(); - let block_catch_up = move |msg: BlockCatchUpRequest| { - block_inside_messages.write().unwrap().push(msg); - }; - let state_split_messages = Arc::new(RwLock::new(vec![])); - let state_split_inside_messages = state_split_messages.clone(); - let state_split = move |msg: StateSplitRequest| { - state_split_inside_messages.write().unwrap().push(msg); - }; - let _ = System::new(); - let state_parts_arbiter_handle = Arbiter::new().handle(); - loop { - client.run_catchup( - highest_height_peers, - &f, - &block_catch_up, - &state_split, - Arc::new(|_| {}), - &state_parts_arbiter_handle, - )?; - let mut catchup_done = true; - for msg in block_messages.write().unwrap().drain(..) { - let results = do_apply_chunks(msg.block_hash, msg.block_height, msg.work); - if let Some((_, _, blocks_catch_up_state)) = - client.catchup_state_syncs.get_mut(&msg.sync_hash) - { - assert!(blocks_catch_up_state.scheduled_blocks.remove(&msg.block_hash)); - blocks_catch_up_state.processed_blocks.insert(msg.block_hash, results); - } else { - panic!("block catch up processing result from unknown sync hash"); - } - catchup_done = false; - } - for msg in state_split_messages.write().unwrap().drain(..) 
{ - let response = Chain::build_state_for_split_shards(msg); - if let Some((sync, _, _)) = client.catchup_state_syncs.get_mut(&response.sync_hash) { - // We are doing catchup - sync.set_split_result(response.shard_id, response.new_state_roots); - } else { - client.state_sync.set_split_result(response.shard_id, response.new_state_roots); - } - catchup_done = false; - } - if catchup_done { - break; - } - } - Ok(()) -} diff --git a/chain/client/src/test_utils/block_stats.rs b/chain/client/src/test_utils/block_stats.rs new file mode 100644 index 00000000000..046df8dcd45 --- /dev/null +++ b/chain/client/src/test_utils/block_stats.rs @@ -0,0 +1,107 @@ +use std::cmp::max; +use std::collections::HashMap; +use std::time::{Duration, Instant}; + +use near_primitives::block::Block; +use near_primitives::hash::CryptoHash; +use near_primitives::static_clock::StaticClock; +use tracing::info; + +pub struct BlockStats { + hash2depth: HashMap, + num_blocks: u64, + max_chain_length: u64, + last_check: Instant, + max_divergence: u64, + last_hash: Option, + parent: HashMap, +} + +impl BlockStats { + pub(crate) fn new() -> BlockStats { + BlockStats { + hash2depth: HashMap::new(), + num_blocks: 0, + max_chain_length: 0, + last_check: StaticClock::instant(), + max_divergence: 0, + last_hash: None, + parent: HashMap::new(), + } + } + + fn calculate_distance(&mut self, mut lhs: CryptoHash, mut rhs: CryptoHash) -> u64 { + let mut dlhs = *self.hash2depth.get(&lhs).unwrap(); + let mut drhs = *self.hash2depth.get(&rhs).unwrap(); + + let mut result: u64 = 0; + while dlhs > drhs { + lhs = *self.parent.get(&lhs).unwrap(); + dlhs -= 1; + result += 1; + } + while dlhs < drhs { + rhs = *self.parent.get(&rhs).unwrap(); + drhs -= 1; + result += 1; + } + while lhs != rhs { + lhs = *self.parent.get(&lhs).unwrap(); + rhs = *self.parent.get(&rhs).unwrap(); + result += 2; + } + result + } + + pub(crate) fn add_block(&mut self, block: &Block) { + if self.hash2depth.contains_key(block.hash()) { + return; + } 
+ let prev_height = self.hash2depth.get(block.header().prev_hash()).map(|v| *v).unwrap_or(0); + self.hash2depth.insert(*block.hash(), prev_height + 1); + self.num_blocks += 1; + self.max_chain_length = max(self.max_chain_length, prev_height + 1); + self.parent.insert(*block.hash(), *block.header().prev_hash()); + + if let Some(last_hash2) = self.last_hash { + self.max_divergence = + max(self.max_divergence, self.calculate_distance(last_hash2, *block.hash())); + } + + self.last_hash = Some(*block.hash()); + } + + pub fn check_stats(&mut self, force: bool) { + let now = StaticClock::instant(); + let diff = now.duration_since(self.last_check); + if !force && diff.lt(&Duration::from_secs(60)) { + return; + } + self.last_check = now; + let cur_ratio = (self.num_blocks as f64) / (max(1, self.max_chain_length) as f64); + info!( + "Block stats: ratio: {:.2}, num_blocks: {} max_chain_length: {} max_divergence: {}", + cur_ratio, self.num_blocks, self.max_chain_length, self.max_divergence + ); + } + + pub fn check_block_ratio(&mut self, min_ratio: Option, max_ratio: Option) { + let cur_ratio = (self.num_blocks as f64) / (max(1, self.max_chain_length) as f64); + if let Some(min_ratio2) = min_ratio { + if cur_ratio < min_ratio2 { + panic!( + "ratio of blocks to longest chain is too low got: {:.2} expected: {:.2}", + cur_ratio, min_ratio2 + ); + } + } + if let Some(max_ratio2) = max_ratio { + if cur_ratio > max_ratio2 { + panic!( + "ratio of blocks to longest chain is too high got: {:.2} expected: {:.2}", + cur_ratio, max_ratio2 + ); + } + } + } +} diff --git a/chain/client/src/test_utils/client.rs b/chain/client/src/test_utils/client.rs new file mode 100644 index 00000000000..fc047e1d9b2 --- /dev/null +++ b/chain/client/src/test_utils/client.rs @@ -0,0 +1,271 @@ +// FIXME(nagisa): Is there a good reason we're triggering this? Luckily though this is just test +// code so we're in the clear. 
+#![allow(clippy::arc_with_non_send_sync)] + +use std::mem::swap; +use std::sync::{Arc, RwLock}; + +use crate::Client; +use actix_rt::{Arbiter, System}; +use near_chain::chain::{do_apply_chunks, BlockCatchUpRequest}; +use near_chain::resharding::StateSplitRequest; +use near_chain::test_utils::{wait_for_all_blocks_in_processing, wait_for_block_in_processing}; +use near_chain::{Chain, ChainStoreAccess, Provenance}; +use near_client_primitives::types::Error; +use near_network::types::HighestHeightPeerInfo; +use near_primitives::block::Block; +use near_primitives::hash::CryptoHash; +use near_primitives::merkle::{merklize, MerklePath, PartialMerkleTree}; +use near_primitives::receipt::Receipt; +use near_primitives::sharding::{EncodedShardChunk, ReedSolomonWrapper}; +use near_primitives::transaction::SignedTransaction; +use near_primitives::types::{BlockHeight, ShardId}; +use near_primitives::utils::MaybeValidated; +use near_primitives::version::PROTOCOL_VERSION; +use num_rational::Ratio; + +impl Client { + /// Unlike Client::start_process_block, which returns before the block finishes processing + /// This function waits until the block is processed. + /// `should_produce_chunk`: Normally, if a block is accepted, client will try to produce + /// chunks for the next block if it is the chunk producer. + /// If `should_produce_chunk` is set to false, client will skip the + /// chunk production. This is useful in tests that need to tweak + /// the produced chunk content. 
+ fn process_block_sync_with_produce_chunk_options( + &mut self, + block: MaybeValidated, + provenance: Provenance, + should_produce_chunk: bool, + ) -> Result, near_chain::Error> { + self.start_process_block(block, provenance, Arc::new(|_| {}))?; + wait_for_all_blocks_in_processing(&mut self.chain); + let (accepted_blocks, errors) = + self.postprocess_ready_blocks(Arc::new(|_| {}), should_produce_chunk); + assert!(errors.is_empty(), "unexpected errors when processing blocks: {errors:#?}"); + Ok(accepted_blocks) + } + + pub fn process_block_test( + &mut self, + block: MaybeValidated, + provenance: Provenance, + ) -> Result, near_chain::Error> { + self.process_block_sync_with_produce_chunk_options(block, provenance, true) + } + + pub fn process_block_test_no_produce_chunk( + &mut self, + block: MaybeValidated, + provenance: Provenance, + ) -> Result, near_chain::Error> { + self.process_block_sync_with_produce_chunk_options(block, provenance, false) + } + + /// This function finishes processing all blocks that started being processed. + pub fn finish_blocks_in_processing(&mut self) -> Vec { + let mut accepted_blocks = vec![]; + while wait_for_all_blocks_in_processing(&mut self.chain) { + accepted_blocks.extend(self.postprocess_ready_blocks(Arc::new(|_| {}), true).0); + } + accepted_blocks + } + + /// This function finishes processing block with hash `hash`, if the processing of that block + /// has started. 
+ pub fn finish_block_in_processing(&mut self, hash: &CryptoHash) -> Vec { + if let Ok(()) = wait_for_block_in_processing(&mut self.chain, hash) { + let (accepted_blocks, _) = self.postprocess_ready_blocks(Arc::new(|_| {}), true); + return accepted_blocks; + } + vec![] + } +} + +fn create_chunk_on_height_for_shard( + client: &mut Client, + next_height: BlockHeight, + shard_id: ShardId, +) -> (EncodedShardChunk, Vec, Vec) { + let last_block_hash = client.chain.head().unwrap().last_block_hash; + let last_block = client.chain.get_block(&last_block_hash).unwrap(); + client + .produce_chunk( + last_block_hash, + &client.epoch_manager.get_epoch_id_from_prev_block(&last_block_hash).unwrap(), + Chain::get_prev_chunk_header(client.epoch_manager.as_ref(), &last_block, shard_id) + .unwrap(), + next_height, + shard_id, + ) + .unwrap() + .unwrap() +} + +pub fn create_chunk_on_height( + client: &mut Client, + next_height: BlockHeight, +) -> (EncodedShardChunk, Vec, Vec) { + create_chunk_on_height_for_shard(client, next_height, 0) +} + +pub fn create_chunk_with_transactions( + client: &mut Client, + transactions: Vec, +) -> (EncodedShardChunk, Vec, Vec, Block) { + create_chunk(client, Some(transactions), None) +} + +/// Create a chunk with specified transactions and possibly a new state root. +/// Useful for writing tests with challenges. 
+pub fn create_chunk( + client: &mut Client, + replace_transactions: Option>, + replace_tx_root: Option, +) -> (EncodedShardChunk, Vec, Vec, Block) { + let last_block = client.chain.get_block_by_height(client.chain.head().unwrap().height).unwrap(); + let next_height = last_block.header().height() + 1; + let (mut chunk, mut merkle_paths, receipts) = client + .produce_chunk( + *last_block.hash(), + last_block.header().epoch_id(), + last_block.chunks()[0].clone(), + next_height, + 0, + ) + .unwrap() + .unwrap(); + let should_replace = replace_transactions.is_some() || replace_tx_root.is_some(); + let transactions = replace_transactions.unwrap_or_else(Vec::new); + let tx_root = match replace_tx_root { + Some(root) => root, + None => merklize(&transactions).0, + }; + // reconstruct the chunk with changes (if any) + if should_replace { + // The best way it to decode chunk, replace transactions and then recreate encoded chunk. + let total_parts = client.chain.epoch_manager.num_total_parts(); + let data_parts = client.chain.epoch_manager.num_data_parts(); + let decoded_chunk = chunk.decode_chunk(data_parts).unwrap(); + let parity_parts = total_parts - data_parts; + let mut rs = ReedSolomonWrapper::new(data_parts, parity_parts); + + let signer = client.validator_signer.as_ref().unwrap().clone(); + let header = chunk.cloned_header(); + let (mut encoded_chunk, mut new_merkle_paths) = EncodedShardChunk::new( + *header.prev_block_hash(), + header.prev_state_root(), + header.prev_outcome_root(), + header.height_created(), + header.shard_id(), + &mut rs, + header.prev_gas_used(), + header.gas_limit(), + header.prev_balance_burnt(), + tx_root, + header.prev_validator_proposals().collect(), + transactions, + decoded_chunk.prev_outgoing_receipts(), + header.prev_outgoing_receipts_root(), + &*signer, + PROTOCOL_VERSION, + ) + .unwrap(); + swap(&mut chunk, &mut encoded_chunk); + swap(&mut merkle_paths, &mut new_merkle_paths); + } + match &mut chunk { + EncodedShardChunk::V1(chunk) => 
{ + chunk.header.height_included = next_height; + } + EncodedShardChunk::V2(chunk) => { + *chunk.header.height_included_mut() = next_height; + } + } + let block_merkle_tree = client.chain.store().get_block_merkle_tree(last_block.hash()).unwrap(); + let mut block_merkle_tree = PartialMerkleTree::clone(&block_merkle_tree); + block_merkle_tree.insert(*last_block.hash()); + let block = Block::produce( + PROTOCOL_VERSION, + PROTOCOL_VERSION, + last_block.header(), + next_height, + last_block.header().block_ordinal() + 1, + vec![chunk.cloned_header()], + last_block.header().epoch_id().clone(), + last_block.header().next_epoch_id().clone(), + None, + vec![], + Ratio::new(0, 1), + 0, + 100, + None, + vec![], + vec![], + &*client.validator_signer.as_ref().unwrap().clone(), + *last_block.header().next_bp_hash(), + block_merkle_tree.root(), + None, + ); + (chunk, merkle_paths, receipts, block) +} + +/// Keep running catchup until there is no more catchup work that can be done +/// Note that this function does not necessarily mean that all blocks are caught up. +/// It's possible that some blocks that need to be caught up are still being processed +/// and the catchup process can't catch up on these blocks yet. 
+pub fn run_catchup( + client: &mut Client, + highest_height_peers: &[HighestHeightPeerInfo], +) -> Result<(), Error> { + let f = |_| {}; + let block_messages = Arc::new(RwLock::new(vec![])); + let block_inside_messages = block_messages.clone(); + let block_catch_up = move |msg: BlockCatchUpRequest| { + block_inside_messages.write().unwrap().push(msg); + }; + let state_split_messages = Arc::new(RwLock::new(vec![])); + let state_split_inside_messages = state_split_messages.clone(); + let state_split = move |msg: StateSplitRequest| { + state_split_inside_messages.write().unwrap().push(msg); + }; + let _ = System::new(); + let state_parts_arbiter_handle = Arbiter::new().handle(); + loop { + client.run_catchup( + highest_height_peers, + &f, + &block_catch_up, + &state_split, + Arc::new(|_| {}), + &state_parts_arbiter_handle, + )?; + let mut catchup_done = true; + for msg in block_messages.write().unwrap().drain(..) { + let results = do_apply_chunks(msg.block_hash, msg.block_height, msg.work); + if let Some((_, _, blocks_catch_up_state)) = + client.catchup_state_syncs.get_mut(&msg.sync_hash) + { + assert!(blocks_catch_up_state.scheduled_blocks.remove(&msg.block_hash)); + blocks_catch_up_state.processed_blocks.insert(msg.block_hash, results); + } else { + panic!("block catch up processing result from unknown sync hash"); + } + catchup_done = false; + } + for msg in state_split_messages.write().unwrap().drain(..) 
{ + let response = Chain::build_state_for_split_shards(msg); + if let Some((sync, _, _)) = client.catchup_state_syncs.get_mut(&response.sync_hash) { + // We are doing catchup + sync.set_split_result(response.shard_id, response.new_state_roots); + } else { + client.state_sync.set_split_result(response.shard_id, response.new_state_roots); + } + catchup_done = false; + } + if catchup_done { + break; + } + } + Ok(()) +} diff --git a/chain/client/src/test_utils/mod.rs b/chain/client/src/test_utils/mod.rs new file mode 100644 index 00000000000..e2c3081bfe5 --- /dev/null +++ b/chain/client/src/test_utils/mod.rs @@ -0,0 +1,13 @@ +pub mod block_stats; +pub mod client; +pub mod peer_manager_mock; +pub mod setup; +pub mod test_env; +pub mod test_env_builder; + +pub use block_stats::*; +pub use client::*; +pub use peer_manager_mock::*; +pub use setup::*; +pub use test_env::*; +pub use test_env_builder::*; diff --git a/chain/client/src/test_utils/peer_manager_mock.rs b/chain/client/src/test_utils/peer_manager_mock.rs new file mode 100644 index 00000000000..0f6c9031890 --- /dev/null +++ b/chain/client/src/test_utils/peer_manager_mock.rs @@ -0,0 +1,39 @@ +use near_network::types::SetChainInfo; +use near_network::types::{PeerManagerMessageRequest, PeerManagerMessageResponse}; + +pub struct PeerManagerMock { + handle: Box< + dyn FnMut( + PeerManagerMessageRequest, + &mut actix::Context, + ) -> PeerManagerMessageResponse, + >, +} + +impl PeerManagerMock { + pub(crate) fn new( + f: impl 'static + + FnMut( + PeerManagerMessageRequest, + &mut actix::Context, + ) -> PeerManagerMessageResponse, + ) -> Self { + Self { handle: Box::new(f) } + } +} + +impl actix::Actor for PeerManagerMock { + type Context = actix::Context; +} + +impl actix::Handler for PeerManagerMock { + type Result = PeerManagerMessageResponse; + fn handle(&mut self, msg: PeerManagerMessageRequest, ctx: &mut Self::Context) -> Self::Result { + (self.handle)(msg, ctx) + } +} + +impl actix::Handler for PeerManagerMock { + 
type Result = (); + fn handle(&mut self, _msg: SetChainInfo, _ctx: &mut Self::Context) {} +} diff --git a/chain/client/src/test_utils/setup.rs b/chain/client/src/test_utils/setup.rs new file mode 100644 index 00000000000..cd64c2d4887 --- /dev/null +++ b/chain/client/src/test_utils/setup.rs @@ -0,0 +1,1099 @@ +// FIXME(nagisa): Is there a good reason we're triggering this? Luckily though this is just test +// code so we're in the clear. +#![allow(clippy::arc_with_non_send_sync)] + +use std::cmp::max; +use std::collections::{HashMap, HashSet}; +use std::ops::DerefMut; +use std::sync::{Arc, RwLock}; +use std::time::{Duration, Instant}; + +use crate::adapter::{ + AnnounceAccountRequest, BlockApproval, BlockHeadersRequest, BlockHeadersResponse, BlockRequest, + BlockResponse, SetNetworkInfo, StateRequestHeader, StateRequestPart, +}; +use crate::{start_view_client, Client, ClientActor, SyncStatus, ViewClientActor}; +use actix::{Actor, Addr, AsyncContext, Context}; +use chrono::DateTime; +use chrono::Utc; +use futures::{future, FutureExt}; +use near_async::actix::AddrWithAutoSpanContextExt; +use near_async::messaging::{CanSend, IntoSender, LateBoundSender, Sender}; +use near_async::time; +use near_chain::state_snapshot_actor::MakeSnapshotCallback; +use near_chain::test_utils::{KeyValueRuntime, MockEpochManager, ValidatorSchedule}; +use near_chain::types::{ChainConfig, RuntimeAdapter}; +use near_chain::{Chain, ChainGenesis, DoomslugThresholdMode}; +use near_chain_configs::ClientConfig; +use near_chunks::adapter::ShardsManagerRequestFromClient; +use near_chunks::client::ShardsManagerResponse; +use near_chunks::shards_manager_actor::start_shards_manager; +use near_chunks::test_utils::SynchronousShardsManagerAdapter; +use near_chunks::ShardsManager; +use near_crypto::{KeyType, PublicKey}; +use near_epoch_manager::shard_tracker::ShardTracker; +use near_epoch_manager::EpochManagerAdapter; +use near_network::shards_manager::ShardsManagerRequestFromNetwork; +use 
near_network::types::{AccountOrPeerIdOrHash, PeerInfo, PeerType}; +use near_network::types::{BlockInfo, PeerChainInfo}; +use near_network::types::{ + ConnectedPeerInfo, FullPeerInfo, NetworkRequests, NetworkResponses, PeerManagerAdapter, +}; +use near_network::types::{NetworkInfo, PeerManagerMessageRequest, PeerManagerMessageResponse}; +use near_o11y::WithSpanContextExt; +use near_primitives::block::{ApprovalInner, Block, GenesisId}; +use near_primitives::epoch_manager::RngSeed; +use near_primitives::hash::{hash, CryptoHash}; +use near_primitives::network::PeerId; +use near_primitives::static_clock::StaticClock; +use near_primitives::test_utils::create_test_signer; +use near_primitives::types::{AccountId, BlockHeightDelta, NumBlocks, NumSeats}; +use near_primitives::validator_signer::ValidatorSigner; +use near_primitives::version::PROTOCOL_VERSION; +use near_store::test_utils::create_test_store; +use near_store::Store; +use near_telemetry::TelemetryActor; +use num_rational::Ratio; +use once_cell::sync::OnceCell; +use rand::{thread_rng, Rng}; + +use super::block_stats::BlockStats; +use super::peer_manager_mock::PeerManagerMock; + +pub const TEST_SEED: RngSeed = [3; 32]; + +/// min block production time in milliseconds +pub const MIN_BLOCK_PROD_TIME: Duration = Duration::from_millis(100); +/// max block production time in milliseconds +pub const MAX_BLOCK_PROD_TIME: Duration = Duration::from_millis(200); + +/// Sets up ClientActor and ViewClientActor viewing the same store/runtime. 
+pub fn setup( + vs: ValidatorSchedule, + epoch_length: BlockHeightDelta, + account_id: AccountId, + skip_sync_wait: bool, + min_block_prod_time: u64, + max_block_prod_time: u64, + enable_doomslug: bool, + archive: bool, + epoch_sync_enabled: bool, + state_sync_enabled: bool, + network_adapter: PeerManagerAdapter, + transaction_validity_period: NumBlocks, + genesis_time: DateTime, + ctx: &Context, +) -> (Block, ClientActor, Addr, ShardsManagerAdapterForTest) { + let store = create_test_store(); + let num_validator_seats = vs.all_block_producers().count() as NumSeats; + let epoch_manager = MockEpochManager::new_with_validators(store.clone(), vs, epoch_length); + let shard_tracker = ShardTracker::new_empty(epoch_manager.clone()); + let runtime = KeyValueRuntime::new_with_no_gc(store.clone(), epoch_manager.as_ref(), archive); + let chain_genesis = ChainGenesis { + time: genesis_time, + height: 0, + gas_limit: 1_000_000, + min_gas_price: 100, + max_gas_price: 1_000_000_000, + total_supply: 3_000_000_000_000_000_000_000_000_000_000_000, + gas_price_adjustment_rate: Ratio::from_integer(0), + transaction_validity_period, + epoch_length, + protocol_version: PROTOCOL_VERSION, + }; + let doomslug_threshold_mode = if enable_doomslug { + DoomslugThresholdMode::TwoThirds + } else { + DoomslugThresholdMode::NoApprovals + }; + let chain = Chain::new( + epoch_manager.clone(), + shard_tracker.clone(), + runtime.clone(), + &chain_genesis, + doomslug_threshold_mode, + ChainConfig { + save_trie_changes: true, + background_migration_threads: 1, + state_snapshot_every_n_blocks: None, + }, + None, + ) + .unwrap(); + let genesis_block = chain.get_block(&chain.genesis().hash().clone()).unwrap(); + + let signer = Arc::new(create_test_signer(account_id.as_str())); + let telemetry = TelemetryActor::default().start(); + let config = ClientConfig::test( + skip_sync_wait, + min_block_prod_time, + max_block_prod_time, + num_validator_seats, + archive, + true, + epoch_sync_enabled, + 
state_sync_enabled, + ); + + let adv = crate::adversarial::Controls::default(); + + let view_client_addr = start_view_client( + Some(signer.validator_id().clone()), + chain_genesis.clone(), + epoch_manager.clone(), + shard_tracker.clone(), + runtime.clone(), + network_adapter.clone(), + config.clone(), + adv.clone(), + ); + + let (shards_manager_addr, _) = start_shards_manager( + epoch_manager.clone(), + shard_tracker.clone(), + network_adapter.clone().into_sender(), + ctx.address().with_auto_span_context().into_sender(), + Some(account_id), + store, + config.chunk_request_retry_period, + ); + let shards_manager_adapter = Arc::new(shards_manager_addr); + + let client = Client::new( + config.clone(), + chain_genesis, + epoch_manager, + shard_tracker, + runtime, + network_adapter.clone(), + shards_manager_adapter.as_sender(), + Some(signer.clone()), + enable_doomslug, + TEST_SEED, + None, + ) + .unwrap(); + let client_actor = ClientActor::new( + client, + ctx.address(), + config, + PeerId::new(PublicKey::empty(KeyType::ED25519)), + network_adapter, + Some(signer), + telemetry, + ctx, + None, + adv, + None, + ) + .unwrap(); + (genesis_block, client_actor, view_client_addr, shards_manager_adapter.into()) +} + +pub fn setup_only_view( + vs: ValidatorSchedule, + epoch_length: BlockHeightDelta, + account_id: AccountId, + skip_sync_wait: bool, + min_block_prod_time: u64, + max_block_prod_time: u64, + enable_doomslug: bool, + archive: bool, + epoch_sync_enabled: bool, + state_sync_enabled: bool, + network_adapter: PeerManagerAdapter, + transaction_validity_period: NumBlocks, + genesis_time: DateTime, +) -> Addr { + let store = create_test_store(); + let num_validator_seats = vs.all_block_producers().count() as NumSeats; + let epoch_manager = MockEpochManager::new_with_validators(store.clone(), vs, epoch_length); + let shard_tracker = ShardTracker::new_empty(epoch_manager.clone()); + let runtime = KeyValueRuntime::new_with_no_gc(store, epoch_manager.as_ref(), archive); + let 
chain_genesis = ChainGenesis { + time: genesis_time, + height: 0, + gas_limit: 1_000_000, + min_gas_price: 100, + max_gas_price: 1_000_000_000, + total_supply: 3_000_000_000_000_000_000_000_000_000_000_000, + gas_price_adjustment_rate: Ratio::from_integer(0), + transaction_validity_period, + epoch_length, + protocol_version: PROTOCOL_VERSION, + }; + + let doomslug_threshold_mode = if enable_doomslug { + DoomslugThresholdMode::TwoThirds + } else { + DoomslugThresholdMode::NoApprovals + }; + Chain::new( + epoch_manager.clone(), + shard_tracker.clone(), + runtime.clone(), + &chain_genesis, + doomslug_threshold_mode, + ChainConfig { + save_trie_changes: true, + background_migration_threads: 1, + state_snapshot_every_n_blocks: None, + }, + None, + ) + .unwrap(); + + let signer = Arc::new(create_test_signer(account_id.as_str())); + TelemetryActor::default().start(); + let config = ClientConfig::test( + skip_sync_wait, + min_block_prod_time, + max_block_prod_time, + num_validator_seats, + archive, + true, + epoch_sync_enabled, + state_sync_enabled, + ); + + let adv = crate::adversarial::Controls::default(); + + start_view_client( + Some(signer.validator_id().clone()), + chain_genesis, + epoch_manager, + shard_tracker, + runtime, + network_adapter, + config, + adv, + ) +} + +/// Sets up ClientActor and ViewClientActor with mock PeerManager. 
pub fn setup_mock(
    validators: Vec<AccountId>,
    account_id: AccountId,
    skip_sync_wait: bool,
    enable_doomslug: bool,
    peer_manager_mock: Box<
        dyn FnMut(
            &PeerManagerMessageRequest,
            &mut Context<PeerManagerMock>,
            Addr<ClientActor>,
        ) -> PeerManagerMessageResponse,
    >,
) -> ActorHandlesForTesting {
    // Delegates with a default transaction validity period of 100 blocks.
    setup_mock_with_validity_period_and_no_epoch_sync(
        validators,
        account_id,
        skip_sync_wait,
        enable_doomslug,
        peer_manager_mock,
        100,
    )
}

/// Like `setup_mock`, but with an explicit `transaction_validity_period`.
///
/// Creates a `ClientActor`/`ViewClientActor` pair wired to a `PeerManagerMock`
/// that routes every outgoing network request through `peermanager_mock`.
/// NOTE(review): generic parameters in the signatures of this file were lost in
/// extraction and have been reconstructed from usage — confirm against upstream.
pub fn setup_mock_with_validity_period_and_no_epoch_sync(
    validators: Vec<AccountId>,
    account_id: AccountId,
    skip_sync_wait: bool,
    enable_doomslug: bool,
    mut peermanager_mock: Box<
        dyn FnMut(
            &PeerManagerMessageRequest,
            &mut Context<PeerManagerMock>,
            Addr<ClientActor>,
        ) -> PeerManagerMessageResponse,
    >,
    transaction_validity_period: NumBlocks,
) -> ActorHandlesForTesting {
    // Bound late: the ClientActor must exist before the mock peer manager can
    // route messages back to it.
    let network_adapter = Arc::new(LateBoundSender::default());
    let mut vca: Option<Addr<ViewClientActor>> = None;
    let mut sma: Option<ShardsManagerAdapterForTest> = None;
    let client_addr = ClientActor::create(|ctx: &mut Context<ClientActor>| {
        let vs = ValidatorSchedule::new().block_producers_per_epoch(vec![validators]);
        let (_, client, view_client_addr, shards_manager_adapter) = setup(
            vs,
            10,
            account_id,
            skip_sync_wait,
            MIN_BLOCK_PROD_TIME.as_millis() as u64,
            MAX_BLOCK_PROD_TIME.as_millis() as u64,
            enable_doomslug,
            false,
            false,
            true,
            network_adapter.clone().into(),
            transaction_validity_period,
            StaticClock::utc(),
            ctx,
        );
        // Smuggle the sibling handles out of the actor factory closure.
        vca = Some(view_client_addr);
        sma = Some(shards_manager_adapter);
        client
    });
    let client_addr1 = client_addr.clone();

    let network_actor =
        PeerManagerMock::new(move |msg, ctx| peermanager_mock(&msg, ctx, client_addr1.clone()))
            .start();

    network_adapter.bind(network_actor);

    ActorHandlesForTesting {
        client_actor: client_addr,
        view_client_actor: vca.unwrap(),
        shards_manager_adapter: sma.unwrap(),
    }
}

/// Handles to the actors making up a single test node.
#[derive(Clone)]
pub struct ActorHandlesForTesting {
    pub client_actor: Addr<ClientActor>,
    pub view_client_actor: Addr<ViewClientActor>,
    pub shards_manager_adapter: ShardsManagerAdapterForTest,
}

/// Delivers a chunk-related message to each recipient whose name equals
/// `target`; when `drop_chunks` is set, each delivery is dropped with
/// probability 1/5 to simulate a lossy network.
fn send_chunks<T, I, F>(
    connectors: &[ActorHandlesForTesting],
    recipients: I,
    target: T,
    drop_chunks: bool,
    send_to: F,
) where
    T: Eq,
    I: Iterator<Item = (usize, T)>,
    F: Fn(&ShardsManagerAdapterForTest),
{
    for (i, name) in recipients {
        if name == target {
            if !drop_chunks || !thread_rng().gen_ratio(1, 5) {
                send_to(&connectors[i].shards_manager_adapter);
            }
        }
    }
}

/// Setup multiple clients talking to each other via a mock network.
///
/// # Arguments
///
/// `vs` - the set of validators and how they are assigned to shards in different epochs.
///
/// `key_pairs` - keys for `validators`
///
/// `skip_sync_wait`
///
/// `block_prod_time` - Minimum block production time, assuming there is enough approvals. The
///              maximum block production time depends on the value of `tamper_with_fg`, and is
///              equal to `block_prod_time` if `tamper_with_fg` is `true`, otherwise it is
///              `block_prod_time * 2`
///
/// `drop_chunks` - if set to true, 10% of all the chunk messages / requests will be dropped
///
/// `tamper_with_fg` - if set to true, will split the heights into groups of 100. For some groups
///              all the approvals will be dropped (thus completely disabling the finality gadget
///              and introducing severe forkfulness if `block_prod_time` is sufficiently small),
///              for some groups will keep all the approvals (and test the fg invariants), and
///              for some will drop 50% of the approvals.
///              This was designed to tamper with the finality gadget when we
///              had it, unclear if has much effect today. Must be disabled if doomslug is
///              enabled (see below), because doomslug will stall if approvals are not delivered.
///
/// `epoch_length` - approximate length of the epoch as measured
///              by the block heights difference of it's last and first block.
///
/// `enable_doomslug` - If false, blocks will be created when at least one approval is present,
///              without waiting for 2/3. This allows for more forkfulness.
/// `cross_shard_tx` has modes both with enabled doomslug (to test "production" setting) and with
///              disabled doomslug (to test higher forkfullness)
///
/// `network_mock` - the callback that is called for each message sent. The `mock` is called before
///              the default processing. `mock` returns `(response, perform_default)`. If
///              `perform_default` is false, then the message is not processed or broadcasted
///              further and `response` is returned to the requester immediately. Otherwise
///              the default action is performed, that might (and likely will) overwrite the
///              `response` before it is sent back to the requester.
pub fn setup_mock_all_validators(
    vs: ValidatorSchedule,
    key_pairs: Vec<PeerInfo>,
    skip_sync_wait: bool,
    block_prod_time: u64,
    drop_chunks: bool,
    tamper_with_fg: bool,
    epoch_length: BlockHeightDelta,
    enable_doomslug: bool,
    archive: Vec<bool>,
    epoch_sync_enabled: Vec<bool>,
    check_block_stats: bool,
    peer_manager_mock: Box<
        dyn FnMut(
            // Peer validators
            &[ActorHandlesForTesting],
            // Validator that sends the message
            AccountId,
            // The message itself
            &PeerManagerMessageRequest,
        ) -> (PeerManagerMessageResponse, /* perform default */ bool),
    >,
) -> (Block, Vec<ActorHandlesForTesting>, Arc<RwLock<BlockStats>>) {
    let peer_manager_mock = Arc::new(RwLock::new(peer_manager_mock));
    let validators = vs.all_validators().cloned().collect::<Vec<_>>();
    let key_pairs = key_pairs;

    // Deterministic fake network addresses, one per peer.
    let addresses: Vec<_> = (0..key_pairs.len()).map(|i| hash(vec![i as u8].as_ref())).collect();
    let genesis_time = StaticClock::utc();
    let mut ret = vec![];

    // Filled in once, after every actor is created; the per-node network mocks
    // block on it (see `.wait()` below).
    let connectors: Arc<OnceCell<Vec<ActorHandlesForTesting>>> = Default::default();

    let announced_accounts = Arc::new(RwLock::new(HashSet::new()));
    let genesis_block = Arc::new(RwLock::new(None));

    // Shared per-node bookkeeping used to enforce the doomslug invariants below.
    let last_height = Arc::new(RwLock::new(vec![0; key_pairs.len()]));
    let largest_endorsed_height = Arc::new(RwLock::new(vec![0u64; key_pairs.len()]));
    let largest_skipped_height = Arc::new(RwLock::new(vec![0u64; key_pairs.len()]));
    let hash_to_height = Arc::new(RwLock::new(HashMap::new()));
    let block_stats = Arc::new(RwLock::new(BlockStats::new()));

    for (index, account_id) in validators.clone().into_iter().enumerate() {
        let vs = vs.clone();
        let block_stats1 = block_stats.clone();
        let mut view_client_addr_slot = None;
        let mut shards_manager_adapter_slot = None;
        let validators_clone2 = validators.clone();
        let genesis_block1 = genesis_block.clone();
        let key_pairs = key_pairs.clone();
        let key_pairs1 = key_pairs.clone();
        let addresses = addresses.clone();
        let connectors1 = connectors.clone();
        let network_mock1 = peer_manager_mock.clone();
        let announced_accounts1 = announced_accounts.clone();
        let last_height1 = last_height.clone();
        let last_height2 = last_height.clone();
        let largest_endorsed_height1 = largest_endorsed_height.clone();
        let largest_skipped_height1 = largest_skipped_height.clone();
        let hash_to_height1 = hash_to_height.clone();
        let archive1 = archive.clone();
        let epoch_sync_enabled1 = epoch_sync_enabled.clone();
        let client_addr = ClientActor::create(|ctx| {
            let client_addr = ctx.address();
            let _account_id = account_id.clone();
            let pm = PeerManagerMock::new(move |msg, _ctx| {
                // Note: this `.wait` will block until all `ClientActors` are created.
                let connectors1 = connectors1.wait();
                let mut guard = network_mock1.write().unwrap();
                let (resp, perform_default) =
                    guard.deref_mut()(connectors1.as_slice(), account_id.clone(), &msg);
                drop(guard);

                if perform_default {
                    let my_ord = validators_clone2.iter().position(|it| it == &account_id).unwrap();
                    let my_key_pair = key_pairs[my_ord].clone();
                    let my_address = addresses[my_ord];

                    {
                        // Refresh this node's view of the network before handling the
                        // message: every peer is reported as connected and archival.
                        let last_height2 = last_height2.read().unwrap();
                        let peers: Vec<_> = key_pairs1
                            .iter()
                            .take(connectors1.len())
                            .enumerate()
                            .map(|(i, peer_info)| ConnectedPeerInfo {
                                full_peer_info: FullPeerInfo {
                                    peer_info: peer_info.clone(),
                                    chain_info: PeerChainInfo {
                                        genesis_id: GenesisId {
                                            chain_id: "unittest".to_string(),
                                            hash: Default::default(),
                                        },
                                        // TODO: add the correct hash here
                                        last_block: Some(BlockInfo {
                                            height: last_height2[i],
                                            hash: CryptoHash::default(),
                                        }),
                                        tracked_shards: vec![],
                                        archival: true,
                                    },
                                },
                                received_bytes_per_sec: 0,
                                sent_bytes_per_sec: 0,
                                last_time_peer_requested: near_async::time::Instant::now(),
                                last_time_received_message: near_async::time::Instant::now(),
                                connection_established_time: near_async::time::Instant::now(),
                                peer_type: PeerType::Outbound,
                                nonce: 3,
                            })
                            .collect();
                        let peers2 = peers
                            .iter()
                            .filter_map(|it| it.full_peer_info.clone().into())
                            .collect();
                        let info = NetworkInfo {
                            connected_peers: peers,
                            tier1_connections: vec![],
                            num_connected_peers: key_pairs1.len(),
                            peer_max_count: key_pairs1.len() as u32,
                            highest_height_peers: peers2,
                            sent_bytes_per_sec: 0,
                            received_bytes_per_sec: 0,
                            known_producers: vec![],
                            tier1_accounts_keys: vec![],
                            tier1_accounts_data: vec![],
                        };
                        client_addr.do_send(SetNetworkInfo(info).with_span_context());
                    }

                    // Default routing: emulate the real network by delivering the
                    // request to the appropriate peer's actors.
                    match msg.as_network_requests_ref() {
                        NetworkRequests::Block { block } => {
                            if check_block_stats {
                                let block_stats2 = &mut *block_stats1.write().unwrap();
                                block_stats2.add_block(block);
                                block_stats2.check_stats(false);
                            }

                            // Broadcast the block to every node (including self).
                            for actor_handles in connectors1 {
                                actor_handles.client_actor.do_send(
                                    BlockResponse {
                                        block: block.clone(),
                                        peer_id: PeerInfo::random().id,
                                        was_requested: false,
                                    }
                                    .with_span_context(),
                                );
                            }

                            let mut last_height1 = last_height1.write().unwrap();

                            let my_height = &mut last_height1[my_ord];

                            *my_height = max(*my_height, block.header().height());

                            hash_to_height1
                                .write()
                                .unwrap()
                                .insert(*block.header().hash(), block.header().height());
                        }
                        NetworkRequests::PartialEncodedChunkRequest { target, request, .. } => {
                            send_chunks(
                                connectors1,
                                validators_clone2.iter().map(|s| Some(s.clone())).enumerate(),
                                target.account_id.as_ref().map(|s| s.clone()),
                                drop_chunks,
                                |c| {
                                    c.send(ShardsManagerRequestFromNetwork::ProcessPartialEncodedChunkRequest { partial_encoded_chunk_request: request.clone(), route_back: my_address });
                                },
                            );
                        }
                        NetworkRequests::PartialEncodedChunkResponse { route_back, response } => {
                            send_chunks(
                                connectors1,
                                addresses.iter().enumerate(),
                                route_back,
                                drop_chunks,
                                |c| {
                                    c.send(ShardsManagerRequestFromNetwork::ProcessPartialEncodedChunkResponse { partial_encoded_chunk_response: response.clone(), received_time: Instant::now() });
                                },
                            );
                        }
                        NetworkRequests::PartialEncodedChunkMessage {
                            account_id,
                            partial_encoded_chunk,
                        } => {
                            send_chunks(
                                connectors1,
                                validators_clone2.iter().cloned().enumerate(),
                                account_id.clone(),
                                drop_chunks,
                                |c| {
                                    c.send(ShardsManagerRequestFromNetwork::ProcessPartialEncodedChunk(partial_encoded_chunk.clone().into()));
                                },
                            );
                        }
                        NetworkRequests::PartialEncodedChunkForward { account_id, forward } => {
                            send_chunks(
                                connectors1,
                                validators_clone2.iter().cloned().enumerate(),
                                account_id.clone(),
                                drop_chunks,
                                |c| {
                                    c.send(ShardsManagerRequestFromNetwork::ProcessPartialEncodedChunkForward(forward.clone()));
                                }
                            );
                        }
                        NetworkRequests::BlockRequest { hash, peer_id } => {
                            // Ask the owning peer's view client and forward the
                            // answer back to the requester as a BlockResponse.
                            for (i, peer_info) in key_pairs.iter().enumerate() {
                                let peer_id = peer_id.clone();
                                if peer_info.id == peer_id {
                                    let me = connectors1[my_ord].client_actor.clone();
                                    actix::spawn(
                                        connectors1[i]
                                            .view_client_actor
                                            .send(BlockRequest(*hash).with_span_context())
                                            .then(move |response| {
                                                let response = response.unwrap();
                                                match response {
                                                    Some(block) => {
                                                        me.do_send(
                                                            BlockResponse {
                                                                block: *block,
                                                                peer_id,
                                                                was_requested: true,
                                                            }
                                                            .with_span_context(),
                                                        );
                                                    }
                                                    None => {}
                                                }
                                                future::ready(())
                                            }),
                                    );
                                }
                            }
                        }
                        NetworkRequests::BlockHeadersRequest { hashes, peer_id } => {
                            for (i, peer_info) in key_pairs.iter().enumerate() {
                                let peer_id = peer_id.clone();
                                if peer_info.id == peer_id {
                                    let me = connectors1[my_ord].client_actor.clone();
                                    actix::spawn(
                                        connectors1[i]
                                            .view_client_actor
                                            .send(
                                                BlockHeadersRequest(hashes.clone())
                                                    .with_span_context(),
                                            )
                                            .then(move |response| {
                                                let response = response.unwrap();
                                                match response {
                                                    Some(headers) => {
                                                        me.do_send(
                                                            BlockHeadersResponse(headers, peer_id)
                                                                .with_span_context(),
                                                        );
                                                    }
                                                    None => {}
                                                }
                                                future::ready(())
                                            }),
                                    );
                                }
                            }
                        }
                        NetworkRequests::StateRequestHeader {
                            shard_id,
                            sync_hash,
                            target: target_account_id,
                        } => {
                            // The mock network only supports account-id targets.
                            let target_account_id = match target_account_id {
                                AccountOrPeerIdOrHash::AccountId(x) => x,
                                _ => panic!(),
                            };
                            for (i, name) in validators_clone2.iter().enumerate() {
                                if name == target_account_id {
                                    let me = connectors1[my_ord].client_actor.clone();
                                    actix::spawn(
                                        connectors1[i]
                                            .view_client_actor
                                            .send(
                                                StateRequestHeader {
                                                    shard_id: *shard_id,
                                                    sync_hash: *sync_hash,
                                                }
                                                .with_span_context(),
                                            )
                                            .then(move |response| {
                                                let response = response.unwrap();
                                                match response {
                                                    Some(response) => {
                                                        me.do_send(response.with_span_context());
                                                    }
                                                    None => {}
                                                }
                                                future::ready(())
                                            }),
                                    );
                                }
                            }
                        }
                        NetworkRequests::StateRequestPart {
                            shard_id,
                            sync_hash,
                            part_id,
                            target: target_account_id,
                        } => {
                            let target_account_id = match target_account_id {
                                AccountOrPeerIdOrHash::AccountId(x) => x,
                                _ => panic!(),
                            };
                            for (i, name) in validators_clone2.iter().enumerate() {
                                if name == target_account_id {
                                    let me = connectors1[my_ord].client_actor.clone();
                                    actix::spawn(
                                        connectors1[i]
                                            .view_client_actor
                                            .send(
                                                StateRequestPart {
                                                    shard_id: *shard_id,
                                                    sync_hash: *sync_hash,
                                                    part_id: *part_id,
                                                }
                                                .with_span_context(),
                                            )
                                            .then(move |response| {
                                                let response = response.unwrap();
                                                match response {
                                                    Some(response) => {
                                                        me.do_send(response.with_span_context());
                                                    }
                                                    None => {}
                                                }
                                                future::ready(())
                                            }),
                                    );
                                }
                            }
                        }
                        NetworkRequests::AnnounceAccount(announce_account) => {
                            // De-duplicate announcements per (account, epoch) before
                            // broadcasting to every view client.
                            let mut aa = announced_accounts1.write().unwrap();
                            let key = (
                                announce_account.account_id.clone(),
                                announce_account.epoch_id.clone(),
                            );
                            if aa.get(&key).is_none() {
                                aa.insert(key);
                                for actor_handles in connectors1 {
                                    actor_handles.view_client_actor.do_send(
                                        AnnounceAccountRequest(vec![(
                                            announce_account.clone(),
                                            None,
                                        )])
                                        .with_span_context(),
                                    )
                                }
                            }
                        }
                        NetworkRequests::Approval { approval_message } => {
                            // With `tamper_with_fg`, heights are split into groups
                            // of 100: drop all / drop ~50% / keep all approvals.
                            let height_mod = approval_message.approval.target_height % 300;

                            let do_propagate = if tamper_with_fg {
                                if height_mod < 100 {
                                    false
                                } else if height_mod < 200 {
                                    let mut rng = rand::thread_rng();
                                    rng.gen()
                                } else {
                                    true
                                }
                            } else {
                                true
                            };

                            let approval = approval_message.approval.clone();

                            if do_propagate {
                                for (i, name) in validators_clone2.iter().enumerate() {
                                    if name == &approval_message.target {
                                        connectors1[i].client_actor.do_send(
                                            BlockApproval(approval.clone(), my_key_pair.id.clone())
                                                .with_span_context(),
                                        );
                                    }
                                }
                            }

                            // Verify doomslug invariant
                            match approval.inner {
                                ApprovalInner::Endorsement(parent_hash) => {
                                    assert!(
                                        approval.target_height
                                            > largest_skipped_height1.read().unwrap()[my_ord]
                                    );
                                    largest_endorsed_height1.write().unwrap()[my_ord] =
                                        approval.target_height;

                                    if let Some(prev_height) =
                                        hash_to_height1.read().unwrap().get(&parent_hash)
                                    {
                                        assert_eq!(prev_height + 1, approval.target_height);
                                    }
                                }
                                ApprovalInner::Skip(prev_height) => {
                                    largest_skipped_height1.write().unwrap()[my_ord] =
                                        approval.target_height;
                                    let e = largest_endorsed_height1.read().unwrap()[my_ord];
                                    // `e` is the *target* height of the last endorsement. `prev_height`
                                    // is allowed to be anything >= to the source height, which is e-1.
                                    assert!(
                                        prev_height + 1 >= e,
                                        "New: {}->{}, Old: {}->{}",
                                        prev_height,
                                        approval.target_height,
                                        e - 1,
                                        e
                                    );
                                }
                            };
                        }
                        NetworkRequests::ForwardTx(_, _)
                        | NetworkRequests::BanPeer { .. }
                        | NetworkRequests::TxStatus(_, _, _)
                        | NetworkRequests::Challenge(_) => {}
                    };
                }
                resp
            })
            .start();
            let (block, client, view_client_addr, shards_manager_adapter) = setup(
                vs,
                epoch_length,
                _account_id,
                skip_sync_wait,
                block_prod_time,
                block_prod_time * 3,
                enable_doomslug,
                archive1[index],
                epoch_sync_enabled1[index],
                true,
                Arc::new(pm).into(),
                10000,
                genesis_time,
                ctx,
            );
            view_client_addr_slot = Some(view_client_addr);
            shards_manager_adapter_slot = Some(shards_manager_adapter);
            *genesis_block1.write().unwrap() = Some(block);
            client
        });
        ret.push(ActorHandlesForTesting {
            client_actor: client_addr,
            view_client_actor: view_client_addr_slot.unwrap(),
            shards_manager_adapter: shards_manager_adapter_slot.unwrap(),
        });
    }
    hash_to_height.write().unwrap().insert(CryptoHash::default(), 0);
    hash_to_height
        .write()
        .unwrap()
        .insert(*genesis_block.read().unwrap().as_ref().unwrap().header().clone().hash(), 0);
    // Unblock all the per-node network mocks waiting on `connectors`.
    connectors.set(ret.clone()).ok().unwrap();
    let value = genesis_block.read().unwrap();
    (value.clone().unwrap(), ret, block_stats)
}
pub fn setup_no_network(
    validators: Vec<AccountId>,
    account_id: AccountId,
    skip_sync_wait: bool,
    enable_doomslug: bool,
) -> ActorHandlesForTesting {
    // Default transaction validity period of 100 blocks.
    setup_no_network_with_validity_period_and_no_epoch_sync(
        validators,
        account_id,
        skip_sync_wait,
        100,
        enable_doomslug,
    )
}

/// Like `setup_no_network`, but with an explicit `transaction_validity_period`.
/// All network requests get a `NoResponse` answer, i.e. the node is isolated.
pub fn setup_no_network_with_validity_period_and_no_epoch_sync(
    validators: Vec<AccountId>,
    account_id: AccountId,
    skip_sync_wait: bool,
    transaction_validity_period: NumBlocks,
    enable_doomslug: bool,
) -> ActorHandlesForTesting {
    setup_mock_with_validity_period_and_no_epoch_sync(
        validators,
        account_id,
        skip_sync_wait,
        enable_doomslug,
        Box::new(|_, _, _| {
            PeerManagerMessageResponse::NetworkResponses(NetworkResponses::NoResponse)
        }),
        transaction_validity_period,
    )
}

/// Builds a `Client` on top of pre-constructed epoch manager / shard tracker /
/// runtime. The sync status is forced to `NoSync` so tests can produce blocks
/// immediately.
pub fn setup_client_with_runtime(
    num_validator_seats: NumSeats,
    account_id: Option<AccountId>,
    enable_doomslug: bool,
    network_adapter: PeerManagerAdapter,
    shards_manager_adapter: ShardsManagerAdapterForTest,
    chain_genesis: ChainGenesis,
    epoch_manager: Arc<dyn EpochManagerAdapter>,
    shard_tracker: ShardTracker,
    runtime: Arc<dyn RuntimeAdapter>,
    rng_seed: RngSeed,
    archive: bool,
    save_trie_changes: bool,
    make_state_snapshot_callback: Option<MakeSnapshotCallback>,
) -> Client {
    // `None` account id means the client is not a validator.
    let validator_signer =
        account_id.map(|x| Arc::new(create_test_signer(x.as_str())) as Arc<dyn ValidatorSigner>);
    let mut config = ClientConfig::test(
        true,
        10,
        20,
        num_validator_seats,
        archive,
        save_trie_changes,
        true,
        true,
    );
    config.epoch_length = chain_genesis.epoch_length;
    let mut client = Client::new(
        config,
        chain_genesis,
        epoch_manager,
        shard_tracker,
        runtime,
        network_adapter,
        shards_manager_adapter.client,
        validator_signer,
        enable_doomslug,
        rng_seed,
        make_state_snapshot_callback,
    )
    .unwrap();
    client.sync_status = SyncStatus::NoSync;
    client
}

/// Convenience wrapper around `setup_client_with_runtime` that builds a mock
/// epoch manager and `KeyValueRuntime` from the given store and schedule.
pub fn setup_client(
    store: Store,
    vs: ValidatorSchedule,
    account_id: Option<AccountId>,
    enable_doomslug: bool,
    network_adapter: PeerManagerAdapter,
    shards_manager_adapter: ShardsManagerAdapterForTest,
    chain_genesis: ChainGenesis,
    rng_seed: RngSeed,
    archive: bool,
    save_trie_changes: bool,
) -> Client {
    let num_validator_seats = vs.all_block_producers().count() as NumSeats;
    let epoch_manager =
        MockEpochManager::new_with_validators(store.clone(), vs, chain_genesis.epoch_length);
    let shard_tracker = ShardTracker::new_empty(epoch_manager.clone());
    let runtime = KeyValueRuntime::new(store, epoch_manager.as_ref());
    setup_client_with_runtime(
        num_validator_seats,
        account_id,
        enable_doomslug,
        network_adapter,
        shards_manager_adapter,
        chain_genesis,
        epoch_manager,
        shard_tracker,
        runtime,
        rng_seed,
        archive,
        save_trie_changes,
        None,
    )
}

/// Creates a `ShardsManager` that is driven synchronously (no actor mailbox),
/// wrapped in the combined test adapter.
pub fn setup_synchronous_shards_manager(
    account_id: Option<AccountId>,
    client_adapter: Sender<ShardsManagerResponse>,
    network_adapter: PeerManagerAdapter,
    epoch_manager: Arc<dyn EpochManagerAdapter>,
    shard_tracker: ShardTracker,
    runtime: Arc<dyn RuntimeAdapter>,
    chain_genesis: &ChainGenesis,
) -> ShardsManagerAdapterForTest {
    // Initialize the chain, to make sure that if the store is empty, we write the genesis
    // into the store, and as a short cut to get the parameters needed to instantiate
    // ShardsManager. This way we don't have to wait to construct the Client first.
    // TODO(#8324): This should just be refactored so that we can construct Chain first
    // before anything else.
    let chain = Chain::new(
        epoch_manager.clone(),
        shard_tracker.clone(),
        runtime,
        chain_genesis,
        DoomslugThresholdMode::TwoThirds, // irrelevant
        ChainConfig {
            save_trie_changes: true,
            background_migration_threads: 1,
            state_snapshot_every_n_blocks: None,
        }, // irrelevant
        None,
    )
    .unwrap();
    let chain_head = chain.head().unwrap();
    let chain_header_head = chain.header_head().unwrap();
    let shards_manager = ShardsManager::new(
        time::Clock::real(),
        account_id,
        epoch_manager,
        shard_tracker,
        network_adapter.request_sender,
        client_adapter,
        chain.store().new_read_only_chunks_store(),
        chain_head,
        chain_header_head,
    );
    Arc::new(SynchronousShardsManagerAdapter::new(shards_manager)).into()
}

/// Like `setup_client`, but with a synchronous shards manager instead of one
/// living in its own actor.
pub fn setup_client_with_synchronous_shards_manager(
    store: Store,
    vs: ValidatorSchedule,
    account_id: Option<AccountId>,
    enable_doomslug: bool,
    network_adapter: PeerManagerAdapter,
    client_adapter: Sender<ShardsManagerResponse>,
    chain_genesis: ChainGenesis,
    rng_seed: RngSeed,
    archive: bool,
    save_trie_changes: bool,
) -> Client {
    let num_validator_seats = vs.all_block_producers().count() as NumSeats;
    let epoch_manager =
        MockEpochManager::new_with_validators(store.clone(), vs, chain_genesis.epoch_length);
    let shard_tracker = ShardTracker::new_empty(epoch_manager.clone());
    let runtime = KeyValueRuntime::new(store, epoch_manager.as_ref());
    let shards_manager_adapter = setup_synchronous_shards_manager(
        account_id.clone(),
        client_adapter,
        network_adapter.clone(),
        epoch_manager.clone(),
        shard_tracker.clone(),
        runtime.clone(),
        &chain_genesis,
    );
    setup_client_with_runtime(
        num_validator_seats,
        account_id,
        enable_doomslug,
        network_adapter,
        shards_manager_adapter,
        chain_genesis,
        epoch_manager,
        shard_tracker,
        runtime,
        rng_seed,
        archive,
        save_trie_changes,
        None,
    )
}

/// A combined trait bound for both the client side and network side of the ShardsManager API.
+#[derive(Clone, derive_more::AsRef)] +pub struct ShardsManagerAdapterForTest { + pub client: Sender, + pub network: Sender, +} + +impl + CanSend> + From> for ShardsManagerAdapterForTest +{ + fn from(arc: Arc) -> Self { + Self { client: arc.as_sender(), network: arc.as_sender() } + } +} diff --git a/chain/client/src/test_utils/test_env.rs b/chain/client/src/test_utils/test_env.rs new file mode 100644 index 00000000000..ebb35a7d83d --- /dev/null +++ b/chain/client/src/test_utils/test_env.rs @@ -0,0 +1,516 @@ +use std::collections::HashMap; +use std::sync::{Arc, Mutex}; +use std::time::Instant; + +use crate::adapter::ProcessTxResponse; +use crate::Client; +use near_async::messaging::CanSend; +use near_chain::test_utils::ValidatorSchedule; +use near_chain::{ChainGenesis, Provenance}; +use near_chunks::client::ShardsManagerResponse; +use near_chunks::test_utils::MockClientAdapterForShardsManager; +use near_crypto::{InMemorySigner, KeyType, Signer}; +use near_network::shards_manager::ShardsManagerRequestFromNetwork; +use near_network::test_utils::MockPeerManagerAdapter; +use near_network::types::NetworkRequests; +use near_network::types::PeerManagerMessageRequest; +use near_network::types::{PartialEncodedChunkRequestMsg, PartialEncodedChunkResponseMsg}; +use near_o11y::testonly::TracingCapture; +use near_primitives::action::delegate::{DelegateAction, NonDelegateAction, SignedDelegateAction}; +use near_primitives::block::Block; +use near_primitives::epoch_manager::RngSeed; +use near_primitives::errors::InvalidTxError; +use near_primitives::hash::CryptoHash; +use near_primitives::runtime::config::RuntimeConfig; +use near_primitives::shard_layout::ShardUId; +use near_primitives::sharding::PartialEncodedChunk; +use near_primitives::test_utils::create_test_signer; +use near_primitives::transaction::{Action, FunctionCallAction, SignedTransaction}; +use near_primitives::types::{AccountId, Balance, BlockHeight, EpochId, NumSeats}; +use near_primitives::utils::MaybeValidated; 
+use near_primitives::version::ProtocolVersion; +use near_primitives::views::{ + AccountView, FinalExecutionOutcomeView, QueryRequest, QueryResponseKind, StateItem, +}; +use once_cell::sync::OnceCell; + +use super::setup::{setup_client_with_runtime, ShardsManagerAdapterForTest}; +use super::test_env_builder::TestEnvBuilder; +use super::TEST_SEED; + +/// An environment for writing integration tests with multiple clients. +/// This environment can simulate near nodes without network and it can be configured to use different runtimes. +pub struct TestEnv { + pub chain_genesis: ChainGenesis, + pub validators: Vec, + pub network_adapters: Vec>, + pub client_adapters: Vec>, + pub shards_manager_adapters: Vec, + pub clients: Vec, + pub(crate) account_to_client_index: HashMap, + pub(crate) paused_blocks: Arc>>>>, + // random seed to be inject in each client according to AccountId + // if not set, a default constant TEST_SEED will be injected + pub(crate) seeds: HashMap, + pub(crate) archive: bool, + pub(crate) save_trie_changes: bool, +} + +impl TestEnv { + pub fn builder(chain_genesis: ChainGenesis) -> TestEnvBuilder { + TestEnvBuilder::new(chain_genesis) + } + + /// Process a given block in the client with index `id`. + /// Simulate the block processing logic in `Client`, i.e, it would run catchup and then process accepted blocks and possibly produce chunks. + pub fn process_block(&mut self, id: usize, block: Block, provenance: Provenance) { + self.clients[id].process_block_test(MaybeValidated::from(block), provenance).unwrap(); + } + + /// Produces block by given client, which may kick off chunk production. + /// This means that transactions added before this call will be included in the next block produced by this validator. 
+ pub fn produce_block(&mut self, id: usize, height: BlockHeight) { + let block = self.clients[id].produce_block(height).unwrap(); + self.process_block(id, block.unwrap(), Provenance::PRODUCED); + } + + /// Pause processing of the given block, which means that the background + /// thread which applies the chunks on the block will get blocked until + /// `resume_block_processing` is called. + /// + /// Note that you must call `resume_block_processing` at some later point to + /// unstuck the block. + /// + /// Implementation is rather crude and just hijacks our logging + /// infrastructure. Hopefully this is good enough, but, if it isn't, we can + /// add something more robust. + pub fn pause_block_processing(&mut self, capture: &mut TracingCapture, block: &CryptoHash) { + let paused_blocks = Arc::clone(&self.paused_blocks); + paused_blocks.lock().unwrap().insert(*block, Arc::new(OnceCell::new())); + capture.set_callback(move |msg| { + if msg.starts_with("do_apply_chunks") { + let cell = paused_blocks.lock().unwrap().iter().find_map(|(block_hash, cell)| { + if msg.contains(&format!("block_hash={block_hash}")) { + Some(Arc::clone(cell)) + } else { + None + } + }); + if let Some(cell) = cell { + cell.wait(); + } + } + }); + } + + /// See `pause_block_processing`. 
    pub fn resume_block_processing(&mut self, block: &CryptoHash) {
        // Setting the cell releases the thread blocked in `pause_block_processing`.
        let mut paused_blocks = self.paused_blocks.lock().unwrap();
        let cell = paused_blocks.remove(block).unwrap();
        let _ = cell.set(());
    }

    /// Returns the client owned by the given validator account.
    pub fn client(&mut self, account_id: &AccountId) -> &mut Client {
        &mut self.clients[self.account_to_client_index[account_id]]
    }

    /// Returns the shards manager handle of the given validator account.
    pub fn shards_manager(&self, account: &AccountId) -> &ShardsManagerAdapterForTest {
        &self.shards_manager_adapters[self.account_to_client_index[account]]
    }

    /// Drains all queued partial-encoded-chunk messages/forwards across all
    /// clients until no adapter has pending requests.
    pub fn process_partial_encoded_chunks(&mut self) {
        let network_adapters = self.network_adapters.clone();

        let mut keep_going = true;
        while keep_going {
            for network_adapter in network_adapters.iter() {
                keep_going = false;
                // process partial encoded chunks
                while let Some(request) = network_adapter.pop() {
                    // if there are any requests in any of the adapters reset
                    // keep going to true as processing of any message may
                    // trigger more messages to be processed in other clients
                    // it's a bit sad and it would be much nicer if all messages
                    // were forwarded to a single queue
                    // TODO would be nicer to first handle all PECs and then all PECFs
                    keep_going = true;
                    match request {
                        PeerManagerMessageRequest::NetworkRequests(
                            NetworkRequests::PartialEncodedChunkMessage {
                                account_id,
                                partial_encoded_chunk,
                            },
                        ) => {
                            let partial_encoded_chunk =
                                PartialEncodedChunk::from(partial_encoded_chunk);
                            let message =
                                ShardsManagerRequestFromNetwork::ProcessPartialEncodedChunk(
                                    partial_encoded_chunk,
                                );
                            self.shards_manager(&account_id).send(message);
                        }
                        PeerManagerMessageRequest::NetworkRequests(
                            NetworkRequests::PartialEncodedChunkForward { account_id, forward },
                        ) => {
                            let message =
                                ShardsManagerRequestFromNetwork::ProcessPartialEncodedChunkForward(
                                    forward,
                                );
                            self.shards_manager(&account_id).send(message);
                        }
                        _ => {
                            tracing::debug!(target: "test", ?request, "skipping unsupported request type");
                        }
                    }
                }
            }
        }
    }

    /// Process all PartialEncodedChunkRequests in the network queue for a client
    /// `id`: id for the client
    pub fn process_partial_encoded_chunks_requests(&mut self, id: usize) {
        while let Some(request) = self.network_adapters[id].pop() {
            self.process_partial_encoded_chunk_request(id, request);
        }
    }

    /// Send the PartialEncodedChunkRequest to the target client, get response and process the response
    pub fn process_partial_encoded_chunk_request(
        &mut self,
        id: usize,
        request: PeerManagerMessageRequest,
    ) {
        if let PeerManagerMessageRequest::NetworkRequests(
            NetworkRequests::PartialEncodedChunkRequest { target, request, .. },
        ) = request
        {
            let target_id = self.account_to_client_index[&target.account_id.unwrap()];
            let response = self.get_partial_encoded_chunk_response(target_id, request);
            if let Some(response) = response {
                self.shards_manager_adapters[id].send(
                    ShardsManagerRequestFromNetwork::ProcessPartialEncodedChunkResponse {
                        partial_encoded_chunk_response: response,
                        received_time: Instant::now(),
                    },
                );
            }
        } else {
            panic!("The request is not a PartialEncodedChunk request {:?}", request);
        }
    }

    /// Feeds `request` into client `id`'s shards manager and extracts the
    /// resulting response from its network queue; panics if none is produced.
    pub fn get_partial_encoded_chunk_response(
        &mut self,
        id: usize,
        request: PartialEncodedChunkRequestMsg,
    ) -> Option<PartialEncodedChunkResponseMsg> {
        self.shards_manager_adapters[id].send(
            ShardsManagerRequestFromNetwork::ProcessPartialEncodedChunkRequest {
                partial_encoded_chunk_request: request.clone(),
                route_back: CryptoHash::default(),
            },
        );
        let response = self.network_adapters[id].pop_most_recent();
        match response {
            Some(PeerManagerMessageRequest::NetworkRequests(
                NetworkRequests::PartialEncodedChunkResponse { route_back: _, response },
            )) => return Some(response),
            Some(response) => {
                // Not the message we were looking for; restore the queue.
                self.network_adapters[id].put_back_most_recent(response);
            }
            None => {}
        }

        panic!(
            "Failed to process PartialEncodedChunkRequest from shards manager {}: {:?}",
            id, request
        );
    }

    /// Forwards all pending shards-manager responses to client `id`; returns
    /// whether anything was processed.
    pub fn process_shards_manager_responses(&mut self, id: usize) -> bool {
        let mut any_processed = false;
        while let Some(msg) = self.client_adapters[id].pop() {
            match msg {
                ShardsManagerResponse::ChunkCompleted { partial_chunk, shard_chunk } => {
                    self.clients[id].on_chunk_completed(
                        partial_chunk,
                        shard_chunk,
                        Arc::new(|_| {}),
                    );
                }
                ShardsManagerResponse::InvalidChunk(encoded_chunk) => {
                    self.clients[id].on_invalid_chunk(encoded_chunk);
                }
                ShardsManagerResponse::ChunkHeaderReadyForInclusion {
                    chunk_header,
                    chunk_producer,
                } => {
                    self.clients[id]
                        .on_chunk_header_ready_for_inclusion(chunk_header, chunk_producer);
                }
            }
            any_processed = true;
        }
        any_processed
    }

    /// Repeatedly drains shards-manager responses until the client has no more
    /// blocks in processing.
    pub fn process_shards_manager_responses_and_finish_processing_blocks(&mut self, idx: usize) {
        let _span =
            tracing::debug_span!(target: "test", "process_shards_manager", client=idx).entered();

        loop {
            self.process_shards_manager_responses(idx);
            if self.clients[idx].finish_blocks_in_processing().is_empty() {
                return;
            }
        }
    }

    /// Submits a self-transfer of 100 tokens, signed by client 0's validator,
    /// to client `id`.
    pub fn send_money(&mut self, id: usize) -> ProcessTxResponse {
        let account_id = self.get_client_id(0);
        let signer =
            InMemorySigner::from_seed(account_id.clone(), KeyType::ED25519, account_id.as_ref());
        let tx = SignedTransaction::send_money(
            1,
            account_id.clone(),
            account_id.clone(),
            &signer,
            100,
            self.clients[id].chain.head().unwrap().last_block_hash,
        );
        self.clients[id].process_tx(tx, false, false)
    }

    /// This function will actually bump to the latest protocol version instead of the provided one.
    /// See /~https://github.com/near/nearcore/issues/8590 for details.
    pub fn upgrade_protocol(&mut self, protocol_version: ProtocolVersion) {
        assert_eq!(self.clients.len(), 1, "at the moment, this support only a single client");

        let tip = self.clients[0].chain.head().unwrap();
        let epoch_id = self.clients[0]
            .epoch_manager
            .get_epoch_id_from_prev_block(&tip.last_block_hash)
            .unwrap();
        let block_producer =
            self.clients[0].epoch_manager.get_block_producer(&epoch_id, tip.height).unwrap();

        // Produce one block advertising the new protocol version, re-signed so
        // the tampered header stays valid.
        let mut block = self.clients[0].produce_block(tip.height + 1).unwrap().unwrap();
        eprintln!("Producing block with version {protocol_version}");
        block.mut_header().set_latest_protocol_version(protocol_version);
        block.mut_header().resign(&create_test_signer(block_producer.as_str()));

        let _ = self.clients[0]
            .process_block_test_no_produce_chunk(block.into(), Provenance::NONE)
            .unwrap();

        // Produce two full epochs so the version voting takes effect.
        for i in 0..self.clients[0].chain.epoch_length * 2 {
            self.produce_block(0, tip.height + i + 2);
        }
    }

    /// Queries the view of `account_id` at the head of client 0's chain.
    pub fn query_account(&mut self, account_id: AccountId) -> AccountView {
        let head = self.clients[0].chain.head().unwrap();
        let last_block = self.clients[0].chain.get_block(&head.last_block_hash).unwrap();
        // NOTE(review): queries shard 0's chunk only — assumes a single-shard layout.
        let last_chunk_header = &last_block.chunks()[0];
        let response = self.clients[0]
            .runtime_adapter
            .query(
                ShardUId::single_shard(),
                &last_chunk_header.prev_state_root(),
                last_block.header().height(),
                last_block.header().raw_timestamp(),
                last_block.header().prev_hash(),
                last_block.header().hash(),
                last_block.header().epoch_id(),
                &QueryRequest::ViewAccount { account_id },
            )
            .unwrap();
        match response.kind {
            QueryResponseKind::ViewAccount(account_view) => account_view,
            _ => panic!("Wrong return value"),
        }
    }

    /// Returns all state items of `account_id` (no prefix filter, no proof) at
    /// the head of client 0's chain.
    pub fn query_state(&mut self, account_id: AccountId) -> Vec<StateItem> {
        let head = self.clients[0].chain.head().unwrap();
        let last_block = self.clients[0].chain.get_block(&head.last_block_hash).unwrap();
        let last_chunk_header = &last_block.chunks()[0];
        let response = self.clients[0]
            .runtime_adapter
            .query(
                ShardUId::single_shard(),
                &last_chunk_header.prev_state_root(),
                last_block.header().height(),
                last_block.header().raw_timestamp(),
                last_block.header().prev_hash(),
                last_block.header().hash(),
                last_block.header().epoch_id(),
                &QueryRequest::ViewState {
                    account_id,
                    prefix: vec![].into(),
                    include_proof: false,
                },
            )
            .unwrap();
        match response.kind {
            QueryResponseKind::ViewState(view_state_result) => view_state_result.values,
            _ => panic!("Wrong return value"),
        }
    }

    /// Returns the liquid balance of `account_id` at the chain head.
    pub fn query_balance(&mut self, account_id: AccountId) -> Balance {
        self.query_account(account_id).amount
    }

    /// Restarts client at given index. Note that the new client reuses runtime
    /// adapter of old client.
    /// TODO (#8269): create new `KeyValueRuntime` for new client. Currently it
    /// doesn't work because `KeyValueRuntime` misses info about new epochs in
    /// memory caches.
    /// Though, it seems that it is not necessary for current use cases.
    pub fn restart(&mut self, idx: usize) {
        let account_id = self.get_client_id(idx).clone();
        let rng_seed = match self.seeds.get(&account_id) {
            Some(seed) => *seed,
            None => TEST_SEED,
        };
        let vs = ValidatorSchedule::new().block_producers_per_epoch(vec![self.validators.clone()]);
        let num_validator_seats = vs.all_block_producers().count() as NumSeats;
        self.clients[idx] = setup_client_with_runtime(
            num_validator_seats,
            Some(self.get_client_id(idx).clone()),
            false,
            self.network_adapters[idx].clone().into(),
            self.shards_manager_adapters[idx].clone(),
            self.chain_genesis.clone(),
            self.clients[idx].epoch_manager.clone(),
            self.clients[idx].shard_tracker.clone(),
            self.clients[idx].runtime_adapter.clone(),
            rng_seed,
            self.archive,
            self.save_trie_changes,
            None,
        )
    }

    /// Returns an [`AccountId`] used by a client at given index. More
    /// specifically, returns validator id of the client’s validator signer.
    pub fn get_client_id(&self, idx: usize) -> &AccountId {
        self.clients[idx].validator_signer.as_ref().unwrap().validator_id()
    }

    /// Returns the runtime config of client `idx` for the given epoch.
    pub fn get_runtime_config(&self, idx: usize, epoch_id: EpochId) -> RuntimeConfig {
        self.clients[idx].runtime_adapter.get_protocol_config(&epoch_id).unwrap().runtime_config
    }

    /// Create and sign transaction ready for execution.
    pub fn tx_from_actions(
        &mut self,
        actions: Vec<Action>,
        signer: &InMemorySigner,
        receiver: AccountId,
    ) -> SignedTransaction {
        let tip = self.clients[0].chain.head().unwrap();
        SignedTransaction::from_actions(
            tip.height + 1,
            signer.account_id.clone(),
            receiver,
            signer,
            actions,
            tip.last_block_hash,
        )
    }

    /// Wrap actions in a delegate action, put it in a transaction, sign.
    pub fn meta_tx_from_actions(
        &mut self,
        actions: Vec<Action>,
        sender: AccountId,
        relayer: AccountId,
        receiver_id: AccountId,
    ) -> SignedTransaction {
        let inner_signer = InMemorySigner::from_seed(sender.clone(), KeyType::ED25519, &sender);
        let relayer_signer = InMemorySigner::from_seed(relayer.clone(), KeyType::ED25519, &relayer);
        let tip = self.clients[0].chain.head().unwrap();
        // Nonces just need to be monotonically increasing; the chain height works.
        let user_nonce = tip.height + 1;
        let relayer_nonce = tip.height + 1;
        let delegate_action = DelegateAction {
            sender_id: inner_signer.account_id.clone(),
            receiver_id,
            actions: actions
                .into_iter()
                .map(|action| NonDelegateAction::try_from(action).unwrap())
                .collect(),
            nonce: user_nonce,
            max_block_height: tip.height + 100,
            public_key: inner_signer.public_key(),
        };
        // NEP-461 hash: the inner action is signed by the user, the outer
        // transaction by the relayer.
        let signature = inner_signer.sign(delegate_action.get_nep461_hash().as_bytes());
        let signed_delegate_action = SignedDelegateAction { delegate_action, signature };
        SignedTransaction::from_actions(
            relayer_nonce,
            relayer,
            sender,
            &relayer_signer,
            vec![Action::Delegate(Box::new(signed_delegate_action))],
            tip.last_block_hash,
        )
    }

    /// Process a tx and its receipts, then return the execution outcome.
+ pub fn execute_tx( + &mut self, + tx: SignedTransaction, + ) -> Result { + let tx_hash = tx.get_hash(); + let response = self.clients[0].process_tx(tx, false, false); + // Check if the transaction got rejected + match response { + ProcessTxResponse::NoResponse + | ProcessTxResponse::RequestRouted + | ProcessTxResponse::ValidTx => (), + ProcessTxResponse::InvalidTx(e) => return Err(e), + ProcessTxResponse::DoesNotTrackShard => panic!("test setup is buggy"), + } + let max_iters = 100; + let tip = self.clients[0].chain.head().unwrap(); + for i in 0..max_iters { + let block = self.clients[0].produce_block(tip.height + i + 1).unwrap().unwrap(); + self.process_block(0, block.clone(), Provenance::PRODUCED); + if let Ok(outcome) = self.clients[0].chain.get_final_transaction_result(&tx_hash) { + return Ok(outcome); + } + } + panic!("No transaction outcome found after {max_iters} blocks.") + } + + /// Execute a function call transaction that calls main on the `TestEnv`. + /// + /// This function assumes that account has been deployed and that + /// `InMemorySigner::from_seed` produces a valid signer that has it's key + /// deployed already. 
+ pub fn call_main(&mut self, account: &AccountId) -> FinalExecutionOutcomeView { + let signer = InMemorySigner::from_seed(account.clone(), KeyType::ED25519, account.as_str()); + let actions = vec![Action::FunctionCall(Box::new(FunctionCallAction { + method_name: "main".to_string(), + args: vec![], + gas: 3 * 10u64.pow(14), + deposit: 0, + }))]; + let tx = self.tx_from_actions(actions, &signer, signer.account_id.clone()); + self.execute_tx(tx).unwrap() + } +} + +impl Drop for TestEnv { + fn drop(&mut self) { + let paused_blocks = self.paused_blocks.lock().unwrap(); + for cell in paused_blocks.values() { + let _ = cell.set(()); + } + if !paused_blocks.is_empty() && !std::thread::panicking() { + panic!("some blocks are still paused, did you call `resume_block_processing`?") + } + } +} diff --git a/chain/client/src/test_utils/test_env_builder.rs b/chain/client/src/test_utils/test_env_builder.rs new file mode 100644 index 00000000000..52bf817c280 --- /dev/null +++ b/chain/client/src/test_utils/test_env_builder.rs @@ -0,0 +1,531 @@ +use itertools::Itertools; +use std::collections::HashMap; +use std::path::PathBuf; +use std::sync::Arc; + +use near_async::messaging::IntoSender; +use near_chain::state_snapshot_actor::MakeSnapshotCallback; +use near_chain::test_utils::{KeyValueRuntime, MockEpochManager, ValidatorSchedule}; +use near_chain::types::RuntimeAdapter; +use near_chain::ChainGenesis; +use near_chain_configs::GenesisConfig; +use near_chunks::test_utils::MockClientAdapterForShardsManager; +use near_epoch_manager::shard_tracker::{ShardTracker, TrackedConfig}; +use near_epoch_manager::{EpochManager, EpochManagerAdapter, EpochManagerHandle}; +use near_network::test_utils::MockPeerManagerAdapter; +use near_primitives::epoch_manager::RngSeed; +use near_primitives::types::{AccountId, NumShards}; +use near_store::test_utils::create_test_store; +use near_store::{NodeStorage, Store}; + +use super::setup::{setup_client_with_runtime, setup_synchronous_shards_manager}; +use 
super::test_env::TestEnv; +use super::TEST_SEED; + +#[derive(derive_more::From, Clone)] +enum EpochManagerKind { + Mock(Arc), + Handle(Arc), +} + +impl EpochManagerKind { + pub fn into_adapter(self) -> Arc { + match self { + Self::Mock(mock) => mock, + Self::Handle(handle) => handle, + } + } +} + +/// A builder for the TestEnv structure. +pub struct TestEnvBuilder { + chain_genesis: ChainGenesis, + clients: Vec, + validators: Vec, + home_dirs: Option>, + stores: Option>, + epoch_managers: Option>, + shard_trackers: Option>, + runtimes: Option>>, + network_adapters: Option>>, + num_shards: Option, + // random seed to be inject in each client according to AccountId + // if not set, a default constant TEST_SEED will be injected + seeds: HashMap, + archive: bool, + save_trie_changes: bool, + add_state_snapshots: bool, +} + +/// Builder for the [`TestEnv`] structure. +impl TestEnvBuilder { + /// Constructs a new builder. + pub(crate) fn new(chain_genesis: ChainGenesis) -> Self { + let clients = Self::make_accounts(1); + let validators = clients.clone(); + let seeds: HashMap = HashMap::with_capacity(1); + Self { + chain_genesis, + clients, + validators, + home_dirs: None, + stores: None, + epoch_managers: None, + shard_trackers: None, + runtimes: None, + network_adapters: None, + num_shards: None, + seeds, + archive: false, + save_trie_changes: true, + add_state_snapshots: false, + } + } + + /// Sets list of client [`AccountId`]s to the one provided. Panics if the + /// vector is empty. 
+ pub fn clients(mut self, clients: Vec) -> Self { + assert!(!clients.is_empty()); + assert!(self.stores.is_none(), "Cannot set clients after stores"); + assert!(self.epoch_managers.is_none(), "Cannot set clients after epoch_managers"); + assert!(self.shard_trackers.is_none(), "Cannot set clients after shard_trackers"); + assert!(self.runtimes.is_none(), "Cannot set clients after runtimes"); + assert!(self.network_adapters.is_none(), "Cannot set clients after network_adapters"); + self.clients = clients; + self + } + + /// Sets random seed for each client according to the provided HashMap. + pub fn clients_random_seeds(mut self, seeds: HashMap) -> Self { + self.seeds = seeds; + self + } + + /// Sets number of clients to given one. To get [`AccountId`] used by the + /// validator associated with the client the [`TestEnv::get_client_id`] + /// method can be used. Tests should not rely on any particular format of + /// account identifiers used by the builder. Panics if `num` is zero. + pub fn clients_count(self, num: usize) -> Self { + self.clients(Self::make_accounts(num)) + } + + /// Sets list of validator [`AccountId`]s to the one provided. Panics if + /// the vector is empty. + pub fn validators(mut self, validators: Vec) -> Self { + assert!(!validators.is_empty()); + assert!(self.epoch_managers.is_none(), "Cannot set validators after epoch_managers"); + self.validators = validators; + self + } + + /// Sets number of validator seats to given one. To get [`AccountId`] used + /// in the test environment the `validators` field of the built [`TestEnv`] + /// object can be used. Tests should not rely on any particular format of + /// account identifiers used by the builder. Panics if `num` is zero. 
+ pub fn validator_seats(self, num: usize) -> Self { + self.validators(Self::make_accounts(num)) + } + + fn ensure_home_dirs(mut self) -> Self { + if self.home_dirs.is_none() { + let home_dirs = (0..self.clients.len()) + .map(|_| { + let temp_dir = tempfile::tempdir().unwrap(); + temp_dir.into_path() + }) + .collect_vec(); + self.home_dirs = Some(home_dirs) + } + self + } + + /// Overrides the stores that are used to create epoch managers and runtimes. + pub fn stores(mut self, stores: Vec) -> Self { + assert_eq!(stores.len(), self.clients.len()); + assert!(self.stores.is_none(), "Cannot override twice"); + assert!(self.epoch_managers.is_none(), "Cannot override store after epoch_managers"); + assert!(self.runtimes.is_none(), "Cannot override store after runtimes"); + self.stores = Some(stores); + self + } + + pub fn real_stores(self) -> Self { + let ret = self.ensure_home_dirs(); + let stores = ret + .home_dirs + .as_ref() + .unwrap() + .iter() + .map(|home_dir| { + NodeStorage::opener(home_dir.as_path(), false, &Default::default(), None) + .open() + .unwrap() + .get_hot_store() + }) + .collect_vec(); + ret.stores(stores) + } + + /// Internal impl to make sure the stores are initialized. + fn ensure_stores(self) -> Self { + if self.stores.is_some() { + self + } else { + let num_clients = self.clients.len(); + self.stores((0..num_clients).map(|_| create_test_store()).collect()) + } + } + + /// Specifies custom MockEpochManager for each client. This allows us to + /// construct [`TestEnv`] with a custom implementation. + /// + /// The vector must have the same number of elements as they are clients + /// (one by default). If that does not hold, [`Self::build`] method will + /// panic. 
+ pub fn mock_epoch_managers(mut self, epoch_managers: Vec>) -> Self { + assert_eq!(epoch_managers.len(), self.clients.len()); + assert!(self.epoch_managers.is_none(), "Cannot override twice"); + assert!( + self.num_shards.is_none(), + "Cannot set both num_shards and epoch_managers at the same time" + ); + assert!( + self.shard_trackers.is_none(), + "Cannot override epoch_managers after shard_trackers" + ); + assert!(self.runtimes.is_none(), "Cannot override epoch_managers after runtimes"); + self.epoch_managers = + Some(epoch_managers.into_iter().map(|epoch_manager| epoch_manager.into()).collect()); + self + } + + /// Specifies custom EpochManagerHandle for each client. This allows us to + /// construct [`TestEnv`] with a custom implementation. + /// + /// The vector must have the same number of elements as they are clients + /// (one by default). If that does not hold, [`Self::build`] method will + /// panic. + pub fn epoch_managers(mut self, epoch_managers: Vec>) -> Self { + assert_eq!(epoch_managers.len(), self.clients.len()); + assert!(self.epoch_managers.is_none(), "Cannot override twice"); + assert!( + self.num_shards.is_none(), + "Cannot set both num_shards and epoch_managers at the same time" + ); + assert!( + self.shard_trackers.is_none(), + "Cannot override epoch_managers after shard_trackers" + ); + assert!(self.runtimes.is_none(), "Cannot override epoch_managers after runtimes"); + self.epoch_managers = + Some(epoch_managers.into_iter().map(|epoch_manager| epoch_manager.into()).collect()); + self + } + + /// Constructs real EpochManager implementations for each instance. 
+ pub fn real_epoch_managers(self, genesis_config: &GenesisConfig) -> Self { + assert!( + self.num_shards.is_none(), + "Cannot set both num_shards and epoch_managers at the same time" + ); + let ret = self.ensure_stores(); + let epoch_managers = (0..ret.clients.len()) + .map(|i| { + EpochManager::new_arc_handle( + ret.stores.as_ref().unwrap()[i].clone(), + genesis_config, + ) + }) + .collect(); + ret.epoch_managers(epoch_managers) + } + + /// Internal impl to make sure EpochManagers are initialized. + fn ensure_epoch_managers(self) -> Self { + let mut ret = self.ensure_stores(); + if ret.epoch_managers.is_some() { + ret + } else { + let epoch_managers: Vec = (0..ret.clients.len()) + .map(|i| { + let vs = ValidatorSchedule::new_with_shards(ret.num_shards.unwrap_or(1)) + .block_producers_per_epoch(vec![ret.validators.clone()]); + MockEpochManager::new_with_validators( + ret.stores.as_ref().unwrap()[i].clone(), + vs, + ret.chain_genesis.epoch_length, + ) + .into() + }) + .collect(); + assert!( + ret.shard_trackers.is_none(), + "Cannot override shard_trackers without overriding epoch_managers" + ); + assert!( + ret.runtimes.is_none(), + "Cannot override runtimes without overriding epoch_managers" + ); + ret.epoch_managers = Some(epoch_managers); + ret + } + } + + /// Visible for extension methods in integration-tests. 
+ pub fn internal_ensure_epoch_managers_for_nightshade_runtime( + self, + ) -> (Self, Vec, Vec, Vec>) { + let builder = self.ensure_epoch_managers(); + let default_home_dirs = + (0..builder.clients.len()).map(|_| PathBuf::from("../../../..")).collect_vec(); + let home_dirs = builder.home_dirs.clone().unwrap_or(default_home_dirs); + let stores = builder.stores.clone().unwrap(); + let epoch_managers = builder + .epoch_managers + .clone() + .unwrap() + .into_iter() + .map(|kind| match kind { + EpochManagerKind::Mock(_) => { + panic!("NightshadeRuntime can only be instantiated with EpochManagerHandle") + } + EpochManagerKind::Handle(handle) => handle, + }) + .collect(); + (builder, home_dirs, stores, epoch_managers) + } + + /// Specifies custom ShardTracker for each client. This allows us to + /// construct [`TestEnv`] with a custom implementation. + pub fn shard_trackers(mut self, shard_trackers: Vec) -> Self { + assert_eq!(shard_trackers.len(), self.clients.len()); + assert!(self.shard_trackers.is_none(), "Cannot override twice"); + self.shard_trackers = Some(shard_trackers); + self + } + + /// Constructs ShardTracker that tracks all shards for each instance. + /// + /// Note that in order to track *NO* shards, just don't override shard_trackers. + pub fn track_all_shards(self) -> Self { + let ret = self.ensure_epoch_managers(); + let shard_trackers = ret + .epoch_managers + .as_ref() + .unwrap() + .iter() + .map(|epoch_manager| { + ShardTracker::new(TrackedConfig::AllShards, epoch_manager.clone().into_adapter()) + }) + .collect(); + ret.shard_trackers(shard_trackers) + } + + /// Internal impl to make sure ShardTrackers are initialized. 
+ fn ensure_shard_trackers(self) -> Self { + let ret = self.ensure_epoch_managers(); + if ret.shard_trackers.is_some() { + ret + } else { + let shard_trackers = ret + .epoch_managers + .as_ref() + .unwrap() + .iter() + .map(|epoch_manager| { + ShardTracker::new( + TrackedConfig::new_empty(), + epoch_manager.clone().into_adapter(), + ) + }) + .collect(); + ret.shard_trackers(shard_trackers) + } + } + + /// Specifies custom RuntimeAdapter for each client. This allows us to + /// construct [`TestEnv`] with a custom implementation. + pub fn runtimes(mut self, runtimes: Vec>) -> Self { + assert_eq!(runtimes.len(), self.clients.len()); + assert!(self.runtimes.is_none(), "Cannot override twice"); + self.runtimes = Some(runtimes); + self + } + + /// Internal impl to make sure runtimes are initialized. + fn ensure_runtimes(self) -> Self { + let ret = self.ensure_epoch_managers(); + if ret.runtimes.is_some() { + ret + } else { + let runtimes = (0..ret.clients.len()) + .map(|i| { + let epoch_manager = match &ret.epoch_managers.as_ref().unwrap()[i] { + EpochManagerKind::Mock(mock) => mock.as_ref(), + EpochManagerKind::Handle(_) => { + panic!( + "Can only default construct KeyValueRuntime with MockEpochManager" + ) + } + }; + KeyValueRuntime::new(ret.stores.as_ref().unwrap()[i].clone(), epoch_manager) + as Arc + }) + .collect(); + ret.runtimes(runtimes) + } + } + + /// Specifies custom network adaptors for each client. + /// + /// The vector must have the same number of elements as they are clients + /// (one by default). If that does not hold, [`Self::build`] method will + /// panic. + pub fn network_adapters(mut self, adapters: Vec>) -> Self { + self.network_adapters = Some(adapters); + self + } + + /// Internal impl to make sure network adapters are initialized. 
+ fn ensure_network_adapters(self) -> Self { + if self.network_adapters.is_some() { + self + } else { + let num_clients = self.clients.len(); + self.network_adapters((0..num_clients).map(|_| Arc::new(Default::default())).collect()) + } + } + + pub fn num_shards(mut self, num_shards: NumShards) -> Self { + assert!( + self.epoch_managers.is_none(), + "Cannot set both num_shards and epoch_managers at the same time" + ); + self.num_shards = Some(num_shards); + self + } + + pub fn archive(mut self, archive: bool) -> Self { + self.archive = archive; + self + } + + pub fn save_trie_changes(mut self, save_trie_changes: bool) -> Self { + self.save_trie_changes = save_trie_changes; + self + } + + /// Constructs new `TestEnv` structure. + /// + /// If no clients were configured (either through count or vector) one + /// client is created. Similarly, if no validator seats were configured, + /// one seat is configured. + /// + /// Panics if `runtime_adapters` or `network_adapters` methods were used and + /// the length of the vectors passed to them did not equal number of + /// configured clients. 
+ pub fn build(self) -> TestEnv { + self.ensure_shard_trackers().ensure_runtimes().ensure_network_adapters().build_impl() + } + + fn build_impl(self) -> TestEnv { + let chain_genesis = self.chain_genesis; + let clients = self.clients.clone(); + let num_clients = clients.len(); + let validators = self.validators; + let num_validators = validators.len(); + let seeds = self.seeds; + let epoch_managers = self.epoch_managers.unwrap(); + let shard_trackers = self.shard_trackers.unwrap(); + let runtimes = self.runtimes.unwrap(); + let network_adapters = self.network_adapters.unwrap(); + let client_adapters = (0..num_clients) + .map(|_| Arc::new(MockClientAdapterForShardsManager::default())) + .collect::>(); + let shards_manager_adapters = (0..num_clients) + .map(|i| { + let epoch_manager = epoch_managers[i].clone(); + let shard_tracker = shard_trackers[i].clone(); + let runtime = runtimes[i].clone(); + let network_adapter = network_adapters[i].clone(); + let client_adapter = client_adapters[i].clone(); + setup_synchronous_shards_manager( + Some(clients[i].clone()), + client_adapter.as_sender(), + network_adapter.into(), + epoch_manager.into_adapter(), + shard_tracker, + runtime, + &chain_genesis, + ) + }) + .collect::>(); + let clients = (0..num_clients) + .map(|i| { + let account_id = clients[i].clone(); + let network_adapter = network_adapters[i].clone(); + let shards_manager_adapter = shards_manager_adapters[i].clone(); + let epoch_manager = epoch_managers[i].clone(); + let shard_tracker = shard_trackers[i].clone(); + let runtime = runtimes[i].clone(); + let rng_seed = match seeds.get(&account_id) { + Some(seed) => *seed, + None => TEST_SEED, + }; + let make_state_snapshot_callback : Option = if self.add_state_snapshots { + let runtime = runtime.clone(); + let snapshot : MakeSnapshotCallback = Arc::new(move |prev_block_hash, shard_uids, block| { + tracing::info!(target: "state_snapshot", ?prev_block_hash, "make_snapshot_callback"); + 
runtime.get_tries().make_state_snapshot(&prev_block_hash, &shard_uids, &block).unwrap(); + }); + Some(snapshot) + } else { + None + }; + setup_client_with_runtime( + u64::try_from(num_validators).unwrap(), + Some(account_id), + false, + network_adapter.into(), + shards_manager_adapter, + chain_genesis.clone(), + epoch_manager.into_adapter(), + shard_tracker, + runtime, + rng_seed, + self.archive, + self.save_trie_changes, + make_state_snapshot_callback, + ) + }) + .collect(); + + TestEnv { + chain_genesis, + validators, + network_adapters, + client_adapters, + shards_manager_adapters, + clients, + account_to_client_index: self + .clients + .into_iter() + .enumerate() + .map(|(index, client)| (client, index)) + .collect(), + paused_blocks: Default::default(), + seeds, + archive: self.archive, + save_trie_changes: self.save_trie_changes, + } + } + + fn make_accounts(count: usize) -> Vec { + (0..count).map(|i| format!("test{}", i).parse().unwrap()).collect() + } + + pub fn use_state_snapshots(mut self) -> Self { + self.add_state_snapshots = true; + self + } +} diff --git a/integration-tests/src/tests/client/benchmarks.rs b/integration-tests/src/tests/client/benchmarks.rs index 80113e435b3..d3d0f096111 100644 --- a/integration-tests/src/tests/client/benchmarks.rs +++ b/integration-tests/src/tests/client/benchmarks.rs @@ -4,7 +4,6 @@ //! we want to test here are pretty heavy and its enough to run them once and //! note the wall-clock time. -use crate::tests::client::utils::TestEnvNightshadeSetupExt; use borsh::BorshSerialize; use near_chain::ChainGenesis; use near_chain_configs::Genesis; @@ -13,6 +12,7 @@ use near_client::ProcessTxResponse; use near_crypto::{InMemorySigner, KeyType}; use near_primitives::transaction::{Action, DeployContractAction, SignedTransaction}; use nearcore::config::GenesisExt; +use nearcore::test_utils::TestEnvNightshadeSetupExt; /// How long does it take to produce a large chunk? 
/// diff --git a/integration-tests/src/tests/client/challenges.rs b/integration-tests/src/tests/client/challenges.rs index 5493ea85fb5..d1638ed66ea 100644 --- a/integration-tests/src/tests/client/challenges.rs +++ b/integration-tests/src/tests/client/challenges.rs @@ -31,10 +31,9 @@ use near_primitives::validator_signer::ValidatorSigner; use near_primitives::version::PROTOCOL_VERSION; use near_store::Trie; use nearcore::config::{GenesisExt, FISHERMEN_THRESHOLD}; +use nearcore::test_utils::TestEnvNightshadeSetupExt; use std::sync::Arc; -use crate::tests::client::utils::TestEnvNightshadeSetupExt; - /// Check that block containing a challenge is rejected. /// TODO (#2445): Enable challenges when they are working correctly. #[test] diff --git a/integration-tests/src/tests/client/cold_storage.rs b/integration-tests/src/tests/client/cold_storage.rs index c8733f57f72..e62538dd0b2 100644 --- a/integration-tests/src/tests/client/cold_storage.rs +++ b/integration-tests/src/tests/client/cold_storage.rs @@ -21,13 +21,12 @@ use near_store::metadata::DB_VERSION; use near_store::test_utils::create_test_node_storage_with_cold; use near_store::{DBCol, Store, COLD_HEAD_KEY, HEAD_KEY}; use nearcore::config::GenesisExt; +use nearcore::test_utils::TestEnvNightshadeSetupExt; use nearcore::{cold_storage::spawn_cold_store_loop, NearConfig}; use std::collections::HashSet; use std::str::FromStr; use strum::IntoEnumIterator; -use super::utils::TestEnvNightshadeSetupExt; - fn check_key(first_store: &Store, second_store: &Store, col: DBCol, key: &[u8]) { let pretty_key = near_fmt::StorageKey(key); tracing::debug!("Checking {:?} {:?}", col, pretty_key); diff --git a/integration-tests/src/tests/client/epoch_sync.rs b/integration-tests/src/tests/client/epoch_sync.rs index 2d684b041b9..d2d62f1f615 100644 --- a/integration-tests/src/tests/client/epoch_sync.rs +++ b/integration-tests/src/tests/client/epoch_sync.rs @@ -1,4 +1,3 @@ -use super::utils::TestEnvNightshadeSetupExt; use 
near_chain::{ChainGenesis, Provenance}; use near_chain_configs::Genesis; use near_client::test_utils::TestEnv; @@ -11,6 +10,7 @@ use near_primitives::transaction::{ use near_primitives_core::hash::CryptoHash; use near_primitives_core::types::BlockHeight; use nearcore::config::GenesisExt; +use nearcore::test_utils::TestEnvNightshadeSetupExt; fn generate_transactions(last_hash: &CryptoHash, h: BlockHeight) -> Vec { let mut txs = vec![]; diff --git a/integration-tests/src/tests/client/features/access_key_nonce_for_implicit_accounts.rs b/integration-tests/src/tests/client/features/access_key_nonce_for_implicit_accounts.rs index eec91272d3b..2e2db28d05e 100644 --- a/integration-tests/src/tests/client/features/access_key_nonce_for_implicit_accounts.rs +++ b/integration-tests/src/tests/client/features/access_key_nonce_for_implicit_accounts.rs @@ -1,5 +1,4 @@ use crate::tests::client::process_blocks::produce_blocks_from_height; -use crate::tests::client::utils::TestEnvNightshadeSetupExt; use assert_matches::assert_matches; use near_async::messaging::CanSend; use near_chain::chain::NUM_ORPHAN_ANCESTORS_CHECK; @@ -12,7 +11,6 @@ use near_crypto::{InMemorySigner, KeyType, Signer}; use near_network::shards_manager::ShardsManagerRequestFromNetwork; use near_network::types::{NetworkRequests, PeerManagerMessageRequest}; use near_o11y::testonly::init_test_logger; - use near_primitives::account::AccessKey; use near_primitives::errors::InvalidTxError; use near_primitives::runtime::config_store::RuntimeConfigStore; @@ -23,6 +21,7 @@ use near_primitives::types::{AccountId, BlockHeight}; use near_primitives::version::{ProtocolFeature, ProtocolVersion}; use near_primitives::views::FinalExecutionStatus; use nearcore::config::GenesisExt; +use nearcore::test_utils::TestEnvNightshadeSetupExt; use nearcore::NEAR_BASE; use rand::seq::SliceRandom; use rand::{thread_rng, Rng}; diff --git a/integration-tests/src/tests/client/features/account_id_in_function_call_permission.rs 
b/integration-tests/src/tests/client/features/account_id_in_function_call_permission.rs index 564095170ea..1b23ba06933 100644 --- a/integration-tests/src/tests/client/features/account_id_in_function_call_permission.rs +++ b/integration-tests/src/tests/client/features/account_id_in_function_call_permission.rs @@ -9,8 +9,7 @@ use near_primitives::hash::CryptoHash; use near_primitives::runtime::config_store::RuntimeConfigStore; use near_primitives::transaction::{Action, AddKeyAction, Transaction}; use nearcore::config::GenesisExt; - -use crate::tests::client::utils::TestEnvNightshadeSetupExt; +use nearcore::test_utils::TestEnvNightshadeSetupExt; #[test] fn test_account_id_in_function_call_permission_upgrade() { diff --git a/integration-tests/src/tests/client/features/adversarial_behaviors.rs b/integration-tests/src/tests/client/features/adversarial_behaviors.rs index ad333acc163..4468efd71dc 100644 --- a/integration-tests/src/tests/client/features/adversarial_behaviors.rs +++ b/integration-tests/src/tests/client/features/adversarial_behaviors.rs @@ -14,10 +14,9 @@ use near_primitives::{ types::{AccountId, EpochId, ShardId}, }; use nearcore::config::GenesisExt; +use nearcore::test_utils::TestEnvNightshadeSetupExt; use tracing::log::debug; -use crate::tests::client::utils::TestEnvNightshadeSetupExt; - struct AdversarialBehaviorTestData { num_validators: usize, env: TestEnv, diff --git a/integration-tests/src/tests/client/features/chunk_nodes_cache.rs b/integration-tests/src/tests/client/features/chunk_nodes_cache.rs index bd8ee2f8975..ea42af5eaaf 100644 --- a/integration-tests/src/tests/client/features/chunk_nodes_cache.rs +++ b/integration-tests/src/tests/client/features/chunk_nodes_cache.rs @@ -1,5 +1,4 @@ use crate::tests::client::process_blocks::{deploy_test_contract, set_block_protocol_version}; -use crate::tests::client::utils::TestEnvNightshadeSetupExt; use assert_matches::assert_matches; use near_chain::{ChainGenesis, Provenance}; use 
near_chain_configs::Genesis; @@ -17,6 +16,7 @@ use near_primitives::types::{BlockHeightDelta, Gas, TrieNodesCount}; use near_primitives::version::{ProtocolFeature, ProtocolVersion}; use near_primitives::views::FinalExecutionStatus; use nearcore::config::GenesisExt; +use nearcore::test_utils::TestEnvNightshadeSetupExt; fn process_transaction( env: &mut TestEnv, diff --git a/integration-tests/src/tests/client/features/delegate_action.rs b/integration-tests/src/tests/client/features/delegate_action.rs index 44e5e6653e2..abb5d73feea 100644 --- a/integration-tests/src/tests/client/features/delegate_action.rs +++ b/integration-tests/src/tests/client/features/delegate_action.rs @@ -4,7 +4,6 @@ //! This is the module for its integration tests. use crate::node::{Node, RuntimeNode}; -use crate::tests::client::utils::TestEnvNightshadeSetupExt; use crate::tests::standard_cases::fee_helper; use near_chain::ChainGenesis; use near_chain_configs::Genesis; @@ -28,6 +27,7 @@ use near_primitives::views::{ }; use near_test_contracts::{ft_contract, smallest_rs_contract}; use nearcore::config::GenesisExt; +use nearcore::test_utils::TestEnvNightshadeSetupExt; use nearcore::NEAR_BASE; use testlib::runtime_utils::{ add_account_with_access_key, add_contract, add_test_contract, alice_account, bob_account, diff --git a/integration-tests/src/tests/client/features/fix_contract_loading_cost.rs b/integration-tests/src/tests/client/features/fix_contract_loading_cost.rs index b1b77120fab..f860418c264 100644 --- a/integration-tests/src/tests/client/features/fix_contract_loading_cost.rs +++ b/integration-tests/src/tests/client/features/fix_contract_loading_cost.rs @@ -1,5 +1,4 @@ use super::super::process_blocks::deploy_test_contract; -use crate::tests::client::utils::TestEnvNightshadeSetupExt; use assert_matches::assert_matches; use near_chain::ChainGenesis; use near_chain_configs::Genesis; @@ -7,6 +6,7 @@ use near_client::test_utils::TestEnv; use near_primitives::types::{AccountId, BlockHeight}; 
use near_primitives::views::FinalExecutionStatus; use nearcore::config::GenesisExt; +use nearcore::test_utils::TestEnvNightshadeSetupExt; /// Create a `TestEnv` with an account and a contract deployed to that account. fn prepare_env_with_contract( diff --git a/integration-tests/src/tests/client/features/fix_storage_usage.rs b/integration-tests/src/tests/client/features/fix_storage_usage.rs index 0315be791c6..caa67405b4a 100644 --- a/integration-tests/src/tests/client/features/fix_storage_usage.rs +++ b/integration-tests/src/tests/client/features/fix_storage_usage.rs @@ -7,9 +7,9 @@ use near_primitives::version::ProtocolFeature; use near_primitives::{trie_key::TrieKey, types::AccountId}; use near_store::{ShardUId, TrieUpdate}; use nearcore::config::GenesisExt; +use nearcore::test_utils::TestEnvNightshadeSetupExt; use crate::tests::client::process_blocks::set_block_protocol_version; -use crate::tests::client::utils::TestEnvNightshadeSetupExt; fn process_blocks_with_storage_usage_fix( chain_id: String, diff --git a/integration-tests/src/tests/client/features/flat_storage.rs b/integration-tests/src/tests/client/features/flat_storage.rs index 7fa1cffb9d1..65ad4f7925f 100644 --- a/integration-tests/src/tests/client/features/flat_storage.rs +++ b/integration-tests/src/tests/client/features/flat_storage.rs @@ -1,5 +1,4 @@ use crate::tests::client::process_blocks::deploy_test_contract_with_protocol_version; -use crate::tests::client::utils::TestEnvNightshadeSetupExt; use near_chain::ChainGenesis; use near_chain_configs::Genesis; use near_client::test_utils::TestEnv; @@ -12,6 +11,7 @@ use near_primitives_core::config::ExtCosts; use near_primitives_core::hash::CryptoHash; use near_primitives_core::types::Gas; use nearcore::config::GenesisExt; +use nearcore::test_utils::TestEnvNightshadeSetupExt; /// Check that after flat storage upgrade: /// - value read from contract is the same; diff --git a/integration-tests/src/tests/client/features/increase_deployment_cost.rs 
b/integration-tests/src/tests/client/features/increase_deployment_cost.rs index 1eeedf65005..b8b3eae89b7 100644 --- a/integration-tests/src/tests/client/features/increase_deployment_cost.rs +++ b/integration-tests/src/tests/client/features/increase_deployment_cost.rs @@ -10,8 +10,7 @@ use near_primitives::views::FinalExecutionStatus; use near_primitives_core::version::PROTOCOL_VERSION; use near_vm_runner::VMKind; use nearcore::config::GenesisExt; - -use crate::tests::client::utils::TestEnvNightshadeSetupExt; +use nearcore::test_utils::TestEnvNightshadeSetupExt; /// Tests if the cost of deployment is higher after the protocol update 53 #[test] diff --git a/integration-tests/src/tests/client/features/increase_storage_compute_cost.rs b/integration-tests/src/tests/client/features/increase_storage_compute_cost.rs index 922dbcbc757..a13138776e9 100644 --- a/integration-tests/src/tests/client/features/increase_storage_compute_cost.rs +++ b/integration-tests/src/tests/client/features/increase_storage_compute_cost.rs @@ -24,10 +24,9 @@ use near_primitives::transaction::{ use near_primitives::types::AccountId; use near_primitives::version::ProtocolFeature; use nearcore::config::GenesisExt; +use nearcore::test_utils::TestEnvNightshadeSetupExt; use node_runtime::config::RuntimeConfig; -use crate::tests::client::utils::TestEnvNightshadeSetupExt; - /// Tracked in /~https://github.com/near/nearcore/issues/8938 const INCREASED_STORAGE_COSTS_PROTOCOL_VERSION: u32 = 61; diff --git a/integration-tests/src/tests/client/features/limit_contract_functions_number.rs b/integration-tests/src/tests/client/features/limit_contract_functions_number.rs index 29a7ad5dacd..de0794e3e18 100644 --- a/integration-tests/src/tests/client/features/limit_contract_functions_number.rs +++ b/integration-tests/src/tests/client/features/limit_contract_functions_number.rs @@ -1,5 +1,4 @@ use crate::tests::client::process_blocks::deploy_test_contract; -use crate::tests::client::utils::TestEnvNightshadeSetupExt; 
use assert_matches::assert_matches; use near_chain::ChainGenesis; use near_chain_configs::Genesis; @@ -11,6 +10,7 @@ use near_primitives::runtime::config_store::RuntimeConfigStore; use near_primitives::version::ProtocolFeature; use near_primitives::views::FinalExecutionStatus; use nearcore::config::GenesisExt; +use nearcore::test_utils::TestEnvNightshadeSetupExt; fn verify_contract_limits_upgrade( feature: ProtocolFeature, diff --git a/integration-tests/src/tests/client/features/lower_storage_key_limit.rs b/integration-tests/src/tests/client/features/lower_storage_key_limit.rs index 47e2dcdafd0..789dad17bab 100644 --- a/integration-tests/src/tests/client/features/lower_storage_key_limit.rs +++ b/integration-tests/src/tests/client/features/lower_storage_key_limit.rs @@ -12,11 +12,11 @@ use near_primitives::transaction::{Action, FunctionCallAction, Transaction}; use near_primitives::types::BlockHeight; use near_primitives::views::FinalExecutionStatus; use nearcore::config::GenesisExt; +use nearcore::test_utils::TestEnvNightshadeSetupExt; use crate::tests::client::process_blocks::{ deploy_test_contract_with_protocol_version, produce_blocks_from_height_with_protocol_version, }; -use crate::tests::client::utils::TestEnvNightshadeSetupExt; /// Check correctness of the protocol upgrade and ability to write 2 KB keys. 
#[test] diff --git a/integration-tests/src/tests/client/features/nearvm.rs b/integration-tests/src/tests/client/features/nearvm.rs index 47703eb403f..1c242cc471d 100644 --- a/integration-tests/src/tests/client/features/nearvm.rs +++ b/integration-tests/src/tests/client/features/nearvm.rs @@ -1,7 +1,6 @@ #![cfg_attr(not(feature = "nightly"), allow(unused_imports))] use crate::tests::client::process_blocks::deploy_test_contract; -use crate::tests::client::utils::TestEnvNightshadeSetupExt; use near_chain::ChainGenesis; use near_chain_configs::Genesis; use near_client::test_utils::TestEnv; @@ -11,6 +10,7 @@ use near_primitives::hash::CryptoHash; use near_primitives::runtime::config_store::RuntimeConfigStore; use near_primitives::transaction::{Action, FunctionCallAction, Transaction}; use nearcore::config::GenesisExt; +use nearcore::test_utils::TestEnvNightshadeSetupExt; #[cfg_attr(all(target_arch = "aarch64", target_vendor = "apple"), ignore)] #[test] diff --git a/integration-tests/src/tests/client/features/restore_receipts_after_fix_apply_chunks.rs b/integration-tests/src/tests/client/features/restore_receipts_after_fix_apply_chunks.rs index 58a23460e97..444162f8d07 100644 --- a/integration-tests/src/tests/client/features/restore_receipts_after_fix_apply_chunks.rs +++ b/integration-tests/src/tests/client/features/restore_receipts_after_fix_apply_chunks.rs @@ -1,5 +1,4 @@ use crate::tests::client::process_blocks::set_block_protocol_version; -use crate::tests::client::utils::TestEnvNightshadeSetupExt; use near_chain::{ChainGenesis, Provenance}; use near_chain_configs::Genesis; use near_client::test_utils::TestEnv; @@ -10,6 +9,7 @@ use near_primitives::types::BlockHeight; use near_primitives::version::ProtocolFeature; use nearcore::config::GenesisExt; use nearcore::migrations::load_migration_data; +use nearcore::test_utils::TestEnvNightshadeSetupExt; use std::collections::HashSet; const EPOCH_LENGTH: u64 = 5; diff --git 
a/integration-tests/src/tests/client/features/zero_balance_account.rs b/integration-tests/src/tests/client/features/zero_balance_account.rs index 7721d7de046..7f82b703c77 100644 --- a/integration-tests/src/tests/client/features/zero_balance_account.rs +++ b/integration-tests/src/tests/client/features/zero_balance_account.rs @@ -19,11 +19,9 @@ use near_primitives::transaction::{Action, AddKeyAction, DeleteKeyAction, Signed use near_primitives::version::{ProtocolFeature, PROTOCOL_VERSION}; use near_primitives::views::{FinalExecutionStatus, QueryRequest, QueryResponseKind}; use nearcore::config::GenesisExt; - +use nearcore::test_utils::TestEnvNightshadeSetupExt; use node_runtime::ZERO_BALANCE_ACCOUNT_STORAGE_LIMIT; -use crate::tests::client::utils::TestEnvNightshadeSetupExt; - /// Assert that an account exists and has zero balance fn assert_zero_balance_account(env: &mut TestEnv, account_id: &AccountId) { let head = env.clients[0].chain.head().unwrap(); diff --git a/integration-tests/src/tests/client/flat_storage.rs b/integration-tests/src/tests/client/flat_storage.rs index fa96cadc4cc..eef269ce94b 100644 --- a/integration-tests/src/tests/client/flat_storage.rs +++ b/integration-tests/src/tests/client/flat_storage.rs @@ -20,12 +20,11 @@ use near_store::test_utils::create_test_store; use near_store::{KeyLookupMode, Store, TrieTraversalItem}; use near_vm_runner::logic::TrieNodesCount; use nearcore::config::GenesisExt; +use nearcore::test_utils::TestEnvNightshadeSetupExt; use std::str::FromStr; use std::thread; use std::time::Duration; -use super::utils::TestEnvNightshadeSetupExt; - /// Height on which we start flat storage background creation. 
const START_HEIGHT: BlockHeight = 7; diff --git a/integration-tests/src/tests/client/mod.rs b/integration-tests/src/tests/client/mod.rs index c97f2c15c0a..6472636cc67 100644 --- a/integration-tests/src/tests/client/mod.rs +++ b/integration-tests/src/tests/client/mod.rs @@ -15,4 +15,3 @@ mod state_dump; mod state_snapshot; mod sync_state_nodes; mod undo_block; -mod utils; diff --git a/integration-tests/src/tests/client/process_blocks.rs b/integration-tests/src/tests/client/process_blocks.rs index c3e143cf774..da1eef236bf 100644 --- a/integration-tests/src/tests/client/process_blocks.rs +++ b/integration-tests/src/tests/client/process_blocks.rs @@ -79,12 +79,11 @@ use near_store::test_utils::create_test_store; use near_store::NodeStorage; use near_store::{get, DBCol, TrieChanges}; use nearcore::config::{GenesisExt, TESTING_INIT_BALANCE, TESTING_INIT_STAKE}; +use nearcore::test_utils::TestEnvNightshadeSetupExt; use nearcore::NEAR_BASE; use rand::prelude::StdRng; use rand::{Rng, SeedableRng}; -use crate::tests::client::utils::TestEnvNightshadeSetupExt; - pub fn set_block_protocol_version( block: &mut Block, block_producer: AccountId, diff --git a/integration-tests/src/tests/client/runtimes.rs b/integration-tests/src/tests/client/runtimes.rs index 3e3211d0917..a8bb9c1d5b9 100644 --- a/integration-tests/src/tests/client/runtimes.rs +++ b/integration-tests/src/tests/client/runtimes.rs @@ -13,11 +13,10 @@ use near_primitives::network::PeerId; use near_primitives::test_utils::create_test_signer; use near_primitives::validator_signer::InMemoryValidatorSigner; use nearcore::config::GenesisExt; +use nearcore::test_utils::TestEnvNightshadeSetupExt; use std::collections::HashMap; use std::sync::Arc; -use crate::tests::client::utils::TestEnvNightshadeSetupExt; - #[test] fn test_pending_approvals() { let genesis = Genesis::test(vec!["test0".parse().unwrap(), "test1".parse().unwrap()], 1); diff --git a/integration-tests/src/tests/client/sandbox.rs 
b/integration-tests/src/tests/client/sandbox.rs index 1c80dfd2bc5..73b1bfd66fa 100644 --- a/integration-tests/src/tests/client/sandbox.rs +++ b/integration-tests/src/tests/client/sandbox.rs @@ -1,4 +1,3 @@ -use super::utils::TestEnvNightshadeSetupExt; use near_chain::{ChainGenesis, Provenance}; use near_chain_configs::Genesis; use near_client::test_utils::TestEnv; @@ -12,6 +11,7 @@ use near_primitives::transaction::{ }; use near_primitives::types::{AccountId, BlockHeight, Nonce}; use nearcore::config::GenesisExt; +use nearcore::test_utils::TestEnvNightshadeSetupExt; fn test_setup() -> (TestEnv, InMemorySigner) { let epoch_length = 5; diff --git a/integration-tests/src/tests/client/sharding_upgrade.rs b/integration-tests/src/tests/client/sharding_upgrade.rs index bc254f98c7b..8d31300ec97 100644 --- a/integration-tests/src/tests/client/sharding_upgrade.rs +++ b/integration-tests/src/tests/client/sharding_upgrade.rs @@ -1,20 +1,17 @@ -use borsh::BorshSerialize; -use near_client::{Client, ProcessTxResponse}; -use near_primitives::epoch_manager::{AllEpochConfig, EpochConfig}; -use near_primitives_core::num_rational::Rational32; -use rand::rngs::StdRng; - use crate::tests::client::process_blocks::set_block_protocol_version; use assert_matches::assert_matches; +use borsh::BorshSerialize; use near_chain::near_chain_primitives::Error; use near_chain::test_utils::wait_for_all_blocks_in_processing; use near_chain::{ChainGenesis, ChainStoreAccess, Provenance}; use near_chain_configs::Genesis; use near_client::test_utils::{run_catchup, TestEnv}; +use near_client::{Client, ProcessTxResponse}; use near_crypto::{InMemorySigner, KeyType, Signer}; use near_o11y::testonly::init_test_logger; use near_primitives::account::id::AccountId; use near_primitives::block::{Block, Tip}; +use near_primitives::epoch_manager::{AllEpochConfig, EpochConfig}; use near_primitives::hash::CryptoHash; use near_primitives::serialize::to_base64; use near_primitives::shard_layout::{account_id_to_shard_id, 
account_id_to_shard_uid}; @@ -27,17 +24,18 @@ use near_primitives::version::ProtocolFeature; #[cfg(not(feature = "protocol_feature_simple_nightshade_v2"))] use near_primitives::version::PROTOCOL_VERSION; use near_primitives::views::{ExecutionStatusView, FinalExecutionStatus, QueryRequest}; +use near_primitives_core::num_rational::Rational32; use near_store::test_utils::{gen_account, gen_unique_accounts}; use nearcore::config::GenesisExt; +use nearcore::test_utils::TestEnvNightshadeSetupExt; use nearcore::NEAR_BASE; +use rand::rngs::StdRng; use rand::seq::{IteratorRandom, SliceRandom}; use rand::{Rng, SeedableRng}; use std::collections::{BTreeMap, HashMap, HashSet}; use std::sync::Arc; use tracing::debug; -use super::utils::TestEnvNightshadeSetupExt; - const SIMPLE_NIGHTSHADE_PROTOCOL_VERSION: ProtocolVersion = ProtocolFeature::SimpleNightshade.protocol_version(); diff --git a/integration-tests/src/tests/client/state_dump.rs b/integration-tests/src/tests/client/state_dump.rs index 53e1a3e55b7..5b102fcb95c 100644 --- a/integration-tests/src/tests/client/state_dump.rs +++ b/integration-tests/src/tests/client/state_dump.rs @@ -1,4 +1,3 @@ -use super::utils::TestEnvNightshadeSetupExt; use assert_matches::assert_matches; use borsh::BorshSerialize; use near_chain::near_chain_primitives::error::QueryError; @@ -25,6 +24,7 @@ use near_store::DBCol; use near_store::Store; use nearcore::config::GenesisExt; use nearcore::state_sync::spawn_state_sync_dump; +use nearcore::test_utils::TestEnvNightshadeSetupExt; use nearcore::NEAR_BASE; use std::ops::ControlFlow; use std::sync::Arc; diff --git a/integration-tests/src/tests/client/state_snapshot.rs b/integration-tests/src/tests/client/state_snapshot.rs index 8ab3b1b5665..94a1ed92a6a 100644 --- a/integration-tests/src/tests/client/state_snapshot.rs +++ b/integration-tests/src/tests/client/state_snapshot.rs @@ -15,11 +15,10 @@ use near_store::{ }; use near_store::{NodeStorage, Store}; use nearcore::config::GenesisExt; +use 
nearcore::test_utils::TestEnvNightshadeSetupExt; use nearcore::NEAR_BASE; use std::path::PathBuf; -use crate::tests::client::utils::TestEnvNightshadeSetupExt; - struct StateSnaptshotTestEnv { home_dir: PathBuf, hot_store_path: PathBuf, diff --git a/integration-tests/src/tests/client/sync_state_nodes.rs b/integration-tests/src/tests/client/sync_state_nodes.rs index a5f345f545b..39b73a27156 100644 --- a/integration-tests/src/tests/client/sync_state_nodes.rs +++ b/integration-tests/src/tests/client/sync_state_nodes.rs @@ -1,5 +1,4 @@ use crate::test_helpers::heavy_test; -use crate::tests::client::utils::TestEnvNightshadeSetupExt; use actix::{Actor, System}; use futures::{future, FutureExt}; use near_actix_test_utils::run_actix; @@ -21,6 +20,7 @@ use near_primitives::transaction::SignedTransaction; use near_primitives::utils::MaybeValidated; use near_primitives_core::types::ShardId; use near_store::DBCol; +use nearcore::test_utils::TestEnvNightshadeSetupExt; use nearcore::{config::GenesisExt, load_test_config, start_with_config}; use std::ops::ControlFlow; use std::sync::{Arc, RwLock}; diff --git a/integration-tests/src/tests/client/undo_block.rs b/integration-tests/src/tests/client/undo_block.rs index 5f6218dd69b..87ef286e99c 100644 --- a/integration-tests/src/tests/client/undo_block.rs +++ b/integration-tests/src/tests/client/undo_block.rs @@ -7,10 +7,9 @@ use near_store::test_utils::create_test_store; use near_store::Store; use near_undo_block::undo_block; use nearcore::config::GenesisExt; +use nearcore::test_utils::TestEnvNightshadeSetupExt; use std::sync::Arc; -use super::utils::TestEnvNightshadeSetupExt; - /// Setup environment with one Near client for testing. 
fn setup_env(genesis: &Genesis, store: Store) -> (TestEnv, Arc) { let chain_genesis = ChainGenesis::new(genesis); diff --git a/nearcore/src/lib.rs b/nearcore/src/lib.rs index ffdad6eb12d..f2045ad335f 100644 --- a/nearcore/src/lib.rs +++ b/nearcore/src/lib.rs @@ -45,6 +45,7 @@ mod metrics; pub mod migrations; mod runtime; pub mod state_sync; +pub mod test_utils; pub fn get_default_home() -> PathBuf { if let Ok(near_home) = std::env::var("NEAR_HOME") { diff --git a/integration-tests/src/tests/client/utils.rs b/nearcore/src/test_utils.rs similarity index 98% rename from integration-tests/src/tests/client/utils.rs rename to nearcore/src/test_utils.rs index f3693b49716..8023bfcc5dc 100644 --- a/integration-tests/src/tests/client/utils.rs +++ b/nearcore/src/test_utils.rs @@ -3,9 +3,10 @@ use near_chain_configs::Genesis; use near_client::test_utils::TestEnvBuilder; use near_primitives::runtime::config_store::RuntimeConfigStore; use near_store::genesis::initialize_genesis_state; -use nearcore::NightshadeRuntime; use std::sync::Arc; +use crate::NightshadeRuntime; + pub trait TestEnvNightshadeSetupExt { fn nightshade_runtimes(self, genesis: &Genesis) -> Self; fn nightshade_runtimes_with_runtime_config_store(