diff --git a/chain/chain/src/chain.rs b/chain/chain/src/chain.rs
index 9ef79c67b98..03d846747b8 100644
--- a/chain/chain/src/chain.rs
+++ b/chain/chain/src/chain.rs
@@ -3420,7 +3420,7 @@ impl Chain {
         let is_new_chunk = chunk_header.is_new_chunk(block_height);
 
         if let Some(result) = self.apply_chunk_results_cache.peek(&cached_shard_update_key) {
-            info!(target: "chain", ?shard_id, ?cached_shard_update_key, "Using cached ShardUpdate result");
+            debug!(target: "chain", ?shard_id, ?cached_shard_update_key, "Using cached ShardUpdate result");
             let result = result.clone();
             return Ok(Some((
                 shard_id,
@@ -3428,7 +3428,7 @@
                 Box::new(move |_| -> Result { Ok(result) }),
             )));
         }
-        info!(target: "chain", ?shard_id, ?cached_shard_update_key, "Creating ShardUpdate job");
+        debug!(target: "chain", ?shard_id, ?cached_shard_update_key, "Creating ShardUpdate job");
 
         let shard_update_reason = if is_new_chunk {
             // Validate new chunk and collect incoming receipts for it.
@@ -3616,7 +3616,9 @@ impl Chain {
             self.epoch_manager.is_next_block_epoch_start(&head.last_block_hash)?;
         let will_shard_layout_change =
             self.epoch_manager.will_shard_layout_change(&head.last_block_hash)?;
-        let protocol_version = self.epoch_manager.get_epoch_protocol_version(&head.epoch_id)?;
+        let next_block_epoch =
+            self.epoch_manager.get_epoch_id_from_prev_block(&head.last_block_hash)?;
+        let protocol_version = self.epoch_manager.get_epoch_protocol_version(&next_block_epoch)?;
 
         let tries = self.runtime_adapter.get_tries();
         let snapshot_config = tries.state_snapshot_config();
diff --git a/chain/jsonrpc/src/lib.rs b/chain/jsonrpc/src/lib.rs
index 46a78c60452..d9656221c7c 100644
--- a/chain/jsonrpc/src/lib.rs
+++ b/chain/jsonrpc/src/lib.rs
@@ -1,7 +1,7 @@
 #![doc = include_str!("../README.md")]
 use actix_cors::Cors;
-use actix_web::http::header;
+use actix_web::http::header::{self, ContentType};
 use actix_web::HttpRequest;
 use actix_web::{get, http, middleware, web, App, Error as HttpError, HttpResponse, HttpServer};
 pub use api::{RpcFrom, RpcInto, RpcRequest};
@@ -1496,7 +1496,9 @@ pub async fn prometheus_handler() -> Result {
     encoder.encode(&prometheus::gather(), &mut buffer).unwrap();
 
     match String::from_utf8(buffer) {
-        Ok(text) => Ok(HttpResponse::Ok().body(text)),
+        Ok(text) => Ok(HttpResponse::Ok()
+            .content_type(ContentType("text/plain".parse().unwrap()))
+            .body(text)),
         Err(_) => Ok(HttpResponse::ServiceUnavailable().finish()),
     }
 }
diff --git a/core/parameters/res/runtime_configs/129.yaml b/core/parameters/res/runtime_configs/129.yaml
index c60af6224e9..6be15ee4218 100644
--- a/core/parameters/res/runtime_configs/129.yaml
+++ b/core/parameters/res/runtime_configs/129.yaml
@@ -1 +1,26 @@
 fix_contract_loading_cost: { old: false, new: true }
+action_deploy_global_contract: {
+  old: {
+    send_sir: 999_999_999_999_999,
+    send_not_sir: 999_999_999_999_999,
+    execution: 999_999_999_999_999,
+  },
+  new: {
+    send_sir: 184_765_750_000,
+    send_not_sir: 184_765_750_000,
+    execution: 184_765_750_000,
+  }
+}
+action_deploy_global_contract_per_byte: {
+  old: {
+    send_sir: 999_999_999_999_999,
+    send_not_sir: 999_999_999_999_999,
+    execution: 999_999_999_999_999,
+  },
+  new: {
+    send_sir: 6_812_999,
+    send_not_sir: 6_812_999,
+    execution: 70_000_000,
+  }
+}
+global_contract_storage_amount_per_byte: { old: 999_999_999_999_999_999_999_999_999, new: 100_000_000_000_000_000_000 }
\ No newline at end of file
diff --git a/core/parameters/res/runtime_configs/parameters.snap b/core/parameters/res/runtime_configs/parameters.snap
index 09375bd925f..84170eb02ee 100644
--- a/core/parameters/res/runtime_configs/parameters.snap
+++ b/core/parameters/res/runtime_configs/parameters.snap
@@ -223,3 +223,12 @@ max_shard_bandwidth 4_500_000
 max_single_grant 4_194_304
 max_allowance 4_500_000
 max_base_bandwidth 100_000
+action_deploy_global_contract
+- send_sir: 999_999_999_999_999
+- send_not_sir: 999_999_999_999_999
+- execution: 999_999_999_999_999
+action_deploy_global_contract_per_byte
+- send_sir: 999_999_999_999_999
+- send_not_sir: 999_999_999_999_999
+- execution: 999_999_999_999_999
+global_contract_storage_amount_per_byte 999999999999999999999999999
diff --git a/core/parameters/res/runtime_configs/parameters.yaml b/core/parameters/res/runtime_configs/parameters.yaml
index 0f4a25d3929..22cc640f0dd 100644
--- a/core/parameters/res/runtime_configs/parameters.yaml
+++ b/core/parameters/res/runtime_configs/parameters.yaml
@@ -277,3 +277,16 @@ max_shard_bandwidth: 999_999_999_999_999
 max_single_grant: 999_999_999_999_999
 max_allowance: 999_999_999_999_999
 max_base_bandwidth: 999_999_999_999_999
+
+# Global contracts
+action_deploy_global_contract: {
+  send_sir: 999_999_999_999_999,
+  send_not_sir: 999_999_999_999_999,
+  execution: 999_999_999_999_999,
+}
+action_deploy_global_contract_per_byte: {
+  send_sir: 999_999_999_999_999,
+  send_not_sir: 999_999_999_999_999,
+  execution: 999_999_999_999_999,
+}
+global_contract_storage_amount_per_byte: 999_999_999_999_999_999_999_999_999
\ No newline at end of file
diff --git a/core/parameters/res/runtime_configs/parameters_testnet.yaml b/core/parameters/res/runtime_configs/parameters_testnet.yaml
index 2f07aaf8f2d..159dd7faa89 100644
--- a/core/parameters/res/runtime_configs/parameters_testnet.yaml
+++ b/core/parameters/res/runtime_configs/parameters_testnet.yaml
@@ -267,3 +267,16 @@ max_shard_bandwidth: 999_999_999_999_999
 max_single_grant: 999_999_999_999_999
 max_allowance: 999_999_999_999_999
 max_base_bandwidth: 999_999_999_999_999
+
+# Global contracts
+action_deploy_global_contract: {
+  send_sir: 999_999_999_999_999,
+  send_not_sir: 999_999_999_999_999,
+  execution: 999_999_999_999_999,
+}
+action_deploy_global_contract_per_byte: {
+  send_sir: 999_999_999_999_999,
+  send_not_sir: 999_999_999_999_999,
+  execution: 999_999_999_999_999,
+}
+global_contract_storage_amount_per_byte: 999_999_999_999_999_999_999_999_999
\ No newline at end of file
diff --git a/core/parameters/src/cost.rs b/core/parameters/src/cost.rs
index fb3db1ee3eb..2ef2b585983 100644
--- a/core/parameters/src/cost.rs
+++ b/core/parameters/src/cost.rs
@@ -307,6 +307,8 @@ pub enum ActionCosts {
     new_data_receipt_base = 13,
     new_data_receipt_byte = 14,
     delegate = 15,
+    deploy_global_contract_base = 16,
+    deploy_global_contract_byte = 17,
 }
 
 impl ExtCosts {
@@ -438,6 +440,8 @@ pub struct StorageUsageConfig {
     pub num_bytes_account: u64,
     /// Additional number of bytes for a k/v record
     pub num_extra_bytes_record: u64,
+    /// Amount of yN burned per byte of deployed Global Contract code.
+    pub global_contract_storage_amount_per_byte: Balance,
 }
 
 impl RuntimeFeesConfig {
@@ -532,6 +536,16 @@ impl RuntimeFeesConfig {
                     send_not_sir: 200_000_000_000,
                     execution: 200_000_000_000,
                 },
+                ActionCosts::deploy_global_contract_base => Fee {
+                    send_sir: 184_765_750_000,
+                    send_not_sir: 184_765_750_000,
+                    execution: 184_765_750_000,
+                },
+                ActionCosts::deploy_global_contract_byte => Fee {
+                    send_sir: 6_812_999,
+                    send_not_sir: 6_812_999,
+                    execution: 70_000_000,
+                },
             },
         }
     }
@@ -563,11 +577,17 @@ impl StorageUsageConfig {
             num_bytes_account: 100,
             num_extra_bytes_record: 40,
             storage_amount_per_byte: 909 * 100_000_000_000_000_000,
+            global_contract_storage_amount_per_byte: 100_000_000_000_000_000_000,
         }
     }
 
     pub(crate) fn free() -> StorageUsageConfig {
-        Self { num_bytes_account: 0, num_extra_bytes_record: 0, storage_amount_per_byte: 0 }
+        Self {
+            num_bytes_account: 0,
+            num_extra_bytes_record: 0,
+            storage_amount_per_byte: 0,
+            global_contract_storage_amount_per_byte: 0,
+        }
     }
 }
diff --git a/core/parameters/src/parameter.rs b/core/parameters/src/parameter.rs
index 997b27fec41..1b762c037e8 100644
--- a/core/parameters/src/parameter.rs
+++ b/core/parameters/src/parameter.rs
@@ -229,6 +229,11 @@ pub enum Parameter {
     MaxSingleGrant,
     MaxAllowance,
     MaxBaseBandwidth,
+
+    // Global contracts
+    ActionDeployGlobalContract,
+    ActionDeployGlobalContractPerByte,
+    GlobalContractStorageAmountPerByte,
 }
 
 #[derive(
@@ -261,6 +266,8 @@ pub enum FeeParameter {
     ActionAddFunctionCallKeyPerByte,
     ActionDeleteKey,
     ActionDelegate,
+    ActionDeployGlobalContract,
+    ActionDeployGlobalContractPerByte,
 }
 
 impl Parameter {
@@ -324,6 +331,8 @@ impl From for FeeParameter {
             ActionCosts::new_action_receipt => Self::ActionReceiptCreation,
             ActionCosts::new_data_receipt_base => Self::DataReceiptCreationBase,
             ActionCosts::new_data_receipt_byte => Self::DataReceiptCreationPerByte,
+            ActionCosts::deploy_global_contract_base => Self::ActionDeployGlobalContract,
+            ActionCosts::deploy_global_contract_byte => Self::ActionDeployGlobalContractPerByte,
         }
     }
 }
diff --git a/core/parameters/src/parameter_table.rs b/core/parameters/src/parameter_table.rs
index 8753b838189..9c09f7e8531 100644
--- a/core/parameters/src/parameter_table.rs
+++ b/core/parameters/src/parameter_table.rs
@@ -302,6 +302,8 @@ impl TryFrom<&ParameterTable> for RuntimeConfig {
                 storage_amount_per_byte: params.get(Parameter::StorageAmountPerByte)?,
                 num_bytes_account: params.get(Parameter::StorageNumBytesAccount)?,
                 num_extra_bytes_record: params.get(Parameter::StorageNumExtraBytesRecord)?,
+                global_contract_storage_amount_per_byte: params
+                    .get(Parameter::GlobalContractStorageAmountPerByte)?,
             },
         }),
         wasm_config: Arc::new(Config {
diff --git a/core/primitives-core/src/account.rs b/core/primitives-core/src/account.rs
index e2de5a68050..6f66ff31944 100644
--- a/core/primitives-core/src/account.rs
+++ b/core/primitives-core/src/account.rs
@@ -5,6 +5,7 @@ use borsh::{BorshDeserialize, BorshSerialize};
 pub use near_account_id as id;
 use near_account_id::AccountId;
 use near_schema_checker_lib::ProtocolSchema;
+use std::borrow::Cow;
 use std::io;
 
 #[derive(
@@ -109,6 +110,10 @@ impl AccountContract {
             AccountContract::Local(code_hash)
         }
     }
+
+    pub fn is_none(&self) -> bool {
+        matches!(self, Self::None)
+    }
 }
 
 #[derive(
@@ -179,10 +184,12 @@ impl Account {
     }
 
     #[inline]
-    pub fn contract(&self) -> AccountContract {
+    pub fn contract(&self) -> Cow {
         match self {
-            Self::V1(account) => AccountContract::from_local_code_hash(account.code_hash),
-            Self::V2(account) => account.contract.clone(),
+            Self::V1(account) => {
+                Cow::Owned(AccountContract::from_local_code_hash(account.code_hash))
+            }
+            Self::V2(account) => Cow::Borrowed(&account.contract),
         }
     }
diff --git a/core/primitives-core/src/version.rs b/core/primitives-core/src/version.rs
index 17e914148ed..fa9f143a0a1 100644
--- a/core/primitives-core/src/version.rs
+++ b/core/primitives-core/src/version.rs
@@ -267,6 +267,7 @@ impl ProtocolFeature {
             // Nightly features:
             #[cfg(feature = "protocol_feature_fix_contract_loading_cost")]
             ProtocolFeature::FixContractLoadingCost => 129,
+            ProtocolFeature::GlobalContracts => 129,
             // TODO(#11201): When stabilizing this feature in mainnet, also remove the temporary code
             // that always enables this for mocknet (see config_mocknet function).
             ProtocolFeature::ShuffleShardAssignments => 143,
@@ -275,7 +276,6 @@
                 149
             }
             // Place features that are not yet in Nightly below this line.
-            ProtocolFeature::GlobalContracts => 200,
         }
     }
diff --git a/core/primitives/src/profile_data_v3.rs b/core/primitives/src/profile_data_v3.rs
index 5f89e6f2d93..293283df6c1 100644
--- a/core/primitives/src/profile_data_v3.rs
+++ b/core/primitives/src/profile_data_v3.rs
@@ -245,7 +245,7 @@ mod test {
         let pretty_debug_str = format!("{profile_data:#?}");
         expect_test::expect![[r#"
             ------------------------------
-            Action gas: 16120
+            Action gas: 18153
             ------ Host functions --------
             contract_loading_base -> 1 [0% host]
             contract_loading_bytes -> 2 [0% host]
@@ -348,6 +348,8 @@ mod test {
             new_data_receipt_base -> 1013
             new_data_receipt_byte -> 1014
             delegate -> 1015
+            deploy_global_contract_base -> 1016
+            deploy_global_contract_byte -> 1017
             ------------------------------
         "#]]
         .assert_eq(&pretty_debug_str)
diff --git a/core/primitives/src/snapshots/near_primitives__views__tests__exec_metadata_v3_view.snap b/core/primitives/src/snapshots/near_primitives__views__tests__exec_metadata_v3_view.snap
index 0e4e2845797..450a055ccbf 100644
--- a/core/primitives/src/snapshots/near_primitives__views__tests__exec_metadata_v3_view.snap
+++ b/core/primitives/src/snapshots/near_primitives__views__tests__exec_metadata_v3_view.snap
@@ -50,6 +50,16 @@ expression: view
     "cost": "DEPLOY_CONTRACT_BYTE",
     "gas_used": "1003"
   },
+  {
+    "cost_category": "ACTION_COST",
+    "cost": "DEPLOY_GLOBAL_CONTRACT_BASE",
+    "gas_used": "1016"
+  },
+  {
+    "cost_category": "ACTION_COST",
+    "cost": "DEPLOY_GLOBAL_CONTRACT_BYTE",
+    "gas_used": "1017"
+  },
   {
     "cost_category": "ACTION_COST",
     "cost": "FUNCTION_CALL_BASE",
diff --git a/core/primitives/src/version.rs b/core/primitives/src/version.rs
index c83a79bec58..cf1648bffe9 100644
--- a/core/primitives/src/version.rs
+++ b/core/primitives/src/version.rs
@@ -6,6 +6,7 @@ use std::sync::LazyLock;
 pub struct Version {
     pub version: String,
     pub build: String,
+    pub commit: String,
     #[serde(default)]
     pub rustc_version: String,
 }
diff --git a/core/primitives/src/views.rs b/core/primitives/src/views.rs
index 784dcbea500..92015f5e351 100644
--- a/core/primitives/src/views.rs
+++ b/core/primitives/src/views.rs
@@ -89,11 +89,12 @@ pub struct ContractCodeView {
 
 impl From<&Account> for AccountView {
     fn from(account: &Account) -> Self {
-        let (global_contract_hash, global_contract_account_id) = match account.contract() {
-            AccountContract::Global(contract) => (Some(contract), None),
-            AccountContract::GlobalByAccount(account_id) => (None, Some(account_id)),
-            AccountContract::Local(_) | AccountContract::None => (None, None),
-        };
+        let (global_contract_hash, global_contract_account_id) =
+            match account.contract().into_owned() {
+                AccountContract::Global(contract) => (Some(contract), None),
+                AccountContract::GlobalByAccount(account_id) => (None, Some(account_id)),
+                AccountContract::Local(_) | AccountContract::None => (None, None),
+            };
         AccountView {
             amount: account.amount(),
             locked: account.locked(),
diff --git a/core/store/src/trie/update.rs b/core/store/src/trie/update.rs
index dd57a75cdaf..f0b61eded80 100644
--- a/core/store/src/trie/update.rs
+++ b/core/store/src/trie/update.rs
@@ -103,22 +103,46 @@ impl TrieUpdate {
         mode: KeyLookupMode,
     ) -> Result>, StorageError> {
         let key = key.to_vec();
-        if let Some(key_value) = self.prospective.get(&key) {
-            return Ok(key_value.value.as_deref().map(TrieUpdateValuePtr::MemoryRef));
-        } else if let Some(changes_with_trie_key) = self.committed.get(&key) {
-            if let Some(RawStateChange { data, .. }) = changes_with_trie_key.changes.last() {
-                return Ok(data.as_deref().map(TrieUpdateValuePtr::MemoryRef));
-            }
+        if let Some(value_ref) = self.get_ref_from_updates(&key) {
+            return Ok(value_ref);
         }
 
         let result = self
             .trie
             .get_optimized_ref(&key, mode)?
             .map(|optimized_value_ref| TrieUpdateValuePtr::Ref(&self.trie, optimized_value_ref));
+        Ok(result)
+    }
+
+    pub fn get_ref_no_side_effects(
+        &self,
+        key: &TrieKey,
+        mode: KeyLookupMode,
+    ) -> Result>, StorageError> {
+        let key = key.to_vec();
+        if let Some(value_ref) = self.get_ref_from_updates(&key) {
+            return Ok(value_ref);
+        }
+
+        let result = self
+            .trie
+            .get_optimized_ref_no_side_effects(&key, mode)?
+            .map(|optimized_value_ref| TrieUpdateValuePtr::Ref(&self.trie, optimized_value_ref));
         Ok(result)
     }
 
+    fn get_ref_from_updates(&self, key: &[u8]) -> Option>> {
+        if let Some(key_value) = self.prospective.get(key) {
+            return Some(key_value.value.as_deref().map(TrieUpdateValuePtr::MemoryRef));
+        } else if let Some(changes_with_trie_key) = self.committed.get(key) {
+            if let Some(RawStateChange { data, .. }) = changes_with_trie_key.changes.last() {
+                return Some(data.as_deref().map(TrieUpdateValuePtr::MemoryRef));
+            }
+        }
+        None
+    }
+
     pub fn contains_key(&self, key: &TrieKey) -> Result {
         let key = key.to_vec();
         if self.prospective.contains_key(&key) {
diff --git a/integration-tests/src/test_loop/builder.rs b/integration-tests/src/test_loop/builder.rs
index fb5b4dea4c4..53cb1c78a77 100644
--- a/integration-tests/src/test_loop/builder.rs
+++ b/integration-tests/src/test_loop/builder.rs
@@ -2,7 +2,6 @@
 use std::collections::{HashMap, HashSet};
 use std::sync::{Arc, Mutex};
 use tempfile::TempDir;
 
-use near_async::futures::FutureSpawner;
 use near_async::messaging::{noop, IntoMultiSender, IntoSender, LateBoundSender};
 use near_async::test_loop::sender::TestLoopSender;
 use near_async::test_loop::TestLoopV2;
@@ -751,7 +750,6 @@ impl TestLoopBuilder {
         let resharding_actor =
             ReshardingActor::new(runtime_adapter.store().clone(), &chain_genesis);
 
-        let future_spawner = self.test_loop.future_spawner();
         let state_sync_dumper = StateSyncDumper {
             clock: self.test_loop.clock(),
             client_config,
@@ -760,10 +758,6 @@ impl TestLoopBuilder {
             shard_tracker,
             runtime: runtime_adapter,
             validator: validator_signer,
-            dump_future_runner: Box::new(move |future| {
-                future_spawner.spawn_boxed("state_sync_dumper", future);
-                Box::new(|| {})
-            }),
             future_spawner: Arc::new(self.test_loop.future_spawner()),
             handle: None,
         };
diff --git a/integration-tests/src/tests/client/features/zero_balance_account.rs b/integration-tests/src/tests/client/features/zero_balance_account.rs
index 5bb1d146eba..45ea8a53760 100644
--- a/integration-tests/src/tests/client/features/zero_balance_account.rs
+++ b/integration-tests/src/tests/client/features/zero_balance_account.rs
@@ -126,6 +126,7 @@ fn test_zero_balance_account_add_key() {
         storage_amount_per_byte: 10u128.pow(19),
         num_bytes_account: 100,
         num_extra_bytes_record: 40,
+        global_contract_storage_amount_per_byte: 10u128.pow(20),
     };
     let wasm_config = Arc::make_mut(&mut runtime_config.wasm_config);
     wasm_config.ext_costs = ExtCostsConfig::test();
diff --git a/integration-tests/src/tests/client/state_dump.rs b/integration-tests/src/tests/client/state_dump.rs
index 476c38ec93b..b85e0e19347 100644
--- a/integration-tests/src/tests/client/state_dump.rs
+++ b/integration-tests/src/tests/client/state_dump.rs
@@ -1,6 +1,6 @@
 use assert_matches::assert_matches;
 
-use near_async::futures::ActixFutureSpawner;
+use near_async::futures::ActixArbiterHandleFutureSpawner;
 use near_async::time::{Clock, Duration};
 use near_chain::near_chain_primitives::error::QueryError;
 use near_chain::{ChainGenesis, ChainStoreAccess, Provenance};
@@ -58,6 +58,8 @@ fn slow_test_state_dump() {
         Some(Arc::new(EmptyValidatorSigner::new("test0".parse().unwrap()))),
         "validator_signer",
     );
+
+    let arbiter = actix::Arbiter::new();
     let mut state_sync_dumper = StateSyncDumper {
         clock: Clock::real(),
         client_config: config,
@@ -66,8 +68,7 @@ fn slow_test_state_dump() {
         shard_tracker,
         runtime,
         validator,
-        dump_future_runner: StateSyncDumper::arbiter_dump_future_runner(),
-        future_spawner: Arc::new(ActixFutureSpawner),
+        future_spawner: Arc::new(ActixArbiterHandleFutureSpawner(arbiter.handle())),
         handle: None,
     };
     state_sync_dumper.start().unwrap();
@@ -164,6 +165,7 @@ fn run_state_sync_with_dumped_parts(
         iteration_delay: Some(Duration::ZERO),
         credentials_file: None,
     });
+    let arbiter = actix::Arbiter::new();
     let mut state_sync_dumper = StateSyncDumper {
         clock: Clock::real(),
         client_config: config.clone(),
@@ -172,8 +174,7 @@ fn run_state_sync_with_dumped_parts(
         shard_tracker,
         runtime,
         validator,
-        dump_future_runner: StateSyncDumper::arbiter_dump_future_runner(),
-        future_spawner: Arc::new(ActixFutureSpawner),
+        future_spawner: Arc::new(ActixArbiterHandleFutureSpawner(arbiter.handle())),
         handle: None,
     };
     state_sync_dumper.start().unwrap();
diff --git a/integration-tests/src/tests/client/sync_state_nodes.rs b/integration-tests/src/tests/client/sync_state_nodes.rs
index 43039cec055..820378753e2 100644
--- a/integration-tests/src/tests/client/sync_state_nodes.rs
+++ b/integration-tests/src/tests/client/sync_state_nodes.rs
@@ -345,7 +345,7 @@ fn ultra_slow_test_sync_state_dump() {
             let nearcore::NearNode {
                 view_client: view_client1,
                 // State sync dumper should be kept in the scope to avoid dropping it, which stops the state dumper loop.
-                state_sync_dumper: _dumper,
+                mut state_sync_dumper,
                 ..
             } = start_with_config(dir1.path(), near1).expect("start_with_config");
 
@@ -360,6 +360,7 @@
                 let genesis2 = genesis.clone();
 
                 match view_client1.send(GetBlock::latest().with_span_context()).await {
+                    // FIXME: this is not the right check after the sync hash was moved to sync the current epoch's state
                     Ok(Ok(b)) if b.header.height >= genesis.config.epoch_length + 2 => {
                         let mut view_client2_holder2 = view_client2_holder2.write().unwrap();
                         let mut arbiters_holder2 = arbiters_holder2.write().unwrap();
@@ -433,6 +434,7 @@
            })
            .await
            .unwrap();
+            state_sync_dumper.stop_and_await();
            System::current().stop();
        });
        drop(_dump_dir);
diff --git a/nearcore/src/lib.rs b/nearcore/src/lib.rs
index acaa75834e1..6cd514e7cc7 100644
--- a/nearcore/src/lib.rs
+++ b/nearcore/src/lib.rs
@@ -439,7 +439,6 @@ pub fn start_with_config_and_synchronization(
         shard_tracker: shard_tracker.clone(),
         runtime,
         validator: config.validator_signer.clone(),
-        dump_future_runner: StateSyncDumper::arbiter_dump_future_runner(),
         future_spawner: state_sync_spawner,
         handle: None,
     };
diff --git a/nearcore/src/state_sync.rs b/nearcore/src/state_sync.rs
index ca2e703db29..50b31f95bb3 100644
--- a/nearcore/src/state_sync.rs
+++ b/nearcore/src/state_sync.rs
@@ -1,9 +1,8 @@
 use crate::metrics;
-use actix_rt::Arbiter;
 use anyhow::Context;
 use borsh::BorshSerialize;
-use futures::future::{select_all, BoxFuture};
+use futures::future::select_all;
 use futures::{FutureExt, StreamExt};
 use near_async::futures::{respawn_for_parallelism, FutureSpawner};
 use near_async::time::{Clock, Duration, Interval};
@@ -29,7 +28,7 @@ use rand::thread_rng;
 use std::collections::{HashMap, HashSet};
 use std::i64;
 use std::sync::atomic::{AtomicBool, AtomicI64, Ordering};
-use std::sync::{Arc, RwLock};
+use std::sync::{Arc, Condvar, Mutex, RwLock};
 use tokio::sync::oneshot;
 use tokio::sync::Semaphore;
@@ -49,9 +48,8 @@ pub struct StateSyncDumper {
     /// Lock the value of mutable validator signer for the duration of a request to ensure consistency.
     /// Please note that the locked value should not be stored anywhere or passed through the thread boundary.
     pub validator: MutableValidatorSigner,
-    pub dump_future_runner: Box) -> Box>,
     pub future_spawner: Arc,
-    pub handle: Option,
+    pub handle: Option>,
 }
 
 impl StateSyncDumper {
@@ -93,7 +91,7 @@
         };
         let chain_id = self.client_config.chain_id.clone();
 
-        let keep_running = Arc::new(AtomicBool::new(true));
+        let handle = Arc::new(StateSyncDumpHandle::new());
 
         let chain = Chain::new_for_view_client(
             self.clock.clone(),
@@ -111,7 +109,8 @@
                 tracing::debug!(target: "state_sync_dump", ?shard_id, "Dropped existing progress");
             }
         }
-        let handle = (self.dump_future_runner)(
+        self.future_spawner.spawn_boxed(
+            "state_sync_dump",
             do_state_sync_dump(
                 self.clock.clone(),
                 chain,
@@ -122,36 +121,34 @@ impl StateSyncDumper {
                 external,
                 dump_config.iteration_delay.unwrap_or(Duration::seconds(10)),
                 self.validator.clone(),
-                keep_running.clone(),
+                handle.clone(),
                 self.future_spawner.clone(),
             )
             .boxed(),
         );
-        self.handle = Some(StateSyncDumpHandle { handle: Some(handle), keep_running });
+        self.handle = Some(handle);
 
         Ok(())
     }
 
-    pub fn arbiter_dump_future_runner() -> Box) -> Box>
-    {
-        Box::new(|future| {
-            let arbiter = Arbiter::new();
-            assert!(arbiter.spawn(future));
-            Box::new(move || {
-                arbiter.stop();
-            })
-        })
-    }
-
     pub fn stop(&mut self) {
         self.handle.take();
     }
+
+    // Tell the dumper to stop and wait until it's finished
+    pub fn stop_and_await(&mut self) {
+        let Some(handle) = self.handle.take() else {
+            return;
+        };
+        handle.stop_and_await();
+    }
 }
 
-/// Holds arbiter handles controlling the lifetime of the spawned threads.
+/// Cancels the dumper when dropped and allows waiting for the dumper task to finish
 pub struct StateSyncDumpHandle {
-    pub handle: Option>,
-    keep_running: Arc,
+    keep_running: AtomicBool,
+    task_running: Mutex,
+    await_task: Condvar,
 }
 
 impl Drop for StateSyncDumpHandle {
@@ -161,10 +158,34 @@
 impl StateSyncDumpHandle {
-    fn stop(&mut self) {
+    fn new() -> Self {
+        Self {
+            keep_running: AtomicBool::new(true),
+            task_running: Mutex::new(true),
+            await_task: Condvar::new(),
+        }
+    }
+
+    // Tell the dumper to stop
+    fn stop(&self) {
         tracing::warn!(target: "state_sync_dump", "Stopping state dumper");
         self.keep_running.store(false, Ordering::Relaxed);
-        self.handle.take().unwrap()()
+    }
+
+    // Tell the dumper to stop and wait until it's finished
+    fn stop_and_await(&self) {
+        self.stop();
+        let mut running = self.task_running.lock().unwrap();
+        while *running {
+            running = self.await_task.wait(running).unwrap();
+        }
+    }
+
+    // Called by the dumper when it's finished, and wakes up any threads waiting on it
+    fn task_finished(&self) {
+        let mut running = self.task_running.lock().unwrap();
+        *running = false;
+        self.await_task.notify_all();
     }
 }
@@ -262,6 +283,39 @@ impl DumpState {
             }
         }
     }
+
+    /// Waits until all part upload tasks are done for some shard.
+    async fn await_parts_upload(&mut self) -> (ShardId, anyhow::Result<()>) {
+        let ((shard_id, result), _, _still_going) =
+            futures::future::select_all(self.dump_state.iter_mut().map(|(shard_id, s)| {
+                async {
+                    let r = (&mut s.upload_parts).await.unwrap();
+                    (*shard_id, r)
+                }
+                .boxed()
+            }))
+            .await;
+
+        drop(_still_going);
+
+        self.dump_state.remove(&shard_id);
+        (shard_id, result)
+    }
+
+    /// Sets the `canceled` variable to true and waits for all tasks to exit
+    async fn cancel(&mut self) {
+        self.canceled.store(true, Ordering::Relaxed);
+        for (_shard_id, d) in self.dump_state.iter() {
+            // Set it to -1 to tell the existing tasks not to set the metrics anymore
+            d.parts_dumped.store(-1, Ordering::SeqCst);
+        }
+        while !self.dump_state.is_empty() {
+            let (shard_id, result) = self.await_parts_upload().await;
+            if let Err(error) = result {
+                tracing::error!(target: "state_sync_dump", epoch_id = ?&self.epoch_id, %shard_id, ?error, "Shard dump failed after cancellation");
+            }
+        }
+    }
 }
 
 // Represents the state of the current epoch's state part dump
@@ -887,17 +941,7 @@ impl StateDumper {
         let CurrentDump::InProgress(dump) = &mut self.current_dump else {
             return std::future::pending().await;
         };
-        let ((shard_id, result), _, _still_going) =
-            futures::future::select_all(dump.dump_state.iter_mut().map(|(shard_id, s)| {
-                async {
-                    let r = (&mut s.upload_parts).await.unwrap();
-                    (*shard_id, r)
-                }
-                .boxed()
-            }))
-            .await;
-
-        drop(_still_going);
+        let (shard_id, result) = dump.await_parts_upload().await;
 
         match result {
             Ok(()) => {
@@ -918,7 +962,7 @@ impl StateDumper {
             }),
         )
         .context("failed setting state dump progress")?;
-        dump.dump_state.remove(&shard_id);
+
         if dump.dump_state.is_empty() {
             self.current_dump = CurrentDump::Done(dump.epoch_id);
         }
@@ -942,7 +986,7 @@ impl StateDumper {
         let Some(sync_header) = self.latest_sync_header()? else {
            return Ok(());
        };
-        match &self.current_dump {
+        match &mut self.current_dump {
            CurrentDump::InProgress(dump) => {
                if &dump.epoch_id == sync_header.epoch_id() {
                    return Ok(());
@@ -951,11 +995,7 @@ impl StateDumper {
                    target: "state_sync_dump", "Canceling existing dump of state for epoch {} upon new epoch {}",
                    &dump.epoch_id.0, &sync_header.epoch_id().0,
                );
-                dump.canceled.store(true, Ordering::Relaxed);
-                for (_shard_id, d) in dump.dump_state.iter() {
-                    // Set it to -1 to tell the existing tasks not to set the metrics anymore
-                    d.parts_dumped.store(-1, Ordering::SeqCst);
-                }
+                dump.cancel().await;
            }
            CurrentDump::Done(epoch_id) => {
                if epoch_id == sync_header.epoch_id() {
@@ -992,7 +1032,7 @@ async fn state_sync_dump(
    external: ExternalConnection,
    iteration_delay: Duration,
    validator: MutableValidatorSigner,
-    keep_running: Arc,
+    keep_running: &AtomicBool,
    future_spawner: Arc,
 ) -> anyhow::Result<()> {
    tracing::info!(target: "state_sync_dump", "Running StateSyncDump loop");
@@ -1034,6 +1074,10 @@
        }
    }
 
+    if let CurrentDump::InProgress(mut dump) = dumper.current_dump {
+        tracing::debug!(target: "state_sync_dump", "Awaiting upload task cancellation");
+        dump.cancel().await;
+    }
    tracing::debug!(target: "state_sync_dump", "Stopped state dump thread");
    Ok(())
 }
@@ -1048,7 +1092,7 @@ async fn do_state_sync_dump(
    external: ExternalConnection,
    iteration_delay: Duration,
    validator: MutableValidatorSigner,
-    keep_running: Arc,
+    handle: Arc,
    future_spawner: Arc,
 ) {
    if let Err(error) = state_sync_dump(
@@ -1061,11 +1105,12 @@ async fn do_state_sync_dump(
        external,
        iteration_delay,
        validator,
-        keep_running,
+        &handle.keep_running,
        future_spawner,
    )
    .await
    {
        tracing::error!(target: "state_sync_dump", ?error, "State dumper failed");
    }
+    handle.task_finished();
 }
diff --git a/neard/build.rs b/neard/build.rs
index 0557dc270dc..d3d31baca88 100644
--- a/neard/build.rs
+++ b/neard/build.rs
@@ -47,14 +47,9 @@ fn command(prog: &str, args: &[&str], cwd: Option) -> Result {
    }
 }
 
-/// Returns version read from git repository or ‘unknown’ if could not be
-/// determined.
-///
-/// Uses `git describe --always --dirty=-modified` to get the version. For
-/// builds on release tags this will return that tag. In other cases the
-/// version will describe the commit by including its hash. If the working
-/// directory isn’t clean, the version will include `-modified` suffix.
-fn get_git_version() -> Result {
+/// Returns version info (build, commit) read from git repository or (unknown, unknown) if could
+/// not be determined.
+fn get_git_version() -> Result<(String, String)> {
    // Figure out git directory. Don’t just assume it’s ../.git because that
    // doesn’t work with git work trees so use `git rev-parse --git-dir` instead.
    let pkg_dir = std::path::PathBuf::from(env("CARGO_MANIFEST_DIR")?);
@@ -66,7 +61,7 @@ fn get_git_version() -> Result {
            // version as unknown.
            println!("cargo:warning=unable to determine git version (not in git repository?)");
            println!("cargo:warning={}", msg);
-            return Ok("unknown".to_string());
+            return Ok(("unknown".to_string(), "unknown".to_string()));
        }
    };
@@ -85,11 +80,19 @@ fn get_git_version() -> Result {
    // * --match=[0-9]* → only consider tags starting with a digit; this
    //   prevents tags such as `crates-0.14.0` from being considered
    let args = &["describe", "--always", "--dirty=-modified", "--tags", "--match=[0-9]*"];
-    let out = command("git", args, None)?;
-    match String::from_utf8_lossy(&out) {
+    let build = command("git", args, None)?;
+    let build_str = match String::from_utf8_lossy(&build) {
        std::borrow::Cow::Borrowed(version) => Ok(version.trim().to_string()),
        std::borrow::Cow::Owned(version) => Err(anyhow!("git: invalid output: {}", version)),
-    }
+    };
+
+    let commit = command("git", &["rev-parse", "HEAD"], None)?;
+    let commit_hash = match String::from_utf8_lossy(&commit) {
+        std::borrow::Cow::Borrowed(version) => Ok(version.trim().to_string()),
+        std::borrow::Cow::Owned(version) => Err(anyhow!("git: invalid output: {}", version)),
+    };
+
+    Ok((build_str?, commit_hash?))
 }
 
 /// Get features enabled using the --features flag.
@@ -131,7 +134,9 @@ fn try_main() -> Result<()> {
    };
    println!("cargo:rustc-env=NEARD_VERSION={}", version);
 
-    println!("cargo:rustc-env=NEARD_BUILD={}", get_git_version()?);
+    println!("cargo:rustc-env=NEARD_BUILD={}", get_git_version()?.0);
+
+    println!("cargo:rustc-env=NEARD_COMMIT={}", get_git_version()?.1);
 
    println!("cargo:rustc-env=NEARD_RUSTC_VERSION={}", rustc_version::version()?);
diff --git a/neard/src/cli.rs b/neard/src/cli.rs
index 3463a5209b3..cad827a5fc5 100644
--- a/neard/src/cli.rs
+++ b/neard/src/cli.rs
@@ -66,6 +66,7 @@ impl NeardCmd {
            target: "neard",
            version = crate::NEARD_VERSION,
            build = crate::NEARD_BUILD,
+            commit = crate::NEARD_COMMIT,
            latest_protocol = near_primitives::version::PROTOCOL_VERSION
        );
 
@@ -583,7 +584,7 @@ impl RunCmd {
            if let Some(handle) = cold_store_loop_handle {
                handle.stop()
            }
-            state_sync_dumper.stop();
+            state_sync_dumper.stop_and_await();
            resharding_handle.stop();
            futures::future::join_all(rpc_servers.iter().map(|(name, server)| async move {
                server.stop(true).await;
diff --git a/neard/src/main.rs b/neard/src/main.rs
index e1eab0e4b91..d149f1227a2 100644
--- a/neard/src/main.rs
+++ b/neard/src/main.rs
@@ -12,13 +12,20 @@ use std::time::Duration;
 
 static NEARD_VERSION: &str = env!("NEARD_VERSION");
 static NEARD_BUILD: &str = env!("NEARD_BUILD");
+static NEARD_COMMIT: &str = env!("NEARD_COMMIT");
 static RUSTC_VERSION: &str = env!("NEARD_RUSTC_VERSION");
 
 static NEARD_FEATURES: &str = env!("NEARD_FEATURES");
 
 static NEARD_VERSION_STRING: LazyLock = LazyLock::new(|| {
    format!(
-        "(release {}) (build {}) (rustc {}) (protocol {}) (db {})\nfeatures: [{}]",
-        NEARD_VERSION, NEARD_BUILD, RUSTC_VERSION, PROTOCOL_VERSION, DB_VERSION, NEARD_FEATURES
+        "(release {}) (build {}) (commit {}) (rustc {}) (protocol {}) (db {})\nfeatures: [{}]",
+        NEARD_VERSION,
+        NEARD_BUILD,
+        NEARD_COMMIT,
+        RUSTC_VERSION,
+        PROTOCOL_VERSION,
+        DB_VERSION,
+        NEARD_FEATURES
    )
 });
 
@@ -26,6 +33,7 @@ fn neard_version() -> Version {
    Version {
        version: NEARD_VERSION.to_string(),
        build: NEARD_BUILD.to_string(),
+        commit: NEARD_COMMIT.to_string(),
        rustc_version: RUSTC_VERSION.to_string(),
    }
 }
diff --git a/runtime/near-vm-runner/src/profile.rs b/runtime/near-vm-runner/src/profile.rs
index bc3e31c071e..1519771be6e 100644
--- a/runtime/near-vm-runner/src/profile.rs
+++ b/runtime/near-vm-runner/src/profile.rs
@@ -244,7 +244,7 @@ mod test {
        let pretty_debug_str = format!("{profile_data:#?}");
        expect_test::expect![[r#"
            ------------------------------
-            Action gas: 16120
+            Action gas: 18153
            ------ Host functions --------
            contract_loading_base -> 1 [0% host]
            contract_loading_bytes -> 2 [0% host]
@@ -347,6 +347,8 @@ mod test {
            new_data_receipt_base -> 1013
            new_data_receipt_byte -> 1014
            delegate -> 1015
+            deploy_global_contract_base -> 1016
+            deploy_global_contract_byte -> 1017
            ------------------------------
        "#]]
        .assert_eq(&pretty_debug_str)
diff --git a/runtime/runtime-params-estimator/src/cost.rs b/runtime/runtime-params-estimator/src/cost.rs
index ecfeb58a733..72ba5d6d716 100644
--- a/runtime/runtime-params-estimator/src/cost.rs
+++ b/runtime/runtime-params-estimator/src/cost.rs
@@ -206,6 +206,16 @@ pub enum Cost {
    ActionDelegateSendNotSir,
    ActionDelegateSendSir,
    ActionDelegateExec,
+    // Same as ActionDeployContract but for global contracts
+    ActionDeployGlobalContractBase,
+    ActionDeployGlobalContractBaseSendNotSir,
+    ActionDeployGlobalContractBaseSendSir,
+    ActionDeployGlobalContractBaseExec,
+    // Same as ActionDeployContractPerByte but for global contracts
+    ActionDeployGlobalContractPerByte,
+    ActionDeployGlobalContractPerByteSendNotSir,
+    ActionDeployGlobalContractPerByteSendSir,
+    ActionDeployGlobalContractPerByteExec,
    /// Estimates `wasm_config.ext_costs.base` which is intended to be charged
    /// once on every host function call. However, this is currently
    /// inconsistent. First, we do not charge on Math API methods (`sha256`,
diff --git a/runtime/runtime-params-estimator/src/costs_to_runtime_config.rs b/runtime/runtime-params-estimator/src/costs_to_runtime_config.rs
index 061f3d72139..97a7bc6eb92 100644
--- a/runtime/runtime-params-estimator/src/costs_to_runtime_config.rs
+++ b/runtime/runtime-params-estimator/src/costs_to_runtime_config.rs
@@ -73,6 +73,8 @@ fn runtime_fees_config(cost_table: &CostTable) -> anyhow::Result
                fee(Cost::ActionReceiptCreation)?,
            ActionCosts::new_data_receipt_base => fee(Cost::DataReceiptCreationBase)?,
            ActionCosts::new_data_receipt_byte => fee(Cost::DataReceiptCreationPerByte)?,
+            ActionCosts::deploy_global_contract_base => fee(Cost::ActionDeployGlobalContractBase)?,
+            ActionCosts::deploy_global_contract_byte => fee(Cost::ActionDeployGlobalContractPerByte)?,
        },
        ..RuntimeFeesConfig::clone(&actual_fees_config)
    };
diff --git a/runtime/runtime/src/actions.rs b/runtime/runtime/src/actions.rs
index cc4c3b0ec5e..31f8aa8960c 100644
--- a/runtime/runtime/src/actions.rs
+++ b/runtime/runtime/src/actions.rs
@@ -622,12 +622,30 @@ pub(crate) fn action_deploy_contract(
 }
 
 pub(crate) fn action_deploy_global_contract(
+    account: &mut Account,
    account_id: &AccountId,
+    apply_state: &ApplyState,
    deploy_contract: &DeployGlobalContractAction,
    result: &mut ActionResult,
 ) {
    let _span = tracing::debug_span!(target: "runtime", "action_deploy_global_contract").entered();
 
+    let storage_cost = apply_state
+        .config
+        .fees
+        .storage_usage_config
+        .global_contract_storage_amount_per_byte
+        .saturating_mul(deploy_contract.code.len() as u128);
+    let Some(updated_balance) = account.amount().checked_sub(storage_cost) else {
+        result.result = Err(ActionErrorKind::LackBalanceForState {
+            account_id: account_id.clone(),
+            amount: storage_cost,
+        }
+        .into());
+        return;
+    };
+    account.set_amount(updated_balance);
+
    let id = match deploy_contract.deploy_mode {
        GlobalContractDeployMode::CodeHash => {
            GlobalContractIdentifier::CodeHash(hash(&deploy_contract.code))
diff --git a/runtime/runtime/src/config.rs b/runtime/runtime/src/config.rs
index afe238ae732..db9645de874 100644
--- a/runtime/runtime/src/config.rs
+++ b/runtime/runtime/src/config.rs
@@ -1,6 +1,7 @@
 //! Settings of the parameters of the runtime.
 
 use near_primitives::account::AccessKeyPermission;
+use near_primitives::action::DeployGlobalContractAction;
 use near_primitives::errors::IntegerOverflowError;
 use near_primitives::version::FIXED_MINIMUM_NEW_RECEIPT_GAS_VERSION;
 use near_primitives_core::types::ProtocolVersion;
@@ -137,7 +138,15 @@ pub fn total_send_fees(
                &delegate_action.receiver_id,
            )?
        }
-        DeployGlobalContract(_) | UseGlobalContract(_) => {
+        DeployGlobalContract(DeployGlobalContractAction { code, .. }) => {
+            let num_bytes = code.len() as u64;
+            fees.fee(ActionCosts::deploy_global_contract_base).send_fee(sender_is_receiver)
+                + fees
+                    .fee(ActionCosts::deploy_global_contract_byte)
+                    .send_fee(sender_is_receiver)
+                    * num_bytes
+        }
+        UseGlobalContract(_) => {
            // TODO(#12717): implement send fees for global contracts
            1
        }
@@ -222,7 +231,12 @@ pub fn exec_fee(config: &RuntimeConfig, action: &Action, receiver_id: &AccountId
        DeleteKey(_) => fees.fee(ActionCosts::delete_key).exec_fee(),
        DeleteAccount(_) => fees.fee(ActionCosts::delete_account).exec_fee(),
        Delegate(_) => fees.fee(ActionCosts::delegate).exec_fee(),
-        DeployGlobalContract(_) | UseGlobalContract(_) => {
+        DeployGlobalContract(DeployGlobalContractAction { code, .. }) => {
+            let num_bytes = code.len() as u64;
+            fees.fee(ActionCosts::deploy_global_contract_base).exec_fee()
+                + fees.fee(ActionCosts::deploy_global_contract_byte).exec_fee() * num_bytes
+        }
+        UseGlobalContract(_) => {
            // TODO(#12717): implement exec fees for global contracts
            1
        }
diff --git a/runtime/runtime/src/lib.rs b/runtime/runtime/src/lib.rs
index 0b98e26dd75..6e4f920f8ea 100644
--- a/runtime/runtime/src/lib.rs
+++ b/runtime/runtime/src/lib.rs
@@ -471,7 +471,14 @@ impl Runtime {
                )?;
            }
            Action::DeployGlobalContract(deploy_global_contract) => {
-                action_deploy_global_contract(account_id, deploy_global_contract, &mut result);
+                let account = account.as_mut().expect(EXPECT_ACCOUNT_EXISTS);
+                action_deploy_global_contract(
+                    account,
+                    account_id,
+                    apply_state,
+                    deploy_global_contract,
+                    &mut result,
+                );
            }
            Action::UseGlobalContract(use_global_contract) => {
                let account = account.as_mut().expect(EXPECT_ACCOUNT_EXISTS);
@@ -2676,24 +2683,11 @@ fn schedule_contract_preparation<'b, R: MaybeRefReceipt>(
    let scheduled_receipt_offset = iterator.position(|peek| {
        let peek = peek.as_ref();
        let account_id = peek.receiver_id();
-        let receiver = std::cell::LazyCell::new(|| {
-            let key = TrieKey::Account { account_id: account_id.clone() };
-            let receiver = get_pure::(state_update, &key);
-            let Ok(Some(receiver)) = receiver else {
-                // Most likely reason this can happen is because the receipt is for an account that
-                // does not yet exist. This is a routine occurrence as accounts are created by
-                // sending some NEAR to a name that's about to be created.
-                return None;
-            };
-            Some(receiver)
-        });
-
        // We need to inspect each receipt recursively in case these are data receipts, thus a
        // function.
        fn handle_receipt(
            mgr: &mut ReceiptPreparationPipeline,
            state_update: &TrieUpdate,
-            receiver: &std::cell::LazyCell, impl FnOnce() -> Option>,
            account_id: &AccountId,
            receipt: &Receipt,
        ) -> bool {
@@ -2702,7 +2696,7 @@ fn schedule_contract_preparation<'b, R: MaybeRefReceipt>(
                    // This returns `true` if work may have been scheduled (thus we currently
                    // prepare actions in at most 2 "interesting" receipts in parallel due to
                    // staggering.)
-                    mgr.submit(receipt, &receiver, None)
+                    mgr.submit(receipt, state_update, None)
                }
                ReceiptEnum::Data(dr) => {
                    let key = TrieKey::PostponedReceiptId {
@@ -2729,7 +2723,7 @@ fn schedule_contract_preparation<'b, R: MaybeRefReceipt>(
                    let Ok(Some(pr)) = get_pure::(state_update, &key) else {
                        return false;
                    };
-                    return handle_receipt(mgr, state_update, receiver, account_id, &pr);
+                    return handle_receipt(mgr, state_update, account_id, &pr);
                }
                ReceiptEnum::PromiseResume(dr) => {
                    let key = TrieKey::PromiseYieldReceipt {
@@ -2739,12 +2733,12 @@ fn schedule_contract_preparation<'b, R: MaybeRefReceipt>(
                    let Ok(Some(yr)) = get_pure::(state_update, &key) else {
                        return false;
                    };
-                    return handle_receipt(mgr, state_update, receiver, account_id, &yr);
+                    return handle_receipt(mgr, state_update, account_id, &yr);
                }
                ReceiptEnum::GlobalContractDistribution(_) => false,
            }
        }
-        handle_receipt(pipeline_manager, state_update, &receiver, account_id, peek)
+        handle_receipt(pipeline_manager, state_update, account_id, peek)
    })?;
    Some(scheduled_receipt_offset.saturating_add(1))
 }
diff --git a/runtime/runtime/src/pipelining.rs b/runtime/runtime/src/pipelining.rs
index c8df4fa2cbf..cbb90e104be 100644
--- a/runtime/runtime/src/pipelining.rs
+++ b/runtime/runtime/src/pipelining.rs
@@ -6,16 +6,18 @@ use crate::metrics::{
    PIPELINING_ACTIONS_TASK_WORKING_TIME, PIPELINING_ACTIONS_WAITING_TIME,
 };
 use near_parameters::RuntimeConfig;
-use near_primitives::account::Account;
-use near_primitives::action::Action;
+use near_primitives::account::{Account, AccountContract};
+use near_primitives::action::{Action, GlobalContractIdentifier};
 use near_primitives::config::ViewConfig;
 use near_primitives::hash::CryptoHash;
 use near_primitives::receipt::{Receipt, ReceiptEnum};
+use near_primitives::trie_key::{GlobalContractCodeIdentifier, TrieKey};
 use near_primitives::types::{AccountId, Gas};
 use near_store::contract::ContractStorage;
+use near_store::{get_pure, KeyLookupMode, TrieUpdate};
 use near_vm_runner::logic::{GasCounter, ProtocolVersion};
 use near_vm_runner::{ContractRuntimeCache, PreparedContract};
-use std::collections::{BTreeMap, BTreeSet};
+use std::collections::{BTreeMap, BTreeSet, HashSet};
 use std::sync::{Arc, Condvar, Mutex};
 use std::time::Instant;
@@ -49,6 +51,10 @@ pub(crate) struct ReceiptPreparationPipeline {
    /// things, would give the runtime more time to compile the contract.
    block_accounts: BTreeSet,
 
+    /// List of global contract identifiers that must not be prepared in this chunk.
+    /// This solves the same issue as `block_accounts` but for global contract deployments.
+    block_global_contracts: HashSet,
+
    /// The Runtime config for these pipelining requests.
    config: Arc,
@@ -90,6 +96,7 @@ impl ReceiptPreparationPipeline {
        Self {
            map: Default::default(),
            block_accounts: Default::default(),
+            block_global_contracts: Default::default(),
            config,
            contract_cache,
            protocol_version,
@@ -111,7 +118,7 @@ impl ReceiptPreparationPipeline {
    pub(crate) fn submit(
        &mut self,
        receipt: &Receipt,
-        account: &std::cell::LazyCell, impl FnOnce() -> Option>,
+        state_update: &TrieUpdate,
        view_config: Option,
    ) -> bool {
        let account_id = receipt.receiver_id();
@@ -120,11 +127,14 @@ impl ReceiptPreparationPipeline {
        }
        let actions = match receipt.receipt() {
            ReceiptEnum::Action(a) | ReceiptEnum::PromiseYield(a) => &a.actions,
-            ReceiptEnum::GlobalContractDistribution(_)
-            | ReceiptEnum::Data(_)
-            | ReceiptEnum::PromiseResume(_) => return false,
+            ReceiptEnum::GlobalContractDistribution(global_contract_data) => {
+                self.block_global_contracts.insert(global_contract_data.id.clone());
+                return false;
+            }
+            ReceiptEnum::Data(_) | ReceiptEnum::PromiseResume(_) => return false,
        };
        let mut any_function_calls = false;
+        let mut account = None;
        for (action_index, action) in actions.iter().enumerate() {
            let account_id = account_id.clone();
            match action {
@@ -135,10 +145,51 @@ impl ReceiptPreparationPipeline {
                    return self.block_accounts.insert(account_id);
                }
                Action::FunctionCall(function_call) => {
-                    let Some(account) = &**account else { continue };
-                    let Some(code_hash) = account.local_contract_hash() else {
-                        // TODO(#12884): support global contracts pipelining
-                        continue;
+                    let account = if let Some(account) = &account {
+                        account
+                    } else {
+                        let key = TrieKey::Account { account_id: account_id.clone() };
+                        let Ok(Some(receiver)) = get_pure::(state_update, &key) else {
+                            // Most likely reason this can happen is because the receipt is for
+                            // an account that does not yet exist. This is a routine occurrence
+                            // as accounts are created by sending some NEAR to a name that's
+                            // about to be created.
+                            continue;
+                        };
+                        account.insert(receiver)
+                    };
+                    let code_hash = match account.contract().as_ref() {
+                        AccountContract::None => continue,
+                        AccountContract::Local(code_hash) => *code_hash,
+                        AccountContract::Global(global_code_hash) => {
+                            if self
+                                .block_global_contracts
+                                .contains(&GlobalContractIdentifier::CodeHash(*global_code_hash))
+                            {
+                                continue;
+                            }
+                            *global_code_hash
+                        }
+                        AccountContract::GlobalByAccount(global_contract_account_id) => {
+                            if self.block_global_contracts.contains(
+                                &GlobalContractIdentifier::AccountId(
+                                    global_contract_account_id.clone(),
+                                ),
+                            ) {
+                                continue;
+                            }
+                            let key = TrieKey::GlobalContractCode {
+                                identifier: GlobalContractCodeIdentifier::AccountId(
+                                    global_contract_account_id.clone(),
+                                ),
+                            };
+                            let Ok(Some(value_ref)) = state_update
+                                .get_ref_no_side_effects(&key, KeyLookupMode::FlatStorage)
+                            else {
+                                continue;
+                            };
+                            value_ref.value_hash()
+                        }
                    };
                    let key = PrepareTaskKey { receipt_id: receipt.get_hash(), action_index };
                    let gas_counter = self.gas_counter(view_config.as_ref(), function_call.gas);
diff --git a/runtime/runtime/tests/runtime_group_tools/random_config.rs b/runtime/runtime/tests/runtime_group_tools/random_config.rs
index 1ae6f7bb986..5d4c08ecac7 100644
--- a/runtime/runtime/tests/runtime_group_tools/random_config.rs
+++ b/runtime/runtime/tests/runtime_group_tools/random_config.rs
@@ -18,6 +18,7 @@ pub fn random_config() -> RuntimeConfig {
            num_bytes_account: rng.next_u64() % 10000,
            num_extra_bytes_record: rng.next_u64() % 10000,
            storage_amount_per_byte: rng.next_u64() as u128,
+            global_contract_storage_amount_per_byte: rng.next_u64() as u128,
        },
        burnt_gas_reward: Rational32::new((rng.next_u32() % 100).try_into().unwrap(), 100),
        pessimistic_gas_price_inflation_ratio: Rational32::new(
diff --git a/tools/amend-genesis/src/lib.rs b/tools/amend-genesis/src/lib.rs
index 0cd63303cd3..cfd42438557 100644
--- a/tools/amend-genesis/src/lib.rs
+++ b/tools/amend-genesis/src/lib.rs
@@ -73,7 +73,7 @@ impl AccountRecords {
            // records. Set the storage usage to reflect whatever's in the original records, and at the
            // end we will add to the storage usage with any extra keys added for this account
            account.set_storage_usage(existing.storage_usage());
-            account.set_contract(existing.contract());
+            account.set_contract(existing.contract().into_owned());
            if self.amount_needed {
                set_total_balance(account, existing);
            }
@@ -171,7 +171,7 @@ fn parse_extra_records(
    near_chain_configs::stream_records_from_file(reader, |r| {
        match r {
            StateRecord::Account { account_id, account } => {
-                if account.contract() != AccountContract::None {
+                if !account.contract().is_none() {
                    result = Err(anyhow::anyhow!(
                        "FIXME: accounts in --extra-records with code_hash set not supported"
                    ));
@@ -517,7 +517,7 @@ mod test {
                    (
                        account.amount(),
                        account.locked(),
-                        account.contract(),
+                        account.contract().into_owned(),
                        account.storage_usage(),
                    ),
                )
@@ -555,7 +555,7 @@ mod test {
                    (
                        account.amount(),
                        account.locked(),
-                        account.contract(),
+                        account.contract().into_owned(),
                        account.storage_usage(),
                    ),
                );
diff --git a/tools/protocol-schema-check/res/protocol_schema.toml b/tools/protocol-schema-check/res/protocol_schema.toml
index b4fa2833ea4..9e8f32fe432 100644
--- a/tools/protocol-schema-check/res/protocol_schema.toml
+++ b/tools/protocol-schema-check/res/protocol_schema.toml
@@ -6,7 +6,7 @@ AccountV1 = 3570440720
 AccountV2 = 2593503293
 AccountVersion = 3672019478
 Action = 708080604
-ActionCosts = 3115555891
+ActionCosts = 1237937737
 ActionError = 3315176969
 ActionErrorKind = 853762542
 ActionReceipt = 882261823
@@ -128,11 +128,11 @@ EpochSyncProofEpochData = 4024593770
 EpochSyncProofLastEpochData = 2620439209
 EpochSyncProofV1 = 3403222461
 EpochValidatorInfo = 1082066685
-ExecutionMetadata = 3853243413
-ExecutionOutcome = 1389616391
-ExecutionOutcomeWithId = 3435887297
-ExecutionOutcomeWithIdAndProof = 3467933830
-ExecutionOutcomeWithProof = 3452427190
+ExecutionMetadata = 166381410
+ExecutionOutcome = 3343116239
+ExecutionOutcomeWithId = 1785592357
+ExecutionOutcomeWithIdAndProof = 2770873083
+ExecutionOutcomeWithProof = 3035442989
 ExecutionStatus = 1674342960
 ExtCosts = 1172935704
 FetchingStateStatus = 2204896805
@@ -197,7 +197,7 @@ Ping = 2783493472
 Pong = 3159638327
 PrepareError = 4009037507
 ProfileDataV2 = 1955507222
-ProfileDataV3 = 1564915364
+ProfileDataV3 = 3638920194
 PromiseYieldIndices = 405847541
 PromiseYieldTimeout = 3189361393
 PublicKey = 601042198