diff --git a/bridges/bin/runtime-common/src/refund_relayer_extension.rs b/bridges/bin/runtime-common/src/refund_relayer_extension.rs index 9efeedf24d..df4eae6f73 100644 --- a/bridges/bin/runtime-common/src/refund_relayer_extension.rs +++ b/bridges/bin/runtime-common/src/refund_relayer_extension.rs @@ -28,9 +28,12 @@ use codec::{Decode, Encode}; use frame_support::{ dispatch::{CallableCallFor, DispatchInfo, Dispatchable, PostDispatchInfo}, traits::IsSubType, + weights::Weight, CloneNoBound, DefaultNoBound, EqNoBound, PartialEqNoBound, RuntimeDebugNoBound, }; -use pallet_bridge_grandpa::{CallSubType as GrandpaCallSubType, SubmitFinalityProofHelper}; +use pallet_bridge_grandpa::{ + CallSubType as GrandpaCallSubType, SubmitFinalityProofHelper, SubmitFinalityProofInfo, +}; use pallet_bridge_messages::Config as MessagesConfig; use pallet_bridge_parachains::{ BoundedBridgeGrandpaConfig, CallSubType as ParachainsCallSubType, Config as ParachainsConfig, @@ -140,7 +143,11 @@ pub struct PreDispatchData { #[derive(RuntimeDebugNoBound, PartialEq)] pub enum CallInfo { /// Relay chain finality + parachain finality + message delivery calls. - AllFinalityAndDelivery(RelayBlockNumber, SubmitParachainHeadsInfo, ReceiveMessagesProofInfo), + AllFinalityAndDelivery( + SubmitFinalityProofInfo, + SubmitParachainHeadsInfo, + ReceiveMessagesProofInfo, + ), /// Parachain finality + message delivery calls. ParachainFinalityAndDelivery(SubmitParachainHeadsInfo, ReceiveMessagesProofInfo), /// Standalone message delivery call. @@ -149,7 +156,7 @@ pub enum CallInfo { impl CallInfo { /// Returns the pre-dispatch `finality_target` sent to the `SubmitFinalityProof` call. - fn submit_finality_proof_info(&self) -> Option { + fn submit_finality_proof_info(&self) -> Option> { match *self { Self::AllFinalityAndDelivery(info, _, _) => Some(info), _ => None, @@ -318,6 +325,9 @@ where len: usize, result: &DispatchResult, ) -> Result<(), TransactionValidityError> { + let mut extra_weight = Weight::zero(); + let mut extra_size = 0; + // We don't refund anything if the transaction has failed. if result.is_err() { return Ok(()) @@ -330,8 +340,10 @@ where }; // check if relay chain state has been updated - if let Some(relay_block_number) = call_info.submit_finality_proof_info() { - if !SubmitFinalityProofHelper::::was_successful(relay_block_number) { + if let Some(finality_proof_info) = call_info.submit_finality_proof_info() { + if !SubmitFinalityProofHelper::::was_successful( + finality_proof_info.block_number, + ) { // we only refund relayer if all calls have updated chain state return Ok(()) } @@ -342,6 +354,11 @@ where // `utility.batchAll` transaction always requires payment. But in both cases we'll // refund relayer - either explicitly here, or using `Pays::No` if he's choosing // to submit dedicated transaction. + + // submitter has means to include extra weight/bytes in the `submit_finality_proof` + // call, so let's subtract extra weight/size to avoid refunding for this extra stuff + extra_weight = finality_proof_info.extra_weight; + extra_size = finality_proof_info.extra_size; } // check if parachain state has been updated @@ -370,8 +387,15 @@ where // cost of this attack is nothing. Hence we use zero as tip here. 
let tip = Zero::zero(); + // decrease post-dispatch weight/size using extra weight/size that we know now + let post_info_len = len.saturating_sub(extra_size as usize); + let mut post_info = *post_info; + post_info.actual_weight = + Some(post_info.actual_weight.unwrap_or(info.weight).saturating_sub(extra_weight)); + // compute the relayer refund - let refund = Refund::compute_refund(info, post_info, len, tip); + let refund = Refund::compute_refund(info, &post_info, post_info_len, tip); + // finally - register refund in relayers pallet RelayersPallet::::register_relayer_reward(Msgs::Id::get(), &relayer, refund); @@ -397,9 +421,9 @@ mod tests { use bp_parachains::{BestParaHeadHash, ParaInfo}; use bp_polkadot_core::parachains::{ParaHash, ParaHeadsProof, ParaId}; use bp_runtime::HeaderId; - use bp_test_utils::make_default_justification; + use bp_test_utils::{make_default_justification, test_keyring}; use frame_support::{assert_storage_noop, parameter_types, weights::Weight}; - use pallet_bridge_grandpa::Call as GrandpaCall; + use pallet_bridge_grandpa::{Call as GrandpaCall, StoredAuthoritySet}; use pallet_bridge_messages::Call as MessagesCall; use pallet_bridge_parachains::{Call as ParachainsCall, RelayBlockHash}; use sp_runtime::{ @@ -434,7 +458,11 @@ mod tests { parachain_head_hash: ParaHash, best_delivered_message: MessageNonce, ) { + let authorities = test_keyring().into_iter().map(|(a, w)| (a.into(), w)).collect(); let best_relay_header = HeaderId(best_relay_header_number, RelayBlockHash::default()); + pallet_bridge_grandpa::CurrentAuthoritySet::::put( + StoredAuthoritySet::try_new(authorities, 0).unwrap(), + ); pallet_bridge_grandpa::BestFinalized::::put(best_relay_header); let para_id = ParaId(TestParachain::get()); @@ -524,7 +552,11 @@ mod tests { PreDispatchData { relayer: relayer_account_at_this_chain(), call_info: CallInfo::AllFinalityAndDelivery( - 200, + SubmitFinalityProofInfo { + block_number: 200, + extra_weight: Weight::zero(), + extra_size: 0, + }, SubmitParachainHeadsInfo { at_relay_block_number: 200, para_id: ParaId(TestParachain::get()), @@ -823,6 +855,54 @@ mod tests { }); } + #[test] + fn post_dispatch_refunds_relayer_in_all_finality_batch_with_extra_weight() { + run_test(|| { + initialize_environment(200, 200, [1u8; 32].into(), 200); + + let mut dispatch_info = dispatch_info(); + dispatch_info.weight = Weight::from_ref_time( + frame_support::weights::constants::WEIGHT_REF_TIME_PER_SECOND * 2, + ); + + // without any size/weight refund: we expect regular reward + let pre_dispatch_data = all_finality_pre_dispatch_data(); + let regular_reward = expected_reward(); + run_post_dispatch(Some(pre_dispatch_data), Ok(())); + assert_eq!( + RelayersPallet::::relayer_reward( + relayer_account_at_this_chain(), + TestLaneId::get() + ), + Some(regular_reward), + ); + + // now repeat the same with size+weight refund: we expect smaller reward + let mut pre_dispatch_data = all_finality_pre_dispatch_data(); + match pre_dispatch_data.call_info { + CallInfo::AllFinalityAndDelivery(ref mut info, ..) 
=> { + info.extra_weight.set_ref_time( + frame_support::weights::constants::WEIGHT_REF_TIME_PER_SECOND, + ); + info.extra_size = 32; + }, + _ => unreachable!(), + } + run_post_dispatch(Some(pre_dispatch_data), Ok(())); + let reward_after_two_calls = RelayersPallet::::relayer_reward( + relayer_account_at_this_chain(), + TestLaneId::get(), + ) + .unwrap(); + assert!( + reward_after_two_calls < 2 * regular_reward, + "{} must be < 2 * {}", + reward_after_two_calls, + 2 * regular_reward, + ); + }); + } + #[test] fn post_dispatch_refunds_relayer_in_all_finality_batch() { run_test(|| { diff --git a/bridges/modules/grandpa/src/call_ext.rs b/bridges/modules/grandpa/src/call_ext.rs index 42c276f5f6..b57aebb1ac 100644 --- a/bridges/modules/grandpa/src/call_ext.rs +++ b/bridges/modules/grandpa/src/call_ext.rs @@ -14,17 +14,46 @@ // You should have received a copy of the GNU General Public License // along with Parity Bridges Common. If not, see . -use crate::{Config, Error, Pallet}; +use crate::{weights::WeightInfo, BridgedBlockNumber, BridgedHeader, Config, Error, Pallet}; +use bp_header_chain::{justification::GrandpaJustification, ChainWithGrandpa}; use bp_runtime::BlockNumberOf; -use frame_support::{dispatch::CallableCallFor, traits::IsSubType}; +use codec::Encode; +use frame_support::{dispatch::CallableCallFor, traits::IsSubType, weights::Weight, RuntimeDebug}; use sp_runtime::{ - traits::Header, + traits::{Header, Zero}, transaction_validity::{InvalidTransaction, TransactionValidity, ValidTransaction}, + SaturatedConversion, }; +/// Info about a `SubmitFinalityProof` call. +#[derive(Copy, Clone, PartialEq, RuntimeDebug)] +pub struct SubmitFinalityProofInfo { + /// Number of the finality target. + pub block_number: N, + /// Extra weight that we assume is included in the call. + /// + /// We have some assumptions about headers and justifications of the bridged chain. + /// We know that if our assumptions are correct, then the weight of the call must not + /// be above some limit. The fee paid for weight above that limit is never refunded. + pub extra_weight: Weight, + /// Extra size (in bytes) that we assume is included in the call. + /// + /// We have some assumptions about headers and justifications of the bridged chain. + /// We know that if our assumptions are correct, then the size of the call must not + /// be above some limit. The fee paid for bytes above that limit is never refunded. + pub extra_size: u32, +} + +impl SubmitFinalityProofInfo { + /// Returns `true` if call size/weight is below our estimations for regular calls. + pub fn fits_limits(&self) -> bool { + self.extra_weight.is_zero() && self.extra_size.is_zero() + } +} + /// Helper struct that provides methods for working with the `SubmitFinalityProof` call. pub struct SubmitFinalityProofHelper, I: 'static> { - pub _phantom_data: sp_std::marker::PhantomData<(T, I)>, + _phantom_data: sp_std::marker::PhantomData<(T, I)>, } impl, I: 'static> SubmitFinalityProofHelper { @@ -69,12 +98,17 @@ impl, I: 'static> SubmitFinalityProofHelper { pub trait CallSubType, I: 'static>: IsSubType, T>> { - /// Extract the finality target from a `SubmitParachainHeads` call. - fn submit_finality_proof_info(&self) -> Option> { - if let Some(crate::Call::::submit_finality_proof { finality_target, .. }) = + /// Extract finality proof info from a runtime call.
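+ /// Returns `None` if the call is not the bridge GRANDPA pallet's `submit_finality_proof` call.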
+ fn submit_finality_proof_info( + &self, + ) -> Option>> { + if let Some(crate::Call::::submit_finality_proof { finality_target, justification }) = self.is_sub_type() { - return Some(*finality_target.number()) + return Some(submit_finality_proof_info_from_args::( + finality_target, + justification, + )) } None @@ -92,7 +126,7 @@ pub trait CallSubType, I: 'static>: _ => return Ok(ValidTransaction::default()), }; - match SubmitFinalityProofHelper::::check_obsolete(finality_target) { + match SubmitFinalityProofHelper::::check_obsolete(finality_target.block_number) { Ok(_) => Ok(ValidTransaction::default()), Err(Error::::OldHeader) => InvalidTransaction::Stale.into(), Err(_) => InvalidTransaction::Call.into(), @@ -105,15 +139,66 @@ impl, I: 'static> CallSubType for T::RuntimeCall where { } +/// Extract finality proof info from the submitted header and justification. +pub(crate) fn submit_finality_proof_info_from_args, I: 'static>( + finality_target: &BridgedHeader, + justification: &GrandpaJustification>, +) -> SubmitFinalityProofInfo> { + let block_number = *finality_target.number(); + + // the `submit_finality_proof` call will reject justifications with invalid, duplicate, + // unknown and extra signatures. It'll also reject justifications with less than necessary + // signatures. So we do not care about extra weight because of additional signatures here. + let precommits_len = justification.commit.precommits.len().saturated_into(); + let required_precommits = precommits_len; + + // We do care about extra weight because of more-than-expected headers in the votes + // ancestries. But we have problems computing extra weight for additional headers (weight of + // additional header is too small, so that our benchmarks aren't detecting that). So if there + // are more than expected headers in votes ancestries, we will treat the whole call weight + // as an extra weight. + let votes_ancestries_len = justification.votes_ancestries.len().saturated_into(); + let extra_weight = + if votes_ancestries_len > T::BridgedChain::REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY { + T::WeightInfo::submit_finality_proof(precommits_len, votes_ancestries_len) + } else { + Weight::zero() + }; + + // we can estimate extra call size easily, without any additional significant overhead + let actual_call_size: u32 = finality_target + .encoded_size() + .saturating_add(justification.encoded_size()) + .saturated_into(); + let max_expected_call_size = max_expected_call_size::(required_precommits); + let extra_size = actual_call_size.saturating_sub(max_expected_call_size); + + SubmitFinalityProofInfo { block_number, extra_weight, extra_size } +} + +/// Returns maximal expected size of `submit_finality_proof` call arguments. 
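+/// This is the maximal expected bridged header size (`MAX_HEADER_SIZE`) plus the maximal reasonable justification size for the given number of precommits.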
+fn max_expected_call_size, I: 'static>(required_precommits: u32) -> u32 { + let max_expected_justification_size = + GrandpaJustification::max_reasonable_size::(required_precommits); + + // call arguments are header and justification + T::BridgedChain::MAX_HEADER_SIZE.saturating_add(max_expected_justification_size) +} + #[cfg(test)] mod tests { use crate::{ call_ext::CallSubType, - mock::{run_test, test_header, RuntimeCall, TestNumber, TestRuntime}, - BestFinalized, + mock::{run_test, test_header, RuntimeCall, TestBridgedChain, TestNumber, TestRuntime}, + BestFinalized, Config, WeightInfo, }; + use bp_header_chain::ChainWithGrandpa; use bp_runtime::HeaderId; - use bp_test_utils::make_default_justification; + use bp_test_utils::{ + make_default_justification, make_justification_for_header, JustificationGeneratorParams, + }; + use frame_support::weights::Weight; + use sp_runtime::{testing::DigestItem, traits::Header as _, SaturatedConversion}; fn validate_block_submit(num: TestNumber) -> bool { let bridge_grandpa_call = crate::Call::::submit_finality_proof { @@ -160,4 +245,67 @@ mod tests { assert!(validate_block_submit(15)); }); } + + #[test] + fn extension_returns_correct_extra_size_if_call_arguments_are_too_large() { + // when call arguments are below our limit => no refund + let small_finality_target = test_header(1); + let justification_params = JustificationGeneratorParams { + header: small_finality_target.clone(), + ..Default::default() + }; + let small_justification = make_justification_for_header(justification_params); + let small_call = RuntimeCall::Grandpa(crate::Call::submit_finality_proof { + finality_target: Box::new(small_finality_target), + justification: small_justification, + }); + assert_eq!(small_call.submit_finality_proof_info().unwrap().extra_size, 0); + + // when call arguments are too large => partial refund + let mut large_finality_target = test_header(1); + large_finality_target + .digest_mut() + .push(DigestItem::Other(vec![42u8; 1024 * 1024])); + let justification_params = JustificationGeneratorParams { + header: large_finality_target.clone(), + ..Default::default() + }; + let large_justification = make_justification_for_header(justification_params); + let large_call = RuntimeCall::Grandpa(crate::Call::submit_finality_proof { + finality_target: Box::new(large_finality_target), + justification: large_justification, + }); + assert_ne!(large_call.submit_finality_proof_info().unwrap().extra_size, 0); + } + + #[test] + fn extension_returns_correct_extra_weight_if_there_are_too_many_headers_in_votes_ancestry() { + let finality_target = test_header(1); + let mut justification_params = JustificationGeneratorParams { + header: finality_target.clone(), + ancestors: TestBridgedChain::REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY, + ..Default::default() + }; + + // when there are `REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY` headers => no refund + let justification = make_justification_for_header(justification_params.clone()); + let call = RuntimeCall::Grandpa(crate::Call::submit_finality_proof { + finality_target: Box::new(finality_target.clone()), + justification, + }); + assert_eq!(call.submit_finality_proof_info().unwrap().extra_weight, Weight::zero()); + + // when there are `REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY + 1` headers => full refund + justification_params.ancestors += 1; + let justification = make_justification_for_header(justification_params); + let call_weight = ::WeightInfo::submit_finality_proof( + justification.commit.precommits.len().saturated_into(), + 
justification.votes_ancestries.len().saturated_into(), + ); + let call = RuntimeCall::Grandpa(crate::Call::submit_finality_proof { + finality_target: Box::new(finality_target), + justification, + }); + assert_eq!(call.submit_finality_proof_info().unwrap().extra_weight, call_weight); + } } diff --git a/bridges/modules/grandpa/src/lib.rs b/bridges/modules/grandpa/src/lib.rs index b11da53f7b..e94d91a5c1 100644 --- a/bridges/modules/grandpa/src/lib.rs +++ b/bridges/modules/grandpa/src/lib.rs @@ -36,7 +36,7 @@ // Runtime-generated enums #![allow(clippy::large_enum_variant)] -use storage_types::StoredAuthoritySet; +pub use storage_types::StoredAuthoritySet; use bp_header_chain::{ justification::GrandpaJustification, ChainWithGrandpa, HeaderChain, InitializationData, @@ -180,6 +180,9 @@ pub mod pallet { let is_authorities_change_enacted = try_enact_authority_change::(&finality_target, set_id)?; + let may_refund_call_fee = is_authorities_change_enacted && + submit_finality_proof_info_from_args::(&*finality_target, &justification) + .fits_limits(); >::mutate(|count| *count += 1); insert_header::(*finality_target, hash); log::info!( @@ -193,8 +196,10 @@ pub mod pallet { // // We don't want to charge extra costs for mandatory operations. So relayer is not // paying fee for mandatory headers import transactions. - let is_mandatory_header = is_authorities_change_enacted; - let pays_fee = if is_mandatory_header { Pays::No } else { Pays::Yes }; + // + // If size/weight of the call is exceeds our estimated limits, the relayer still needs + // to pay for the transaction. + let pays_fee = if may_refund_call_fee { Pays::No } else { Pays::Yes }; // the proof size component of the call weight assumes that there are // `MaxBridgedAuthorities` in the `CurrentAuthoritySet` (we use `MaxEncodedLen` @@ -313,7 +318,7 @@ pub mod pallet { /// The current GRANDPA Authority set. #[pallet::storage] - pub(super) type CurrentAuthoritySet, I: 'static = ()> = + pub type CurrentAuthoritySet, I: 'static = ()> = StorageValue<_, StoredAuthoritySet, ValueQuery>; /// Optional pallet owner. @@ -504,7 +509,7 @@ pub mod pallet { init_params; let authority_set_length = authority_list.len(); let authority_set = StoredAuthoritySet::::try_new(authority_list, set_id) - .map_err(|_| { + .map_err(|e| { log::error!( target: LOG_TARGET, "Failed to initialize bridge. 
Number of authorities in the set {} is larger than the configured value {}", @@ -512,7 +517,7 @@ pub mod pallet { T::BridgedChain::MAX_AUTHORITIES_COUNT, ); - Error::TooManyAuthoritiesInSet + e })?; let initial_hash = header.hash(); @@ -630,8 +635,8 @@ pub fn initialize_for_benchmarks, I: 'static>(header: BridgedHeader mod tests { use super::*; use crate::mock::{ - run_test, test_header, RuntimeOrigin, TestHeader, TestNumber, TestRuntime, - MAX_BRIDGED_AUTHORITIES, + run_test, test_header, RuntimeOrigin, TestBridgedChain, TestHeader, TestNumber, + TestRuntime, MAX_BRIDGED_AUTHORITIES, }; use bp_header_chain::BridgeGrandpaCall; use bp_runtime::BasicOperatingMode; @@ -965,6 +970,64 @@ mod tests { }) } + #[test] + fn relayer_pays_tx_fee_when_submitting_huge_mandatory_header() { + run_test(|| { + initialize_substrate_bridge(); + + // let's prepare a huge authorities change header, which is definitely above size limits + let mut header = test_header(2); + header.digest = change_log(0); + header.digest.push(DigestItem::Other(vec![42u8; 1024 * 1024])); + let justification = make_default_justification(&header); + + // without large digest item ^^^ the relayer would have paid zero transaction fee + // (`Pays::No`) + let result = Pallet::::submit_finality_proof( + RuntimeOrigin::signed(1), + Box::new(header.clone()), + justification, + ); + assert_ok!(result); + assert_eq!(result.unwrap().pays_fee, frame_support::dispatch::Pays::Yes); + + // Make sure that our header is the best finalized + assert_eq!(>::get().unwrap().1, header.hash()); + assert!(>::contains_key(header.hash())); + }) + } + + #[test] + fn relayer_pays_tx_fee_when_submitting_justification_with_long_ancestry_votes() { + run_test(|| { + initialize_substrate_bridge(); + + // let's prepare a huge authorities change header, which is definitely above weight + // limits + let mut header = test_header(2); + header.digest = change_log(0); + let justification = make_justification_for_header(JustificationGeneratorParams { + header: header.clone(), + ancestors: TestBridgedChain::REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY + 1, + ..Default::default() + }); + + // without many headers in votes ancestries ^^^ the relayer would have paid zero + // transaction fee (`Pays::No`) + let result = Pallet::::submit_finality_proof( + RuntimeOrigin::signed(1), + Box::new(header.clone()), + justification, + ); + assert_ok!(result); + assert_eq!(result.unwrap().pays_fee, frame_support::dispatch::Pays::Yes); + + // Make sure that our header is the best finalized + assert_eq!(>::get().unwrap().1, header.hash()); + assert!(>::contains_key(header.hash())); + }) + } + #[test] fn importing_header_rejects_header_with_scheduled_change_delay() { run_test(|| { diff --git a/bridges/modules/grandpa/src/storage_types.rs b/bridges/modules/grandpa/src/storage_types.rs index 918c131289..d674f1a7b0 100644 --- a/bridges/modules/grandpa/src/storage_types.rs +++ b/bridges/modules/grandpa/src/storage_types.rs @@ -16,7 +16,7 @@ //! Wrappers for public types that are implementing `MaxEncodedLen` -use crate::Config; +use crate::{Config, Error}; use bp_header_chain::{AuthoritySet, ChainWithGrandpa}; use codec::{Decode, Encode, MaxEncodedLen}; @@ -52,8 +52,12 @@ impl, I: 'static> StoredAuthoritySet { /// Try to create a new bounded GRANDPA Authority Set from unbounded list. /// /// Returns error if number of authorities in the provided list is too large. 
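+ /// The error returned in that case is `Error::TooManyAuthoritiesInSet`.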
- pub fn try_new(authorities: AuthorityList, set_id: SetId) -> Result { - Ok(Self { authorities: TryFrom::try_from(authorities).map_err(drop)?, set_id }) + pub fn try_new(authorities: AuthorityList, set_id: SetId) -> Result> { + Ok(Self { + authorities: TryFrom::try_from(authorities) + .map_err(|_| Error::TooManyAuthoritiesInSet)?, + set_id, + }) } /// Returns number of bytes that may be subtracted from the PoV component of diff --git a/bridges/modules/parachains/src/call_ext.rs b/bridges/modules/parachains/src/call_ext.rs index 5aed9e80ba..ee3770146c 100644 --- a/bridges/modules/parachains/src/call_ext.rs +++ b/bridges/modules/parachains/src/call_ext.rs @@ -23,14 +23,17 @@ use sp_runtime::transaction_validity::{InvalidTransaction, TransactionValidity, /// Info about a `SubmitParachainHeads` call which tries to update a single parachain. #[derive(PartialEq, RuntimeDebug)] pub struct SubmitParachainHeadsInfo { + /// Number of the finalized relay block that has been used to prove parachain finality. pub at_relay_block_number: RelayBlockNumber, + /// Parachain identifier. pub para_id: ParaId, + /// Hash of the bundled parachain head. pub para_head_hash: ParaHash, } /// Helper struct that provides methods for working with the `SubmitParachainHeads` call. pub struct SubmitParachainHeadsHelper, I: 'static> { - pub _phantom_data: sp_std::marker::PhantomData<(T, I)>, + _phantom_data: sp_std::marker::PhantomData<(T, I)>, } impl, I: 'static> SubmitParachainHeadsHelper { diff --git a/bridges/modules/parachains/src/mock.rs b/bridges/modules/parachains/src/mock.rs index 0e8261f689..aabcdcdd36 100644 --- a/bridges/modules/parachains/src/mock.rs +++ b/bridges/modules/parachains/src/mock.rs @@ -187,7 +187,7 @@ impl frame_system::Config for TestRuntime { type BlockLength = (); type SS58Prefix = (); type OnSetCode = (); - type MaxConsumers = frame_support::traits::ConstU32<16>; + type MaxConsumers = ConstU32<16>; } parameter_types! { @@ -223,7 +223,7 @@ impl pallet_bridge_parachains::Config for TestRuntime { type ParasPalletName = ParasPalletName; type ParaStoredHeaderDataBuilder = (Parachain1, Parachain2, Parachain3, BigParachain); type HeadsToKeep = HeadsToKeep; - type MaxParaHeadDataSize = frame_support::traits::ConstU32; + type MaxParaHeadDataSize = ConstU32; } #[derive(Debug)] diff --git a/bridges/primitives/header-chain/src/justification.rs b/bridges/primitives/header-chain/src/justification.rs index 43adc80125..1e34f36562 100644 --- a/bridges/primitives/header-chain/src/justification.rs +++ b/bridges/primitives/header-chain/src/justification.rs @@ -19,12 +19,15 @@ //! Adapted copy of substrate/client/finality-grandpa/src/justification.rs. If origin //! will ever be moved to the sp_finality_grandpa, we should reuse that implementation. -use codec::{Decode, Encode}; +use crate::ChainWithGrandpa; + +use bp_runtime::{BlockNumberOf, Chain, HashOf}; +use codec::{Decode, Encode, MaxEncodedLen}; use finality_grandpa::voter_set::VoterSet; use frame_support::RuntimeDebug; use scale_info::TypeInfo; use sp_finality_grandpa::{AuthorityId, AuthoritySignature, SetId}; -use sp_runtime::traits::Header as HeaderT; +use sp_runtime::{traits::Header as HeaderT, SaturatedConversion}; use sp_std::{ collections::{btree_map::BTreeMap, btree_set::BTreeSet}, prelude::*, @@ -46,6 +49,43 @@ pub struct GrandpaJustification { pub votes_ancestries: Vec
, } +impl GrandpaJustification { + /// Returns a reasonable size of the justification, using constants from the provided chain. + /// + /// An imprecise analogue of a `MaxEncodedLen` implementation. We don't use it for + /// any precise calculations - that's just an estimation. + pub fn max_reasonable_size(required_precommits: u32) -> u32 + where + C: Chain
+ ChainWithGrandpa, + { + // we don't need precise results here - just estimations, so some details + // are removed from computations (e.g. bytes required to encode vector length) + + // structures in `finality_grandpa` crate are not implementing `MaxEncodedLength`, so + // here's our estimation for the `finality_grandpa::Commit` struct size + // + // precommit is: hash + number + // signed precommit is: precommit + signature (64b) + authority id + // commit is: hash + number + vec of signed precommits + let signed_precommit_size: u32 = BlockNumberOf::::max_encoded_len() + .saturating_add(HashOf::::max_encoded_len().saturated_into()) + .saturating_add(64) + .saturating_add(AuthorityId::max_encoded_len().saturated_into()) + .saturated_into(); + let max_expected_signed_commit_size = signed_precommit_size + .saturating_mul(required_precommits) + .saturating_add(BlockNumberOf::::max_encoded_len().saturated_into()) + .saturating_add(HashOf::::max_encoded_len().saturated_into()); + + // justification is a signed GRANDPA commit, `votes_ancestries` vector and round number + let max_expected_votes_ancestries_size = C::REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY + .saturating_mul(C::AVERAGE_HEADER_SIZE_IN_JUSTIFICATION); + + 8u32.saturating_add(max_expected_signed_commit_size) + .saturating_add(max_expected_votes_ancestries_size) + } +} + impl crate::FinalityProof for GrandpaJustification { fn target_header_number(&self) -> H::Number { self.commit.target_number @@ -59,6 +99,12 @@ pub enum Error { JustificationDecode, /// Justification is finalizing unexpected header. InvalidJustificationTarget, + /// Justification contains redundant votes. + RedundantVotesInJustification, + /// Justification contains unknown authority precommit. + UnknownAuthorityVote, + /// Justification contains duplicate authority precommit. + DuplicateAuthorityVote, /// The authority has provided an invalid signature. InvalidAuthoritySignature, /// The justification contains precommit for header that is not a descendant of the commit @@ -124,7 +170,7 @@ where authorities_set_id, authorities_set, justification, - &mut (), + &mut StrictVerificationCallbacks, ) } @@ -138,6 +184,23 @@ trait VerificationCallbacks { fn on_redundant_authority_vote(&mut self, precommit_idx: usize) -> Result<(), Error>; } +/// Verification callbacks that reject all unknown, duplicate or redundant votes. +struct StrictVerificationCallbacks; + +impl VerificationCallbacks for StrictVerificationCallbacks { + fn on_unkown_authority(&mut self, _precommit_idx: usize) -> Result<(), Error> { + Err(Error::UnknownAuthorityVote) + } + + fn on_duplicate_authority_vote(&mut self, _precommit_idx: usize) -> Result<(), Error> { + Err(Error::DuplicateAuthorityVote) + } + + fn on_redundant_authority_vote(&mut self, _precommit_idx: usize) -> Result<(), Error> { + Err(Error::RedundantVotesInJustification) + } +} + /// Verification callbacks for justification optimization. 
struct OptimizationCallbacks(Vec); @@ -170,21 +233,6 @@ impl VerificationCallbacks for OptimizationCallbacks { } } -// this implementation will be removed in /~https://github.com/paritytech/parity-bridges-common/pull/1882 -impl VerificationCallbacks for () { - fn on_unkown_authority(&mut self, _precommit_idx: usize) -> Result<(), Error> { - Ok(()) - } - - fn on_duplicate_authority_vote(&mut self, _precommit_idx: usize) -> Result<(), Error> { - Ok(()) - } - - fn on_redundant_authority_vote(&mut self, _precommit_idx: usize) -> Result<(), Error> { - Ok(()) - } -} - /// Verify that justification, that is generated by given authority set, finalizes given header. fn verify_justification_with_callbacks( finalized_target: (Header::Hash, Header::Number), @@ -206,11 +254,13 @@ where let mut signature_buffer = Vec::new(); let mut votes = BTreeSet::new(); let mut cumulative_weight = 0u64; + for (precommit_idx, signed) in justification.commit.precommits.iter().enumerate() { // if we have collected enough precommits, we probabably want to fail/remove extra // precommits - if cumulative_weight > threshold { + if cumulative_weight >= threshold { callbacks.on_redundant_authority_vote(precommit_idx)?; + continue } // authority must be in the set diff --git a/bridges/primitives/header-chain/tests/implementation_match.rs b/bridges/primitives/header-chain/tests/implementation_match.rs index aaa19d4b91..5d0fa35c37 100644 --- a/bridges/primitives/header-chain/tests/implementation_match.rs +++ b/bridges/primitives/header-chain/tests/implementation_match.rs @@ -15,7 +15,8 @@ // along with Parity Bridges Common. If not, see . //! Tests inside this module are made to ensure that our custom justification verification -//! implementation works exactly as `fn finality_grandpa::validate_commit`. +//! implementation works similar to the [`finality_grandpa::validate_commit`] and explicitly +//! show where we behave different. //! //! Some of tests in this module may partially duplicate tests from `justification.rs`, //! but their purpose is different. @@ -23,7 +24,7 @@ use bp_header_chain::justification::{verify_justification, Error, GrandpaJustification}; use bp_test_utils::{ header_id, make_justification_for_header, signed_precommit, test_header, Account, - JustificationGeneratorParams, ALICE, BOB, CHARLIE, DAVE, EVE, TEST_GRANDPA_SET_ID, + JustificationGeneratorParams, ALICE, BOB, CHARLIE, DAVE, EVE, FERDIE, TEST_GRANDPA_SET_ID, }; use finality_grandpa::voter_set::VoterSet; use sp_finality_grandpa::{AuthorityId, AuthorityWeight}; @@ -172,7 +173,42 @@ fn same_result_when_precommit_target_is_not_descendant_of_commit_target() { } #[test] -fn same_result_when_justification_contains_duplicate_vote() { +fn same_result_when_there_are_not_enough_cumulative_weight_to_finalize_commit_target() { + // just remove one authority from the minimal set and we shall not reach the threshold + let mut authorities_set = minimal_accounts_set(); + authorities_set.pop(); + let justification = make_justification_for_header(JustificationGeneratorParams { + header: test_header(1), + authorities: authorities_set, + ..Default::default() + }); + + // our implementation returns an error + assert_eq!( + verify_justification::( + header_id::(1), + TEST_GRANDPA_SET_ID, + &full_voter_set(), + &justification, + ), + Err(Error::TooLowCumulativeWeight), + ); + // original implementation returns `Ok(validation_result)` + // with `validation_result.is_valid() == false`. 
+ let result = finality_grandpa::validate_commit( + &justification.commit, + &full_voter_set(), + &AncestryChain::new(&justification.votes_ancestries), + ) + .unwrap(); + + assert!(!result.is_valid()); +} + +// tests below are our differences with the original implementation + +#[test] +fn different_result_when_justification_contains_duplicate_vote() { let mut justification = make_justification_for_header(JustificationGeneratorParams { header: test_header(1), authorities: minimal_accounts_set(), @@ -181,10 +217,11 @@ fn same_result_when_justification_contains_duplicate_vote() { }); // the justification may contain exactly the same vote (i.e. same precommit and same signature) // multiple times && it isn't treated as an error by original implementation + let last_precommit = justification.commit.precommits.pop().unwrap(); justification.commit.precommits.push(justification.commit.precommits[0].clone()); - justification.commit.precommits.push(justification.commit.precommits[0].clone()); + justification.commit.precommits.push(last_precommit); - // our implementation succeeds + // our implementation fails assert_eq!( verify_justification::( header_id::(1), @@ -192,7 +229,7 @@ fn same_result_when_justification_contains_duplicate_vote() { &full_voter_set(), &justification, ), - Ok(()), + Err(Error::DuplicateAuthorityVote), ); // original implementation returns `Ok(validation_result)` // with `validation_result.is_valid() == true`. @@ -207,7 +244,7 @@ fn same_result_when_justification_contains_duplicate_vote() { } #[test] -fn same_result_when_authority_equivocates_once_in_a_round() { +fn different_results_when_authority_equivocates_once_in_a_round() { let mut justification = make_justification_for_header(JustificationGeneratorParams { header: test_header(1), authorities: minimal_accounts_set(), @@ -216,14 +253,16 @@ fn same_result_when_authority_equivocates_once_in_a_round() { }); // the justification original implementation allows authority to submit two different // votes in a single round, of which only first is 'accepted' + let last_precommit = justification.commit.precommits.pop().unwrap(); justification.commit.precommits.push(signed_precommit::( &ALICE, header_id::(1), justification.round, TEST_GRANDPA_SET_ID, )); + justification.commit.precommits.push(last_precommit); - // our implementation succeeds + // our implementation fails assert_eq!( verify_justification::( header_id::(1), @@ -231,7 +270,7 @@ fn same_result_when_authority_equivocates_once_in_a_round() { &full_voter_set(), &justification, ), - Ok(()), + Err(Error::DuplicateAuthorityVote), ); // original implementation returns `Ok(validation_result)` // with `validation_result.is_valid() == true`. 
@@ -246,7 +285,7 @@ fn same_result_when_authority_equivocates_once_in_a_round() { } #[test] -fn same_result_when_authority_equivocates_twice_in_a_round() { +fn different_results_when_authority_equivocates_twice_in_a_round() { let mut justification = make_justification_for_header(JustificationGeneratorParams { header: test_header(1), authorities: minimal_accounts_set(), @@ -259,6 +298,8 @@ fn same_result_when_authority_equivocates_twice_in_a_round() { // but there's also a code that prevents this from happening: // /~https://github.com/paritytech/finality-grandpa/blob/6aeea2d1159d0f418f0b86e70739f2130629ca09/src/round.rs#L287 // => so now we are also just ignoring all votes from the same authority, except the first one + let last_precommit = justification.commit.precommits.pop().unwrap(); + let prev_last_precommit = justification.commit.precommits.pop().unwrap(); justification.commit.precommits.push(signed_precommit::( &ALICE, header_id::(1), @@ -271,8 +312,10 @@ fn same_result_when_authority_equivocates_twice_in_a_round() { justification.round, TEST_GRANDPA_SET_ID, )); + justification.commit.precommits.push(last_precommit); + justification.commit.precommits.push(prev_last_precommit); - // our implementation succeeds + // our implementation fails assert_eq!( verify_justification::( header_id::(1), @@ -280,7 +323,7 @@ fn same_result_when_authority_equivocates_twice_in_a_round() { &full_voter_set(), &justification, ), - Ok(()), + Err(Error::DuplicateAuthorityVote), ); // original implementation returns `Ok(validation_result)` // with `validation_result.is_valid() == true`. @@ -295,17 +338,23 @@ fn same_result_when_authority_equivocates_twice_in_a_round() { } #[test] -fn same_result_when_there_are_not_enough_cumulative_weight_to_finalize_commit_target() { - // just remove one authority from the minimal set and we shall not reach the threshold - let mut authorities_set = minimal_accounts_set(); - authorities_set.pop(); - let justification = make_justification_for_header(JustificationGeneratorParams { +fn different_results_when_there_are_more_than_enough_votes() { + let mut justification = make_justification_for_header(JustificationGeneratorParams { header: test_header(1), - authorities: authorities_set, + authorities: minimal_accounts_set(), + ancestors: 0, ..Default::default() }); + // the reference implementation just keep verifying signatures even if we have + // collected enough votes. We are not + justification.commit.precommits.push(signed_precommit::( + &EVE, + header_id::(1), + justification.round, + TEST_GRANDPA_SET_ID, + )); - // our implementation returns an error + // our implementation fails assert_eq!( verify_justification::( header_id::(1), @@ -313,10 +362,10 @@ fn same_result_when_there_are_not_enough_cumulative_weight_to_finalize_commit_ta &full_voter_set(), &justification, ), - Err(Error::TooLowCumulativeWeight), + Err(Error::RedundantVotesInJustification), ); // original implementation returns `Ok(validation_result)` - // with `validation_result.is_valid() == false`. + // with `validation_result.is_valid() == true`. 
let result = finality_grandpa::validate_commit( &justification.commit, &full_voter_set(), @@ -324,5 +373,46 @@ fn same_result_when_there_are_not_enough_cumulative_weight_to_finalize_commit_ta ) .unwrap(); - assert!(!result.is_valid()); + assert!(result.is_valid()); +} + +#[test] +fn different_results_when_there_is_a_vote_of_unknown_authority() { + let mut justification = make_justification_for_header(JustificationGeneratorParams { + header: test_header(1), + authorities: minimal_accounts_set(), + ancestors: 0, + ..Default::default() + }); + // the reference implementation just keep verifying signatures even if we have + // collected enough votes. We are not + let last_precommit = justification.commit.precommits.pop().unwrap(); + justification.commit.precommits.push(signed_precommit::( + &FERDIE, + header_id::(1), + justification.round, + TEST_GRANDPA_SET_ID, + )); + justification.commit.precommits.push(last_precommit); + + // our implementation fails + assert_eq!( + verify_justification::( + header_id::(1), + TEST_GRANDPA_SET_ID, + &full_voter_set(), + &justification, + ), + Err(Error::UnknownAuthorityVote), + ); + // original implementation returns `Ok(validation_result)` + // with `validation_result.is_valid() == true`. + let result = finality_grandpa::validate_commit( + &justification.commit, + &full_voter_set(), + &AncestryChain::new(&justification.votes_ancestries), + ) + .unwrap(); + + assert!(result.is_valid()); } diff --git a/bridges/primitives/header-chain/tests/justification.rs b/bridges/primitives/header-chain/tests/justification.rs index e18163313c..f01f4ea2f1 100644 --- a/bridges/primitives/header-chain/tests/justification.rs +++ b/bridges/primitives/header-chain/tests/justification.rs @@ -16,14 +16,16 @@ //! Tests for Grandpa Justification code. 
-use bp_header_chain::justification::{optimize_justification, verify_justification, Error}; +use bp_header_chain::justification::{ + optimize_justification, required_justification_precommits, verify_justification, Error, +}; use bp_test_utils::*; type TestHeader = sp_runtime::testing::Header; #[test] fn valid_justification_accepted() { - let authorities = vec![(ALICE, 1), (BOB, 1), (CHARLIE, 1), (DAVE, 1)]; + let authorities = vec![(ALICE, 1), (BOB, 1), (CHARLIE, 1)]; let params = JustificationGeneratorParams { header: test_header(1), round: TEST_GRANDPA_ROUND, @@ -54,7 +56,7 @@ fn valid_justification_accepted_with_single_fork() { header: test_header(1), round: TEST_GRANDPA_ROUND, set_id: TEST_GRANDPA_SET_ID, - authorities: vec![(ALICE, 1), (BOB, 1), (CHARLIE, 1), (DAVE, 1), (EVE, 1)], + authorities: vec![(ALICE, 1), (BOB, 1), (CHARLIE, 1)], ancestors: 5, forks: 1, }; @@ -76,15 +78,16 @@ fn valid_justification_accepted_with_arbitrary_number_of_authorities() { use sp_finality_grandpa::AuthorityId; let n = 15; + let required_signatures = required_justification_precommits(n as _); let authorities = accounts(n).iter().map(|k| (*k, 1)).collect::>(); let params = JustificationGeneratorParams { header: test_header(1), round: TEST_GRANDPA_ROUND, set_id: TEST_GRANDPA_SET_ID, - authorities: authorities.clone(), + authorities: authorities.clone().into_iter().take(required_signatures as _).collect(), ancestors: n.into(), - forks: n.into(), + forks: required_signatures, }; let authorities = authorities diff --git a/bridges/primitives/polkadot-core/src/lib.rs b/bridges/primitives/polkadot-core/src/lib.rs index 3b1712042d..10da6cd9b3 100644 --- a/bridges/primitives/polkadot-core/src/lib.rs +++ b/bridges/primitives/polkadot-core/src/lib.rs @@ -50,30 +50,37 @@ pub mod parachains; /// our bridge hub parachains huge. So let's stick to the real-world value here. /// /// Right now both Kusama and Polkadot aim to have around 1000 validators. Let's be safe here and -/// take twice as much here. -pub const MAX_AUTHORITIES_COUNT: u32 = 2_048; +/// take a bit more here. +pub const MAX_AUTHORITIES_COUNT: u32 = 1_256; /// Reasonable number of headers in the `votes_ancestries` on Polkadot-like chains. /// /// See [`bp_header_chain::ChainWithGrandpa`] for more details. -pub const REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY: u32 = 8; +/// +/// This value comes from recent (February, 2023) Kusama and Polkadot headers. There are no +/// justifications with any additional headers in votes ancestry, so reasonable headers may +/// be set to zero. But we assume that there may be small GRANDPA lags, so we're leaving some +/// reserve here. +pub const REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY: u32 = 2; /// Approximate average header size in `votes_ancestries` field of justification on Polkadot-like /// chains. /// /// See [`bp_header_chain::ChainWithGrandpa`] for more details. -pub const AVERAGE_HEADER_SIZE_IN_JUSTIFICATION: u32 = 256; +/// +/// This value comes from recent (February, 2023) Kusama headers. Average is `336` there, but some +/// non-mandatory headers has size `40kb` (they contain the BABE epoch descriptor with all +/// authorities - just like our mandatory header). Since we assume `2` headers in justification +/// votes ancestry, let's set average header to `40kb / 2`. +pub const AVERAGE_HEADER_SIZE_IN_JUSTIFICATION: u32 = 20 * 1024; /// Approximate maximal header size on Polkadot-like chains. 
/// -/// We expect maximal header to have digest item with the new authorities set for every consensus -/// engine (GRANDPA, Babe, BEEFY, ...) - so we multiply it by 3. And also -/// `AVERAGE_HEADER_SIZE_IN_JUSTIFICATION` bytes for other stuff. -/// /// See [`bp_header_chain::ChainWithGrandpa`] for more details. -pub const MAX_HEADER_SIZE: u32 = MAX_AUTHORITIES_COUNT - .saturating_mul(3) - .saturating_add(AVERAGE_HEADER_SIZE_IN_JUSTIFICATION); +/// +/// This value comes from recent (February, 2023) Kusama headers. The maximal header is a mandatory +/// header. In its SCALE-encoded form it is `80348` bytes. Let's have some reserve here. +pub const MAX_HEADER_SIZE: u32 = 90_000; /// Number of extra bytes (excluding size of storage value itself) of storage proof, built at /// Polkadot-like chain. This mostly depends on number of entries in the storage trie.
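A minimal, self-contained sketch of the refund logic introduced above, assuming simplified types: plain `u64`/`u32` stand in for `Weight` and transaction length, the hard-coded limits stand in for the `ChainWithGrandpa` constants, and `ExtraCallInfo`, `extra_call_info` and `refund_base` are illustrative names rather than items from the patch. It models the two cooperating pieces: the extra weight/size estimation done while decoding `submit_finality_proof`, and the post-dispatch step of the refund extension that subtracts that extra part before the relayer refund is computed.

/// Illustrative analogue of the extra weight/size fields of `SubmitFinalityProofInfo`.
#[derive(Debug, PartialEq)]
struct ExtraCallInfo {
    extra_weight: u64,
    extra_size: u32,
}

fn extra_call_info(
    actual_call_size: u32,
    max_expected_call_size: u32,
    votes_ancestries_len: u32,
    reasonable_ancestry_len: u32,
    benchmarked_call_weight: u64,
) -> ExtraCallInfo {
    // Size above the expected maximum is "extra" byte-by-byte; weight is all-or-nothing:
    // if the votes ancestry is longer than the reasonable limit, the whole benchmarked
    // call weight is treated as extra.
    let extra_weight = if votes_ancestries_len > reasonable_ancestry_len {
        benchmarked_call_weight
    } else {
        0
    };
    let extra_size = actual_call_size.saturating_sub(max_expected_call_size);
    ExtraCallInfo { extra_weight, extra_size }
}

fn refund_base(actual_weight: u64, tx_len: u32, extra: &ExtraCallInfo) -> (u64, u32) {
    // The refund is computed from the actual weight and transaction length minus the
    // extra part, so the relayer is never refunded for oversized headers/justifications.
    (
        actual_weight.saturating_sub(extra.extra_weight),
        tx_len.saturating_sub(extra.extra_size),
    )
}

fn main() {
    // A call that is 10_000 bytes above the expected maximum and has a too-long ancestry.
    let extra = extra_call_info(100_000, 90_000, 3, 2, 1_000_000);
    assert_eq!(extra, ExtraCallInfo { extra_weight: 1_000_000, extra_size: 10_000 });

    // Only the "regular" part of the call is used when computing the relayer refund.
    let (weight_for_refund, len_for_refund) = refund_base(1_500_000, 120_000, &extra);
    assert_eq!((weight_for_refund, len_for_refund), (500_000, 110_000));
    println!("refund computed from weight={weight_for_refund}, len={len_for_refund}");
}

The asymmetry is intentional and mirrors the comment in `submit_finality_proof_info_from_args`: extra bytes can be priced precisely, so only the surplus is excluded from the refund, while a too-long votes ancestry cannot be priced by the benchmarks, so the whole benchmarked call weight is excluded.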