From a5441fa3dc42044055b17beb90be088342e38231 Mon Sep 17 00:00:00 2001
From: jasl
Date: Thu, 27 Apr 2023 17:10:41 +0800
Subject: [PATCH] backport /~https://github.com/paritytech/polkadot/pull/7013

---
 Cargo.lock                                    |    1 -
 Cargo.toml                                    |    2 +
 .../node/core/dispute-coordinator/Cargo.toml  |   38 +
 .../core/dispute-coordinator/src/backend.rs   |  171 +
 .../core/dispute-coordinator/src/db/mod.rs    |   19 +
 .../core/dispute-coordinator/src/db/v1.rs     |  682 ++++
 .../core/dispute-coordinator/src/error.rs     |  132 +
 .../core/dispute-coordinator/src/import.rs    |  549 +++
 .../dispute-coordinator/src/initialized.rs    | 1311 +++++++
 .../node/core/dispute-coordinator/src/lib.rs  |  566 +++
 .../core/dispute-coordinator/src/metrics.rs   |  237 ++
 .../src/participation/mod.rs                  |  425 ++
 .../src/participation/queues/mod.rs           |  415 ++
 .../src/participation/queues/tests.rs         |  202 +
 .../src/participation/tests.rs                |  547 +++
 .../src/scraping/candidates.rs                |  167 +
 .../dispute-coordinator/src/scraping/mod.rs   |  457 +++
 .../dispute-coordinator/src/scraping/tests.rs |  651 ++++
 .../dispute-coordinator/src/spam_slots.rs     |  135 +
 .../core/dispute-coordinator/src/status.rs    |   58 +
 .../core/dispute-coordinator/src/tests.rs     | 3432 +++++++++++++++++
 21 files changed, 10196 insertions(+), 1 deletion(-)
 create mode 100644 polkadot/node/core/dispute-coordinator/Cargo.toml
 create mode 100644 polkadot/node/core/dispute-coordinator/src/backend.rs
 create mode 100644 polkadot/node/core/dispute-coordinator/src/db/mod.rs
 create mode 100644 polkadot/node/core/dispute-coordinator/src/db/v1.rs
 create mode 100644 polkadot/node/core/dispute-coordinator/src/error.rs
 create mode 100644 polkadot/node/core/dispute-coordinator/src/import.rs
 create mode 100644 polkadot/node/core/dispute-coordinator/src/initialized.rs
 create mode 100644 polkadot/node/core/dispute-coordinator/src/lib.rs
 create mode 100644 polkadot/node/core/dispute-coordinator/src/metrics.rs
 create mode 100644 polkadot/node/core/dispute-coordinator/src/participation/mod.rs
 create mode 100644 polkadot/node/core/dispute-coordinator/src/participation/queues/mod.rs
 create mode 100644 polkadot/node/core/dispute-coordinator/src/participation/queues/tests.rs
 create mode 100644 polkadot/node/core/dispute-coordinator/src/participation/tests.rs
 create mode 100644 polkadot/node/core/dispute-coordinator/src/scraping/candidates.rs
 create mode 100644 polkadot/node/core/dispute-coordinator/src/scraping/mod.rs
 create mode 100644 polkadot/node/core/dispute-coordinator/src/scraping/tests.rs
 create mode 100644 polkadot/node/core/dispute-coordinator/src/spam_slots.rs
 create mode 100644 polkadot/node/core/dispute-coordinator/src/status.rs
 create mode 100644 polkadot/node/core/dispute-coordinator/src/tests.rs

diff --git a/Cargo.lock b/Cargo.lock
index b5a99af4..cc222d21 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -8062,7 +8062,6 @@ dependencies = [
 [[package]]
 name = "polkadot-node-core-dispute-coordinator"
 version = "0.9.41"
-source = "git+/~https://github.com/paritytech/polkadot?branch=release-v0.9.41#e203bfb396ed949f102720debf32fb98166787af"
 dependencies = [
  "fatality",
  "futures",
diff --git a/Cargo.toml b/Cargo.toml
index 7f0856e3..a76f7022 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -56,6 +56,8 @@ members = [
 [patch."/~https://github.com/paritytech/polkadot"]
 polkadot-service = { path = "polkadot/node/service" }
+# TODO: Remove after upgrade to Polkadot v0.9.42
+polkadot-node-core-dispute-coordinator = { path = "polkadot/node/core/dispute-coordinator" }
 # TODO: Remove after upgrade to Polkadot v0.9.42
[patch."/~https://github.com/paritytech/cumulus"] diff --git a/polkadot/node/core/dispute-coordinator/Cargo.toml b/polkadot/node/core/dispute-coordinator/Cargo.toml new file mode 100644 index 00000000..49c99bce --- /dev/null +++ b/polkadot/node/core/dispute-coordinator/Cargo.toml @@ -0,0 +1,38 @@ +[package] +name = "polkadot-node-core-dispute-coordinator" +version = "0.9.41" +authors = ["Parity Technologies "] +edition = "2021" + +[dependencies] +futures = "0.3.21" +gum = { package = "tracing-gum", git = "/~https://github.com/paritytech/polkadot", branch = "release-v0.9.41" } +parity-scale-codec = "3.3.0" +kvdb = "0.13.0" +thiserror = "1.0.31" +lru = "0.9.0" +fatality = "0.0.6" + +polkadot-primitives = { git = "/~https://github.com/paritytech/polkadot", branch = "release-v0.9.41" } +polkadot-node-primitives = { git = "/~https://github.com/paritytech/polkadot", branch = "release-v0.9.41" } +polkadot-node-subsystem = { git = "/~https://github.com/paritytech/polkadot", branch = "release-v0.9.41" } +polkadot-node-subsystem-util = { git = "/~https://github.com/paritytech/polkadot", branch = "release-v0.9.41" } + +sc-keystore = { git = "/~https://github.com/paritytech/substrate", branch = "polkadot-v0.9.41" } + + +[dev-dependencies] +kvdb-memorydb = "0.13.0" +polkadot-node-subsystem-test-helpers = { git = "/~https://github.com/paritytech/polkadot", branch = "release-v0.9.41" } +sp-keyring = { git = "/~https://github.com/paritytech/substrate", branch = "polkadot-v0.9.41" } +sp-core = { git = "/~https://github.com/paritytech/substrate", branch = "polkadot-v0.9.41" } +sp-keystore = { git = "/~https://github.com/paritytech/substrate", branch = "polkadot-v0.9.41" } +assert_matches = "1.4.0" +test-helpers = { package = "polkadot-primitives-test-helpers", git = "/~https://github.com/paritytech/polkadot", branch = "release-v0.9.41" } +futures-timer = "3.0.2" +sp-application-crypto = { git = "/~https://github.com/paritytech/substrate", branch = "polkadot-v0.9.41" } +sp-tracing = { git = "/~https://github.com/paritytech/substrate", branch = "polkadot-v0.9.41" } + +[features] +# If not enabled, the dispute coordinator will do nothing. +disputes = [] diff --git a/polkadot/node/core/dispute-coordinator/src/backend.rs b/polkadot/node/core/dispute-coordinator/src/backend.rs new file mode 100644 index 00000000..d49ace49 --- /dev/null +++ b/polkadot/node/core/dispute-coordinator/src/backend.rs @@ -0,0 +1,171 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! An abstraction over storage used by the chain selection subsystem. +//! +//! This provides both a [`Backend`] trait and an [`OverlayedBackend`] +//! struct which allows in-memory changes to be applied on top of a +//! [`Backend`], maintaining consistency between queries and temporary writes, +//! before any commit to the underlying storage is made. 
+
+use polkadot_primitives::{CandidateHash, SessionIndex};
+
+use std::collections::HashMap;
+
+use super::db::v1::{CandidateVotes, RecentDisputes};
+use crate::error::FatalResult;
+
+#[derive(Debug)]
+pub enum BackendWriteOp {
+	WriteEarliestSession(SessionIndex),
+	WriteRecentDisputes(RecentDisputes),
+	WriteCandidateVotes(SessionIndex, CandidateHash, CandidateVotes),
+	DeleteCandidateVotes(SessionIndex, CandidateHash),
+}
+
+/// An abstraction over backend storage for the logic of this subsystem.
+pub trait Backend {
+	/// Load the earliest session, if any.
+	fn load_earliest_session(&self) -> FatalResult<Option<SessionIndex>>;
+
+	/// Load the recent disputes, if any.
+	fn load_recent_disputes(&self) -> FatalResult<Option<RecentDisputes>>;
+
+	/// Load the candidate votes for the specific session-candidate pair, if any.
+	fn load_candidate_votes(
+		&self,
+		session: SessionIndex,
+		candidate_hash: &CandidateHash,
+	) -> FatalResult<Option<CandidateVotes>>;
+
+	/// Atomically writes the list of operations, with later operations taking precedence over
+	/// prior.
+	fn write<I>(&mut self, ops: I) -> FatalResult<()>
+	where
+		I: IntoIterator<Item = BackendWriteOp>;
+}
+
+/// An in-memory overlay for the backend.
+///
+/// This maintains read-only access to the underlying backend, but can be converted into a set of
+/// write operations which will, when written to the underlying backend, give the same view as the
+/// state of the overlay.
+pub struct OverlayedBackend<'a, B: 'a> {
+	inner: &'a B,
+
+	// `None` means unchanged.
+	earliest_session: Option<SessionIndex>,
+	// `None` means unchanged.
+	recent_disputes: Option<RecentDisputes>,
+	// `None` means deleted, missing means query inner.
+	candidate_votes: HashMap<(SessionIndex, CandidateHash), Option<CandidateVotes>>,
+}
+
+impl<'a, B: 'a + Backend> OverlayedBackend<'a, B> {
+	pub fn new(backend: &'a B) -> Self {
+		Self {
+			inner: backend,
+			earliest_session: None,
+			recent_disputes: None,
+			candidate_votes: HashMap::new(),
+		}
+	}
+
+	/// Returns true if there are no write operations to perform.
+	pub fn is_empty(&self) -> bool {
+		self.earliest_session.is_none() &&
+			self.recent_disputes.is_none() &&
+			self.candidate_votes.is_empty()
+	}
+
+	/// Load the earliest session, if any.
+	pub fn load_earliest_session(&self) -> FatalResult<Option<SessionIndex>> {
+		if let Some(val) = self.earliest_session {
+			return Ok(Some(val))
+		}
+
+		self.inner.load_earliest_session()
+	}
+
+	/// Load the recent disputes, if any.
+	pub fn load_recent_disputes(&self) -> FatalResult<Option<RecentDisputes>> {
+		if let Some(val) = &self.recent_disputes {
+			return Ok(Some(val.clone()))
+		}
+
+		self.inner.load_recent_disputes()
+	}
+
+	/// Load the candidate votes for the specific session-candidate pair, if any.
+	pub fn load_candidate_votes(
+		&self,
+		session: SessionIndex,
+		candidate_hash: &CandidateHash,
+	) -> FatalResult<Option<CandidateVotes>> {
+		if let Some(val) = self.candidate_votes.get(&(session, *candidate_hash)) {
+			return Ok(val.clone())
+		}
+
+		self.inner.load_candidate_votes(session, candidate_hash)
+	}
+
+	/// Prepare a write to the "earliest session" field of the DB.
+	///
+	/// Later calls to this function will override earlier ones.
+	pub fn write_earliest_session(&mut self, session: SessionIndex) {
+		self.earliest_session = Some(session);
+	}
+
+	/// Prepare a write to the recent disputes stored in the DB.
+	///
+	/// Later calls to this function will override earlier ones.
+	pub fn write_recent_disputes(&mut self, recent_disputes: RecentDisputes) {
+		self.recent_disputes = Some(recent_disputes)
+	}
+
+	/// Prepare a write of the candidate votes under the indicated candidate.
+ /// + /// Later calls to this function for the same candidate will override earlier ones. + pub fn write_candidate_votes( + &mut self, + session: SessionIndex, + candidate_hash: CandidateHash, + votes: CandidateVotes, + ) { + self.candidate_votes.insert((session, candidate_hash), Some(votes)); + } + + /// Transform this backend into a set of write-ops to be written to the inner backend. + pub fn into_write_ops(self) -> impl Iterator { + let earliest_session_ops = self + .earliest_session + .map(|s| BackendWriteOp::WriteEarliestSession(s)) + .into_iter(); + + let recent_dispute_ops = + self.recent_disputes.map(|d| BackendWriteOp::WriteRecentDisputes(d)).into_iter(); + + let candidate_vote_ops = + self.candidate_votes + .into_iter() + .map(|((session, candidate), votes)| match votes { + Some(votes) => BackendWriteOp::WriteCandidateVotes(session, candidate, votes), + None => BackendWriteOp::DeleteCandidateVotes(session, candidate), + }); + + earliest_session_ops.chain(recent_dispute_ops).chain(candidate_vote_ops) + } +} diff --git a/polkadot/node/core/dispute-coordinator/src/db/mod.rs b/polkadot/node/core/dispute-coordinator/src/db/mod.rs new file mode 100644 index 00000000..9b79bd5b --- /dev/null +++ b/polkadot/node/core/dispute-coordinator/src/db/mod.rs @@ -0,0 +1,19 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Database component for the dispute coordinator. + +pub(super) mod v1; diff --git a/polkadot/node/core/dispute-coordinator/src/db/v1.rs b/polkadot/node/core/dispute-coordinator/src/db/v1.rs new file mode 100644 index 00000000..c0f3c992 --- /dev/null +++ b/polkadot/node/core/dispute-coordinator/src/db/v1.rs @@ -0,0 +1,682 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! `V1` database for the dispute coordinator. 
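+//!
+//! A sketch of the on-disk candidate-votes key layout (editorial addition, not part of the
+//! upstream patch; `candidate_votes_key` and `candidate_votes_session_prefix` are the private
+//! helpers defined further down in this file):
+//!
+//! ```ignore
+//! // b"candidate-votes" ++ session.to_be_bytes() ++ candidate_hash
+//! let key = candidate_votes_key(7, &candidate_hash);
+//! // Big-endian session bytes keep all keys of one session contiguous,
+//! // which is what makes per-session `delete_prefix` cleanup work.
+//! assert!(key.starts_with(&candidate_votes_session_prefix(7)));
+//! ```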
+ +use polkadot_node_primitives::DisputeStatus; +use polkadot_node_subsystem_util::database::{DBTransaction, Database}; +use polkadot_primitives::{ + CandidateHash, CandidateReceipt, Hash, InvalidDisputeStatementKind, SessionIndex, + ValidDisputeStatementKind, ValidatorIndex, ValidatorSignature, +}; + +use std::sync::Arc; + +use parity_scale_codec::{Decode, Encode}; + +use crate::{ + backend::{Backend, BackendWriteOp, OverlayedBackend}, + error::{FatalError, FatalResult}, + metrics::Metrics, + LOG_TARGET, +}; + +const RECENT_DISPUTES_KEY: &[u8; 15] = b"recent-disputes"; +const EARLIEST_SESSION_KEY: &[u8; 16] = b"earliest-session"; +const CANDIDATE_VOTES_SUBKEY: &[u8; 15] = b"candidate-votes"; +/// Until what session have votes been cleaned up already? +const CLEANED_VOTES_WATERMARK_KEY: &[u8; 23] = b"cleaned-votes-watermark"; + +/// Restrict number of cleanup operations. +/// +/// On the first run we are starting at session 0 going up all the way to the current session - +/// this should not be done at once, but rather in smaller batches so nodes won't get stalled by +/// this. +/// +/// 300 is with session duration of 1 hour and 30 parachains around <3_000_000 key purges in the worst +/// case. Which is already quite a lot, at the same time we have around 21_000 sessions on +/// Kusama. This means at 300 purged sessions per session, cleaning everything up will take +/// around 3 days. Depending on how severe disk usage becomes, we might want to bump the batch +/// size, at the cost of risking issues at session boundaries (performance). +#[cfg(test)] +const MAX_CLEAN_BATCH_SIZE: u32 = 10; +#[cfg(not(test))] +const MAX_CLEAN_BATCH_SIZE: u32 = 300; + +pub struct DbBackend { + inner: Arc, + config: ColumnConfiguration, + metrics: Metrics, +} + +impl DbBackend { + pub fn new(db: Arc, config: ColumnConfiguration, metrics: Metrics) -> Self { + Self { inner: db, config, metrics } + } + + /// Cleanup old votes. + /// + /// Should be called whenever a new earliest session gets written. + fn add_vote_cleanup_tx( + &mut self, + tx: &mut DBTransaction, + earliest_session: SessionIndex, + ) -> FatalResult<()> { + // Cleanup old votes in db: + let watermark = load_cleaned_votes_watermark(&*self.inner, &self.config)?.unwrap_or(0); + let clean_until = if earliest_session.saturating_sub(watermark) > MAX_CLEAN_BATCH_SIZE { + watermark + MAX_CLEAN_BATCH_SIZE + } else { + earliest_session + }; + gum::trace!( + target: LOG_TARGET, + ?watermark, + ?clean_until, + ?earliest_session, + ?MAX_CLEAN_BATCH_SIZE, + "WriteEarliestSession" + ); + + for index in watermark..clean_until { + gum::trace!( + target: LOG_TARGET, + ?index, + encoded = ?candidate_votes_session_prefix(index), + "Cleaning votes for session index" + ); + tx.delete_prefix(self.config.col_dispute_data, &candidate_votes_session_prefix(index)); + } + // New watermark: + tx.put_vec(self.config.col_dispute_data, CLEANED_VOTES_WATERMARK_KEY, clean_until.encode()); + Ok(()) + } +} + +impl Backend for DbBackend { + /// Load the earliest session, if any. + fn load_earliest_session(&self) -> FatalResult> { + load_earliest_session(&*self.inner, &self.config) + } + + /// Load the recent disputes, if any. + fn load_recent_disputes(&self) -> FatalResult> { + load_recent_disputes(&*self.inner, &self.config) + } + + /// Load the candidate votes for the specific session-candidate pair, if any. 
+ fn load_candidate_votes( + &self, + session: SessionIndex, + candidate_hash: &CandidateHash, + ) -> FatalResult> { + load_candidate_votes(&*self.inner, &self.config, session, candidate_hash) + } + + /// Atomically writes the list of operations, with later operations taking precedence over + /// prior. + /// + /// This also takes care of purging old votes (of obsolete sessions). + fn write(&mut self, ops: I) -> FatalResult<()> + where + I: IntoIterator, + { + let mut tx = DBTransaction::new(); + // Make sure the whole process is timed, including the actual transaction flush: + let mut cleanup_timer = None; + for op in ops { + match op { + BackendWriteOp::WriteEarliestSession(session) => { + cleanup_timer = match cleanup_timer.take() { + None => Some(self.metrics.time_vote_cleanup()), + Some(t) => Some(t), + }; + self.add_vote_cleanup_tx(&mut tx, session)?; + + // Actually write the earliest session. + tx.put_vec( + self.config.col_dispute_data, + EARLIEST_SESSION_KEY, + session.encode(), + ); + }, + BackendWriteOp::WriteRecentDisputes(recent_disputes) => { + tx.put_vec( + self.config.col_dispute_data, + RECENT_DISPUTES_KEY, + recent_disputes.encode(), + ); + }, + BackendWriteOp::WriteCandidateVotes(session, candidate_hash, votes) => { + gum::trace!(target: LOG_TARGET, ?session, "Writing candidate votes"); + tx.put_vec( + self.config.col_dispute_data, + &candidate_votes_key(session, &candidate_hash), + votes.encode(), + ); + }, + BackendWriteOp::DeleteCandidateVotes(session, candidate_hash) => { + tx.delete( + self.config.col_dispute_data, + &candidate_votes_key(session, &candidate_hash), + ); + }, + } + } + + self.inner.write(tx).map_err(FatalError::DbWriteFailed) + } +} + +fn candidate_votes_key(session: SessionIndex, candidate_hash: &CandidateHash) -> [u8; 15 + 4 + 32] { + let mut buf = [0u8; 15 + 4 + 32]; + buf[..15].copy_from_slice(CANDIDATE_VOTES_SUBKEY); + + // big-endian encoding is used to ensure lexicographic ordering. + buf[15..][..4].copy_from_slice(&session.to_be_bytes()); + candidate_hash.using_encoded(|s| buf[(15 + 4)..].copy_from_slice(s)); + + buf +} + +fn candidate_votes_session_prefix(session: SessionIndex) -> [u8; 15 + 4] { + let mut buf = [0u8; 15 + 4]; + buf[..15].copy_from_slice(CANDIDATE_VOTES_SUBKEY); + + // big-endian encoding is used to ensure lexicographic ordering. + buf[15..][..4].copy_from_slice(&session.to_be_bytes()); + buf +} + +/// Column configuration information for the DB. +#[derive(Debug, Clone)] +pub struct ColumnConfiguration { + /// The column in the key-value DB where data is stored. + pub col_dispute_data: u32, + /// The column in the key-value DB where session data is stored. + pub col_session_data: u32, +} + +/// Tracked votes on candidates, for the purposes of dispute resolution. +#[derive(Debug, Clone, Encode, Decode)] +pub struct CandidateVotes { + /// The receipt of the candidate itself. + pub candidate_receipt: CandidateReceipt, + /// Votes of validity, sorted by validator index. + pub valid: Vec<(ValidDisputeStatementKind, ValidatorIndex, ValidatorSignature)>, + /// Votes of invalidity, sorted by validator index. 
+ pub invalid: Vec<(InvalidDisputeStatementKind, ValidatorIndex, ValidatorSignature)>, +} + +impl From for polkadot_node_primitives::CandidateVotes { + fn from(db_votes: CandidateVotes) -> polkadot_node_primitives::CandidateVotes { + polkadot_node_primitives::CandidateVotes { + candidate_receipt: db_votes.candidate_receipt, + valid: db_votes.valid.into_iter().map(|(kind, i, sig)| (i, (kind, sig))).collect(), + invalid: db_votes.invalid.into_iter().map(|(kind, i, sig)| (i, (kind, sig))).collect(), + } + } +} + +impl From for CandidateVotes { + fn from(primitive_votes: polkadot_node_primitives::CandidateVotes) -> CandidateVotes { + CandidateVotes { + candidate_receipt: primitive_votes.candidate_receipt, + valid: primitive_votes + .valid + .into_iter() + .map(|(i, (kind, sig))| (kind, i, sig)) + .collect(), + invalid: primitive_votes.invalid.into_iter().map(|(i, (k, sig))| (k, i, sig)).collect(), + } + } +} + +/// The mapping for recent disputes; any which have not yet been pruned for being ancient. +pub type RecentDisputes = std::collections::BTreeMap<(SessionIndex, CandidateHash), DisputeStatus>; + +/// Errors while accessing things from the DB. +#[derive(Debug, thiserror::Error)] +pub enum Error { + #[error(transparent)] + Io(#[from] std::io::Error), + #[error(transparent)] + Codec(#[from] parity_scale_codec::Error), +} + +impl From for crate::error::Error { + fn from(err: Error) -> Self { + match err { + Error::Io(io) => Self::Io(io), + Error::Codec(e) => Self::Codec(e), + } + } +} + +/// Result alias for DB errors. +pub type Result = std::result::Result; + +fn load_decode( + db: &dyn Database, + col_dispute_data: u32, + key: &[u8], +) -> Result> { + match db.get(col_dispute_data, key)? { + None => Ok(None), + Some(raw) => D::decode(&mut &raw[..]).map(Some).map_err(Into::into), + } +} + +/// Load the candidate votes for the specific session-candidate pair, if any. +pub(crate) fn load_candidate_votes( + db: &dyn Database, + config: &ColumnConfiguration, + session: SessionIndex, + candidate_hash: &CandidateHash, +) -> FatalResult> { + load_decode(db, config.col_dispute_data, &candidate_votes_key(session, candidate_hash)) + .map_err(|e| FatalError::DbReadFailed(e)) +} + +/// Load the earliest session, if any. +pub(crate) fn load_earliest_session( + db: &dyn Database, + config: &ColumnConfiguration, +) -> FatalResult> { + load_decode(db, config.col_dispute_data, EARLIEST_SESSION_KEY) + .map_err(|e| FatalError::DbReadFailed(e)) +} + +/// Load the recent disputes, if any. +pub(crate) fn load_recent_disputes( + db: &dyn Database, + config: &ColumnConfiguration, +) -> FatalResult> { + load_decode(db, config.col_dispute_data, RECENT_DISPUTES_KEY) + .map_err(|e| FatalError::DbReadFailed(e)) +} + +/// Maybe prune data in the DB based on the provided session index. +/// +/// This is intended to be called on every block, and as such will be used to populate the DB on +/// first launch. If the on-disk data does not need to be pruned, only a single storage read +/// will be performed. +/// +/// If one or more ancient sessions are pruned, all metadata on candidates within the ancient +/// session will be deleted. +pub(crate) fn note_earliest_session( + overlay_db: &mut OverlayedBackend<'_, impl Backend>, + new_earliest_session: SessionIndex, +) -> FatalResult<()> { + match overlay_db.load_earliest_session()? { + None => { + // First launch - write new-earliest. 
+			overlay_db.write_earliest_session(new_earliest_session);
+		},
+		Some(prev_earliest) if new_earliest_session > prev_earliest => {
+			// Prune all data in the outdated sessions.
+			overlay_db.write_earliest_session(new_earliest_session);
+
+			// Clear recent disputes metadata.
+			{
+				let mut recent_disputes = overlay_db.load_recent_disputes()?.unwrap_or_default();
+
+				let lower_bound = (new_earliest_session, CandidateHash(Hash::repeat_byte(0x00)));
+
+				let new_recent_disputes = recent_disputes.split_off(&lower_bound);
+				// Any remaining disputes are considered ancient and must be pruned.
+				let pruned_disputes = recent_disputes;
+
+				if pruned_disputes.len() != 0 {
+					overlay_db.write_recent_disputes(new_recent_disputes);
+					// Note: Deleting old candidate votes is handled in `write` based on the
+					// earliest session.
+				}
+			}
+		},
+		Some(_) => {
+			// nothing to do.
+		},
+	}
+
+	Ok(())
+}
+
+/// Until what session votes have been cleaned up already.
+///
+/// That is the db has already been purged of votes for sessions older than the returned
+/// `SessionIndex`.
+fn load_cleaned_votes_watermark(
+	db: &dyn Database,
+	config: &ColumnConfiguration,
+) -> FatalResult<Option<SessionIndex>> {
+	load_decode(db, config.col_dispute_data, CLEANED_VOTES_WATERMARK_KEY)
+		.map_err(|e| FatalError::DbReadFailed(e))
+}
+
+#[cfg(test)]
+mod tests {
+
+	use super::*;
+	use ::test_helpers::{dummy_candidate_receipt, dummy_hash};
+	use polkadot_node_primitives::DISPUTE_WINDOW;
+	use polkadot_primitives::{Hash, Id as ParaId};
+
+	fn make_db() -> DbBackend {
+		let db = kvdb_memorydb::create(1);
+		let db = polkadot_node_subsystem_util::database::kvdb_impl::DbAdapter::new(db, &[0]);
+		let store = Arc::new(db);
+		let config = ColumnConfiguration { col_dispute_data: 0, col_session_data: 1 };
+		DbBackend::new(store, config, Metrics::default())
+	}
+
+	#[test]
+	fn max_clean_batch_size_is_honored() {
+		let mut backend = make_db();
+
+		let mut overlay_db = OverlayedBackend::new(&backend);
+		let current_session = MAX_CLEAN_BATCH_SIZE + DISPUTE_WINDOW.get() + 3;
+		let earliest_session = current_session - DISPUTE_WINDOW.get();
+
+		overlay_db.write_earliest_session(0);
+		let candidate_hash = CandidateHash(Hash::repeat_byte(1));
+
+		for session in 0..current_session + 1 {
+			overlay_db.write_candidate_votes(
+				session,
+				candidate_hash,
+				CandidateVotes {
+					candidate_receipt: dummy_candidate_receipt(dummy_hash()),
+					valid: Vec::new(),
+					invalid: Vec::new(),
+				},
+			);
+		}
+		assert!(overlay_db.load_candidate_votes(0, &candidate_hash).unwrap().is_some());
+		assert!(overlay_db
+			.load_candidate_votes(MAX_CLEAN_BATCH_SIZE - 1, &candidate_hash)
+			.unwrap()
+			.is_some());
+		assert!(overlay_db
+			.load_candidate_votes(MAX_CLEAN_BATCH_SIZE, &candidate_hash)
+			.unwrap()
+			.is_some());
+
+		// Cleanup only works for votes that have been written already - so write.
+ let write_ops = overlay_db.into_write_ops(); + backend.write(write_ops).unwrap(); + + let mut overlay_db = OverlayedBackend::new(&backend); + + gum::trace!(target: LOG_TARGET, ?current_session, "Noting current session"); + note_earliest_session(&mut overlay_db, earliest_session).unwrap(); + + let write_ops = overlay_db.into_write_ops(); + backend.write(write_ops).unwrap(); + + let mut overlay_db = OverlayedBackend::new(&backend); + + assert!(overlay_db + .load_candidate_votes(MAX_CLEAN_BATCH_SIZE - 1, &candidate_hash) + .unwrap() + .is_none()); + // After batch size votes should still be there: + assert!(overlay_db + .load_candidate_votes(MAX_CLEAN_BATCH_SIZE, &candidate_hash) + .unwrap() + .is_some()); + + let current_session = current_session + 1; + let earliest_session = earliest_session + 1; + + note_earliest_session(&mut overlay_db, earliest_session).unwrap(); + + let write_ops = overlay_db.into_write_ops(); + backend.write(write_ops).unwrap(); + + let overlay_db = OverlayedBackend::new(&backend); + + // All should be gone now: + assert!(overlay_db + .load_candidate_votes(earliest_session - 1, &candidate_hash) + .unwrap() + .is_none()); + // Earliest session should still be there: + assert!(overlay_db + .load_candidate_votes(earliest_session, &candidate_hash) + .unwrap() + .is_some()); + // Old current session should still be there as well: + assert!(overlay_db + .load_candidate_votes(current_session - 1, &candidate_hash) + .unwrap() + .is_some()); + } + + #[test] + fn overlay_pre_and_post_commit_consistency() { + let mut backend = make_db(); + + let mut overlay_db = OverlayedBackend::new(&backend); + + overlay_db.write_earliest_session(0); + overlay_db.write_earliest_session(1); + + overlay_db.write_recent_disputes( + vec![((0, CandidateHash(Hash::repeat_byte(0))), DisputeStatus::Active)] + .into_iter() + .collect(), + ); + + overlay_db.write_recent_disputes( + vec![((1, CandidateHash(Hash::repeat_byte(1))), DisputeStatus::Active)] + .into_iter() + .collect(), + ); + + overlay_db.write_candidate_votes( + 1, + CandidateHash(Hash::repeat_byte(1)), + CandidateVotes { + candidate_receipt: dummy_candidate_receipt(dummy_hash()), + valid: Vec::new(), + invalid: Vec::new(), + }, + ); + overlay_db.write_candidate_votes( + 1, + CandidateHash(Hash::repeat_byte(1)), + CandidateVotes { + candidate_receipt: { + let mut receipt = dummy_candidate_receipt(dummy_hash()); + receipt.descriptor.para_id = ParaId::from(5_u32); + + receipt + }, + valid: Vec::new(), + invalid: Vec::new(), + }, + ); + + // Test that overlay returns the correct values before committing. + assert_eq!(overlay_db.load_earliest_session().unwrap().unwrap(), 1); + + assert_eq!( + overlay_db.load_recent_disputes().unwrap().unwrap(), + vec![((1, CandidateHash(Hash::repeat_byte(1))), DisputeStatus::Active),] + .into_iter() + .collect() + ); + + assert_eq!( + overlay_db + .load_candidate_votes(1, &CandidateHash(Hash::repeat_byte(1))) + .unwrap() + .unwrap() + .candidate_receipt + .descriptor + .para_id, + ParaId::from(5), + ); + + let write_ops = overlay_db.into_write_ops(); + backend.write(write_ops).unwrap(); + + // Test that subsequent writes were written. 
+ assert_eq!(backend.load_earliest_session().unwrap().unwrap(), 1); + + assert_eq!( + backend.load_recent_disputes().unwrap().unwrap(), + vec![((1, CandidateHash(Hash::repeat_byte(1))), DisputeStatus::Active),] + .into_iter() + .collect() + ); + + assert_eq!( + backend + .load_candidate_votes(1, &CandidateHash(Hash::repeat_byte(1))) + .unwrap() + .unwrap() + .candidate_receipt + .descriptor + .para_id, + ParaId::from(5), + ); + } + + #[test] + fn overlay_preserves_candidate_votes_operation_order() { + let mut backend = make_db(); + + let mut overlay_db = OverlayedBackend::new(&backend); + + overlay_db.write_candidate_votes( + 1, + CandidateHash(Hash::repeat_byte(1)), + CandidateVotes { + candidate_receipt: dummy_candidate_receipt(Hash::random()), + valid: Vec::new(), + invalid: Vec::new(), + }, + ); + + let receipt = dummy_candidate_receipt(dummy_hash()); + + overlay_db.write_candidate_votes( + 1, + CandidateHash(Hash::repeat_byte(1)), + CandidateVotes { + candidate_receipt: receipt.clone(), + valid: Vec::new(), + invalid: Vec::new(), + }, + ); + + let write_ops = overlay_db.into_write_ops(); + backend.write(write_ops).unwrap(); + + assert_eq!( + backend + .load_candidate_votes(1, &CandidateHash(Hash::repeat_byte(1))) + .unwrap() + .unwrap() + .candidate_receipt, + receipt, + ); + } + + #[test] + fn note_earliest_session_prunes_old() { + let mut backend = make_db(); + + let hash_a = CandidateHash(Hash::repeat_byte(0x0a)); + let hash_b = CandidateHash(Hash::repeat_byte(0x0b)); + let hash_c = CandidateHash(Hash::repeat_byte(0x0c)); + let hash_d = CandidateHash(Hash::repeat_byte(0x0d)); + + let prev_earliest_session = 0; + let new_earliest_session = 5; + let current_session = 5 + DISPUTE_WINDOW.get(); + + let super_old_no_dispute = 1; + let very_old = 3; + let slightly_old = 4; + let very_recent = current_session - 1; + + let blank_candidate_votes = || CandidateVotes { + candidate_receipt: dummy_candidate_receipt(dummy_hash()), + valid: Vec::new(), + invalid: Vec::new(), + }; + + let mut overlay_db = OverlayedBackend::new(&backend); + overlay_db.write_earliest_session(prev_earliest_session); + overlay_db.write_recent_disputes( + vec![ + ((very_old, hash_a), DisputeStatus::Active), + ((slightly_old, hash_b), DisputeStatus::Active), + ((new_earliest_session, hash_c), DisputeStatus::Active), + ((very_recent, hash_d), DisputeStatus::Active), + ] + .into_iter() + .collect(), + ); + + overlay_db.write_candidate_votes(super_old_no_dispute, hash_a, blank_candidate_votes()); + overlay_db.write_candidate_votes(very_old, hash_a, blank_candidate_votes()); + + overlay_db.write_candidate_votes(slightly_old, hash_b, blank_candidate_votes()); + + overlay_db.write_candidate_votes(new_earliest_session, hash_c, blank_candidate_votes()); + + overlay_db.write_candidate_votes(very_recent, hash_d, blank_candidate_votes()); + + let write_ops = overlay_db.into_write_ops(); + backend.write(write_ops).unwrap(); + + let mut overlay_db = OverlayedBackend::new(&backend); + note_earliest_session(&mut overlay_db, new_earliest_session).unwrap(); + + assert_eq!(overlay_db.load_earliest_session().unwrap(), Some(new_earliest_session)); + + assert_eq!( + overlay_db.load_recent_disputes().unwrap().unwrap(), + vec![ + ((new_earliest_session, hash_c), DisputeStatus::Active), + ((very_recent, hash_d), DisputeStatus::Active), + ] + .into_iter() + .collect(), + ); + + // Votes are only cleaned up after actual write: + let write_ops = overlay_db.into_write_ops(); + backend.write(write_ops).unwrap(); + + let overlay_db = 
OverlayedBackend::new(&backend); + + assert!(overlay_db + .load_candidate_votes(super_old_no_dispute, &hash_a) + .unwrap() + .is_none()); + assert!(overlay_db.load_candidate_votes(very_old, &hash_a).unwrap().is_none()); + assert!(overlay_db.load_candidate_votes(slightly_old, &hash_b).unwrap().is_none()); + assert!(overlay_db + .load_candidate_votes(new_earliest_session, &hash_c) + .unwrap() + .is_some()); + assert!(overlay_db.load_candidate_votes(very_recent, &hash_d).unwrap().is_some()); + } +} diff --git a/polkadot/node/core/dispute-coordinator/src/error.rs b/polkadot/node/core/dispute-coordinator/src/error.rs new file mode 100644 index 00000000..7a059b88 --- /dev/null +++ b/polkadot/node/core/dispute-coordinator/src/error.rs @@ -0,0 +1,132 @@ +// Copyright 2021 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +use fatality::Nested; +use futures::channel::oneshot; + +use polkadot_node_subsystem::{errors::ChainApiError, SubsystemError}; +use polkadot_node_subsystem_util::{rolling_session_window::SessionsUnavailable, runtime}; + +use crate::{db, participation, LOG_TARGET}; +use parity_scale_codec::Error as CodecError; + +pub type Result = std::result::Result; +pub type FatalResult = std::result::Result; +pub type JfyiResult = std::result::Result; + +#[allow(missing_docs)] +#[fatality::fatality(splitable)] +pub enum Error { + /// We received a legacy `SubystemError::Context` error which is considered fatal. + #[fatal] + #[error("SubsystemError::Context error: {0}")] + SubsystemContext(String), + + /// `ctx.spawn` failed with an error. + #[fatal] + #[error("Spawning a task failed: {0}")] + SpawnFailed(#[source] SubsystemError), + + #[fatal] + #[error("Participation worker receiver exhausted.")] + ParticipationWorkerReceiverExhausted, + + /// Receiving subsystem message from overseer failed. 
+ #[fatal] + #[error("Receiving message from overseer failed: {0}")] + SubsystemReceive(#[source] SubsystemError), + + #[fatal] + #[error("Writing to database failed: {0}")] + DbWriteFailed(std::io::Error), + + #[fatal] + #[error("Reading from database failed: {0}")] + DbReadFailed(db::v1::Error), + + #[fatal] + #[error("Oneshot for receiving block number from chain API got cancelled")] + CanceledBlockNumber, + + #[fatal] + #[error("Retrieving block number from chain API failed with error: {0}")] + ChainApiBlockNumber(ChainApiError), + + #[fatal] + #[error(transparent)] + ChainApiAncestors(ChainApiError), + + #[fatal] + #[error("Chain API dropped response channel sender")] + ChainApiSenderDropped, + + #[fatal(forward)] + #[error("Error while accessing runtime information {0}")] + Runtime(#[from] runtime::Error), + + #[error(transparent)] + ChainApi(#[from] ChainApiError), + + #[error(transparent)] + Io(#[from] std::io::Error), + + #[error(transparent)] + Oneshot(#[from] oneshot::Canceled), + + #[error("Could not send import confirmation (receiver canceled)")] + DisputeImportOneshotSend, + + #[error(transparent)] + Subsystem(#[from] SubsystemError), + + #[error(transparent)] + Codec(#[from] CodecError), + + /// `RollingSessionWindow` was not able to retrieve `SessionInfo`s. + #[error("Sessions unavailable in `RollingSessionWindow`: {0}")] + RollingSessionWindow(#[from] SessionsUnavailable), + + #[error(transparent)] + QueueError(#[from] participation::QueueError), +} + +/// Utility for eating top level errors and log them. +/// +/// We basically always want to try and continue on error. This utility function is meant to +/// consume top-level errors by simply logging them +pub fn log_error(result: Result<()>) -> std::result::Result<(), FatalError> { + match result.into_nested()? { + Ok(()) => Ok(()), + Err(jfyi) => { + jfyi.log(); + Ok(()) + }, + } +} + +impl JfyiError { + /// Log a `JfyiError`. + pub fn log(self) { + match self { + // don't spam the log with spurious errors + Self::Runtime(runtime::Error::RuntimeRequestCanceled(_)) | Self::Oneshot(_) => { + gum::debug!(target: LOG_TARGET, error = ?self) + }, + // it's worth reporting otherwise + _ => gum::warn!(target: LOG_TARGET, error = ?self), + } + } +} diff --git a/polkadot/node/core/dispute-coordinator/src/import.rs b/polkadot/node/core/dispute-coordinator/src/import.rs new file mode 100644 index 00000000..4f6edc5f --- /dev/null +++ b/polkadot/node/core/dispute-coordinator/src/import.rs @@ -0,0 +1,549 @@ +// Copyright 2022 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Vote import logic. +//! +//! This module encapsulates the actual logic for importing new votes and provides easy access of +//! the current state for votes for a particular candidate. +//! +//! In particular there is `CandidateVoteState` which tells what can be concluded for a particular set of +//! votes. E.g. 
whether a dispute is ongoing, whether it is confirmed, concluded, .. +//! +//! Then there is `ImportResult` which reveals information about what changed once additional votes +//! got imported on top of an existing `CandidateVoteState` and reveals "dynamic" information, like whether +//! due to the import a dispute was raised/got confirmed, ... + +use std::collections::{BTreeMap, HashMap, HashSet}; + +use polkadot_node_primitives::{ + disputes::ValidCandidateVotes, CandidateVotes, DisputeStatus, SignedDisputeStatement, Timestamp, +}; +use polkadot_node_subsystem_util::rolling_session_window::RollingSessionWindow; +use polkadot_primitives::{ + CandidateReceipt, DisputeStatement, IndexedVec, SessionIndex, SessionInfo, + ValidDisputeStatementKind, ValidatorId, ValidatorIndex, ValidatorPair, ValidatorSignature, +}; +use sc_keystore::LocalKeystore; + +use crate::LOG_TARGET; + +/// (Session) environment of a candidate. +pub struct CandidateEnvironment<'a> { + /// The session the candidate appeared in. + session_index: SessionIndex, + /// Session for above index. + session: &'a SessionInfo, + /// Validator indices controlled by this node. + controlled_indices: HashSet, +} + +impl<'a> CandidateEnvironment<'a> { + /// Create `CandidateEnvironment`. + /// + /// Return: `None` in case session is outside of session window. + pub fn new( + keystore: &LocalKeystore, + session_window: &'a RollingSessionWindow, + session_index: SessionIndex, + ) -> Option { + let session = session_window.session_info(session_index)?; + let controlled_indices = find_controlled_validator_indices(keystore, &session.validators); + Some(Self { session_index, session, controlled_indices }) + } + + /// Validators in the candidate's session. + pub fn validators(&self) -> &IndexedVec { + &self.session.validators + } + + /// `SessionInfo` for the candidate's session. + pub fn session_info(&self) -> &SessionInfo { + &self.session + } + + /// Retrieve `SessionIndex` for this environment. + pub fn session_index(&self) -> SessionIndex { + self.session_index + } + + /// Indices controlled by this node. + pub fn controlled_indices(&'a self) -> &'a HashSet { + &self.controlled_indices + } +} + +/// Whether or not we already issued some statement about a candidate. +pub enum OwnVoteState { + /// Our votes, if any. + Voted(Vec<(ValidatorIndex, (DisputeStatement, ValidatorSignature))>), + + /// We are not a parachain validator in the session. + /// + /// Hence we cannot vote. + CannotVote, +} + +impl OwnVoteState { + fn new<'a>(votes: &CandidateVotes, env: &CandidateEnvironment<'a>) -> Self { + let controlled_indices = env.controlled_indices(); + if controlled_indices.is_empty() { + return Self::CannotVote + } + + let our_valid_votes = controlled_indices + .iter() + .filter_map(|i| votes.valid.raw().get_key_value(i)) + .map(|(index, (kind, sig))| (*index, (DisputeStatement::Valid(*kind), sig.clone()))); + let our_invalid_votes = controlled_indices + .iter() + .filter_map(|i| votes.invalid.get_key_value(i)) + .map(|(index, (kind, sig))| (*index, (DisputeStatement::Invalid(*kind), sig.clone()))); + + Self::Voted(our_valid_votes.chain(our_invalid_votes).collect()) + } + + /// Is a vote from us missing but we are a validator able to vote? + fn vote_missing(&self) -> bool { + match self { + Self::Voted(votes) if votes.is_empty() => true, + Self::Voted(_) | Self::CannotVote => false, + } + } + + /// Get own approval votes, if any. + /// + /// Empty iterator means, no approval votes. `None` means, there will never be any (we cannot + /// vote). 
+ fn approval_votes( + &self, + ) -> Option> { + match self { + Self::Voted(votes) => Some(votes.iter().filter_map(|(index, (kind, sig))| { + if let DisputeStatement::Valid(ValidDisputeStatementKind::ApprovalChecking) = kind { + Some((*index, sig)) + } else { + None + } + })), + Self::CannotVote => None, + } + } + + /// Get our votes if there are any. + /// + /// Empty iterator means, no votes. `None` means, there will never be any (we cannot + /// vote). + fn votes(&self) -> Option<&Vec<(ValidatorIndex, (DisputeStatement, ValidatorSignature))>> { + match self { + Self::Voted(votes) => Some(&votes), + Self::CannotVote => None, + } + } +} + +/// Complete state of votes for a candidate. +/// +/// All votes + information whether a dispute is ongoing, confirmed, concluded, whether we already +/// voted, ... +pub struct CandidateVoteState { + /// Votes already existing for the candidate + receipt. + votes: Votes, + + /// Information about own votes: + own_vote: OwnVoteState, + + /// Current dispute status, if there is any. + dispute_status: Option, +} + +impl CandidateVoteState { + /// Create an empty `CandidateVoteState` + /// + /// in case there have not been any previous votes. + pub fn new_from_receipt(candidate_receipt: CandidateReceipt) -> Self { + let votes = CandidateVotes { + candidate_receipt, + valid: ValidCandidateVotes::new(), + invalid: BTreeMap::new(), + }; + Self { votes, own_vote: OwnVoteState::CannotVote, dispute_status: None } + } + + /// Create a new `CandidateVoteState` from already existing votes. + pub fn new(votes: CandidateVotes, env: &CandidateEnvironment, now: Timestamp) -> Self { + let own_vote = OwnVoteState::new(&votes, env); + + let n_validators = env.validators().len(); + + let supermajority_threshold = polkadot_primitives::supermajority_threshold(n_validators); + + // We have a dispute, if we have votes on both sides: + let is_disputed = !votes.invalid.is_empty() && !votes.valid.raw().is_empty(); + + let dispute_status = if is_disputed { + let mut status = DisputeStatus::active(); + let byzantine_threshold = polkadot_primitives::byzantine_threshold(n_validators); + let is_confirmed = votes.voted_indices().len() > byzantine_threshold; + if is_confirmed { + status = status.confirm(); + }; + let concluded_for = votes.valid.raw().len() >= supermajority_threshold; + if concluded_for { + status = status.conclude_for(now); + }; + + let concluded_against = votes.invalid.len() >= supermajority_threshold; + if concluded_against { + status = status.conclude_against(now); + }; + Some(status) + } else { + None + }; + + Self { votes, own_vote, dispute_status } + } + + /// Import fresh statements. + /// + /// Result will be a new state plus information about things that changed due to the import. 
+ pub fn import_statements( + self, + env: &CandidateEnvironment, + statements: Vec<(SignedDisputeStatement, ValidatorIndex)>, + now: Timestamp, + ) -> ImportResult { + let (mut votes, old_state) = self.into_old_state(); + + let mut new_invalid_voters = Vec::new(); + let mut imported_invalid_votes = 0; + let mut imported_valid_votes = 0; + + let expected_candidate_hash = votes.candidate_receipt.hash(); + + for (statement, val_index) in statements { + if env + .validators() + .get(val_index) + .map_or(true, |v| v != statement.validator_public()) + { + gum::error!( + target: LOG_TARGET, + ?val_index, + session= ?env.session_index, + claimed_key = ?statement.validator_public(), + "Validator index doesn't match claimed key", + ); + + continue + } + if statement.candidate_hash() != &expected_candidate_hash { + gum::error!( + target: LOG_TARGET, + ?val_index, + session= ?env.session_index, + given_candidate_hash = ?statement.candidate_hash(), + ?expected_candidate_hash, + "Vote is for unexpected candidate!", + ); + continue + } + if statement.session_index() != env.session_index() { + gum::error!( + target: LOG_TARGET, + ?val_index, + session= ?env.session_index, + given_candidate_hash = ?statement.candidate_hash(), + ?expected_candidate_hash, + "Vote is for unexpected session!", + ); + continue + } + + match statement.statement() { + DisputeStatement::Valid(valid_kind) => { + let fresh = votes.valid.insert_vote( + val_index, + *valid_kind, + statement.into_validator_signature(), + ); + if fresh { + imported_valid_votes += 1; + } + }, + DisputeStatement::Invalid(invalid_kind) => { + let fresh = votes + .invalid + .insert(val_index, (*invalid_kind, statement.into_validator_signature())) + .is_none(); + if fresh { + new_invalid_voters.push(val_index); + imported_invalid_votes += 1; + } + }, + } + } + + let new_state = Self::new(votes, env, now); + + ImportResult { + old_state, + new_state, + imported_invalid_votes, + imported_valid_votes, + imported_approval_votes: 0, + new_invalid_voters, + } + } + + /// Retrieve `CandidateReceipt` in `CandidateVotes`. + pub fn candidate_receipt(&self) -> &CandidateReceipt { + &self.votes.candidate_receipt + } + + /// Extract `CandidateVotes` for handling import of new statements. + fn into_old_state(self) -> (CandidateVotes, CandidateVoteState<()>) { + let CandidateVoteState { votes, own_vote, dispute_status } = self; + (votes, CandidateVoteState { votes: (), own_vote, dispute_status }) + } +} + +impl CandidateVoteState { + /// Whether or not we have an ongoing dispute. + pub fn is_disputed(&self) -> bool { + self.dispute_status.is_some() + } + + /// Whether there is an ongoing confirmed dispute. + /// + /// This checks whether there is a dispute ongoing and we have more than byzantine threshold + /// votes. + pub fn is_confirmed(&self) -> bool { + self.dispute_status.map_or(false, |s| s.is_confirmed_concluded()) + } + + /// Are we a validator in the session, but have not yet voted? + pub fn own_vote_missing(&self) -> bool { + self.own_vote.vote_missing() + } + + /// Own approval votes if any: + pub fn own_approval_votes( + &self, + ) -> Option> { + self.own_vote.approval_votes() + } + + /// Get own votes if there are any. + pub fn own_votes( + &self, + ) -> Option<&Vec<(ValidatorIndex, (DisputeStatement, ValidatorSignature))>> { + self.own_vote.votes() + } + + /// Whether or not there is a dispute and it has already enough valid votes to conclude. 
+	pub fn has_concluded_for(&self) -> bool {
+		self.dispute_status.map_or(false, |s| s.has_concluded_for())
+	}
+
+	/// Whether or not there is a dispute and it has already enough invalid votes to conclude.
+	pub fn has_concluded_against(&self) -> bool {
+		self.dispute_status.map_or(false, |s| s.has_concluded_against())
+	}
+
+	/// Get access to the dispute status, in case there is one.
+	pub fn dispute_status(&self) -> &Option<DisputeStatus> {
+		&self.dispute_status
+	}
+
+	/// Access to underlying votes.
+	pub fn votes(&self) -> &V {
+		&self.votes
+	}
+}
+
+/// An ongoing statement/vote import.
+pub struct ImportResult {
+	/// The state we had before importing new statements.
+	old_state: CandidateVoteState<()>,
+	/// The new state after importing the new statements.
+	new_state: CandidateVoteState<CandidateVotes>,
+	/// New invalid voters as of this import.
+	new_invalid_voters: Vec<ValidatorIndex>,
+	/// Number of successfully imported invalid votes.
+	imported_invalid_votes: u32,
+	/// Number of successfully imported valid votes.
+	imported_valid_votes: u32,
+	/// Number of approval votes imported via `import_approval_votes()`.
+	///
+	/// And only those: If normal import included approval votes, those are not counted here.
+	///
+	/// In other words, without a call to `import_approval_votes()` this will always be 0.
+	imported_approval_votes: u32,
+}
+
+impl ImportResult {
+	/// Whether or not anything has changed due to the import.
+	pub fn votes_changed(&self) -> bool {
+		self.imported_valid_votes != 0 || self.imported_invalid_votes != 0
+	}
+
+	/// The dispute state has changed in some way.
+	///
+	/// - freshly disputed
+	/// - freshly confirmed
+	/// - freshly concluded (valid or invalid)
+	pub fn dispute_state_changed(&self) -> bool {
+		self.is_freshly_disputed() || self.is_freshly_confirmed() || self.is_freshly_concluded()
+	}
+
+	/// State as it was before import.
+	pub fn old_state(&self) -> &CandidateVoteState<()> {
+		&self.old_state
+	}
+
+	/// State after import.
+	pub fn new_state(&self) -> &CandidateVoteState<CandidateVotes> {
+		&self.new_state
+	}
+
+	/// New "invalid" voters encountered during import.
+	pub fn new_invalid_voters(&self) -> &Vec<ValidatorIndex> {
+		&self.new_invalid_voters
+	}
+
+	/// Number of imported valid votes.
+	pub fn imported_valid_votes(&self) -> u32 {
+		self.imported_valid_votes
+	}
+
+	/// Number of imported invalid votes.
+	pub fn imported_invalid_votes(&self) -> u32 {
+		self.imported_invalid_votes
+	}
+
+	/// Number of imported approval votes.
+	pub fn imported_approval_votes(&self) -> u32 {
+		self.imported_approval_votes
+	}
+
+	/// Whether we now have a dispute and did not prior to the import.
+	pub fn is_freshly_disputed(&self) -> bool {
+		!self.old_state().is_disputed() && self.new_state().is_disputed()
+	}
+
+	/// Whether we just surpassed the byzantine threshold.
+	pub fn is_freshly_confirmed(&self) -> bool {
+		!self.old_state().is_confirmed() && self.new_state().is_confirmed()
+	}
+
+	/// Whether or not any dispute just concluded valid due to the import.
+	pub fn is_freshly_concluded_for(&self) -> bool {
+		!self.old_state().has_concluded_for() && self.new_state().has_concluded_for()
+	}
+
+	/// Whether or not any dispute just concluded invalid due to the import.
+	pub fn is_freshly_concluded_against(&self) -> bool {
+		!self.old_state().has_concluded_against() && self.new_state().has_concluded_against()
+	}
+
+	/// Whether or not any dispute just concluded either invalid or valid due to the import.
+ pub fn is_freshly_concluded(&self) -> bool { + self.is_freshly_concluded_against() || self.is_freshly_concluded_for() + } + + /// Modify this `ImportResult`s, by importing additional approval votes. + /// + /// Both results and `new_state` will be changed as if those approval votes had been in the + /// original import. + pub fn import_approval_votes( + self, + env: &CandidateEnvironment, + approval_votes: HashMap, + now: Timestamp, + ) -> Self { + let Self { + old_state, + new_state, + new_invalid_voters, + mut imported_valid_votes, + imported_invalid_votes, + mut imported_approval_votes, + } = self; + + let (mut votes, _) = new_state.into_old_state(); + + for (index, sig) in approval_votes.into_iter() { + debug_assert!( + { + let pub_key = &env.session_info().validators.get(index).expect("indices are validated by approval-voting subsystem; qed"); + let candidate_hash = votes.candidate_receipt.hash(); + let session_index = env.session_index(); + DisputeStatement::Valid(ValidDisputeStatementKind::ApprovalChecking) + .check_signature(pub_key, candidate_hash, session_index, &sig) + .is_ok() + }, + "Signature check for imported approval votes failed! This is a serious bug. Session: {:?}, candidate hash: {:?}, validator index: {:?}", env.session_index(), votes.candidate_receipt.hash(), index + ); + if votes.valid.insert_vote(index, ValidDisputeStatementKind::ApprovalChecking, sig) { + imported_valid_votes += 1; + imported_approval_votes += 1; + } + } + + let new_state = CandidateVoteState::new(votes, env, now); + + Self { + old_state, + new_state, + new_invalid_voters, + imported_valid_votes, + imported_invalid_votes, + imported_approval_votes, + } + } + + /// All done, give me those votes. + /// + /// Returns: `None` in case nothing has changed (import was redundant). + pub fn into_updated_votes(self) -> Option { + if self.votes_changed() { + let CandidateVoteState { votes, .. } = self.new_state; + Some(votes) + } else { + None + } + } +} + +/// Find indices controlled by this validator. +/// +/// That is all `ValidatorIndex`es we have private keys for. Usually this will only be one. +fn find_controlled_validator_indices( + keystore: &LocalKeystore, + validators: &IndexedVec, +) -> HashSet { + let mut controlled = HashSet::new(); + for (index, validator) in validators.iter().enumerate() { + if keystore.key_pair::(validator).ok().flatten().is_none() { + continue + } + + controlled.insert(ValidatorIndex(index as _)); + } + + controlled +} diff --git a/polkadot/node/core/dispute-coordinator/src/initialized.rs b/polkadot/node/core/dispute-coordinator/src/initialized.rs new file mode 100644 index 00000000..620c58fb --- /dev/null +++ b/polkadot/node/core/dispute-coordinator/src/initialized.rs @@ -0,0 +1,1311 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Dispute coordinator subsystem in initialized state (after first active leaf is received). 
+ +use std::{collections::BTreeMap, sync::Arc}; + +use futures::{ + channel::{mpsc, oneshot}, + FutureExt, StreamExt, +}; + +use sc_keystore::LocalKeystore; + +use polkadot_node_primitives::{ + disputes::ValidCandidateVotes, CandidateVotes, DisputeStatus, SignedDisputeStatement, Timestamp, +}; +use polkadot_node_subsystem::{ + messages::{ + ApprovalVotingMessage, BlockDescription, ChainSelectionMessage, DisputeCoordinatorMessage, + DisputeDistributionMessage, ImportStatementsResult, + }, + overseer, ActivatedLeaf, ActiveLeavesUpdate, FromOrchestra, OverseerSignal, +}; +use polkadot_node_subsystem_util::rolling_session_window::{ + RollingSessionWindow, SessionWindowUpdate, SessionsUnavailable, +}; +use polkadot_primitives::{ + BlockNumber, CandidateHash, CandidateReceipt, CompactStatement, DisputeStatement, + DisputeStatementSet, Hash, ScrapedOnChainVotes, SessionIndex, SessionInfo, + ValidDisputeStatementKind, ValidatorId, ValidatorIndex, +}; + +use crate::{ + error::{log_error, Error, FatalError, FatalResult, JfyiError, JfyiResult, Result}, + import::{CandidateEnvironment, CandidateVoteState}, + is_potential_spam, + metrics::Metrics, + status::{get_active_with_status, Clock}, + DisputeCoordinatorSubsystem, LOG_TARGET, +}; + +use super::{ + backend::Backend, + db, make_dispute_message, + participation::{ + self, Participation, ParticipationPriority, ParticipationRequest, ParticipationStatement, + WorkerMessageReceiver, + }, + scraping::ChainScraper, + spam_slots::SpamSlots, + OverlayedBackend, +}; + +/// After the first active leaves update we transition to `Initialized` state. +/// +/// Before the first active leaves update we can't really do much. We cannot check incoming +/// statements for validity, we cannot query orderings, we have no valid `RollingSessionWindow`, +/// ... +pub struct Initialized { + keystore: Arc, + rolling_session_window: RollingSessionWindow, + highest_session: SessionIndex, + spam_slots: SpamSlots, + participation: Participation, + scraper: ChainScraper, + participation_receiver: WorkerMessageReceiver, + metrics: Metrics, + // This tracks only rolling session window failures. + // It can be a `Vec` if the need to track more arises. + error: Option, +} + +#[overseer::contextbounds(DisputeCoordinator, prefix = self::overseer)] +impl Initialized { + /// Make initialized subsystem, ready to `run`. + pub fn new( + subsystem: DisputeCoordinatorSubsystem, + rolling_session_window: RollingSessionWindow, + spam_slots: SpamSlots, + scraper: ChainScraper, + ) -> Self { + let DisputeCoordinatorSubsystem { config: _, store: _, keystore, metrics } = subsystem; + + let (participation_sender, participation_receiver) = mpsc::channel(1); + let participation = Participation::new(participation_sender, metrics.clone()); + let highest_session = rolling_session_window.latest_session(); + + Self { + keystore, + rolling_session_window, + highest_session, + spam_slots, + scraper, + participation, + participation_receiver, + metrics, + error: None, + } + } + + /// Run the initialized subsystem. + /// + /// Optionally supply initial participations and a first leaf to process. 
+ pub async fn run( + mut self, + mut ctx: Context, + mut backend: B, + mut participations: Vec<(ParticipationPriority, ParticipationRequest)>, + mut votes: Vec, + mut first_leaf: Option, + clock: Box, + ) -> FatalResult<()> + where + B: Backend, + { + loop { + let res = self + .run_until_error( + &mut ctx, + &mut backend, + &mut participations, + &mut votes, + &mut first_leaf, + &*clock, + ) + .await; + if let Ok(()) = res { + gum::info!(target: LOG_TARGET, "received `Conclude` signal, exiting"); + return Ok(()) + } + log_error(res)?; + } + } + + // Run the subsystem until an error is encountered or a `conclude` signal is received. + // Most errors are non-fatal and should lead to another call to this function. + // + // A return value of `Ok` indicates that an exit should be made, while non-fatal errors + // lead to another call to this function. + async fn run_until_error( + &mut self, + ctx: &mut Context, + backend: &mut B, + participations: &mut Vec<(ParticipationPriority, ParticipationRequest)>, + on_chain_votes: &mut Vec, + first_leaf: &mut Option, + clock: &dyn Clock, + ) -> Result<()> + where + B: Backend, + { + for (priority, request) in participations.drain(..) { + self.participation.queue_participation(ctx, priority, request).await?; + } + + { + let mut overlay_db = OverlayedBackend::new(backend); + for votes in on_chain_votes.drain(..) { + let _ = self + .process_on_chain_votes(ctx, &mut overlay_db, votes, clock.now()) + .await + .map_err(|error| { + gum::warn!( + target: LOG_TARGET, + ?error, + "Skipping scraping block due to error", + ); + }); + } + if !overlay_db.is_empty() { + let ops = overlay_db.into_write_ops(); + backend.write(ops)?; + } + } + + if let Some(first_leaf) = first_leaf.take() { + // Also provide first leaf to participation for good measure. + self.participation + .process_active_leaves_update(ctx, &ActiveLeavesUpdate::start_work(first_leaf)) + .await?; + } + + loop { + gum::trace!(target: LOG_TARGET, "Waiting for message"); + let mut overlay_db = OverlayedBackend::new(backend); + let default_confirm = Box::new(|| Ok(())); + let confirm_write = match MuxedMessage::receive(ctx, &mut self.participation_receiver) + .await? + { + MuxedMessage::Participation(msg) => { + gum::trace!(target: LOG_TARGET, "MuxedMessage::Participation"); + let ParticipationStatement { + session, + candidate_hash, + candidate_receipt, + outcome, + } = self.participation.get_participation_result(ctx, msg).await?; + if let Some(valid) = outcome.validity() { + gum::trace!( + target: LOG_TARGET, + ?session, + ?candidate_hash, + ?valid, + "Issuing local statement based on participation outcome." 
+ ); + self.issue_local_statement( + ctx, + &mut overlay_db, + candidate_hash, + candidate_receipt, + session, + valid, + clock.now(), + ) + .await?; + } else { + gum::warn!(target: LOG_TARGET, ?outcome, "Dispute participation failed"); + } + default_confirm + }, + MuxedMessage::Subsystem(msg) => match msg { + FromOrchestra::Signal(OverseerSignal::Conclude) => return Ok(()), + FromOrchestra::Signal(OverseerSignal::ActiveLeaves(update)) => { + gum::trace!(target: LOG_TARGET, "OverseerSignal::ActiveLeaves"); + self.process_active_leaves_update( + ctx, + &mut overlay_db, + update, + clock.now(), + ) + .await?; + default_confirm + }, + FromOrchestra::Signal(OverseerSignal::BlockFinalized(_, n)) => { + gum::trace!(target: LOG_TARGET, "OverseerSignal::BlockFinalized"); + self.scraper.process_finalized_block(&n); + default_confirm + }, + FromOrchestra::Communication { msg } => + self.handle_incoming(ctx, &mut overlay_db, msg, clock.now()).await?, + }, + }; + + if !overlay_db.is_empty() { + let ops = overlay_db.into_write_ops(); + backend.write(ops)?; + } + // even if the changeset was empty, + // otherwise the caller will error. + confirm_write()?; + } + } + + async fn process_active_leaves_update( + &mut self, + ctx: &mut Context, + overlay_db: &mut OverlayedBackend<'_, impl Backend>, + update: ActiveLeavesUpdate, + now: u64, + ) -> Result<()> { + gum::trace!(target: LOG_TARGET, timestamp = now, "Processing ActiveLeavesUpdate"); + let scraped_updates = + self.scraper.process_active_leaves_update(ctx.sender(), &update).await?; + log_error( + self.participation + .bump_to_priority_for_candidates(ctx, &scraped_updates.included_receipts) + .await, + )?; + self.participation.process_active_leaves_update(ctx, &update).await?; + + if let Some(new_leaf) = update.activated { + match self + .rolling_session_window + .cache_session_info_for_head(ctx.sender(), new_leaf.hash) + .await + { + Err(e) => { + gum::warn!( + target: LOG_TARGET, + err = ?e, + "Failed to update session cache for disputes", + ); + self.error = Some(e); + }, + Ok(SessionWindowUpdate::Advanced { + new_window_end: window_end, + new_window_start, + .. + }) => { + self.error = None; + let session = window_end; + if self.highest_session < session { + gum::trace!(target: LOG_TARGET, session, "Observed new session. Pruning"); + + self.highest_session = session; + + db::v1::note_earliest_session(overlay_db, new_window_start)?; + self.spam_slots.prune_old(new_window_start); + } + }, + Ok(SessionWindowUpdate::Unchanged) => {}, + }; + + gum::trace!( + target: LOG_TARGET, + timestamp = now, + "Will process {} onchain votes", + scraped_updates.on_chain_votes.len() + ); + + // The `runtime-api` subsystem has an internal queue which serializes the execution, + // so there is no point in running these in parallel + for votes in scraped_updates.on_chain_votes { + let _ = self.process_on_chain_votes(ctx, overlay_db, votes, now).await.map_err( + |error| { + gum::warn!( + target: LOG_TARGET, + ?error, + "Skipping scraping block due to error", + ); + }, + ); + } + } + + gum::trace!(target: LOG_TARGET, timestamp = now, "Done processing ActiveLeavesUpdate"); + Ok(()) + } + + /// Scrapes on-chain votes (backing votes and concluded disputes) for a active leaf of the + /// relay chain. 
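+ ///
+ /// The scraped votes decompose into a session, per-candidate backing votes and
+ /// concluded disputes; a rough sketch of the shape handled here (mirrors the
+ /// destructuring at the top of the function body):
+ ///
+ /// ```ignore
+ /// let ScrapedOnChainVotes { session, backing_validators_per_candidate, disputes } = votes;
+ /// ```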
+ async fn process_on_chain_votes( + &mut self, + ctx: &mut Context, + overlay_db: &mut OverlayedBackend<'_, impl Backend>, + votes: ScrapedOnChainVotes, + now: u64, + ) -> Result<()> { + let ScrapedOnChainVotes { session, backing_validators_per_candidate, disputes } = votes; + + if backing_validators_per_candidate.is_empty() && disputes.is_empty() { + return Ok(()) + } + + // Scraped on-chain backing votes for the candidates with + // the new active leaf as if we received them via gossip. + for (candidate_receipt, backers) in backing_validators_per_candidate { + // Obtain the session info, for sake of `ValidatorId`s + // either from the rolling session window. + // Must be called _after_ `fn cache_session_info_for_head` + // which guarantees that the session info is available + // for the current session. + let session_info: &SessionInfo = + if let Some(session_info) = self.rolling_session_window.session_info(session) { + session_info + } else { + gum::warn!( + target: LOG_TARGET, + ?session, + "Could not retrieve session info from rolling session window", + ); + return Ok(()) + }; + + let relay_parent = candidate_receipt.descriptor.relay_parent; + let candidate_hash = candidate_receipt.hash(); + gum::trace!( + target: LOG_TARGET, + ?candidate_hash, + ?relay_parent, + "Importing backing votes from chain for candidate" + ); + let statements = backers + .into_iter() + .filter_map(|(validator_index, attestation)| { + let validator_public: ValidatorId = session_info + .validators + .get(validator_index) + .or_else(|| { + gum::error!( + target: LOG_TARGET, + ?session, + ?validator_index, + "Missing public key for validator", + ); + None + }) + .cloned()?; + let validator_signature = attestation.signature().clone(); + let valid_statement_kind = + match attestation.to_compact_statement(candidate_hash) { + CompactStatement::Seconded(_) => + ValidDisputeStatementKind::BackingSeconded(relay_parent), + CompactStatement::Valid(_) => + ValidDisputeStatementKind::BackingValid(relay_parent), + }; + debug_assert!( + SignedDisputeStatement::new_checked( + DisputeStatement::Valid(valid_statement_kind), + candidate_hash, + session, + validator_public.clone(), + validator_signature.clone(), + ).is_ok(), + "Scraped backing votes had invalid signature! candidate: {:?}, session: {:?}, validator_public: {:?}", + candidate_hash, + session, + validator_public, + ); + let signed_dispute_statement = + SignedDisputeStatement::new_unchecked_from_trusted_source( + DisputeStatement::Valid(valid_statement_kind), + candidate_hash, + session, + validator_public, + validator_signature, + ); + Some((signed_dispute_statement, validator_index)) + }) + .collect(); + + // Importantly, handling import statements for backing votes also + // clears spam slots for any newly backed candidates + let import_result = self + .handle_import_statements( + ctx, + overlay_db, + MaybeCandidateReceipt::Provides(candidate_receipt), + session, + statements, + now, + ) + .await?; + match import_result { + ImportStatementsResult::ValidImport => gum::trace!( + target: LOG_TARGET, + ?relay_parent, + ?session, + "Imported backing votes from chain" + ), + ImportStatementsResult::InvalidImport => gum::warn!( + target: LOG_TARGET, + ?relay_parent, + ?session, + "Attempted import of on-chain backing votes failed" + ), + } + } + + // Import disputes from on-chain, this already went through a vote so it's assumed + // as verified. This will only be stored, gossiping it is not necessary. 
+ for DisputeStatementSet { candidate_hash, session, statements } in disputes { + gum::trace!( + target: LOG_TARGET, + ?candidate_hash, + ?session, + "Importing dispute votes from chain for candidate" + ); + let session_info = + if let Some(session_info) = self.rolling_session_window.session_info(session) { + session_info + } else { + gum::warn!( + target: LOG_TARGET, + ?candidate_hash, + ?session, + "Could not retrieve session info from rolling session window for recently concluded dispute" + ); + continue + }; + + let statements = statements + .into_iter() + .filter_map(|(dispute_statement, validator_index, validator_signature)| { + let validator_public: ValidatorId = session_info + .validators + .get(validator_index) + .or_else(|| { + gum::error!( + target: LOG_TARGET, + ?candidate_hash, + ?session, + "Missing public key for validator {:?} that participated in concluded dispute", + &validator_index + ); + None + }) + .cloned()?; + + Some(( + SignedDisputeStatement::new_unchecked_from_trusted_source( + dispute_statement, + candidate_hash, + session, + validator_public, + validator_signature, + ), + validator_index, + )) + }) + .collect::>(); + if statements.is_empty() { + gum::debug!(target: LOG_TARGET, "Skipping empty from chain dispute import"); + continue + } + let import_result = self + .handle_import_statements( + ctx, + overlay_db, + // TODO + MaybeCandidateReceipt::AssumeBackingVotePresent(candidate_hash), + session, + statements, + now, + ) + .await?; + match import_result { + ImportStatementsResult::ValidImport => gum::trace!( + target: LOG_TARGET, + ?candidate_hash, + ?session, + "Imported statement of dispute from on-chain" + ), + ImportStatementsResult::InvalidImport => gum::warn!( + target: LOG_TARGET, + ?candidate_hash, + ?session, + "Attempted import of on-chain statement of dispute failed" + ), + } + } + + Ok(()) + } + + async fn handle_incoming( + &mut self, + ctx: &mut Context, + overlay_db: &mut OverlayedBackend<'_, impl Backend>, + message: DisputeCoordinatorMessage, + now: Timestamp, + ) -> Result JfyiResult<()>>> { + match message { + DisputeCoordinatorMessage::ImportStatements { + candidate_receipt, + session, + statements, + pending_confirmation, + } => { + gum::trace!( + target: LOG_TARGET, + candidate_hash = ?candidate_receipt.hash(), + ?session, + "DisputeCoordinatorMessage::ImportStatements" + ); + let outcome = self + .handle_import_statements( + ctx, + overlay_db, + MaybeCandidateReceipt::Provides(candidate_receipt), + session, + statements, + now, + ) + .await?; + let report = move || match pending_confirmation { + Some(pending_confirmation) => pending_confirmation + .send(outcome) + .map_err(|_| JfyiError::DisputeImportOneshotSend), + None => Ok(()), + }; + + match outcome { + ImportStatementsResult::InvalidImport => { + report()?; + }, + // In case of valid import, delay confirmation until actual disk write: + ImportStatementsResult::ValidImport => return Ok(Box::new(report)), + } + }, + DisputeCoordinatorMessage::RecentDisputes(tx) => { + // Return error if session information is missing. + self.ensure_available_session_info()?; + + gum::trace!(target: LOG_TARGET, "Loading recent disputes from db"); + let recent_disputes = if let Some(disputes) = overlay_db.load_recent_disputes()? 
{ + disputes + } else { + BTreeMap::new() + }; + gum::trace!(target: LOG_TARGET, "Loaded recent disputes from db"); + + let _ = tx.send( + recent_disputes.into_iter().map(|(k, v)| (k.0, k.1, v)).collect::>(), + ); + }, + DisputeCoordinatorMessage::ActiveDisputes(tx) => { + // Return error if session information is missing. + self.ensure_available_session_info()?; + + gum::trace!(target: LOG_TARGET, "DisputeCoordinatorMessage::ActiveDisputes"); + + let recent_disputes = if let Some(disputes) = overlay_db.load_recent_disputes()? { + disputes + } else { + BTreeMap::new() + }; + + let _ = tx.send( + get_active_with_status(recent_disputes.into_iter(), now) + .map(|((session_idx, candidate_hash), dispute_status)| { + (session_idx, candidate_hash, dispute_status) + }) + .collect(), + ); + }, + DisputeCoordinatorMessage::QueryCandidateVotes(query, tx) => { + // Return error if session information is missing. + self.ensure_available_session_info()?; + + gum::trace!(target: LOG_TARGET, "DisputeCoordinatorMessage::QueryCandidateVotes"); + + let mut query_output = Vec::new(); + for (session_index, candidate_hash) in query { + if let Some(v) = + overlay_db.load_candidate_votes(session_index, &candidate_hash)? + { + query_output.push((session_index, candidate_hash, v.into())); + } else { + gum::debug!( + target: LOG_TARGET, + session_index, + "No votes found for candidate", + ); + } + } + let _ = tx.send(query_output); + }, + DisputeCoordinatorMessage::IssueLocalStatement( + session, + candidate_hash, + candidate_receipt, + valid, + ) => { + gum::trace!(target: LOG_TARGET, "DisputeCoordinatorMessage::IssueLocalStatement"); + self.issue_local_statement( + ctx, + overlay_db, + candidate_hash, + candidate_receipt, + session, + valid, + now, + ) + .await?; + }, + DisputeCoordinatorMessage::DetermineUndisputedChain { + base: (base_number, base_hash), + block_descriptions, + tx, + } => { + // Return error if session information is missing. + self.ensure_available_session_info()?; + gum::trace!( + target: LOG_TARGET, + "DisputeCoordinatorMessage::DetermineUndisputedChain" + ); + + let undisputed_chain = determine_undisputed_chain( + overlay_db, + base_number, + base_hash, + block_descriptions, + )?; + + let _ = tx.send(undisputed_chain); + }, + } + + Ok(Box::new(|| Ok(()))) + } + + // Helper function for checking subsystem errors in message processing. + fn ensure_available_session_info(&self) -> Result<()> { + if let Some(subsystem_error) = self.error.clone() { + return Err(Error::RollingSessionWindow(subsystem_error)) + } + + Ok(()) + } + + // We use fatal result rather than result here. Reason being, We for example increase + // spam slots in this function. If then the import fails for some non fatal and + // unrelated reason, we should likely actually decrement previously incremented spam + // slots again, for non fatal errors - which is cumbersome and actually not needed + async fn handle_import_statements( + &mut self, + ctx: &mut Context, + overlay_db: &mut OverlayedBackend<'_, impl Backend>, + candidate_receipt: MaybeCandidateReceipt, + session: SessionIndex, + statements: Vec<(SignedDisputeStatement, ValidatorIndex)>, + now: Timestamp, + ) -> FatalResult { + gum::trace!(target: LOG_TARGET, ?statements, "In handle import statements"); + if !self.rolling_session_window.contains(session) { + // It is not valid to participate in an ancient dispute (spam?) or too new. 
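+ // Conceptually this is just a range check against the rolling session window
+ // (an illustrative sketch, not the actual `RollingSessionWindow` API):
+ //
+ // let in_window =
+ //     window.earliest_session() <= session && session <= window.latest_session();
+ // if !in_window { /* reject the import as ancient or too new */ }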
+ return Ok(ImportStatementsResult::InvalidImport) + } + + let env = match CandidateEnvironment::new( + &self.keystore, + &self.rolling_session_window, + session, + ) { + None => { + gum::warn!( + target: LOG_TARGET, + session, + "We are lacking a `SessionInfo` for handling import of statements." + ); + + return Ok(ImportStatementsResult::InvalidImport) + }, + Some(env) => env, + }; + + let candidate_hash = candidate_receipt.hash(); + + gum::trace!( + target: LOG_TARGET, + ?candidate_hash, + ?session, + num_validators = ?env.session_info().validators.len(), + "Number of validators" + ); + + // In case we are not provided with a candidate receipt + // we operate under the assumption, that a previous vote + // which included a `CandidateReceipt` was seen. + // This holds since every block is preceded by the `Backing`-phase. + // + // There is one exception: A sufficiently sophisticated attacker could prevent + // us from seeing the backing votes by withholding arbitrary blocks, and hence we do + // not have a `CandidateReceipt` available. + let old_state = match overlay_db + .load_candidate_votes(session, &candidate_hash)? + .map(CandidateVotes::from) + { + Some(votes) => CandidateVoteState::new(votes, &env, now), + None => + if let MaybeCandidateReceipt::Provides(candidate_receipt) = candidate_receipt { + CandidateVoteState::new_from_receipt(candidate_receipt) + } else { + gum::warn!( + target: LOG_TARGET, + session, + ?candidate_hash, + "Cannot import votes, without `CandidateReceipt` available!" + ); + return Ok(ImportStatementsResult::InvalidImport) + }, + }; + + gum::trace!(target: LOG_TARGET, ?candidate_hash, ?session, "Loaded votes"); + + let import_result = { + let intermediate_result = old_state.import_statements(&env, statements, now); + + // Handle approval vote import: + // + // See guide: We import on fresh disputes to maximize likelihood of fetching votes for + // dead forks and once concluded to maximize time for approval votes to trickle in. + if intermediate_result.is_freshly_disputed() || + intermediate_result.is_freshly_concluded() + { + gum::trace!( + target: LOG_TARGET, + ?candidate_hash, + ?session, + "Requesting approval signatures" + ); + let (tx, rx) = oneshot::channel(); + // Use of unbounded channels justified because: + // 1. Only triggered twice per dispute. + // 2. Raising a dispute is costly (requires validation + recovery) by honest nodes, + // dishonest nodes are limited by spam slots. + // 3. Concluding a dispute is even more costly. + // Therefore it is reasonable to expect a simple vote request to succeed way faster + // than disputes are raised. + // 4. We are waiting (and blocking the whole subsystem) on a response right after - + // therefore even with all else failing we will never have more than + // one message in flight at any given time. + ctx.send_unbounded_message( + ApprovalVotingMessage::GetApprovalSignaturesForCandidate(candidate_hash, tx), + ); + match rx.await { + Err(_) => { + gum::warn!( + target: LOG_TARGET, + "Fetch for approval votes got cancelled, only expected during shutdown!" + ); + intermediate_result + }, + Ok(votes) => { + gum::trace!( + target: LOG_TARGET, + count = votes.len(), + "Successfully received approval votes." 
+ ); + intermediate_result.import_approval_votes(&env, votes, now) + }, + } + } else { + gum::trace!( + target: LOG_TARGET, + ?candidate_hash, + ?session, + "Not requested approval signatures" + ); + intermediate_result + } + }; + + gum::trace!( + target: LOG_TARGET, + ?candidate_hash, + ?session, + num_validators = ?env.session_info().validators.len(), + "Import result ready" + ); + let new_state = import_result.new_state(); + + let is_included = self.scraper.is_candidate_included(&candidate_hash); + let is_backed = self.scraper.is_candidate_backed(&candidate_hash); + let own_vote_missing = new_state.own_vote_missing(); + let is_disputed = new_state.is_disputed(); + let is_confirmed = new_state.is_confirmed(); + let potential_spam = is_potential_spam(&self.scraper, &new_state, &candidate_hash); + // We participate only in disputes which are not potential spam. + let allow_participation = !potential_spam; + + gum::trace!( + target: LOG_TARGET, + ?own_vote_missing, + ?potential_spam, + ?is_included, + ?candidate_hash, + confirmed = ?new_state.is_confirmed(), + has_invalid_voters = ?!import_result.new_invalid_voters().is_empty(), + "Is spam?" + ); + + // This check is responsible for all clearing of spam slots. It runs + // whenever a vote is imported from on or off chain, and decrements + // slots whenever a candidate is newly backed, confirmed, or has our + // own vote. + if !potential_spam { + self.spam_slots.clear(&(session, candidate_hash)); + + // Potential spam: + } else if !import_result.new_invalid_voters().is_empty() { + let mut free_spam_slots_available = false; + // Only allow import if at least one validator voting invalid, has not exceeded + // its spam slots: + for index in import_result.new_invalid_voters() { + // Disputes can only be triggered via an invalidity stating vote, thus we only + // need to increase spam slots on invalid votes. (If we did not, we would also + // increase spam slots for backing validators for example - as validators have to + // provide some opposing vote for dispute-distribution). + free_spam_slots_available |= + self.spam_slots.add_unconfirmed(session, candidate_hash, *index); + } + if !free_spam_slots_available { + gum::debug!( + target: LOG_TARGET, + ?candidate_hash, + ?session, + invalid_voters = ?import_result.new_invalid_voters(), + "Rejecting import because of full spam slots." + ); + return Ok(ImportStatementsResult::InvalidImport) + } + } + + // Participate in dispute if we did not cast a vote before and actually have keys to cast a + // local vote. Disputes should fall in one of the categories below, otherwise we will refrain + // from participation: + // - `is_included` lands in prioritised queue + // - `is_confirmed` | `is_backed` lands in best effort queue + // We don't participate in disputes on finalized candidates. 
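+ //
+ // Schematically, the decision below reduces to (illustrative pseudo-code only):
+ //
+ // match (is_included, is_backed || is_confirmed) {
+ //     (true, _) => /* priority queue */,
+ //     (false, true) => /* best-effort queue */,
+ //     (false, false) => /* refrain: potential spam */,
+ // }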
+ if own_vote_missing && is_disputed && allow_participation { + let priority = ParticipationPriority::with_priority_if(is_included); + gum::trace!( + target: LOG_TARGET, + ?candidate_hash, + ?priority, + "Queuing participation for candidate" + ); + if priority.is_priority() { + self.metrics.on_queued_priority_participation(); + } else { + self.metrics.on_queued_best_effort_participation(); + } + let request_timer = Arc::new(self.metrics.time_participation_pipeline()); + let r = self + .participation + .queue_participation( + ctx, + priority, + ParticipationRequest::new( + new_state.candidate_receipt().clone(), + session, + request_timer, + ), + ) + .await; + log_error(r)?; + } else { + gum::trace!( + target: LOG_TARGET, + ?candidate_hash, + ?is_confirmed, + ?own_vote_missing, + ?is_disputed, + ?allow_participation, + ?is_included, + ?is_backed, + "Will not queue participation for candidate" + ); + + if !allow_participation { + self.metrics.on_refrained_participation(); + } + } + + // Also send any already existing approval vote on new disputes: + if import_result.is_freshly_disputed() { + let our_approval_votes = new_state.own_approval_votes().into_iter().flatten(); + for (validator_index, sig) in our_approval_votes { + let pub_key = match env.validators().get(validator_index) { + None => { + gum::error!( + target: LOG_TARGET, + ?validator_index, + ?session, + "Could not find pub key in `SessionInfo` for our own approval vote!" + ); + continue + }, + Some(k) => k, + }; + let statement = SignedDisputeStatement::new_unchecked_from_trusted_source( + DisputeStatement::Valid(ValidDisputeStatementKind::ApprovalChecking), + candidate_hash, + session, + pub_key.clone(), + sig.clone(), + ); + gum::trace!( + target: LOG_TARGET, + ?candidate_hash, + ?session, + ?validator_index, + "Sending out own approval vote" + ); + match make_dispute_message( + env.session_info(), + &new_state.votes(), + statement, + validator_index, + ) { + Err(err) => { + gum::error!( + target: LOG_TARGET, + ?err, + "No ongoing dispute, but we checked there is one!" + ); + }, + Ok(dispute_message) => { + ctx.send_message(DisputeDistributionMessage::SendDispute(dispute_message)) + .await; + }, + }; + } + } + + // All good, update recent disputes if state has changed: + if let Some(new_status) = new_state.dispute_status() { + // Only bother with db access, if there was an actual change. + if import_result.dispute_state_changed() { + let mut recent_disputes = overlay_db.load_recent_disputes()?.unwrap_or_default(); + + let status = + recent_disputes.entry((session, candidate_hash)).or_insert_with(|| { + gum::info!( + target: LOG_TARGET, + ?candidate_hash, + session, + "New dispute initiated for candidate.", + ); + DisputeStatus::active() + }); + + *status = *new_status; + + gum::trace!( + target: LOG_TARGET, + ?candidate_hash, + ?status, + has_concluded_for = ?new_state.has_concluded_for(), + has_concluded_against = ?new_state.has_concluded_against(), + "Writing recent disputes with updates for candidate" + ); + overlay_db.write_recent_disputes(recent_disputes); + } + } + + // Notify ChainSelection if a dispute has concluded against a candidate. ChainSelection + // will need to mark the candidate's relay parent as reverted. 
+ if import_result.is_freshly_concluded_against() { + let blocks_including = self.scraper.get_blocks_including_candidate(&candidate_hash); + if blocks_including.len() > 0 { + ctx.send_message(ChainSelectionMessage::RevertBlocks(blocks_including)).await; + } else { + gum::debug!( + target: LOG_TARGET, + ?candidate_hash, + ?session, + "Could not find an including block for candidate against which a dispute has concluded." + ); + } + } + + // Update metrics: + if import_result.is_freshly_disputed() { + self.metrics.on_open(); + } + self.metrics.on_valid_votes(import_result.imported_valid_votes()); + self.metrics.on_invalid_votes(import_result.imported_invalid_votes()); + gum::trace!( + target: LOG_TARGET, + ?candidate_hash, + ?session, + imported_approval_votes = ?import_result.imported_approval_votes(), + imported_valid_votes = ?import_result.imported_valid_votes(), + imported_invalid_votes = ?import_result.imported_invalid_votes(), + total_valid_votes = ?import_result.new_state().votes().valid.raw().len(), + total_invalid_votes = ?import_result.new_state().votes().invalid.len(), + confirmed = ?import_result.new_state().is_confirmed(), + "Import summary" + ); + + self.metrics.on_approval_votes(import_result.imported_approval_votes()); + if import_result.is_freshly_concluded_for() { + gum::info!( + target: LOG_TARGET, + ?candidate_hash, + session, + "Dispute on candidate concluded with 'valid' result", + ); + self.metrics.on_concluded_valid(); + } + if import_result.is_freshly_concluded_against() { + gum::info!( + target: LOG_TARGET, + ?candidate_hash, + session, + "Dispute on candidate concluded with 'invalid' result", + ); + self.metrics.on_concluded_invalid(); + } + + // Only write when votes have changed. + if let Some(votes) = import_result.into_updated_votes() { + overlay_db.write_candidate_votes(session, candidate_hash, votes.into()); + } + + Ok(ImportStatementsResult::ValidImport) + } + + async fn issue_local_statement( + &mut self, + ctx: &mut Context, + overlay_db: &mut OverlayedBackend<'_, impl Backend>, + candidate_hash: CandidateHash, + candidate_receipt: CandidateReceipt, + session: SessionIndex, + valid: bool, + now: Timestamp, + ) -> Result<()> { + gum::trace!( + target: LOG_TARGET, + ?candidate_hash, + ?session, + ?valid, + ?now, + "Issuing local statement for candidate!" + ); + // Load environment: + let env = match CandidateEnvironment::new( + &self.keystore, + &self.rolling_session_window, + session, + ) { + None => { + gum::warn!( + target: LOG_TARGET, + session, + "Missing info for session which has an active dispute", + ); + + return Ok(()) + }, + Some(env) => env, + }; + + let votes = overlay_db + .load_candidate_votes(session, &candidate_hash)? + .map(CandidateVotes::from) + .unwrap_or_else(|| CandidateVotes { + candidate_receipt: candidate_receipt.clone(), + valid: ValidCandidateVotes::new(), + invalid: BTreeMap::new(), + }); + + // Sign a statement for each validator index we control which has + // not already voted. This should generally be maximum 1 statement. 
+ let voted_indices = votes.voted_indices(); + let mut statements = Vec::new(); + + let controlled_indices = env.controlled_indices(); + for index in controlled_indices { + if voted_indices.contains(&index) { + continue + } + + let keystore = self.keystore.clone() as Arc<_>; + let res = SignedDisputeStatement::sign_explicit( + &keystore, + valid, + candidate_hash, + session, + env.validators() + .get(*index) + .expect("`controlled_indices` are derived from `validators`; qed") + .clone(), + ) + .await; + + match res { + Ok(Some(signed_dispute_statement)) => { + statements.push((signed_dispute_statement, *index)); + }, + Ok(None) => {}, + Err(e) => { + gum::error!( + target: LOG_TARGET, + err = ?e, + "Encountered keystore error while signing dispute statement", + ); + }, + } + } + + // Get our message out: + for (statement, index) in &statements { + let dispute_message = + match make_dispute_message(env.session_info(), &votes, statement.clone(), *index) { + Err(err) => { + gum::debug!(target: LOG_TARGET, ?err, "Creating dispute message failed."); + continue + }, + Ok(dispute_message) => dispute_message, + }; + + ctx.send_message(DisputeDistributionMessage::SendDispute(dispute_message)).await; + } + + // Do import + if !statements.is_empty() { + match self + .handle_import_statements( + ctx, + overlay_db, + MaybeCandidateReceipt::Provides(candidate_receipt), + session, + statements, + now, + ) + .await? + { + ImportStatementsResult::InvalidImport => { + gum::error!( + target: LOG_TARGET, + ?candidate_hash, + ?session, + "`handle_import_statements` considers our own votes invalid!" + ); + }, + ImportStatementsResult::ValidImport => { + gum::trace!( + target: LOG_TARGET, + ?candidate_hash, + ?session, + "`handle_import_statements` successfully imported our vote!" + ); + }, + } + } + + Ok(()) + } +} + +/// Messages to be handled in this subsystem. +enum MuxedMessage { + /// Messages from other subsystems. + Subsystem(FromOrchestra), + /// Messages from participation workers. + Participation(participation::WorkerMessage), +} + +#[overseer::contextbounds(DisputeCoordinator, prefix = self::overseer)] +impl MuxedMessage { + async fn receive( + ctx: &mut Context, + from_sender: &mut participation::WorkerMessageReceiver, + ) -> FatalResult { + // We are only fusing here to make `select` happy, in reality we will quit if the stream + // ends. + let from_overseer = ctx.recv().fuse(); + futures::pin_mut!(from_overseer, from_sender); + futures::select!( + msg = from_overseer => Ok(Self::Subsystem(msg.map_err(FatalError::SubsystemReceive)?)), + msg = from_sender.next() => Ok(Self::Participation(msg.ok_or(FatalError::ParticipationWorkerReceiverExhausted)?)), + ) + } +} + +#[derive(Debug, Clone)] +enum MaybeCandidateReceipt { + /// Directly provides the candidate receipt. + Provides(CandidateReceipt), + /// Assumes it was seen before by means of seconded message. + AssumeBackingVotePresent(CandidateHash), +} + +impl MaybeCandidateReceipt { + /// Retrieve `CandidateHash` for the corresponding candidate. + pub fn hash(&self) -> CandidateHash { + match self { + Self::Provides(receipt) => receipt.hash(), + Self::AssumeBackingVotePresent(hash) => *hash, + } + } +} + +/// Determine the best block and its block number. +/// Assumes `block_descriptions` are sorted from the one +/// with the lowest `BlockNumber` to the highest. 
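+///
+/// Illustrative example (hypothetical values): with `base = (10, hash10)` and
+/// descriptions for blocks `11..=13`, where block `12` contains a candidate whose
+/// dispute is possibly invalid, the undisputed chain ends at block `11`:
+///
+/// ```ignore
+/// let (number, _hash) = determine_undisputed_chain(db, 10, hash10, descriptions)?;
+/// assert_eq!(number, 11);
+/// ```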
+fn determine_undisputed_chain(
+ overlay_db: &mut OverlayedBackend<'_, impl Backend>,
+ base_number: BlockNumber,
+ base_hash: Hash,
+ block_descriptions: Vec<BlockDescription>,
+) -> Result<(BlockNumber, Hash)> {
+ let last = block_descriptions
+ .last()
+ .map(|e| (base_number + block_descriptions.len() as BlockNumber, e.block_hash))
+ .unwrap_or((base_number, base_hash));
+
+ // Fast path for no disputes.
+ let recent_disputes = match overlay_db.load_recent_disputes()? {
+ None => return Ok(last),
+ Some(a) if a.is_empty() => return Ok(last),
+ Some(a) => a,
+ };
+
+ let is_possibly_invalid = |session, candidate_hash| {
+ recent_disputes
+ .get(&(session, candidate_hash))
+ .map_or(false, |status| status.is_possibly_invalid())
+ };
+
+ for (i, BlockDescription { session, candidates, .. }) in block_descriptions.iter().enumerate() {
+ if candidates.iter().any(|c| is_possibly_invalid(*session, *c)) {
+ if i == 0 {
+ return Ok((base_number, base_hash))
+ } else {
+ return Ok((base_number + i as BlockNumber, block_descriptions[i - 1].block_hash))
+ }
+ }
+ }
+
+ Ok(last)
+}
diff --git a/polkadot/node/core/dispute-coordinator/src/lib.rs b/polkadot/node/core/dispute-coordinator/src/lib.rs
new file mode 100644
index 00000000..de155c1e
--- /dev/null
+++ b/polkadot/node/core/dispute-coordinator/src/lib.rs
@@ -0,0 +1,566 @@
+// Copyright 2020 Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
+
+//! Implements the dispute coordinator subsystem.
+//!
+//! This is the central subsystem of the node-side components which participate in disputes.
+//! This subsystem wraps a database which tracks all statements observed by all validators over some window of sessions.
+//! Votes older than this session window are pruned.
+//!
+//! This subsystem will be the point which produces dispute votes, either positive or negative, based on locally-observed
+//! validation results, as well as a sink for votes received from other subsystems. When importing a dispute vote from
+//! another node, this will trigger dispute participation to recover and validate the block.
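+//!
+//! As a rough sketch of the import flow described above (illustrative outline, not
+//! the real API):
+//!
+//! ```ignore
+//! // 1. Statements arrive (gossip, chain scraping, or our own participation result).
+//! // 2. They are signature-checked and merged into the stored `CandidateVotes`.
+//! // 3. If the candidate is now disputed, not potential spam, and we have not voted
+//! //    yet, a participation request is queued (priority if the candidate is included).
+//! // 4. Our resulting vote is imported and distributed via dispute-distribution.
+//! ```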
+
+use std::sync::Arc;
+
+use futures::FutureExt;
+
+use gum::CandidateHash;
+use sc_keystore::LocalKeystore;
+
+use polkadot_node_primitives::{
+ CandidateVotes, DisputeMessage, DisputeMessageCheckError, SignedDisputeStatement,
+};
+use polkadot_node_subsystem::{
+ messages::DisputeDistributionMessage, overseer, ActivatedLeaf, FromOrchestra, OverseerSignal,
+ SpawnedSubsystem, SubsystemError,
+};
+use polkadot_node_subsystem_util::{
+ database::Database,
+ rolling_session_window::{DatabaseParams, RollingSessionWindow},
+};
+use polkadot_primitives::{DisputeStatement, ScrapedOnChainVotes, SessionInfo, ValidatorIndex};
+
+use crate::{
+ error::{FatalResult, JfyiError, Result},
+ metrics::Metrics,
+ status::{get_active_with_status, SystemClock},
+};
+use backend::{Backend, OverlayedBackend};
+use db::v1::DbBackend;
+use fatality::Split;
+
+use self::{
+ import::{CandidateEnvironment, CandidateVoteState},
+ participation::{ParticipationPriority, ParticipationRequest},
+ spam_slots::{SpamSlots, UnconfirmedDisputes},
+};
+
+pub(crate) mod backend;
+pub(crate) mod db;
+pub(crate) mod error;
+
+/// Subsystem after receiving the first active leaf.
+mod initialized;
+use initialized::Initialized;
+
+/// Provider of data scraped from chain.
+///
+/// If we have seen a candidate included somewhere, we should treat it with priority and will be
+/// able to provide an ordering for participation. Thus a dispute for a candidate where we can get
+/// some ordering is high-priority (we know it is a valid dispute) and those can be ordered by
+/// `participation` based on `relay_parent` block number and other metrics, so each validator will
+/// participate in disputes in a similar order, which ensures we will be resolving disputes, even
+/// under heavy load.
+mod scraping;
+use scraping::ChainScraper;
+
+/// When importing votes we will check via the `ordering` module whether or not we know of the
+/// candidate to be included somewhere. If not, the votes might be spam; in this case we want to
+/// limit the amount of locally imported votes, to prevent DoS attacks/resource exhaustion. The
+/// `spam_slots` module helps keep track of unconfirmed disputes per validator. If a spam slot
+/// gets full, we will drop any further potential spam votes from that validator and report back
+/// that the import failed; this will lead any honest validator to retry, so the spam slots
+/// can be relatively small, as a drop is not fatal.
+mod spam_slots;
+
+/// Handling of participation requests via `Participation`.
+///
+/// `Participation` provides an API (`Participation::queue_participation`) for queuing dispute
+/// participations and will process those participation requests, such that the most
+/// important/urgent disputes will be resolved and processed first. More importantly, it orders
+/// requests in a way that ensures disputes will get resolved, even if there are lots of them.
+pub(crate) mod participation;
+
+/// Pure processing of vote imports.
+pub(crate) mod import;
+
+/// Metrics types.
+mod metrics;
+
+/// Status tracking of disputes (`DisputeStatus`).
+mod status;
+
+use crate::status::Clock;
+
+#[cfg(test)]
+mod tests;
+
+pub(crate) const LOG_TARGET: &str = "parachain::dispute-coordinator";
+
+/// An implementation of the dispute coordinator subsystem.
+pub struct DisputeCoordinatorSubsystem {
+ config: Config,
+ store: Arc<dyn Database>,
+ keystore: Arc<LocalKeystore>,
+ metrics: Metrics,
+}
+
+/// Configuration for the dispute coordinator subsystem.
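+///
+/// Construction sketch (column indices are illustrative; they must match the node's
+/// actual database layout):
+///
+/// ```ignore
+/// let config = Config { col_dispute_data: 0, col_session_data: 1 };
+/// ```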
+#[derive(Debug, Clone, Copy)] +pub struct Config { + /// The data column in the store to use for dispute data. + pub col_dispute_data: u32, + /// The data column in the store to use for session data. + pub col_session_data: u32, +} + +impl Config { + fn column_config(&self) -> db::v1::ColumnConfiguration { + db::v1::ColumnConfiguration { + col_dispute_data: self.col_dispute_data, + col_session_data: self.col_session_data, + } + } +} + +#[overseer::subsystem(DisputeCoordinator, error=SubsystemError, prefix=self::overseer)] +impl DisputeCoordinatorSubsystem { + fn start(self, ctx: Context) -> SpawnedSubsystem { + let future = async { + let backend = DbBackend::new( + self.store.clone(), + self.config.column_config(), + self.metrics.clone(), + ); + self.run(ctx, backend, Box::new(SystemClock)) + .await + .map_err(|e| SubsystemError::with_origin("dispute-coordinator", e)) + } + .boxed(); + + SpawnedSubsystem { name: "dispute-coordinator-subsystem", future } + } +} + +#[overseer::contextbounds(DisputeCoordinator, prefix = self::overseer)] +impl DisputeCoordinatorSubsystem { + /// Create a new instance of the subsystem. + pub fn new( + store: Arc, + config: Config, + keystore: Arc, + metrics: Metrics, + ) -> Self { + Self { store, config, keystore, metrics } + } + + /// Initialize and afterwards run `Initialized::run`. + async fn run( + self, + mut ctx: Context, + backend: B, + clock: Box, + ) -> FatalResult<()> + where + B: Backend + 'static, + { + let res = self.initialize(&mut ctx, backend, &*clock).await?; + + let (participations, votes, first_leaf, initialized, backend) = match res { + // Concluded: + None => return Ok(()), + Some(r) => r, + }; + + initialized + .run(ctx, backend, participations, votes, Some(first_leaf), clock) + .await + } + + /// Make sure to recover participations properly on startup. + async fn initialize( + self, + ctx: &mut Context, + mut backend: B, + clock: &(dyn Clock), + ) -> FatalResult< + Option<( + Vec<(ParticipationPriority, ParticipationRequest)>, + Vec, + ActivatedLeaf, + Initialized, + B, + )>, + > + where + B: Backend + 'static, + { + loop { + let db_params = + DatabaseParams { db: self.store.clone(), db_column: self.config.col_session_data }; + + let (first_leaf, rolling_session_window) = + match get_rolling_session_window(ctx, db_params).await { + Ok(Some(update)) => update, + Ok(None) => { + gum::info!(target: LOG_TARGET, "received `Conclude` signal, exiting"); + return Ok(None) + }, + Err(e) => { + e.split()?.log(); + continue + }, + }; + + let mut overlay_db = OverlayedBackend::new(&mut backend); + let (participations, votes, spam_slots, ordering_provider) = match self + .handle_startup( + ctx, + first_leaf.clone(), + &rolling_session_window, + &mut overlay_db, + clock, + ) + .await + { + Ok(v) => v, + Err(e) => { + e.split()?.log(); + continue + }, + }; + if !overlay_db.is_empty() { + let ops = overlay_db.into_write_ops(); + backend.write(ops)?; + } + + return Ok(Some(( + participations, + votes, + first_leaf, + Initialized::new(self, rolling_session_window, spam_slots, ordering_provider), + backend, + ))) + } + } + + // Restores the subsystem's state before proceeding with the main event loop. + // + // - Prune any old disputes. + // - Find disputes we need to participate in. + // - Initialize spam slots & OrderingProvider. 
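+ //
+ // In pseudo-code, the per-dispute recovery below boils down to (illustrative only):
+ //
+ // if is_potential_spam(&scraper, &vote_state, candidate_hash) {
+ //     /* remember it in spam slots */
+ // } else if vote_state.own_vote_missing() {
+ //     /* queue participation (priority if the candidate is included) */
+ // } else {
+ //     /* redistribute our existing vote via dispute-distribution */
+ // }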
+ async fn handle_startup( + &self, + ctx: &mut Context, + initial_head: ActivatedLeaf, + rolling_session_window: &RollingSessionWindow, + overlay_db: &mut OverlayedBackend<'_, impl Backend>, + clock: &dyn Clock, + ) -> Result<( + Vec<(ParticipationPriority, ParticipationRequest)>, + Vec, + SpamSlots, + ChainScraper, + )> { + // Prune obsolete disputes: + db::v1::note_earliest_session(overlay_db, rolling_session_window.earliest_session())?; + + let now = clock.now(); + + let active_disputes = match overlay_db.load_recent_disputes() { + Ok(disputes) => disputes + .map(|disputes| get_active_with_status(disputes.into_iter(), now)) + .into_iter() + .flatten(), + Err(e) => { + gum::error!(target: LOG_TARGET, "Failed initial load of recent disputes: {:?}", e); + return Err(e.into()) + }, + }; + + let mut participation_requests = Vec::new(); + let mut spam_disputes: UnconfirmedDisputes = UnconfirmedDisputes::new(); + let (scraper, votes) = ChainScraper::new(ctx.sender(), initial_head).await?; + for ((session, ref candidate_hash), _) in active_disputes { + let env = + match CandidateEnvironment::new(&self.keystore, &rolling_session_window, session) { + None => { + gum::warn!( + target: LOG_TARGET, + session, + "We are lacking a `SessionInfo` for handling db votes on startup." + ); + + continue + }, + Some(env) => env, + }; + + let votes: CandidateVotes = + match overlay_db.load_candidate_votes(session, candidate_hash) { + Ok(Some(votes)) => votes.into(), + Ok(None) => continue, + Err(e) => { + gum::error!( + target: LOG_TARGET, + "Failed initial load of candidate votes: {:?}", + e + ); + continue + }, + }; + let vote_state = CandidateVoteState::new(votes, &env, now); + + let potential_spam = is_potential_spam(&scraper, &vote_state, candidate_hash); + let is_included = + scraper.is_candidate_included(&vote_state.votes().candidate_receipt.hash()); + + if potential_spam { + gum::trace!( + target: LOG_TARGET, + ?session, + ?candidate_hash, + "Found potential spam dispute on startup" + ); + spam_disputes + .insert((session, *candidate_hash), vote_state.votes().voted_indices()); + } else { + // Participate if need be: + if vote_state.own_vote_missing() { + gum::trace!( + target: LOG_TARGET, + ?session, + ?candidate_hash, + "Found valid dispute, with no vote from us on startup - participating." + ); + let request_timer = Arc::new(self.metrics.time_participation_pipeline()); + participation_requests.push(( + ParticipationPriority::with_priority_if(is_included), + ParticipationRequest::new( + vote_state.votes().candidate_receipt.clone(), + session, + request_timer, + ), + )); + } + // Else make sure our own vote is distributed: + else { + gum::trace!( + target: LOG_TARGET, + ?session, + ?candidate_hash, + "Found valid dispute, with vote from us on startup - send vote." + ); + send_dispute_messages(ctx, &env, &vote_state).await; + } + } + } + + Ok((participation_requests, votes, SpamSlots::recover_from_state(spam_disputes), scraper)) + } +} + +/// Wait for `ActiveLeavesUpdate` on startup, returns `None` if `Conclude` signal came first. +#[overseer::contextbounds(DisputeCoordinator, prefix = self::overseer)] +async fn get_rolling_session_window( + ctx: &mut Context, + db_params: DatabaseParams, +) -> Result> { + if let Some(leaf) = { wait_for_first_leaf(ctx) }.await? 
{
+ let sender = ctx.sender().clone();
+ Ok(Some((
+ leaf.clone(),
+ RollingSessionWindow::new(sender, leaf.hash, db_params)
+ .await
+ .map_err(JfyiError::RollingSessionWindow)?,
+ )))
+ } else {
+ Ok(None)
+ }
+}
+
+/// Wait for `ActiveLeavesUpdate`, returns `None` if `Conclude` signal came first.
+#[overseer::contextbounds(DisputeCoordinator, prefix = self::overseer)]
+async fn wait_for_first_leaf<Context>(ctx: &mut Context) -> Result<Option<ActivatedLeaf>> {
+ loop {
+ match ctx.recv().await? {
+ FromOrchestra::Signal(OverseerSignal::Conclude) => return Ok(None),
+ FromOrchestra::Signal(OverseerSignal::ActiveLeaves(update)) => {
+ if let Some(activated) = update.activated {
+ return Ok(Some(activated))
+ }
+ },
+ FromOrchestra::Signal(OverseerSignal::BlockFinalized(_, _)) => {},
+ FromOrchestra::Communication { msg } =>
+ // NOTE: We could technically actually handle a couple of message types, even if
+ // not initialized (e.g. all requests that only query the database). The problem
+ // is, we would deliver potentially outdated information, especially in the event
+ // of bugs where initialization fails for a while (e.g. `SessionInfo`s are not
+ // available). So instead of telling subsystems everything is fine based on an
+ // hour-old database state, we should rather cancel contained oneshots and delay
+ // finality until we are fully functional.
+ {
+ gum::warn!(
+ target: LOG_TARGET,
+ ?msg,
+ "Received msg before first active leaves update. This is not expected - message will be dropped."
+ )
+ },
+ }
+ }
+}
+
+/// Check whether a dispute for the given candidate could be spam.
+///
+/// That is, the candidate could be made up.
+pub fn is_potential_spam<V>(
+ scraper: &ChainScraper,
+ vote_state: &CandidateVoteState<V>,
+ candidate_hash: &CandidateHash,
+) -> bool {
+ let is_disputed = vote_state.is_disputed();
+ let is_included = scraper.is_candidate_included(candidate_hash);
+ let is_backed = scraper.is_candidate_backed(candidate_hash);
+ let is_confirmed = vote_state.is_confirmed();
+
+ is_disputed && !is_included && !is_backed && !is_confirmed
+}
+
+/// Tell dispute-distribution to send all our votes.
+///
+/// Should be called on startup for all active disputes where there are votes from us already.
+#[overseer::contextbounds(DisputeCoordinator, prefix = self::overseer)]
+async fn send_dispute_messages<Context>(
+ ctx: &mut Context,
+ env: &CandidateEnvironment<'_>,
+ vote_state: &CandidateVoteState<CandidateVotes>,
+) {
+ for own_vote in vote_state.own_votes().into_iter().flatten() {
+ let (validator_index, (kind, sig)) = own_vote;
+ let public_key = if let Some(key) = env.session_info().validators.get(*validator_index) {
+ key.clone()
+ } else {
+ gum::error!(
+ target: LOG_TARGET,
+ ?validator_index,
+ session_index = ?env.session_index(),
+ "Could not find our own key in `SessionInfo`"
+ );
+ continue
+ };
+ let our_vote_signed = SignedDisputeStatement::new_checked(
+ kind.clone(),
+ vote_state.votes().candidate_receipt.hash(),
+ env.session_index(),
+ public_key,
+ sig.clone(),
+ );
+ let our_vote_signed = match our_vote_signed {
+ Ok(signed) => signed,
+ Err(()) => {
+ gum::error!(
+ target: LOG_TARGET,
+ "Checking our own signature failed - db corruption?"
+ ); + continue + }, + }; + let dispute_message = match make_dispute_message( + env.session_info(), + vote_state.votes(), + our_vote_signed, + *validator_index, + ) { + Err(err) => { + gum::debug!(target: LOG_TARGET, ?err, "Creating dispute message failed."); + continue + }, + Ok(dispute_message) => dispute_message, + }; + + ctx.send_message(DisputeDistributionMessage::SendDispute(dispute_message)).await; + } +} + +#[derive(Debug, thiserror::Error)] +pub enum DisputeMessageCreationError { + #[error("There was no opposite vote available")] + NoOppositeVote, + #[error("Found vote had an invalid validator index that could not be found")] + InvalidValidatorIndex, + #[error("Statement found in votes had invalid signature.")] + InvalidStoredStatement, + #[error(transparent)] + InvalidStatementCombination(DisputeMessageCheckError), +} + +/// Create a `DisputeMessage` to be sent to `DisputeDistribution`. +pub fn make_dispute_message( + info: &SessionInfo, + votes: &CandidateVotes, + our_vote: SignedDisputeStatement, + our_index: ValidatorIndex, +) -> std::result::Result { + let validators = &info.validators; + + let (valid_statement, valid_index, invalid_statement, invalid_index) = + if let DisputeStatement::Valid(_) = our_vote.statement() { + let (validator_index, (statement_kind, validator_signature)) = + votes.invalid.iter().next().ok_or(DisputeMessageCreationError::NoOppositeVote)?; + let other_vote = SignedDisputeStatement::new_checked( + DisputeStatement::Invalid(*statement_kind), + *our_vote.candidate_hash(), + our_vote.session_index(), + validators + .get(*validator_index) + .ok_or(DisputeMessageCreationError::InvalidValidatorIndex)? + .clone(), + validator_signature.clone(), + ) + .map_err(|()| DisputeMessageCreationError::InvalidStoredStatement)?; + (our_vote, our_index, other_vote, *validator_index) + } else { + let (validator_index, (statement_kind, validator_signature)) = votes + .valid + .raw() + .iter() + .next() + .ok_or(DisputeMessageCreationError::NoOppositeVote)?; + let other_vote = SignedDisputeStatement::new_checked( + DisputeStatement::Valid(*statement_kind), + *our_vote.candidate_hash(), + our_vote.session_index(), + validators + .get(*validator_index) + .ok_or(DisputeMessageCreationError::InvalidValidatorIndex)? + .clone(), + validator_signature.clone(), + ) + .map_err(|()| DisputeMessageCreationError::InvalidStoredStatement)?; + (other_vote, *validator_index, our_vote, our_index) + }; + + DisputeMessage::from_signed_statements( + valid_statement, + valid_index, + invalid_statement, + invalid_index, + votes.candidate_receipt.clone(), + info, + ) + .map_err(DisputeMessageCreationError::InvalidStatementCombination) +} diff --git a/polkadot/node/core/dispute-coordinator/src/metrics.rs b/polkadot/node/core/dispute-coordinator/src/metrics.rs new file mode 100644 index 00000000..977f5cc7 --- /dev/null +++ b/polkadot/node/core/dispute-coordinator/src/metrics.rs @@ -0,0 +1,237 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+ +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +use polkadot_node_subsystem_util::metrics::{self, prometheus}; + +#[derive(Clone)] +struct MetricsInner { + /// Number of opened disputes. + open: prometheus::Counter, + /// Votes of all disputes. + votes: prometheus::CounterVec, + /// Number of approval votes explicitly fetched from approval voting. + approval_votes: prometheus::Counter, + /// Conclusion across all disputes. + concluded: prometheus::CounterVec, + /// Number of participations that have been queued. + queued_participations: prometheus::CounterVec, + /// How long vote cleanup batches take. + vote_cleanup_time: prometheus::Histogram, + /// Number of refrained participations. + refrained_participations: prometheus::Counter, + /// Distribution of participation durations. + participation_durations: prometheus::Histogram, + /// Measures the duration of the full participation pipeline: From when + /// a participation request is first queued to when participation in the + /// requested dispute is complete. + participation_pipeline_durations: prometheus::Histogram, + /// Size of participation priority queue + participation_priority_queue_size: prometheus::Gauge, + /// Size of participation best effort queue + participation_best_effort_queue_size: prometheus::Gauge, +} + +/// Candidate validation metrics. +#[derive(Default, Clone)] +pub struct Metrics(Option); + +impl Metrics { + pub(crate) fn on_open(&self) { + if let Some(metrics) = &self.0 { + metrics.open.inc(); + } + } + + pub(crate) fn on_valid_votes(&self, vote_count: u32) { + if let Some(metrics) = &self.0 { + metrics.votes.with_label_values(&["valid"]).inc_by(vote_count as _); + } + } + + pub(crate) fn on_invalid_votes(&self, vote_count: u32) { + if let Some(metrics) = &self.0 { + metrics.votes.with_label_values(&["invalid"]).inc_by(vote_count as _); + } + } + + pub(crate) fn on_approval_votes(&self, vote_count: u32) { + if let Some(metrics) = &self.0 { + metrics.approval_votes.inc_by(vote_count as _); + } + } + + pub(crate) fn on_concluded_valid(&self) { + if let Some(metrics) = &self.0 { + metrics.concluded.with_label_values(&["valid"]).inc(); + } + } + + pub(crate) fn on_concluded_invalid(&self) { + if let Some(metrics) = &self.0 { + metrics.concluded.with_label_values(&["invalid"]).inc(); + } + } + + pub(crate) fn on_queued_priority_participation(&self) { + if let Some(metrics) = &self.0 { + metrics.queued_participations.with_label_values(&["priority"]).inc(); + } + } + + pub(crate) fn on_queued_best_effort_participation(&self) { + if let Some(metrics) = &self.0 { + metrics.queued_participations.with_label_values(&["best-effort"]).inc(); + } + } + + pub(crate) fn time_vote_cleanup(&self) -> Option { + self.0.as_ref().map(|metrics| metrics.vote_cleanup_time.start_timer()) + } + + pub(crate) fn on_refrained_participation(&self) { + if let Some(metrics) = &self.0 { + metrics.refrained_participations.inc(); + } + } + + /// Provide a timer for participation durations which updates on drop. + pub(crate) fn time_participation( + &self, + ) -> Option { + self.0.as_ref().map(|metrics| metrics.participation_durations.start_timer()) + } + + /// Provide a timer for participation pipeline durations which updates on drop. 
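+ ///
+ /// Usage sketch (illustrative): keep the returned timer alive for the whole
+ /// pipeline; the histogram is updated when it is dropped.
+ ///
+ /// ```ignore
+ /// let timer = metrics.time_participation_pipeline();
+ /// // ... queue, participate, import the result ...
+ /// drop(timer); // records the elapsed time
+ /// ```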
+ pub(crate) fn time_participation_pipeline( + &self, + ) -> Option { + self.0 + .as_ref() + .map(|metrics| metrics.participation_pipeline_durations.start_timer()) + } + + /// Set the priority_queue_size metric + pub fn report_priority_queue_size(&self, size: u64) { + if let Some(metrics) = &self.0 { + metrics.participation_priority_queue_size.set(size); + } + } + + /// Set the best_effort_queue_size metric + pub fn report_best_effort_queue_size(&self, size: u64) { + if let Some(metrics) = &self.0 { + metrics.participation_best_effort_queue_size.set(size); + } + } +} + +impl metrics::Metrics for Metrics { + fn try_register(registry: &prometheus::Registry) -> Result { + let metrics = MetricsInner { + open: prometheus::register( + prometheus::Counter::with_opts(prometheus::Opts::new( + "polkadot_parachain_candidate_disputes_total", + "Total number of raised disputes.", + ))?, + registry, + )?, + concluded: prometheus::register( + prometheus::CounterVec::new( + prometheus::Opts::new( + "polkadot_parachain_candidate_dispute_concluded", + "Concluded dispute votes, sorted by candidate is `valid` and `invalid`.", + ), + &["validity"], + )?, + registry, + )?, + votes: prometheus::register( + prometheus::CounterVec::new( + prometheus::Opts::new( + "polkadot_parachain_candidate_dispute_votes", + "Accumulated dispute votes, sorted by candidate is `valid` and `invalid`.", + ), + &["validity"], + )?, + registry, + )?, + approval_votes: prometheus::register( + prometheus::Counter::with_opts(prometheus::Opts::new( + "polkadot_parachain_dispute_candidate_approval_votes_fetched_total", + "Number of approval votes fetched from approval voting.", + ))?, + registry, + )?, + queued_participations: prometheus::register( + prometheus::CounterVec::new( + prometheus::Opts::new( + "polkadot_parachain_dispute_participations", + "Total number of queued participations, grouped by priority and best-effort. (Not every queueing will necessarily lead to an actual participation because of duplicates.)", + ), + &["priority"], + )?, + registry, + )?, + vote_cleanup_time: prometheus::register( + prometheus::Histogram::with_opts( + prometheus::HistogramOpts::new( + "polkadot_parachain_dispute_coordinator_vote_cleanup", + "Time spent cleaning up old votes per batch.", + ) + .buckets([0.01, 0.1, 0.5, 1.0, 2.0, 4.0, 8.0, 16.0, 32.0, 64.0].into()), + )?, + registry, + )?, + refrained_participations: prometheus::register( + prometheus::Counter::with_opts( + prometheus::Opts::new( + "polkadot_parachain_dispute_refrained_participations", + "Number of refrained participations. 
We refrain from participation if all of the following conditions are met: disputed candidate is not included, not backed and not confirmed.", + ))?, + registry, + )?, + participation_durations: prometheus::register( + prometheus::Histogram::with_opts( + prometheus::HistogramOpts::new( + "polkadot_parachain_dispute_participation_durations", + "Time spent within fn Participation::participate", + ) + )?, + registry, + )?, + participation_pipeline_durations: prometheus::register( + prometheus::Histogram::with_opts( + prometheus::HistogramOpts::new( + "polkadot_parachain_dispute_participation_pipeline_durations", + "Measures the duration of the full participation pipeline: From when a participation request is first queued to when participation in the requested dispute is complete.", + ) + )?, + registry, + )?, + participation_priority_queue_size: prometheus::register( + prometheus::Gauge::new("polkadot_parachain_dispute_participation_priority_queue_size", + "Number of disputes waiting for local participation in the priority queue.")?, + registry, + )?, + participation_best_effort_queue_size: prometheus::register( + prometheus::Gauge::new("polkadot_parachain_dispute_participation_best_effort_queue_size", + "Number of disputes waiting for local participation in the best effort queue.")?, + registry, + )?, + }; + Ok(Metrics(Some(metrics))) + } +} diff --git a/polkadot/node/core/dispute-coordinator/src/participation/mod.rs b/polkadot/node/core/dispute-coordinator/src/participation/mod.rs new file mode 100644 index 00000000..e366adc5 --- /dev/null +++ b/polkadot/node/core/dispute-coordinator/src/participation/mod.rs @@ -0,0 +1,425 @@ +// Copyright 2021 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +use std::collections::HashSet; +#[cfg(test)] +use std::time::Duration; + +use futures::{ + channel::{mpsc, oneshot}, + FutureExt, SinkExt, +}; +#[cfg(test)] +use futures_timer::Delay; + +use polkadot_node_primitives::ValidationResult; +use polkadot_node_subsystem::{ + messages::{AvailabilityRecoveryMessage, CandidateValidationMessage}, + overseer, ActiveLeavesUpdate, RecoveryError, +}; +use polkadot_node_subsystem_util::runtime::get_validation_code_by_hash; +use polkadot_primitives::{ + BlockNumber, CandidateHash, CandidateReceipt, Hash, PvfExecTimeoutKind, SessionIndex, +}; + +use crate::LOG_TARGET; + +use crate::error::{FatalError, FatalResult, Result}; + +#[cfg(test)] +mod tests; +#[cfg(test)] +pub use tests::{participation_full_happy_path, participation_missing_availability}; + +mod queues; +use queues::Queues; +pub use queues::{ParticipationPriority, ParticipationRequest, QueueError}; + +use crate::metrics::Metrics; +use polkadot_node_subsystem_util::metrics::prometheus::prometheus; + +/// How many participation processes do we want to run in parallel the most. 
+///
+/// This should be a relatively low value: while we might see some speedup once the data has
+/// been fetched, thanks to multi-core architectures, the fetching itself cannot be improved
+/// by parallel requests. Higher values therefore make it harder for a single dispute to
+/// resolve fast.
+#[cfg(not(test))]
+const MAX_PARALLEL_PARTICIPATIONS: usize = 3;
+#[cfg(test)]
+pub(crate) const MAX_PARALLEL_PARTICIPATIONS: usize = 1;
+
+/// Keep track of disputes we need to participate in.
+///
+/// - Prioritize and queue participations
+/// - Dequeue participation requests in order and launch participation worker.
+pub struct Participation {
+ /// Participations currently being processed.
+ running_participations: HashSet<CandidateHash>,
+ /// Priority and best effort queues.
+ queue: Queues,
+ /// Sender to be passed to worker tasks.
+ worker_sender: WorkerMessageSender,
+ /// Some recent block for retrieving validation code from chain.
+ recent_block: Option<(BlockNumber, Hash)>,
+ /// Metrics handle cloned from `Initialized`.
+ metrics: Metrics,
+}
+
+/// Message from worker tasks.
+#[derive(Debug)]
+pub struct WorkerMessage(ParticipationStatement);
+
+/// Sender used by worker tasks.
+pub type WorkerMessageSender = mpsc::Sender<WorkerMessage>;
+
+/// Receiver to receive messages from worker tasks.
+pub type WorkerMessageReceiver = mpsc::Receiver<WorkerMessage>;
+
+/// Statement as result of the validation process.
+#[derive(Debug)]
+pub struct ParticipationStatement {
+ /// Relevant session.
+ pub session: SessionIndex,
+ /// The candidate the worker has been spawned for.
+ pub candidate_hash: CandidateHash,
+ /// Used receipt.
+ pub candidate_receipt: CandidateReceipt,
+ /// Actual result.
+ pub outcome: ParticipationOutcome,
+}
+
+/// Outcome of the validation process.
+#[derive(Copy, Clone, Debug)]
+pub enum ParticipationOutcome {
+ /// Candidate was found to be valid.
+ Valid,
+ /// Candidate was found to be invalid.
+ Invalid,
+ /// Candidate was found to be unavailable.
+ Unavailable,
+ /// Something went wrong (bug), details can be found in the logs.
+ Error,
+}
+
+impl ParticipationOutcome {
+ /// If validation was successful, get whether the candidate was valid or invalid.
+ pub fn validity(self) -> Option<bool> {
+ match self {
+ Self::Valid => Some(true),
+ Self::Invalid => Some(false),
+ Self::Unavailable | Self::Error => None,
+ }
+ }
+}
+
+impl WorkerMessage {
+ fn from_request(req: ParticipationRequest, outcome: ParticipationOutcome) -> Self {
+ let session = req.session();
+ let (candidate_hash, candidate_receipt) = req.into_candidate_info();
+ Self(ParticipationStatement { session, candidate_hash, candidate_receipt, outcome })
+ }
+}
+
+#[overseer::contextbounds(DisputeCoordinator, prefix = self::overseer)]
+impl Participation {
+ /// Get ready for managing dispute participation requests.
+ ///
+ /// The passed in sender will be used by background workers to communicate back their results.
+ /// The calling context should make sure to call `Participation::get_participation_result()`
+ /// for the received messages.
+ pub fn new(sender: WorkerMessageSender, metrics: Metrics) -> Self {
+ Self {
+ running_participations: HashSet::new(),
+ queue: Queues::new(metrics.clone()),
+ worker_sender: sender,
+ recent_block: None,
+ metrics,
+ }
+ }
+
+ /// Queue a dispute for the node to participate in.
+ ///
+ /// If capacity is available right now and we already got some relay chain head via
+ /// `on_active_leaves_update`, the participation will be launched right away.
+ ///
+ /// Returns an error if the queues are already full.
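+ ///
+ /// Editorial sketch of a call site (illustrative only; `ctx` is assumed to be a
+ /// subsystem context and `req` a previously constructed `ParticipationRequest`):
+ /// ```ignore
+ /// participation
+ ///     .queue_participation(ctx, ParticipationPriority::BestEffort, req)
+ ///     .await?;
+ /// ```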
+ pub async fn queue_participation(
+ &mut self,
+ ctx: &mut Context,
+ priority: ParticipationPriority,
+ req: ParticipationRequest,
+ ) -> Result<()> {
+ // Participation already running - we can ignore that request:
+ if self.running_participations.contains(req.candidate_hash()) {
+ return Ok(())
+ }
+ // Available capacity - participate right away (if we already have a recent block):
+ if let Some((_, h)) = self.recent_block {
+ if self.running_participations.len() < MAX_PARALLEL_PARTICIPATIONS {
+ self.fork_participation(ctx, req, h)?;
+ return Ok(())
+ }
+ }
+ // Out of capacity/no recent block yet - queue:
+ self.queue.queue(ctx.sender(), priority, req).await
+ }
+
+ /// Message from a worker task was received - get the outcome.
+ ///
+ /// Call this function to keep participations going and to receive `ParticipationStatement`s.
+ ///
+ /// This function has to be called for each received worker message, in order to make sure
+ /// enough participation processes are running at any given time.
+ ///
+ /// Returns: The received `ParticipationStatement` or a fatal error, in case
+ /// something went wrong when dequeuing more requests (tasks could not be spawned).
+ pub async fn get_participation_result(
+ &mut self,
+ ctx: &mut Context,
+ msg: WorkerMessage,
+ ) -> FatalResult<ParticipationStatement> {
+ let WorkerMessage(statement) = msg;
+ self.running_participations.remove(&statement.candidate_hash);
+ let recent_block = self.recent_block.expect("We never ever reset recent_block to `None` and we already received a result, so it must have been set before. qed.");
+ self.dequeue_until_capacity(ctx, recent_block.1).await?;
+ Ok(statement)
+ }
+
+ /// Process active leaves update.
+ ///
+ /// Make sure we dequeue participations if that became possible and update the most recent
+ /// block.
+ pub async fn process_active_leaves_update(
+ &mut self,
+ ctx: &mut Context,
+ update: &ActiveLeavesUpdate,
+ ) -> FatalResult<()> {
+ if let Some(activated) = &update.activated {
+ match self.recent_block {
+ None => {
+ self.recent_block = Some((activated.number, activated.hash));
+ // Work got potentially unblocked:
+ self.dequeue_until_capacity(ctx, activated.hash).await?;
+ },
+ Some((number, _)) if activated.number > number => {
+ self.recent_block = Some((activated.number, activated.hash));
+ },
+ Some(_) => {},
+ }
+ }
+ Ok(())
+ }
+
+ /// Move any requests concerning the given candidates from best-effort to
+ /// priority, ignoring any candidates that don't have any queued participation requests.
+ pub async fn bump_to_priority_for_candidates(
+ &mut self,
+ ctx: &mut Context,
+ included_receipts: &Vec<CandidateReceipt>,
+ ) -> Result<()> {
+ for receipt in included_receipts {
+ self.queue.prioritize_if_present(ctx.sender(), receipt).await?;
+ }
+ Ok(())
+ }
+
+ /// Dequeue until `MAX_PARALLEL_PARTICIPATIONS` is reached.
+ async fn dequeue_until_capacity(
+ &mut self,
+ ctx: &mut Context,
+ recent_head: Hash,
+ ) -> FatalResult<()> {
+ while self.running_participations.len() < MAX_PARALLEL_PARTICIPATIONS {
+ if let Some(req) = self.queue.dequeue() {
+ self.fork_participation(ctx, req, recent_head)?;
+ } else {
+ break
+ }
+ }
+ Ok(())
+ }
+
+ /// Fork a participation task in the background.
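+ ///
+ /// Editorial note: results of forked workers are consumed via
+ /// `get_participation_result`, roughly as in this sketch (hypothetical driver
+ /// loop, not part of this patch):
+ /// ```ignore
+ /// while let Some(msg) = worker_receiver.next().await {
+ ///     let statement = participation.get_participation_result(ctx, msg).await?;
+ ///     // Act on statement.outcome ...
+ /// }
+ /// ```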
+ fn fork_participation( + &mut self, + ctx: &mut Context, + req: ParticipationRequest, + recent_head: Hash, + ) -> FatalResult<()> { + let participation_timer = self.metrics.time_participation(); + if self.running_participations.insert(*req.candidate_hash()) { + let sender = ctx.sender().clone(); + ctx.spawn( + "participation-worker", + participate( + self.worker_sender.clone(), + sender, + recent_head, + req, + participation_timer, + ) + .boxed(), + ) + .map_err(FatalError::SpawnFailed)?; + } + Ok(()) + } +} + +async fn participate( + mut result_sender: WorkerMessageSender, + mut sender: impl overseer::DisputeCoordinatorSenderTrait, + block_hash: Hash, + req: ParticipationRequest, // Sends metric data via request_timer field when dropped + _participation_timer: Option, // Sends metric data when dropped +) { + #[cfg(test)] + // Hack for tests, so we get recovery messages not too early. + Delay::new(Duration::from_millis(100)).await; + // in order to validate a candidate we need to start by recovering the + // available data + let (recover_available_data_tx, recover_available_data_rx) = oneshot::channel(); + sender + .send_message(AvailabilityRecoveryMessage::RecoverAvailableData( + req.candidate_receipt().clone(), + req.session(), + None, + recover_available_data_tx, + )) + .await; + + let available_data = match recover_available_data_rx.await { + Err(oneshot::Canceled) => { + gum::warn!( + target: LOG_TARGET, + "`Oneshot` got cancelled when recovering available data {:?}", + req.candidate_hash(), + ); + send_result(&mut result_sender, req, ParticipationOutcome::Error).await; + return + }, + Ok(Ok(data)) => data, + Ok(Err(RecoveryError::Invalid)) => { + // the available data was recovered but it is invalid, therefore we'll + // vote negatively for the candidate dispute + send_result(&mut result_sender, req, ParticipationOutcome::Invalid).await; + return + }, + Ok(Err(RecoveryError::Unavailable)) => { + send_result(&mut result_sender, req, ParticipationOutcome::Unavailable).await; + return + }, + }; + + // we also need to fetch the validation code which we can reference by its + // hash as taken from the candidate descriptor + let validation_code = match get_validation_code_by_hash( + &mut sender, + block_hash, + req.candidate_receipt().descriptor.validation_code_hash, + ) + .await + { + Ok(Some(code)) => code, + Ok(None) => { + gum::warn!( + target: LOG_TARGET, + "Validation code unavailable for code hash {:?} in the state of block {:?}", + req.candidate_receipt().descriptor.validation_code_hash, + block_hash, + ); + + send_result(&mut result_sender, req, ParticipationOutcome::Error).await; + return + }, + Err(err) => { + gum::warn!(target: LOG_TARGET, ?err, "Error when fetching validation code."); + send_result(&mut result_sender, req, ParticipationOutcome::Error).await; + return + }, + }; + + // Issue a request to validate the candidate with the provided exhaustive + // parameters + // + // We use the approval execution timeout because this is intended to + // be run outside of backing and therefore should be subject to the + // same level of leeway. 
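+ // (Editorial note: the reply received below is mapped onto `ParticipationOutcome`:
+ // a valid result leads to a `Valid` vote, an invalid result - including a
+ // commitments hash mismatch - leads to an `Invalid` vote, and execution or channel
+ // errors yield `Error` without casting a vote.)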
+ let (validation_tx, validation_rx) = oneshot::channel(); + sender + .send_message(CandidateValidationMessage::ValidateFromExhaustive( + available_data.validation_data, + validation_code, + req.candidate_receipt().clone(), + available_data.pov, + PvfExecTimeoutKind::Approval, + validation_tx, + )) + .await; + + // we cast votes (either positive or negative) depending on the outcome of + // the validation and if valid, whether the commitments hash matches + match validation_rx.await { + Err(oneshot::Canceled) => { + gum::warn!( + target: LOG_TARGET, + "`Oneshot` got cancelled when validating candidate {:?}", + req.candidate_hash(), + ); + send_result(&mut result_sender, req, ParticipationOutcome::Error).await; + return + }, + Ok(Err(err)) => { + gum::warn!( + target: LOG_TARGET, + "Candidate {:?} validation failed with: {:?}", + req.candidate_hash(), + err, + ); + + send_result(&mut result_sender, req, ParticipationOutcome::Error).await; + }, + + Ok(Ok(ValidationResult::Invalid(invalid))) => { + gum::warn!( + target: LOG_TARGET, + "Candidate {:?} considered invalid: {:?}", + req.candidate_hash(), + invalid, + ); + + send_result(&mut result_sender, req, ParticipationOutcome::Invalid).await; + }, + Ok(Ok(ValidationResult::Valid(_, _))) => { + send_result(&mut result_sender, req, ParticipationOutcome::Valid).await; + }, + } +} + +/// Helper function for sending the result back and report any error. +async fn send_result( + sender: &mut WorkerMessageSender, + req: ParticipationRequest, + outcome: ParticipationOutcome, +) { + if let Err(err) = sender.feed(WorkerMessage::from_request(req, outcome)).await { + gum::error!( + target: LOG_TARGET, + ?err, + "Sending back participation result failed. Dispute coordinator not working properly!" + ); + } +} diff --git a/polkadot/node/core/dispute-coordinator/src/participation/queues/mod.rs b/polkadot/node/core/dispute-coordinator/src/participation/queues/mod.rs new file mode 100644 index 00000000..cbfb71e2 --- /dev/null +++ b/polkadot/node/core/dispute-coordinator/src/participation/queues/mod.rs @@ -0,0 +1,415 @@ +// Copyright 2021 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +use std::{cmp::Ordering, collections::BTreeMap, sync::Arc}; + +use futures::channel::oneshot; +use polkadot_node_subsystem::{messages::ChainApiMessage, overseer}; +use polkadot_primitives::{BlockNumber, CandidateHash, CandidateReceipt, Hash, SessionIndex}; + +use crate::{ + error::{FatalError, FatalResult, Result}, + LOG_TARGET, +}; + +use crate::metrics::Metrics; +use polkadot_node_subsystem_util::metrics::prometheus::prometheus; + +#[cfg(test)] +mod tests; + +/// How many potential garbage disputes we want to queue, before starting to drop requests. +#[cfg(not(test))] +const BEST_EFFORT_QUEUE_SIZE: usize = 100; +#[cfg(test)] +const BEST_EFFORT_QUEUE_SIZE: usize = 3; + +/// How many priority disputes can be queued. 
+///
+/// Once the queue exceeds that size, we will start to drop the newest participation requests in
+/// the queue. Note that for each vote import the request will be re-added, if there is free
+/// capacity. This limit just serves as a safeguard, it is not expected to ever really be reached.
+///
+/// For 100 parachains, this would allow for every single candidate in 100 blocks on
+/// two forks to get disputed, which should be plenty to deal with any realistic attack.
+#[cfg(not(test))]
+const PRIORITY_QUEUE_SIZE: usize = 20_000;
+#[cfg(test)]
+const PRIORITY_QUEUE_SIZE: usize = 2;
+
+/// Queues for dispute participation.
+/// In both queues we have a strict ordering of candidates and participation will
+/// happen in that order. Refer to `CandidateComparator` for details on the ordering.
+pub struct Queues {
+ /// Set of best effort participation requests.
+ best_effort: BTreeMap<CandidateComparator, ParticipationRequest>,
+
+ /// Priority queue.
+ priority: BTreeMap<CandidateComparator, ParticipationRequest>,
+
+ /// Handle for recording queues data in metrics.
+ metrics: Metrics,
+}
+
+/// A dispute participation request that can be queued.
+#[derive(Debug, Clone)]
+pub struct ParticipationRequest {
+ candidate_hash: CandidateHash,
+ candidate_receipt: CandidateReceipt,
+ session: SessionIndex,
+ _request_timer: Arc<Option<prometheus::HistogramTimer>>, // Sends metric data when request is dropped
+}
+
+/// Whether a `ParticipationRequest` should be put on best-effort or the priority queue.
+#[derive(Debug)]
+pub enum ParticipationPriority {
+ BestEffort,
+ Priority,
+}
+
+impl ParticipationPriority {
+ /// Create `ParticipationPriority` with either `Priority` or `BestEffort`.
+ pub fn with_priority_if(is_priority: bool) -> Self {
+ if is_priority {
+ Self::Priority
+ } else {
+ Self::BestEffort
+ }
+ }
+
+ /// Whether or not this is a priority entry.
+ ///
+ /// If false, it is best effort.
+ pub fn is_priority(&self) -> bool {
+ match self {
+ Self::Priority => true,
+ Self::BestEffort => false,
+ }
+ }
+}
+
+/// What can go wrong when queuing a request.
+#[derive(Debug, thiserror::Error)]
+pub enum QueueError {
+ #[error("Request could not be queued, because best effort queue was already full.")]
+ BestEffortFull,
+ #[error("Request could not be queued, because priority queue was already full.")]
+ PriorityFull,
+}
+
+impl ParticipationRequest {
+ /// Create a new `ParticipationRequest` to be queued.
+ pub fn new(
+ candidate_receipt: CandidateReceipt,
+ session: SessionIndex,
+ request_timer: Arc<Option<prometheus::HistogramTimer>>,
+ ) -> Self {
+ Self {
+ candidate_hash: candidate_receipt.hash(),
+ candidate_receipt,
+ session,
+ _request_timer: request_timer,
+ }
+ }
+
+ pub fn candidate_receipt(&'_ self) -> &'_ CandidateReceipt {
+ &self.candidate_receipt
+ }
+ pub fn candidate_hash(&'_ self) -> &'_ CandidateHash {
+ &self.candidate_hash
+ }
+ pub fn session(&self) -> SessionIndex {
+ self.session
+ }
+ pub fn into_candidate_info(self) -> (CandidateHash, CandidateReceipt) {
+ let Self { candidate_hash, candidate_receipt, .. } = self;
+ (candidate_hash, candidate_receipt)
+ }
+}
+
+// We want to compare participation requests in unit tests, so we
+// only implement Eq for tests.
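+// (A derived `PartialEq` would also have to compare `_request_timer`, which has no
+// meaningful notion of equality, hence the manual, test-only implementation below.)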
+#[cfg(test)] +impl PartialEq for ParticipationRequest { + fn eq(&self, other: &Self) -> bool { + let ParticipationRequest { + candidate_receipt, + candidate_hash, + session: _session, + _request_timer, + } = self; + candidate_receipt == other.candidate_receipt() && + candidate_hash == other.candidate_hash() && + self.session == other.session() + } +} +#[cfg(test)] +impl Eq for ParticipationRequest {} + +impl Queues { + /// Create new `Queues`. + pub fn new(metrics: Metrics) -> Self { + Self { best_effort: BTreeMap::new(), priority: BTreeMap::new(), metrics } + } + + /// Will put message in queue, either priority or best effort depending on priority. + /// + /// If the message was already previously present on best effort, it will be moved to priority + /// if it is considered priority now. + /// + /// Returns error in case a queue was found full already. + pub async fn queue( + &mut self, + sender: &mut impl overseer::DisputeCoordinatorSenderTrait, + priority: ParticipationPriority, + req: ParticipationRequest, + ) -> Result<()> { + let comparator = CandidateComparator::new(sender, &req.candidate_receipt).await?; + + self.queue_with_comparator(comparator, priority, req)?; + Ok(()) + } + + /// Get the next best request for dispute participation if any. + /// First the priority queue is considered and then the best effort one. + pub fn dequeue(&mut self) -> Option { + if let Some(req) = self.pop_priority() { + self.metrics.report_priority_queue_size(self.priority.len() as u64); + return Some(req.1) + } + if let Some(req) = self.pop_best_effort() { + self.metrics.report_best_effort_queue_size(self.best_effort.len() as u64); + return Some(req.1) + } + None + } + + /// Reprioritizes any participation requests pertaining to the + /// passed candidates from best effort to priority. + pub async fn prioritize_if_present( + &mut self, + sender: &mut impl overseer::DisputeCoordinatorSenderTrait, + receipt: &CandidateReceipt, + ) -> Result<()> { + let comparator = CandidateComparator::new(sender, receipt).await?; + self.prioritize_with_comparator(comparator)?; + Ok(()) + } + + fn prioritize_with_comparator( + &mut self, + comparator: CandidateComparator, + ) -> std::result::Result<(), QueueError> { + if self.priority.len() >= PRIORITY_QUEUE_SIZE { + return Err(QueueError::PriorityFull) + } + if let Some(request) = self.best_effort.remove(&comparator) { + self.priority.insert(comparator, request); + // Report changes to both queue sizes + self.metrics.report_priority_queue_size(self.priority.len() as u64); + self.metrics.report_best_effort_queue_size(self.best_effort.len() as u64); + } + Ok(()) + } + + fn queue_with_comparator( + &mut self, + comparator: CandidateComparator, + priority: ParticipationPriority, + req: ParticipationRequest, + ) -> std::result::Result<(), QueueError> { + if priority.is_priority() { + if self.priority.len() >= PRIORITY_QUEUE_SIZE { + return Err(QueueError::PriorityFull) + } + // Remove any best effort entry: + self.best_effort.remove(&comparator); + self.priority.insert(comparator, req); + self.metrics.report_priority_queue_size(self.priority.len() as u64); + self.metrics.report_best_effort_queue_size(self.best_effort.len() as u64); + } else { + if self.priority.contains_key(&comparator) { + // The candidate is already in priority queue - don't + // add in in best effort too. 
+ return Ok(())
+ }
+ if self.best_effort.len() >= BEST_EFFORT_QUEUE_SIZE {
+ return Err(QueueError::BestEffortFull)
+ }
+ self.best_effort.insert(comparator, req);
+ self.metrics.report_best_effort_queue_size(self.best_effort.len() as u64);
+ }
+ Ok(())
+ }
+
+ /// Get best from the best effort queue.
+ fn pop_best_effort(&mut self) -> Option<(CandidateComparator, ParticipationRequest)> {
+ Self::pop_impl(&mut self.best_effort)
+ }
+
+ /// Get best priority queue entry.
+ fn pop_priority(&mut self) -> Option<(CandidateComparator, ParticipationRequest)> {
+ Self::pop_impl(&mut self.priority)
+ }
+
+ // `pop_best_effort` and `pop_priority` do the same but on different `BTreeMap`s. This function
+ // contains the extracted implementation.
+ fn pop_impl(
+ target: &mut BTreeMap<CandidateComparator, ParticipationRequest>,
+ ) -> Option<(CandidateComparator, ParticipationRequest)> {
+ // Once /~https://github.com/rust-lang/rust/issues/62924 is there, we can use a simple:
+ // target.pop_first().
+ if let Some((comparator, _)) = target.iter().next() {
+ let comparator = *comparator;
+ target
+ .remove(&comparator)
+ .map(|participation_request| (comparator, participation_request))
+ } else {
+ None
+ }
+ }
+}
+
+/// `Comparator` for ordering of disputes for candidates.
+///
+/// This `comparator` makes it possible to order disputes based on age and to ensure some fairness
+/// between chains in case of equally old disputes.
+///
+/// Objective ordering between nodes is important in case of lots of disputes, so nodes will pull
+/// in the same direction and work on resolving the same disputes first. This ensures that we will
+/// conclude some disputes, even if there are lots of them. While any objective ordering would
+/// suffice for this goal, ordering by age ensures we are not only resolving disputes, but also
+/// resolve the oldest ones first, which are also the most urgent and important ones to resolve.
+///
+/// Note that by `oldest` we mean oldest in terms of relay chain block number, for any block
+/// number that has not yet been finalized. If a block has been finalized already it should be
+/// treated as low priority when it comes to disputes, as even in the case of a negative outcome,
+/// we are already too late. The ordering mechanism here serves to prevent this from happening in
+/// the first place.
+#[derive(Copy, Clone)]
+#[cfg_attr(test, derive(Debug))]
+struct CandidateComparator {
+ /// Block number of the relay parent. It's wrapped in an `Option<>` because there are cases when
+ /// it can't be obtained. For example when the node is lagging behind and new leaves are received
+ /// with a slight delay. Candidates with unknown relay parent are treated with the lowest priority.
+ ///
+ /// The order enforced by `CandidateComparator` is important because we want to participate in
+ /// the oldest disputes first.
+ ///
+ /// Note: In theory it would make more sense to use the `BlockNumber` of the including
+ /// block, as inclusion time is the actual relevant event when it comes to ordering. The
+ /// problem is, that a candidate can get included multiple times on forks, so the `BlockNumber`
+ /// of the including block is not unique. We could theoretically work around that problem, by
+ /// just using the lowest `BlockNumber` of all available including blocks - the problem is,
+ /// that is not stable. If a new fork appears after the fact, we would start ordering the same
+ /// candidate differently, which would result in the same candidate getting queued twice.
+ relay_parent_block_number: Option<BlockNumber>,
+ /// By adding the `CandidateHash`, we can guarantee a unique ordering across candidates with the
+ /// same relay parent block number. Candidates without `relay_parent_block_number` are ordered by
+ /// the `candidate_hash` (and treated with the lowest priority, as already mentioned).
+ candidate_hash: CandidateHash,
+}
+
+impl CandidateComparator {
+ /// Create a candidate comparator based on given (fake) values.
+ ///
+ /// Useful for testing.
+ #[cfg(test)]
+ pub fn new_dummy(block_number: Option<BlockNumber>, candidate_hash: CandidateHash) -> Self {
+ Self { relay_parent_block_number: block_number, candidate_hash }
+ }
+
+ /// Create a candidate comparator for a given candidate.
+ ///
+ /// Returns:
+ /// - `Ok(CandidateComparator{Some(relay_parent_block_number), candidate_hash})` when the
+ /// relay parent can be obtained. This is the happy case.
+ /// - `Ok(CandidateComparator{None, candidate_hash})` in case the candidate's relay parent
+ /// can't be obtained.
+ /// - `FatalError` in case the chain API call fails with an unexpected error.
+ pub async fn new(
+ sender: &mut impl overseer::DisputeCoordinatorSenderTrait,
+ candidate: &CandidateReceipt,
+ ) -> FatalResult<Self> {
+ let candidate_hash = candidate.hash();
+ let n = get_block_number(sender, candidate.descriptor().relay_parent).await?;
+
+ if n.is_none() {
+ gum::warn!(
+ target: LOG_TARGET,
+ candidate_hash = ?candidate_hash,
+ "Candidate's relay_parent could not be found via chain API - `CandidateComparator` \
+ with an empty relay parent block number will be provided!"
+ );
+ }
+
+ Ok(CandidateComparator { relay_parent_block_number: n, candidate_hash })
+ }
+}
+
+impl PartialEq for CandidateComparator {
+ fn eq(&self, other: &CandidateComparator) -> bool {
+ Ordering::Equal == self.cmp(other)
+ }
+}
+
+impl Eq for CandidateComparator {}
+
+impl PartialOrd for CandidateComparator {
+ fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+ Some(self.cmp(other))
+ }
+}
+
+impl Ord for CandidateComparator {
+ fn cmp(&self, other: &Self) -> Ordering {
+ match (self.relay_parent_block_number, other.relay_parent_block_number) {
+ (None, None) => {
+ // No relay parents for both -> compare hashes
+ self.candidate_hash.cmp(&other.candidate_hash)
+ },
+ (Some(self_relay_parent_block_num), Some(other_relay_parent_block_num)) => {
+ match self_relay_parent_block_num.cmp(&other_relay_parent_block_num) {
+ // if the relay parent is the same for both -> compare hashes
+ Ordering::Equal => self.candidate_hash.cmp(&other.candidate_hash),
+ // if not - return the result from comparing the relay parent block numbers
+ o => o,
+ }
+ },
+ (Some(_), None) => {
+ // Candidates with known relay parents always have priority
+ Ordering::Less
+ },
+ (None, Some(_)) => {
+ // Ditto
+ Ordering::Greater
+ },
+ }
+ }
+}
+
+async fn get_block_number(
+ sender: &mut impl overseer::DisputeCoordinatorSenderTrait,
+ relay_parent: Hash,
+) -> FatalResult<Option<BlockNumber>> {
+ let (tx, rx) = oneshot::channel();
+ sender.send_message(ChainApiMessage::BlockNumber(relay_parent, tx)).await;
+ rx.await
+ .map_err(|_| FatalError::ChainApiSenderDropped)?
+ .map_err(FatalError::ChainApiAncestors) +} diff --git a/polkadot/node/core/dispute-coordinator/src/participation/queues/tests.rs b/polkadot/node/core/dispute-coordinator/src/participation/queues/tests.rs new file mode 100644 index 00000000..63df0d0a --- /dev/null +++ b/polkadot/node/core/dispute-coordinator/src/participation/queues/tests.rs @@ -0,0 +1,202 @@ +// Copyright 2021 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +use crate::{metrics::Metrics, ParticipationPriority}; +use ::test_helpers::{dummy_candidate_receipt, dummy_hash}; +use assert_matches::assert_matches; +use polkadot_primitives::{BlockNumber, Hash}; +use std::sync::Arc; + +use super::{CandidateComparator, ParticipationRequest, QueueError, Queues}; + +/// Make a `ParticipationRequest` based on the given commitments hash. +fn make_participation_request(hash: Hash) -> ParticipationRequest { + let mut receipt = dummy_candidate_receipt(dummy_hash()); + // make it differ: + receipt.commitments_hash = hash; + let request_timer = Arc::new(Metrics::default().time_participation_pipeline()); + ParticipationRequest::new(receipt, 1, request_timer) +} + +/// Make dummy comparator for request, based on the given block number. +fn make_dummy_comparator( + req: &ParticipationRequest, + relay_parent: Option, +) -> CandidateComparator { + CandidateComparator::new_dummy(relay_parent, *req.candidate_hash()) +} + +/// Check that dequeuing acknowledges order. +/// +/// Any priority item will be dequeued before any best effort items, priority and best effort with +/// known parent block number items will be processed in order. Best effort items without known parent +/// block number should be treated with lowest priority. 
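+///
+/// Editorial sketch of the comparator rule exercised below (illustrative hashes
+/// `h_a`, `h_b`, `h_c`):
+/// ```ignore
+/// // Known relay parents order first, by ascending block number; unknown (`None`) sort last.
+/// assert!(CandidateComparator::new_dummy(Some(1), h_a) < CandidateComparator::new_dummy(Some(2), h_b));
+/// assert!(CandidateComparator::new_dummy(Some(2), h_b) < CandidateComparator::new_dummy(None, h_c));
+/// ```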
+#[test] +fn ordering_works_as_expected() { + let metrics = Metrics::default(); + let mut queue = Queues::new(metrics.clone()); + let req1 = make_participation_request(Hash::repeat_byte(0x01)); + let req_prio = make_participation_request(Hash::repeat_byte(0x02)); + let req3 = make_participation_request(Hash::repeat_byte(0x03)); + let req_prio_2 = make_participation_request(Hash::repeat_byte(0x04)); + let req5_unknown_parent = make_participation_request(Hash::repeat_byte(0x05)); + let req_full = make_participation_request(Hash::repeat_byte(0x06)); + let req_prio_full = make_participation_request(Hash::repeat_byte(0x07)); + queue + .queue_with_comparator( + make_dummy_comparator(&req1, Some(1)), + ParticipationPriority::BestEffort, + req1.clone(), + ) + .unwrap(); + queue + .queue_with_comparator( + make_dummy_comparator(&req_prio, Some(1)), + ParticipationPriority::Priority, + req_prio.clone(), + ) + .unwrap(); + queue + .queue_with_comparator( + make_dummy_comparator(&req3, Some(2)), + ParticipationPriority::BestEffort, + req3.clone(), + ) + .unwrap(); + queue + .queue_with_comparator( + make_dummy_comparator(&req_prio_2, Some(2)), + ParticipationPriority::Priority, + req_prio_2.clone(), + ) + .unwrap(); + queue + .queue_with_comparator( + make_dummy_comparator(&req5_unknown_parent, None), + ParticipationPriority::BestEffort, + req5_unknown_parent.clone(), + ) + .unwrap(); + assert_matches!( + queue.queue_with_comparator( + make_dummy_comparator(&req_prio_full, Some(3)), + ParticipationPriority::Priority, + req_prio_full, + ), + Err(QueueError::PriorityFull) + ); + assert_matches!( + queue.queue_with_comparator( + make_dummy_comparator(&req_full, Some(3)), + ParticipationPriority::BestEffort, + req_full, + ), + Err(QueueError::BestEffortFull) + ); + + // Prioritized queue is ordered correctly + assert_eq!(queue.dequeue(), Some(req_prio)); + assert_eq!(queue.dequeue(), Some(req_prio_2)); + // So is the best-effort + assert_eq!(queue.dequeue(), Some(req1)); + assert_eq!(queue.dequeue(), Some(req3)); + assert_eq!(queue.dequeue(), Some(req5_unknown_parent)); + + assert_matches!(queue.dequeue(), None); +} + +/// No matter how often a candidate gets queued, it should only ever get dequeued once. 
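+///
+/// Editorial sketch of the deduplication property (`comparator` and `req` are
+/// illustrative): re-queuing with priority moves the single stored entry from the
+/// best-effort map into the priority map instead of duplicating it:
+/// ```ignore
+/// queue.queue_with_comparator(comparator, ParticipationPriority::BestEffort, req.clone())?;
+/// queue.queue_with_comparator(comparator, ParticipationPriority::Priority, req.clone())?;
+/// assert_eq!(queue.dequeue(), Some(req));
+/// assert_eq!(queue.dequeue(), None);
+/// ```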
+#[test] +fn candidate_is_only_dequeued_once() { + let metrics = Metrics::default(); + let mut queue = Queues::new(metrics.clone()); + let req1 = make_participation_request(Hash::repeat_byte(0x01)); + let req_prio = make_participation_request(Hash::repeat_byte(0x02)); + let req_best_effort_then_prio = make_participation_request(Hash::repeat_byte(0x03)); + let req_prio_then_best_effort = make_participation_request(Hash::repeat_byte(0x04)); + + queue + .queue_with_comparator( + make_dummy_comparator(&req1, None), + ParticipationPriority::BestEffort, + req1.clone(), + ) + .unwrap(); + queue + .queue_with_comparator( + make_dummy_comparator(&req_prio, Some(1)), + ParticipationPriority::Priority, + req_prio.clone(), + ) + .unwrap(); + // Insert same best effort again: + queue + .queue_with_comparator( + make_dummy_comparator(&req1, None), + ParticipationPriority::BestEffort, + req1.clone(), + ) + .unwrap(); + // insert same prio again: + queue + .queue_with_comparator( + make_dummy_comparator(&req_prio, Some(1)), + ParticipationPriority::Priority, + req_prio.clone(), + ) + .unwrap(); + // Insert first as best effort: + queue + .queue_with_comparator( + make_dummy_comparator(&req_best_effort_then_prio, Some(2)), + ParticipationPriority::BestEffort, + req_best_effort_then_prio.clone(), + ) + .unwrap(); + // Then as prio: + queue + .queue_with_comparator( + make_dummy_comparator(&req_best_effort_then_prio, Some(2)), + ParticipationPriority::Priority, + req_best_effort_then_prio.clone(), + ) + .unwrap(); + + // Make space in prio: + assert_eq!(queue.dequeue(), Some(req_prio)); + + // Insert first as prio: + queue + .queue_with_comparator( + make_dummy_comparator(&req_prio_then_best_effort, Some(3)), + ParticipationPriority::Priority, + req_prio_then_best_effort.clone(), + ) + .unwrap(); + // Then as best effort: + queue + .queue_with_comparator( + make_dummy_comparator(&req_prio_then_best_effort, Some(3)), + ParticipationPriority::BestEffort, + req_prio_then_best_effort.clone(), + ) + .unwrap(); + + assert_eq!(queue.dequeue(), Some(req_best_effort_then_prio)); + assert_eq!(queue.dequeue(), Some(req_prio_then_best_effort)); + assert_eq!(queue.dequeue(), Some(req1)); + assert_matches!(queue.dequeue(), None); +} diff --git a/polkadot/node/core/dispute-coordinator/src/participation/tests.rs b/polkadot/node/core/dispute-coordinator/src/participation/tests.rs new file mode 100644 index 00000000..a6e5f866 --- /dev/null +++ b/polkadot/node/core/dispute-coordinator/src/participation/tests.rs @@ -0,0 +1,547 @@ +// Copyright 2021 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . 
+ +use assert_matches::assert_matches; +use futures::StreamExt; +use polkadot_node_subsystem_util::TimeoutExt; +use std::{sync::Arc, time::Duration}; + +use sp_core::testing::TaskExecutor; + +use super::*; +use ::test_helpers::{ + dummy_candidate_commitments, dummy_candidate_receipt_bad_sig, dummy_digest, dummy_hash, +}; +use parity_scale_codec::Encode; +use polkadot_node_primitives::{AvailableData, BlockData, InvalidCandidate, PoV}; +use polkadot_node_subsystem::{ + jaeger, + messages::{ + AllMessages, ChainApiMessage, DisputeCoordinatorMessage, RuntimeApiMessage, + RuntimeApiRequest, + }, + ActivatedLeaf, ActiveLeavesUpdate, LeafStatus, SpawnGlue, +}; +use polkadot_node_subsystem_test_helpers::{ + make_subsystem_context, TestSubsystemContext, TestSubsystemContextHandle, +}; +use polkadot_primitives::{ + BlakeTwo256, CandidateCommitments, HashT, Header, PersistedValidationData, ValidationCode, +}; + +type VirtualOverseer = TestSubsystemContextHandle; + +pub fn make_our_subsystem_context( + spawner: S, +) -> ( + TestSubsystemContext>, + TestSubsystemContextHandle, +) { + make_subsystem_context(spawner) +} + +#[overseer::contextbounds(DisputeCoordinator, prefix = self::overseer)] +async fn participate(ctx: &mut Context, participation: &mut Participation) -> Result<()> { + let commitments = CandidateCommitments::default(); + participate_with_commitments_hash(ctx, participation, commitments.hash()).await +} + +#[overseer::contextbounds(DisputeCoordinator, prefix = self::overseer)] +async fn participate_with_commitments_hash( + ctx: &mut Context, + participation: &mut Participation, + commitments_hash: Hash, +) -> Result<()> { + let candidate_receipt = { + let mut receipt = dummy_candidate_receipt_bad_sig(dummy_hash(), dummy_hash()); + receipt.commitments_hash = commitments_hash; + receipt + }; + let session = 1; + + let request_timer = Arc::new(participation.metrics.time_participation_pipeline()); + let req = ParticipationRequest::new(candidate_receipt, session, request_timer); + + participation + .queue_participation(ctx, ParticipationPriority::BestEffort, req) + .await +} + +#[overseer::contextbounds(DisputeCoordinator, prefix = self::overseer)] +async fn activate_leaf( + ctx: &mut Context, + participation: &mut Participation, + block_number: BlockNumber, +) -> FatalResult<()> { + let block_header = Header { + parent_hash: BlakeTwo256::hash(&block_number.encode()), + number: block_number, + digest: dummy_digest(), + state_root: dummy_hash(), + extrinsics_root: dummy_hash(), + }; + + let block_hash = block_header.hash(); + + participation + .process_active_leaves_update( + ctx, + &ActiveLeavesUpdate::start_work(ActivatedLeaf { + hash: block_hash, + span: Arc::new(jaeger::Span::Disabled), + number: block_number, + status: LeafStatus::Fresh, + }), + ) + .await +} + +/// Full participation happy path as seen via the overseer. 
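+///
+/// Editorial note: this drives the overseer side of all three participation steps in
+/// order - availability recovery, validation-code fetch, candidate validation - and is
+/// typically paired with a queued request, e.g. (sketch using names from these tests):
+/// ```ignore
+/// participate(&mut ctx, &mut participation).await?;
+/// participation_full_happy_path(&mut ctx_handle, expected_commitments_hash).await;
+/// ```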
+pub async fn participation_full_happy_path( + ctx_handle: &mut VirtualOverseer, + expected_commitments_hash: Hash, +) { + recover_available_data(ctx_handle).await; + fetch_validation_code(ctx_handle).await; + + assert_matches!( + ctx_handle.recv().await, + AllMessages::CandidateValidation( + CandidateValidationMessage::ValidateFromExhaustive(_, _, candidate_receipt, _, timeout, tx) + ) if timeout == PvfExecTimeoutKind::Approval => { + if expected_commitments_hash != candidate_receipt.commitments_hash { + tx.send(Ok(ValidationResult::Invalid(InvalidCandidate::CommitmentsHashMismatch))).unwrap(); + } else { + tx.send(Ok(ValidationResult::Valid(dummy_candidate_commitments(None), PersistedValidationData::default()))).unwrap(); + } + }, + "overseer did not receive candidate validation message", + ); +} + +/// Full participation with failing availability recovery. +pub async fn participation_missing_availability(ctx_handle: &mut VirtualOverseer) { + assert_matches!( + ctx_handle.recv().await, + AllMessages::AvailabilityRecovery( + AvailabilityRecoveryMessage::RecoverAvailableData(_, _, _, tx) + ) => { + tx.send(Err(RecoveryError::Unavailable)).unwrap(); + }, + "overseer did not receive recover available data message", + ); +} + +async fn recover_available_data(virtual_overseer: &mut VirtualOverseer) { + let pov_block = PoV { block_data: BlockData(Vec::new()) }; + + let available_data = AvailableData { + pov: Arc::new(pov_block), + validation_data: PersistedValidationData::default(), + }; + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::AvailabilityRecovery( + AvailabilityRecoveryMessage::RecoverAvailableData(_, _, _, tx) + ) => { + tx.send(Ok(available_data)).unwrap(); + }, + "overseer did not receive recover available data message", + ); +} + +/// Handles validation code fetch, returns the received relay parent hash. 
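+///
+/// Editorial note: callers typically assert that the returned hash matches the
+/// participation's recent block, e.g. (sketch taken from the tests below):
+/// ```ignore
+/// assert_eq!(
+///     fetch_validation_code(&mut ctx_handle).await,
+///     participation.recent_block.unwrap().1
+/// );
+/// ```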
+async fn fetch_validation_code(virtual_overseer: &mut VirtualOverseer) -> Hash { + let validation_code = ValidationCode(Vec::new()); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + hash, + RuntimeApiRequest::ValidationCodeByHash( + _, + tx, + ) + )) => { + tx.send(Ok(Some(validation_code))).unwrap(); + hash + }, + "overseer did not receive runtime API request for validation code", + ) +} + +#[test] +fn same_req_wont_get_queued_if_participation_is_already_running() { + futures::executor::block_on(async { + let (mut ctx, mut ctx_handle) = make_our_subsystem_context(TaskExecutor::new()); + + let (sender, mut worker_receiver) = mpsc::channel(1); + let mut participation = Participation::new(sender, Metrics::default()); + activate_leaf(&mut ctx, &mut participation, 10).await.unwrap(); + participate(&mut ctx, &mut participation).await.unwrap(); + for _ in 0..MAX_PARALLEL_PARTICIPATIONS { + participate(&mut ctx, &mut participation).await.unwrap(); + } + + assert_matches!( + ctx_handle.recv().await, + AllMessages::AvailabilityRecovery( + AvailabilityRecoveryMessage::RecoverAvailableData(_, _, _, tx) + ) => { + tx.send(Err(RecoveryError::Unavailable)).unwrap(); + }, + "overseer did not receive recover available data message", + ); + + let result = participation + .get_participation_result(&mut ctx, worker_receiver.next().await.unwrap()) + .await + .unwrap(); + + assert_matches!( + result.outcome, + ParticipationOutcome::Unavailable => {} + ); + + // we should not have any further results nor recovery requests: + assert_matches!(ctx_handle.recv().timeout(Duration::from_millis(10)).await, None); + assert_matches!(worker_receiver.next().timeout(Duration::from_millis(10)).await, None); + }) +} + +#[test] +fn reqs_get_queued_when_out_of_capacity() { + let (mut ctx, mut ctx_handle) = make_our_subsystem_context(TaskExecutor::new()); + + let test = async { + let (sender, mut worker_receiver) = mpsc::channel(1); + let mut participation = Participation::new(sender, Metrics::default()); + activate_leaf(&mut ctx, &mut participation, 10).await.unwrap(); + participate(&mut ctx, &mut participation).await.unwrap(); + for i in 0..MAX_PARALLEL_PARTICIPATIONS { + participate_with_commitments_hash( + &mut ctx, + &mut participation, + Hash::repeat_byte(i as _), + ) + .await + .unwrap(); + } + + for _ in 0..MAX_PARALLEL_PARTICIPATIONS + 1 { + let result = participation + .get_participation_result(&mut ctx, worker_receiver.next().await.unwrap()) + .await + .unwrap(); + assert_matches!( + result.outcome, + ParticipationOutcome::Unavailable => {} + ); + } + // we should not have any further recovery requests: + assert_matches!(worker_receiver.next().timeout(Duration::from_millis(10)).await, None); + }; + + let request_handler = async { + let mut recover_available_data_msg_count = 0; + let mut block_number_msg_count = 0; + + while recover_available_data_msg_count < MAX_PARALLEL_PARTICIPATIONS + 1 || + block_number_msg_count < 1 + { + match ctx_handle.recv().await { + AllMessages::AvailabilityRecovery( + AvailabilityRecoveryMessage::RecoverAvailableData(_, _, _, tx), + ) => { + tx.send(Err(RecoveryError::Unavailable)).unwrap(); + recover_available_data_msg_count += 1; + }, + AllMessages::ChainApi(ChainApiMessage::BlockNumber(_, tx)) => { + tx.send(Ok(None)).unwrap(); + block_number_msg_count += 1; + }, + _ => assert!(false, "Received unexpected message"), + } + } + + // we should not have any further results + 
assert_matches!(ctx_handle.recv().timeout(Duration::from_millis(10)).await, None); + }; + + futures::executor::block_on(async { + futures::join!(test, request_handler); + }); +} + +#[test] +fn reqs_get_queued_on_no_recent_block() { + let (mut ctx, mut ctx_handle) = make_our_subsystem_context(TaskExecutor::new()); + let (mut unblock_test, mut wait_for_verification) = mpsc::channel(0); + let test = async { + let (sender, _worker_receiver) = mpsc::channel(1); + let mut participation = Participation::new(sender, Metrics::default()); + participate(&mut ctx, &mut participation).await.unwrap(); + + // We have initiated participation but we'll block `active_leaf` so that we can check that + // the participation is queued in race-free way + let _ = wait_for_verification.next().await.unwrap(); + + activate_leaf(&mut ctx, &mut participation, 10).await.unwrap(); + }; + + // Responds to messages from the test and verifies its behaviour + let request_handler = async { + // If we receive `BlockNumber` request this implicitly proves that the participation is queued + assert_matches!( + ctx_handle.recv().await, + AllMessages::ChainApi(ChainApiMessage::BlockNumber(_, tx)) => { + tx.send(Ok(None)).unwrap(); + }, + "overseer did not receive `ChainApiMessage::BlockNumber` message", + ); + + assert!(ctx_handle.recv().timeout(Duration::from_millis(10)).await.is_none()); + + // No activity so the participation is queued => unblock the test + unblock_test.send(()).await.unwrap(); + + // after activating at least one leaf the recent block + // state should be available which should lead to trying + // to participate by first trying to recover the available + // data + assert_matches!( + ctx_handle.recv().await, + AllMessages::AvailabilityRecovery(AvailabilityRecoveryMessage::RecoverAvailableData( + .. 
+ )), + "overseer did not receive recover available data message", + ); + }; + + futures::executor::block_on(async { + futures::join!(test, request_handler); + }); +} + +#[test] +fn cannot_participate_if_cannot_recover_available_data() { + futures::executor::block_on(async { + let (mut ctx, mut ctx_handle) = make_our_subsystem_context(TaskExecutor::new()); + + let (sender, mut worker_receiver) = mpsc::channel(1); + let mut participation = Participation::new(sender, Metrics::default()); + activate_leaf(&mut ctx, &mut participation, 10).await.unwrap(); + participate(&mut ctx, &mut participation).await.unwrap(); + + assert_matches!( + ctx_handle.recv().await, + AllMessages::AvailabilityRecovery( + AvailabilityRecoveryMessage::RecoverAvailableData(_, _, _, tx) + ) => { + tx.send(Err(RecoveryError::Unavailable)).unwrap(); + }, + "overseer did not receive recover available data message", + ); + let result = participation + .get_participation_result(&mut ctx, worker_receiver.next().await.unwrap()) + .await + .unwrap(); + assert_matches!( + result.outcome, + ParticipationOutcome::Unavailable => {} + ); + }) +} + +#[test] +fn cannot_participate_if_cannot_recover_validation_code() { + futures::executor::block_on(async { + let (mut ctx, mut ctx_handle) = make_our_subsystem_context(TaskExecutor::new()); + + let (sender, mut worker_receiver) = mpsc::channel(1); + let mut participation = Participation::new(sender, Metrics::default()); + activate_leaf(&mut ctx, &mut participation, 10).await.unwrap(); + participate(&mut ctx, &mut participation).await.unwrap(); + + recover_available_data(&mut ctx_handle).await; + + assert_matches!( + ctx_handle.recv().await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _, + RuntimeApiRequest::ValidationCodeByHash( + _, + tx, + ) + )) => { + tx.send(Ok(None)).unwrap(); + }, + "overseer did not receive runtime API request for validation code", + ); + + let result = participation + .get_participation_result(&mut ctx, worker_receiver.next().await.unwrap()) + .await + .unwrap(); + assert_matches!( + result.outcome, + ParticipationOutcome::Error => {} + ); + }) +} + +#[test] +fn cast_invalid_vote_if_available_data_is_invalid() { + futures::executor::block_on(async { + let (mut ctx, mut ctx_handle) = make_our_subsystem_context(TaskExecutor::new()); + + let (sender, mut worker_receiver) = mpsc::channel(1); + let mut participation = Participation::new(sender, Metrics::default()); + activate_leaf(&mut ctx, &mut participation, 10).await.unwrap(); + participate(&mut ctx, &mut participation).await.unwrap(); + + assert_matches!( + ctx_handle.recv().await, + AllMessages::AvailabilityRecovery( + AvailabilityRecoveryMessage::RecoverAvailableData(_, _, _, tx) + ) => { + tx.send(Err(RecoveryError::Invalid)).unwrap(); + }, + "overseer did not receive recover available data message", + ); + + let result = participation + .get_participation_result(&mut ctx, worker_receiver.next().await.unwrap()) + .await + .unwrap(); + assert_matches!( + result.outcome, + ParticipationOutcome::Invalid => {} + ); + }) +} + +#[test] +fn cast_invalid_vote_if_validation_fails_or_is_invalid() { + futures::executor::block_on(async { + let (mut ctx, mut ctx_handle) = make_our_subsystem_context(TaskExecutor::new()); + + let (sender, mut worker_receiver) = mpsc::channel(1); + let mut participation = Participation::new(sender, Metrics::default()); + activate_leaf(&mut ctx, &mut participation, 10).await.unwrap(); + participate(&mut ctx, &mut participation).await.unwrap(); + + recover_available_data(&mut 
ctx_handle).await; + assert_eq!( + fetch_validation_code(&mut ctx_handle).await, + participation.recent_block.unwrap().1 + ); + + assert_matches!( + ctx_handle.recv().await, + AllMessages::CandidateValidation( + CandidateValidationMessage::ValidateFromExhaustive(_, _, _, _, timeout, tx) + ) if timeout == PvfExecTimeoutKind::Approval => { + tx.send(Ok(ValidationResult::Invalid(InvalidCandidate::Timeout))).unwrap(); + }, + "overseer did not receive candidate validation message", + ); + + let result = participation + .get_participation_result(&mut ctx, worker_receiver.next().await.unwrap()) + .await + .unwrap(); + assert_matches!( + result.outcome, + ParticipationOutcome::Invalid => {} + ); + }) +} + +#[test] +fn cast_invalid_vote_if_commitments_dont_match() { + futures::executor::block_on(async { + let (mut ctx, mut ctx_handle) = make_our_subsystem_context(TaskExecutor::new()); + + let (sender, mut worker_receiver) = mpsc::channel(1); + let mut participation = Participation::new(sender, Metrics::default()); + activate_leaf(&mut ctx, &mut participation, 10).await.unwrap(); + participate(&mut ctx, &mut participation).await.unwrap(); + + recover_available_data(&mut ctx_handle).await; + assert_eq!( + fetch_validation_code(&mut ctx_handle).await, + participation.recent_block.unwrap().1 + ); + + assert_matches!( + ctx_handle.recv().await, + AllMessages::CandidateValidation( + CandidateValidationMessage::ValidateFromExhaustive(_, _, _, _, timeout, tx) + ) if timeout == PvfExecTimeoutKind::Approval => { + tx.send(Ok(ValidationResult::Invalid(InvalidCandidate::CommitmentsHashMismatch))).unwrap(); + }, + "overseer did not receive candidate validation message", + ); + + let result = participation + .get_participation_result(&mut ctx, worker_receiver.next().await.unwrap()) + .await + .unwrap(); + assert_matches!( + result.outcome, + ParticipationOutcome::Invalid => {} + ); + }) +} + +#[test] +fn cast_valid_vote_if_validation_passes() { + futures::executor::block_on(async { + let (mut ctx, mut ctx_handle) = make_our_subsystem_context(TaskExecutor::new()); + + let (sender, mut worker_receiver) = mpsc::channel(1); + let mut participation = Participation::new(sender, Metrics::default()); + activate_leaf(&mut ctx, &mut participation, 10).await.unwrap(); + participate(&mut ctx, &mut participation).await.unwrap(); + + recover_available_data(&mut ctx_handle).await; + assert_eq!( + fetch_validation_code(&mut ctx_handle).await, + participation.recent_block.unwrap().1 + ); + + assert_matches!( + ctx_handle.recv().await, + AllMessages::CandidateValidation( + CandidateValidationMessage::ValidateFromExhaustive(_, _, _, _, timeout, tx) + ) if timeout == PvfExecTimeoutKind::Approval => { + tx.send(Ok(ValidationResult::Valid(dummy_candidate_commitments(None), PersistedValidationData::default()))).unwrap(); + }, + "overseer did not receive candidate validation message", + ); + + let result = participation + .get_participation_result(&mut ctx, worker_receiver.next().await.unwrap()) + .await + .unwrap(); + assert_matches!( + result.outcome, + ParticipationOutcome::Valid => {} + ); + }) +} diff --git a/polkadot/node/core/dispute-coordinator/src/scraping/candidates.rs b/polkadot/node/core/dispute-coordinator/src/scraping/candidates.rs new file mode 100644 index 00000000..89323907 --- /dev/null +++ b/polkadot/node/core/dispute-coordinator/src/scraping/candidates.rs @@ -0,0 +1,167 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. 
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
+
+use polkadot_primitives::{BlockNumber, CandidateHash};
+use std::collections::{BTreeMap, HashMap, HashSet};
+
+/// Keeps `CandidateHash` in a reference counted way.
+/// Each `insert` saves a value with `reference count == 1` or increases the reference
+/// count if the value already exists.
+/// Each `remove` decreases the reference count for the corresponding `CandidateHash`.
+/// If the reference count reaches 0 - the value is removed.
+struct RefCountedCandidates {
+ candidates: HashMap<CandidateHash, usize>,
+}
+
+impl RefCountedCandidates {
+ pub fn new() -> Self {
+ Self { candidates: HashMap::new() }
+ }
+ // If `CandidateHash` doesn't exist in the `HashMap` it is created and its reference
+ // count is set to 1.
+ // If `CandidateHash` already exists in the `HashMap` its reference count is increased.
+ pub fn insert(&mut self, candidate: CandidateHash) {
+ *self.candidates.entry(candidate).or_default() += 1;
+ }
+
+ // If a `CandidateHash` with reference count equal to 1 is about to be removed - the
+ // candidate is dropped from the container too.
+ // If a `CandidateHash` with reference count bigger than 1 is about to be removed - the
+ // reference count is decreased and the candidate remains in the container.
+ pub fn remove(&mut self, candidate: &CandidateHash) {
+ match self.candidates.get_mut(candidate) {
+ Some(v) if *v > 1 => *v -= 1,
+ Some(v) => {
+ assert!(*v == 1);
+ self.candidates.remove(candidate);
+ },
+ None => {},
+ }
+ }
+
+ pub fn contains(&self, candidate: &CandidateHash) -> bool {
+ self.candidates.contains_key(&candidate)
+ }
+}
+
+#[cfg(test)]
+mod ref_counted_candidates_tests {
+ use super::*;
+ use polkadot_primitives::{BlakeTwo256, HashT};
+
+ #[test]
+ fn element_is_removed_when_refcount_reaches_zero() {
+ let mut container = RefCountedCandidates::new();
+
+ let zero = CandidateHash(BlakeTwo256::hash(&vec![0]));
+ let one = CandidateHash(BlakeTwo256::hash(&vec![1]));
+ // add two separate candidates
+ container.insert(zero); // refcount == 1
+ container.insert(one);
+
+ // and increase the reference count for the first
+ container.insert(zero); // refcount == 2
+
+ assert!(container.contains(&zero));
+ assert!(container.contains(&one));
+
+ // remove once -> refcount == 1
+ container.remove(&zero);
+ assert!(container.contains(&zero));
+ assert!(container.contains(&one));
+
+ // remove once -> refcount == 0
+ container.remove(&zero);
+ assert!(!container.contains(&zero));
+ assert!(container.contains(&one));
+
+ // remove the other element
+ container.remove(&one);
+ assert!(!container.contains(&zero));
+ assert!(!container.contains(&one));
+ }
+}
+
+/// Keeps track of scraped candidates. Supports `insert`, `remove_up_to_height` and `contains`
+/// operations.
+pub struct ScrapedCandidates {
+ /// Main data structure which keeps the candidates we know about. `contains` does lookups only here.
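+ /// (Reference counted, because the same candidate can get included on multiple forks
+ /// and is then scraped more than once.)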
+ candidates: RefCountedCandidates, + /// Keeps track of the block number at which a candidate was inserted. Used in `remove_up_to_height`. + /// Without this tracking we wouldn't be able to remove all candidates before a given block. + candidates_by_block_number: BTreeMap<BlockNumber, HashSet<CandidateHash>>, +} + +impl ScrapedCandidates { + pub fn new() -> Self { + Self { + candidates: RefCountedCandidates::new(), + candidates_by_block_number: BTreeMap::new(), + } + } + + pub fn contains(&self, candidate_hash: &CandidateHash) -> bool { + self.candidates.contains(candidate_hash) + } + + // Removes all candidates up to a given height. The candidates at the block height are NOT removed. + pub fn remove_up_to_height(&mut self, height: &BlockNumber) -> HashSet<CandidateHash> { + let mut candidates_modified: HashSet<CandidateHash> = HashSet::new(); + let not_stale = self.candidates_by_block_number.split_off(&height); + let stale = std::mem::take(&mut self.candidates_by_block_number); + self.candidates_by_block_number = not_stale; + for candidates in stale.values() { + for c in candidates { + self.candidates.remove(c); + candidates_modified.insert(*c); + } + } + candidates_modified + } + + pub fn insert(&mut self, block_number: BlockNumber, candidate_hash: CandidateHash) { + self.candidates.insert(candidate_hash); + self.candidates_by_block_number + .entry(block_number) + .or_default() + .insert(candidate_hash); + } + + // Used only in tests to verify that the pruning doesn't leak data. + #[cfg(test)] + pub fn candidates_by_block_number_is_empty(&self) -> bool { + self.candidates_by_block_number.is_empty() + } +} + +#[cfg(test)] +mod scraped_candidates_tests { + use super::*; + use polkadot_primitives::{BlakeTwo256, HashT}; + + #[test] + fn stale_candidates_are_removed() { + let mut candidates = ScrapedCandidates::new(); + let target = CandidateHash(BlakeTwo256::hash(&vec![1, 2, 3])); + candidates.insert(1, target); + + assert!(candidates.contains(&target)); + + candidates.remove_up_to_height(&2); + assert!(!candidates.contains(&target)); + assert!(candidates.candidates_by_block_number_is_empty()); + } +} diff --git a/polkadot/node/core/dispute-coordinator/src/scraping/mod.rs b/polkadot/node/core/dispute-coordinator/src/scraping/mod.rs new file mode 100644 index 00000000..2d2096f6 --- /dev/null +++ b/polkadot/node/core/dispute-coordinator/src/scraping/mod.rs @@ -0,0 +1,457 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
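The ref-counting in `candidates.rs` is what makes height-based pruning safe when one candidate was included at several heights: pruning one height drops one reference, not the candidate itself. A minimal sketch of that contract against the `ScrapedCandidates` API above (illustrative test-style code, not part of the patch):

    use polkadot_primitives::{BlakeTwo256, CandidateHash, HashT};

    fn ref_counted_pruning_sketch() {
        let mut tracked = ScrapedCandidates::new();
        // The same candidate is observed as included at two different heights.
        let candidate = CandidateHash(BlakeTwo256::hash(b"same candidate"));
        tracked.insert(10, candidate);
        tracked.insert(12, candidate);

        // Pruning below height 11 drops only the first inclusion; the
        // remaining reference keeps the candidate alive.
        tracked.remove_up_to_height(&11);
        assert!(tracked.contains(&candidate));

        // Pruning past the second inclusion finally evicts it.
        tracked.remove_up_to_height(&13);
        assert!(!tracked.contains(&candidate));
    }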
+ +use std::{ + collections::{BTreeMap, HashSet}, + num::NonZeroUsize, +}; + +use futures::channel::oneshot; +use lru::LruCache; + +use polkadot_node_primitives::{DISPUTE_CANDIDATE_LIFETIME_AFTER_FINALIZATION, MAX_FINALITY_LAG}; +use polkadot_node_subsystem::{ + messages::ChainApiMessage, overseer, ActivatedLeaf, ActiveLeavesUpdate, ChainApiError, + SubsystemSender, +}; +use polkadot_node_subsystem_util::runtime::{get_candidate_events, get_on_chain_votes}; +use polkadot_primitives::{ + BlockNumber, CandidateEvent, CandidateHash, CandidateReceipt, Hash, ScrapedOnChainVotes, +}; + +use crate::{ + error::{FatalError, FatalResult, Result}, + LOG_TARGET, +}; + +#[cfg(test)] +mod tests; + +mod candidates; + +/// Number of hashes to keep in the LRU. +/// +/// When traversing the ancestry of a block we will stop once we hit a hash that we find in the +/// `last_observed_blocks` LRU. This means this value should at the very least be as large as the +/// number of expected forks in order to keep chain scraping efficient. Making the LRU much larger +/// than that has very limited use. +const LRU_OBSERVED_BLOCKS_CAPACITY: NonZeroUsize = match NonZeroUsize::new(20) { + Some(cap) => cap, + None => panic!("Observed blocks cache size must be non-zero"), +}; + +/// ScrapedUpdates +/// +/// Updates to `on_chain_votes` and included receipts for a new active leaf and its unprocessed +/// ancestors. +/// +/// `on_chain_votes`: New votes as seen on chain. +/// `included_receipts`: Newly included parachain block candidate receipts as seen on chain. +pub struct ScrapedUpdates { + pub on_chain_votes: Vec<ScrapedOnChainVotes>, + pub included_receipts: Vec<CandidateReceipt>, +} + +impl ScrapedUpdates { + pub fn new() -> Self { + Self { on_chain_votes: Vec::new(), included_receipts: Vec::new() } + } +} + +/// A structure meant to facilitate chain reversions in the event of a dispute +/// concluding against a candidate. Each candidate hash maps to a number of +/// block heights, which in turn map to vectors of blocks at those heights. +pub struct Inclusions { + inclusions_inner: BTreeMap<CandidateHash, BTreeMap<BlockNumber, Vec<Hash>>>, +} + +impl Inclusions { + pub fn new() -> Self { + Self { inclusions_inner: BTreeMap::new() } + } + + // Record a relay block including the candidate, keyed by `CandidateHash` on the outside + // and by `BlockNumber` on the inside. + pub fn insert( + &mut self, + candidate_hash: CandidateHash, + block_number: BlockNumber, + block_hash: Hash, + ) { + if let Some(blocks_including) = self.inclusions_inner.get_mut(&candidate_hash) { + if let Some(blocks_at_height) = blocks_including.get_mut(&block_number) { + blocks_at_height.push(block_hash); + } else { + blocks_including.insert(block_number, Vec::from([block_hash])); + } + } else { + let mut blocks_including: BTreeMap<BlockNumber, Vec<Hash>> = BTreeMap::new(); + blocks_including.insert(block_number, Vec::from([block_hash])); + self.inclusions_inner.insert(candidate_hash, blocks_including); + } + } + + pub fn remove_up_to_height( + &mut self, + height: &BlockNumber, + candidates_modified: HashSet<CandidateHash>, + ) { + for candidate in candidates_modified { + if let Some(blocks_including) = self.inclusions_inner.get_mut(&candidate) { + // `split_off` returns everything at and after the given key, which is the part we keep. This works because the blocks are sorted in ascending order.
+ *blocks_including = blocks_including.split_off(height); + } + } + self.inclusions_inner + .retain(|_, blocks_including| !blocks_including.is_empty()); + } + + pub fn get(&mut self, candidate: &CandidateHash) -> Vec<(BlockNumber, Hash)> { + let mut inclusions_as_vec: Vec<(BlockNumber, Hash)> = Vec::new(); + if let Some(blocks_including) = self.inclusions_inner.get(candidate) { + for (height, blocks_at_height) in blocks_including.iter() { + for block in blocks_at_height { + inclusions_as_vec.push((*height, *block)); + } + } + } + inclusions_as_vec + } +} + +/// Chain scraper +/// +/// Scrapes the unfinalized chain in order to collect information from blocks. Chain scraping +/// during disputes enables critical spam prevention. It does so by updating two important +/// criteria determining whether a vote sent during dispute distribution is potential +/// spam. Namely, whether the candidate being voted on is backed or included. +/// +/// Concretely: +/// +/// - Monitors for `CandidateIncluded` events to keep track of candidates that have been +/// included on chains. +/// - Monitors for `CandidateBacked` events to keep track of all backed candidates. +/// - Calls `FetchOnChainVotes` for each block to gather potentially missed votes from chain. +/// +/// With this information it provides a `CandidateComparator` and, as a return value of +/// `process_active_leaves_update`, any scraped votes. +/// +/// Scraped candidates remain available for `DISPUTE_CANDIDATE_LIFETIME_AFTER_FINALIZATION` more +/// blocks after finalization as a precaution not to prune them prematurely. Besides the newly +/// scraped candidates, `DISPUTE_CANDIDATE_LIFETIME_AFTER_FINALIZATION` finalized blocks are +/// parsed as another precaution to have their `CandidateReceipts` available in case a dispute is +/// raised on them. +pub struct ChainScraper { + /// All candidates we have seen included, which have not yet been finalized. + included_candidates: candidates::ScrapedCandidates, + /// All candidates we have seen backed. + backed_candidates: candidates::ScrapedCandidates, + /// Latest relay blocks observed by the provider. + /// + /// We assume that ancestors of cached blocks are already processed, i.e. we have saved + /// corresponding included candidates. + last_observed_blocks: LruCache<Hash, ()>, + /// Maps included candidate hashes to one or more relay block heights and hashes. + /// These correspond to all the relay blocks which marked a candidate as included, + /// and are needed to apply reversions in case a dispute is concluded against the + /// candidate. + inclusions: Inclusions, +} + +impl ChainScraper { + /// Limits the number of ancestors received for a single request. + pub(crate) const ANCESTRY_CHUNK_SIZE: u32 = 10; + /// Limits the overall number of ancestors walked through for a given head. + /// + /// As long as we have `MAX_FINALITY_LAG` this makes sense as a value. + pub(crate) const ANCESTRY_SIZE_LIMIT: u32 = MAX_FINALITY_LAG; + + /// Create a properly initialized `ChainScraper`. + /// + /// Returns: `Self` and any scraped votes.
+ pub async fn new<Sender>( + sender: &mut Sender, + initial_head: ActivatedLeaf, + ) -> Result<(Self, Vec<ScrapedOnChainVotes>)> + where + Sender: overseer::DisputeCoordinatorSenderTrait, + { + let mut s = Self { + included_candidates: candidates::ScrapedCandidates::new(), + backed_candidates: candidates::ScrapedCandidates::new(), + last_observed_blocks: LruCache::new(LRU_OBSERVED_BLOCKS_CAPACITY), + inclusions: Inclusions::new(), + }; + let update = + ActiveLeavesUpdate { activated: Some(initial_head), deactivated: Default::default() }; + let updates = s.process_active_leaves_update(sender, &update).await?; + Ok((s, updates.on_chain_votes)) + } + + /// Check whether we have seen a candidate included on any chain. + pub fn is_candidate_included(&self, candidate_hash: &CandidateHash) -> bool { + self.included_candidates.contains(candidate_hash) + } + + /// Check whether the candidate is backed. + pub fn is_candidate_backed(&self, candidate_hash: &CandidateHash) -> bool { + self.backed_candidates.contains(candidate_hash) + } + + /// Query active leaves for any candidate `CandidateEvent::CandidateIncluded` events + /// and update current heads, so we can query candidates for all non-finalized blocks. + /// + /// Returns: On chain votes and included candidate receipts for the leaf and any + /// ancestors we might not yet have seen. + pub async fn process_active_leaves_update<Sender>( + &mut self, + sender: &mut Sender, + update: &ActiveLeavesUpdate, + ) -> Result<ScrapedUpdates> + where + Sender: overseer::DisputeCoordinatorSenderTrait, + { + let activated = match update.activated.as_ref() { + Some(activated) => activated, + None => return Ok(ScrapedUpdates::new()), + }; + + // Fetch ancestry up to `DISPUTE_CANDIDATE_LIFETIME_AFTER_FINALIZATION` blocks beyond + // the last finalized one. + let ancestors = self + .get_relevant_block_ancestors(sender, activated.hash, activated.number) + .await?; + + // Ancestor block numbers are consecutive, in descending order. + let earliest_block_number = activated.number - ancestors.len() as u32; + let block_numbers = (earliest_block_number..=activated.number).rev(); + + let block_hashes = std::iter::once(activated.hash).chain(ancestors); + + let mut scraped_updates = ScrapedUpdates::new(); + for (block_number, block_hash) in block_numbers.zip(block_hashes) { + gum::trace!(target: LOG_TARGET, ?block_number, ?block_hash, "In ancestor processing."); + + let receipts_for_block = + self.process_candidate_events(sender, block_number, block_hash).await?; + scraped_updates.included_receipts.extend(receipts_for_block); + + if let Some(votes) = get_on_chain_votes(sender, block_hash).await? { + scraped_updates.on_chain_votes.push(votes); + } + } + + self.last_observed_blocks.put(activated.hash, ()); + + Ok(scraped_updates) + } + + /// Prune finalized candidates. + /// + /// We keep each candidate for `DISPUTE_CANDIDATE_LIFETIME_AFTER_FINALIZATION` blocks after finalization. + /// After that we treat it as low priority. + pub fn process_finalized_block(&mut self, finalized_block_number: &BlockNumber) { + // `DISPUTE_CANDIDATE_LIFETIME_AFTER_FINALIZATION - 1` because `finalized_block_number` counts + // towards the candidate lifetime. + match finalized_block_number.checked_sub(DISPUTE_CANDIDATE_LIFETIME_AFTER_FINALIZATION - 1) + { + Some(key_to_prune) => { + self.backed_candidates.remove_up_to_height(&key_to_prune); + let candidates_modified = + self.included_candidates.remove_up_to_height(&key_to_prune); + self.inclusions.remove_up_to_height(&key_to_prune, candidates_modified); + }, + None => { + // Nothing to prune.
We are still at the beginning of the chain and there are not + // enough finalized blocks yet. + }, + } + } + + /// Process candidate events of a block. + /// + /// Keep track of all included and backed candidates. + /// + /// Returns freshly included candidate receipts. + async fn process_candidate_events<Sender>( + &mut self, + sender: &mut Sender, + block_number: BlockNumber, + block_hash: Hash, + ) -> Result<Vec<CandidateReceipt>> + where + Sender: overseer::DisputeCoordinatorSenderTrait, + { + let events = get_candidate_events(sender, block_hash).await?; + let mut included_receipts: Vec<CandidateReceipt> = Vec::new(); + // Get included and backed events: + for ev in events { + match ev { + CandidateEvent::CandidateIncluded(receipt, _, _, _) => { + let candidate_hash = receipt.hash(); + gum::trace!( + target: LOG_TARGET, + ?candidate_hash, + ?block_number, + "Processing included event" + ); + self.included_candidates.insert(block_number, candidate_hash); + self.inclusions.insert(candidate_hash, block_number, block_hash); + included_receipts.push(receipt); + }, + CandidateEvent::CandidateBacked(receipt, _, _, _) => { + let candidate_hash = receipt.hash(); + gum::trace!( + target: LOG_TARGET, + ?candidate_hash, + ?block_number, + "Processing backed event" + ); + self.backed_candidates.insert(block_number, candidate_hash); + }, + _ => { + // Skip the rest. + }, + } + } + Ok(included_receipts) + } + + /// Returns ancestors of `head` in descending order, stopping + /// either at the block present in cache or at `DISPUTE_CANDIDATE_LIFETIME_AFTER_FINALIZATION - 1` + /// blocks after the last finalized one (called `target_ancestor`). + /// + /// Both `head` and the `target_ancestor` blocks are **not** included in the result. + async fn get_relevant_block_ancestors<Sender>( + &mut self, + sender: &mut Sender, + mut head: Hash, + mut head_number: BlockNumber, + ) -> Result<Vec<Hash>> + where + Sender: overseer::DisputeCoordinatorSenderTrait, + { + let target_ancestor = get_finalized_block_number(sender) + .await? + .saturating_sub(DISPUTE_CANDIDATE_LIFETIME_AFTER_FINALIZATION); + + let mut ancestors = Vec::new(); + + // If `head_number <= target_ancestor + 1` the ancestry will be empty. + if self.last_observed_blocks.get(&head).is_some() || head_number <= target_ancestor + 1 { + return Ok(ancestors) + } + + loop { + let hashes = get_block_ancestors(sender, head, Self::ANCESTRY_CHUNK_SIZE).await?; + + let earliest_block_number = match head_number.checked_sub(hashes.len() as u32) { + Some(number) => number, + None => { + // It's assumed that it's impossible to retrieve + // more than N ancestors for block number N. + gum::error!( + target: LOG_TARGET, + "Received {} ancestors for block number {} from Chain API", + hashes.len(), + head_number, + ); + return Ok(ancestors) + }, + }; + // The reversed order is parent, grandparent, etc. excluding the head. + let block_numbers = (earliest_block_number..head_number).rev(); + + for (block_number, hash) in block_numbers.zip(&hashes) { + // Return if we either met the target/cached block or + // hit the size limit for the returned ancestry of head.
+ if self.last_observed_blocks.get(hash).is_some() || + block_number <= target_ancestor || + ancestors.len() >= Self::ANCESTRY_SIZE_LIMIT as usize + { + return Ok(ancestors) + } + + ancestors.push(*hash); + } + + match hashes.last() { + Some(last_hash) => { + head = *last_hash; + head_number = earliest_block_number; + }, + None => break, + } + } + return Ok(ancestors) + } + + pub fn get_blocks_including_candidate( + &mut self, + candidate: &CandidateHash, + ) -> Vec<(BlockNumber, Hash)> { + self.inclusions.get(candidate) + } +} + +async fn get_finalized_block_number<Sender>(sender: &mut Sender) -> FatalResult<BlockNumber> +where + Sender: overseer::DisputeCoordinatorSenderTrait, +{ + let (number_tx, number_rx) = oneshot::channel(); + send_message_fatal(sender, ChainApiMessage::FinalizedBlockNumber(number_tx), number_rx).await +} + +async fn get_block_ancestors<Sender>( + sender: &mut Sender, + head: Hash, + num_ancestors: BlockNumber, +) -> FatalResult<Vec<Hash>> +where + Sender: overseer::DisputeCoordinatorSenderTrait, +{ + let (tx, rx) = oneshot::channel(); + sender + .send_message(ChainApiMessage::Ancestors { + hash: head, + k: num_ancestors as usize, + response_channel: tx, + }) + .await; + + rx.await + .or(Err(FatalError::ChainApiSenderDropped))? + .map_err(FatalError::ChainApiAncestors) +} + +async fn send_message_fatal<Sender, Response>( + sender: &mut Sender, + message: ChainApiMessage, + receiver: oneshot::Receiver<std::result::Result<Response, ChainApiError>>, +) -> FatalResult<Response> +where + Sender: SubsystemSender<ChainApiMessage>, +{ + sender.send_message(message).await; + + receiver + .await + .map_err(|_| FatalError::ChainApiSenderDropped)? + .map_err(FatalError::ChainApiAncestors) +} diff --git a/polkadot/node/core/dispute-coordinator/src/scraping/tests.rs b/polkadot/node/core/dispute-coordinator/src/scraping/tests.rs new file mode 100644 index 00000000..55726b3f --- /dev/null +++ b/polkadot/node/core/dispute-coordinator/src/scraping/tests.rs @@ -0,0 +1,651 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
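A note for reading the tests that follow: ancestry is fetched in chunks of `ChainScraper::ANCESTRY_CHUNK_SIZE`, so a walk over `n` ancestors costs one `ChainApiMessage::Ancestors` round trip per started chunk. The tests inline this arithmetic via `step_by`; a hypothetical helper (not part of the patch) makes the expectation explicit:

    // Expected number of `Ancestors` requests for a walk of `ancestry_len`
    // blocks: one request per started chunk, i.e. ceiling division.
    fn expected_ancestry_requests(ancestry_len: usize, chunk_size: usize) -> usize {
        (ancestry_len + chunk_size - 1) / chunk_size
    }

    #[test]
    fn ancestry_request_arithmetic() {
        // With the production chunk size of 10:
        assert_eq!(expected_ancestry_requests(30, 10), 3);
        assert_eq!(expected_ancestry_requests(15, 10), 2);
        assert_eq!(expected_ancestry_requests(10, 10), 1);
    }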
+ +use std::{sync::Arc, time::Duration}; + +use assert_matches::assert_matches; + +use futures::future::join; +use parity_scale_codec::Encode; +use sp_core::testing::TaskExecutor; + +use ::test_helpers::{dummy_collator, dummy_collator_signature, dummy_hash}; +use polkadot_node_primitives::DISPUTE_CANDIDATE_LIFETIME_AFTER_FINALIZATION; +use polkadot_node_subsystem::{ + jaeger, + messages::{ + AllMessages, ChainApiMessage, DisputeCoordinatorMessage, RuntimeApiMessage, + RuntimeApiRequest, + }, + ActivatedLeaf, ActiveLeavesUpdate, LeafStatus, SpawnGlue, +}; +use polkadot_node_subsystem_test_helpers::{ + make_subsystem_context, TestSubsystemContext, TestSubsystemContextHandle, TestSubsystemSender, +}; +use polkadot_node_subsystem_util::{reexports::SubsystemContext, TimeoutExt}; +use polkadot_primitives::{ + BlakeTwo256, BlockNumber, CandidateDescriptor, CandidateEvent, CandidateReceipt, CoreIndex, + GroupIndex, Hash, HashT, HeadData, Id as ParaId, +}; + +use crate::LOG_TARGET; + +use super::ChainScraper; + +type VirtualOverseer = TestSubsystemContextHandle; + +const OVERSEER_RECEIVE_TIMEOUT: Duration = Duration::from_secs(2); + +async fn overseer_recv(virtual_overseer: &mut VirtualOverseer) -> AllMessages { + virtual_overseer + .recv() + .timeout(OVERSEER_RECEIVE_TIMEOUT) + .await + .expect("overseer `recv` timed out") +} + +struct TestState { + chain: Vec, + scraper: ChainScraper, + ctx: TestSubsystemContext>, +} + +impl TestState { + async fn new() -> (Self, VirtualOverseer) { + let (mut ctx, mut ctx_handle) = make_subsystem_context(TaskExecutor::new()); + let chain = vec![get_block_number_hash(0), get_block_number_hash(1)]; + let leaf = get_activated_leaf(1); + + let finalized_block_number = 0; + let overseer_fut = async { + assert_finalized_block_number_request(&mut ctx_handle, finalized_block_number).await; + gum::trace!(target: LOG_TARGET, "After assert_finalized_block_number"); + // No ancestors requests, as list would be empty. + assert_candidate_events_request( + &mut ctx_handle, + &chain, + get_backed_and_included_candidate_events, + ) + .await; + assert_chain_vote_request(&mut ctx_handle, &chain).await; + }; + + let (scraper, _) = join(ChainScraper::new(ctx.sender(), leaf.clone()), overseer_fut) + .await + .0 + .unwrap(); + gum::trace!(target: LOG_TARGET, "After launching chain scraper"); + + let test_state = Self { chain, scraper, ctx }; + + (test_state, ctx_handle) + } +} + +fn next_block_number(chain: &[Hash]) -> BlockNumber { + chain.len() as u32 +} + +/// Get a new leaf. 
+fn next_leaf(chain: &mut Vec) -> ActivatedLeaf { + let next_block_number = next_block_number(chain); + let next_hash = get_block_number_hash(next_block_number); + chain.push(next_hash); + get_activated_leaf(next_block_number) +} + +async fn process_active_leaves_update( + sender: &mut TestSubsystemSender, + scraper: &mut ChainScraper, + update: ActivatedLeaf, +) { + scraper + .process_active_leaves_update(sender, &ActiveLeavesUpdate::start_work(update)) + .await + .unwrap(); +} + +fn process_finalized_block(scraper: &mut ChainScraper, finalized: &BlockNumber) { + scraper.process_finalized_block(&finalized) +} + +fn make_candidate_receipt(relay_parent: Hash) -> CandidateReceipt { + let zeros = dummy_hash(); + let descriptor = CandidateDescriptor { + para_id: ParaId::from(0_u32), + relay_parent, + collator: dummy_collator(), + persisted_validation_data_hash: zeros, + pov_hash: zeros, + erasure_root: zeros, + signature: dummy_collator_signature(), + para_head: zeros, + validation_code_hash: zeros.into(), + }; + let candidate = CandidateReceipt { descriptor, commitments_hash: zeros }; + candidate +} + +/// Get a dummy `ActivatedLeaf` for a given block number. +fn get_activated_leaf(n: BlockNumber) -> ActivatedLeaf { + ActivatedLeaf { + hash: get_block_number_hash(n), + number: n, + status: LeafStatus::Fresh, + span: Arc::new(jaeger::Span::Disabled), + } +} + +/// Get a dummy relay parent hash for dummy block number. +fn get_block_number_hash(n: BlockNumber) -> Hash { + BlakeTwo256::hash(&n.encode()) +} + +/// Get a dummy event that corresponds to candidate inclusion for the given block number. +fn get_backed_and_included_candidate_events(block_number: BlockNumber) -> Vec { + let candidate_receipt = make_candidate_receipt(get_block_number_hash(block_number)); + vec![ + CandidateEvent::CandidateIncluded( + candidate_receipt.clone(), + HeadData::default(), + CoreIndex::from(0), + GroupIndex::from(0), + ), + CandidateEvent::CandidateBacked( + candidate_receipt, + HeadData::default(), + CoreIndex::from(0), + GroupIndex::from(0), + ), + ] +} + +fn get_backed_candidate_event(block_number: BlockNumber) -> Vec { + let candidate_receipt = make_candidate_receipt(get_block_number_hash(block_number)); + vec![CandidateEvent::CandidateBacked( + candidate_receipt, + HeadData::default(), + CoreIndex::from(0), + GroupIndex::from(0), + )] +} +/// Hash for a 'magic' candidate. This is meant to be a special candidate used to verify special cases. +fn get_magic_candidate_hash() -> Hash { + BlakeTwo256::hash(&"abc".encode()) +} +/// Get a dummy event that corresponds to candidate inclusion for a hardcoded block number. +/// Used to simulate candidates included multiple times at different block heights. 
+fn get_backed_and_included_magic_candidate_events( + _block_number: BlockNumber, +) -> Vec { + let candidate_receipt = make_candidate_receipt(get_magic_candidate_hash()); + vec![ + CandidateEvent::CandidateIncluded( + candidate_receipt.clone(), + HeadData::default(), + CoreIndex::from(0), + GroupIndex::from(0), + ), + CandidateEvent::CandidateBacked( + candidate_receipt, + HeadData::default(), + CoreIndex::from(0), + GroupIndex::from(0), + ), + ] +} + +async fn assert_candidate_events_request( + virtual_overseer: &mut VirtualOverseer, + chain: &[Hash], + event_generator: F, +) where + F: Fn(u32) -> Vec, +{ + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + hash, + RuntimeApiRequest::CandidateEvents(tx), + )) => { + let maybe_block_number = chain.iter().position(|h| *h == hash); + let response = maybe_block_number + .map(|num| event_generator(num as u32)) + .unwrap_or_default(); + tx.send(Ok(response)).unwrap(); + } + ); +} + +async fn assert_chain_vote_request(virtual_overseer: &mut VirtualOverseer, _chain: &[Hash]) { + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _hash, + RuntimeApiRequest::FetchOnChainVotes(tx), + )) => { + tx.send(Ok(None)).unwrap(); + } + ); +} + +async fn assert_finalized_block_number_request( + virtual_overseer: &mut VirtualOverseer, + response: BlockNumber, +) { + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::ChainApi(ChainApiMessage::FinalizedBlockNumber(tx)) => { + tx.send(Ok(response)).unwrap(); + } + ); +} + +async fn assert_block_ancestors_request(virtual_overseer: &mut VirtualOverseer, chain: &[Hash]) { + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::ChainApi(ChainApiMessage::Ancestors { hash, k, response_channel }) => { + let maybe_block_position = chain.iter().position(|h| *h == hash); + let ancestors = maybe_block_position + .map(|idx| chain[..idx].iter().rev().take(k).copied().collect()) + .unwrap_or_default(); + response_channel.send(Ok(ancestors)).unwrap(); + } + ); +} + +async fn overseer_process_active_leaves_update( + virtual_overseer: &mut VirtualOverseer, + chain: &[Hash], + finalized_block: BlockNumber, + expected_ancestry_len: usize, + event_generator: F, +) where + F: Fn(u32) -> Vec + Clone, +{ + // Before walking through ancestors provider requests latest finalized block number. + assert_finalized_block_number_request(virtual_overseer, finalized_block).await; + // Expect block ancestors requests with respect to the ancestry step. + for _ in (0..expected_ancestry_len).step_by(ChainScraper::ANCESTRY_CHUNK_SIZE as usize) { + assert_block_ancestors_request(virtual_overseer, chain).await; + } + // For each ancestry and the head return corresponding candidates inclusions. 
+ for _ in 0..expected_ancestry_len { + assert_candidate_events_request(virtual_overseer, chain, event_generator.clone()).await; + assert_chain_vote_request(virtual_overseer, chain).await; + } +} + +#[test] +fn scraper_provides_included_state_when_initialized() { + let candidate_1 = make_candidate_receipt(get_block_number_hash(1)); + let candidate_2 = make_candidate_receipt(get_block_number_hash(2)); + futures::executor::block_on(async { + let (state, mut virtual_overseer) = TestState::new().await; + + let TestState { mut chain, mut scraper, mut ctx } = state; + + assert!(!scraper.is_candidate_included(&candidate_2.hash())); + assert!(!scraper.is_candidate_backed(&candidate_2.hash())); + assert!(scraper.is_candidate_included(&candidate_1.hash())); + assert!(scraper.is_candidate_backed(&candidate_1.hash())); + + // After next active leaves update we should see the candidate included. + let next_update = next_leaf(&mut chain); + + let finalized_block_number = 0; + let expected_ancestry_len = 1; + let overseer_fut = overseer_process_active_leaves_update( + &mut virtual_overseer, + &chain, + finalized_block_number, + expected_ancestry_len, + get_backed_and_included_candidate_events, + ); + join(process_active_leaves_update(ctx.sender(), &mut scraper, next_update), overseer_fut) + .await; + + assert!(scraper.is_candidate_included(&candidate_2.hash())); + assert!(scraper.is_candidate_backed(&candidate_2.hash())); + }); +} + +#[test] +fn scraper_requests_candidates_of_leaf_ancestors() { + futures::executor::block_on(async { + // How many blocks should we skip before sending a leaf update. + const BLOCKS_TO_SKIP: usize = 30; + + let (state, mut virtual_overseer) = TestState::new().await; + + let TestState { mut chain, mut scraper, mut ctx } = state; + + let next_update = (0..BLOCKS_TO_SKIP).map(|_| next_leaf(&mut chain)).last().unwrap(); + + let finalized_block_number = 0; + let overseer_fut = overseer_process_active_leaves_update( + &mut virtual_overseer, + &chain, + finalized_block_number, + BLOCKS_TO_SKIP, + get_backed_and_included_candidate_events, + ); + join(process_active_leaves_update(ctx.sender(), &mut scraper, next_update), overseer_fut) + .await; + + let next_block_number = next_block_number(&chain); + for block_number in 1..next_block_number { + let candidate = make_candidate_receipt(get_block_number_hash(block_number)); + assert!(scraper.is_candidate_included(&candidate.hash())); + assert!(scraper.is_candidate_backed(&candidate.hash())); + } + }); +} + +#[test] +fn scraper_requests_candidates_of_non_cached_ancestors() { + futures::executor::block_on(async { + // How many blocks should we skip before sending a leaf update. + const BLOCKS_TO_SKIP: &[usize] = &[30, 15]; + + let (state, mut virtual_overseer) = TestState::new().await; + + let TestState { mut chain, scraper: mut ordering, mut ctx } = state; + + let next_update = (0..BLOCKS_TO_SKIP[0]).map(|_| next_leaf(&mut chain)).last().unwrap(); + + let finalized_block_number = 0; + let overseer_fut = overseer_process_active_leaves_update( + &mut virtual_overseer, + &chain, + finalized_block_number, + BLOCKS_TO_SKIP[0], + get_backed_and_included_candidate_events, + ); + join(process_active_leaves_update(ctx.sender(), &mut ordering, next_update), overseer_fut) + .await; + + // Send the second request and verify that we don't go past the cached block. 
+ let next_update = (0..BLOCKS_TO_SKIP[1]).map(|_| next_leaf(&mut chain)).last().unwrap(); + let overseer_fut = overseer_process_active_leaves_update( + &mut virtual_overseer, + &chain, + finalized_block_number, + BLOCKS_TO_SKIP[1], + get_backed_and_included_candidate_events, + ); + join(process_active_leaves_update(ctx.sender(), &mut ordering, next_update), overseer_fut) + .await; + }); +} + +#[test] +fn scraper_requests_candidates_of_non_finalized_ancestors() { + futures::executor::block_on(async { + // How many blocks should we skip before sending a leaf update. + const BLOCKS_TO_SKIP: usize = 30; + + let (state, mut virtual_overseer) = TestState::new().await; + + let TestState { mut chain, scraper: mut ordering, mut ctx } = state; + + // 1 because `TestState` starts at leaf 1. + let next_update = (1..BLOCKS_TO_SKIP).map(|_| next_leaf(&mut chain)).last().unwrap(); + + let finalized_block_number = 17; + let overseer_fut = overseer_process_active_leaves_update( + &mut virtual_overseer, + &chain, + finalized_block_number, + BLOCKS_TO_SKIP - + (finalized_block_number - DISPUTE_CANDIDATE_LIFETIME_AFTER_FINALIZATION) as usize, // Expect the provider not to go past finalized block. + get_backed_and_included_candidate_events, + ); + join(process_active_leaves_update(ctx.sender(), &mut ordering, next_update), overseer_fut) + .await; + }); +} + +#[test] +fn scraper_prunes_finalized_candidates() { + const TEST_TARGET_BLOCK_NUMBER: BlockNumber = 2; + + // How many blocks should we skip before sending a leaf update. + const BLOCKS_TO_SKIP: usize = 3; + + futures::executor::block_on(async { + let (state, mut virtual_overseer) = TestState::new().await; + + let TestState { mut chain, mut scraper, mut ctx } = state; + + // 1 because `TestState` starts at leaf 1. + let next_update = (1..BLOCKS_TO_SKIP).map(|_| next_leaf(&mut chain)).last().unwrap(); + + let mut finalized_block_number = 1; + let expected_ancestry_len = BLOCKS_TO_SKIP - finalized_block_number as usize; + let overseer_fut = overseer_process_active_leaves_update( + &mut virtual_overseer, + &chain, + finalized_block_number, + expected_ancestry_len, + |block_num| { + if block_num == TEST_TARGET_BLOCK_NUMBER { + get_backed_and_included_candidate_events(block_num) + } else { + vec![] + } + }, + ); + join(process_active_leaves_update(ctx.sender(), &mut scraper, next_update), overseer_fut) + .await; + + let candidate = make_candidate_receipt(get_block_number_hash(TEST_TARGET_BLOCK_NUMBER)); + + // After `DISPUTE_CANDIDATE_LIFETIME_AFTER_FINALIZATION` blocks the candidate should be removed + finalized_block_number = + TEST_TARGET_BLOCK_NUMBER + DISPUTE_CANDIDATE_LIFETIME_AFTER_FINALIZATION; + process_finalized_block(&mut scraper, &finalized_block_number); + + assert!(!scraper.is_candidate_backed(&candidate.hash())); + assert!(!scraper.is_candidate_included(&candidate.hash())); + }); +} + +#[test] +fn scraper_handles_backed_but_not_included_candidate() { + const TEST_TARGET_BLOCK_NUMBER: BlockNumber = 2; + + // How many blocks should we skip before sending a leaf update. 
+ const BLOCKS_TO_SKIP: usize = 3; + + futures::executor::block_on(async { + let (state, mut virtual_overseer) = TestState::new().await; + + let TestState { mut chain, mut scraper, mut ctx } = state; + + let next_update = (1..BLOCKS_TO_SKIP as BlockNumber) + .map(|_| next_leaf(&mut chain)) + .last() + .unwrap(); + + // Add an `ActiveLeavesUpdate` containing a `CandidateBacked` event for block `TEST_TARGET_BLOCK_NUMBER`. + let mut finalized_block_number = 1; + let expected_ancestry_len = BLOCKS_TO_SKIP - finalized_block_number as usize; + let overseer_fut = overseer_process_active_leaves_update( + &mut virtual_overseer, + &chain, + finalized_block_number, + expected_ancestry_len, + |block_num| { + if block_num == TEST_TARGET_BLOCK_NUMBER { + get_backed_candidate_event(block_num) + } else { + vec![] + } + }, + ); + join(process_active_leaves_update(ctx.sender(), &mut scraper, next_update), overseer_fut) + .await; + + // Finalize blocks to enforce pruning of scraped events. + finalized_block_number += 1; + process_finalized_block(&mut scraper, &finalized_block_number); + + // `TEST_TARGET_BLOCK_NUMBER` is now finalized, but still within the + // `DISPUTE_CANDIDATE_LIFETIME_AFTER_FINALIZATION` window. + // The candidate should still be backed. + let candidate = make_candidate_receipt(get_block_number_hash(TEST_TARGET_BLOCK_NUMBER)); + assert!(!scraper.is_candidate_included(&candidate.hash())); + assert!(scraper.is_candidate_backed(&candidate.hash())); + + // Bump the finalized block outside `DISPUTE_CANDIDATE_LIFETIME_AFTER_FINALIZATION`. + // The candidate should be removed. + assert!( + finalized_block_number < + TEST_TARGET_BLOCK_NUMBER + DISPUTE_CANDIDATE_LIFETIME_AFTER_FINALIZATION + ); + finalized_block_number += + TEST_TARGET_BLOCK_NUMBER + DISPUTE_CANDIDATE_LIFETIME_AFTER_FINALIZATION; + process_finalized_block(&mut scraper, &finalized_block_number); + + assert!(!scraper.is_candidate_included(&candidate.hash())); + assert!(!scraper.is_candidate_backed(&candidate.hash())); + }); +} + +#[test] +fn scraper_handles_the_same_candidate_included_in_two_different_block_heights() { + // The same candidate will be included in these two leaves. + let test_targets = vec![2, 3]; + + // How many blocks should we skip before sending a leaf update. + const BLOCKS_TO_SKIP: usize = 3; + + futures::executor::block_on(async { + let (state, mut virtual_overseer) = TestState::new().await; + + let TestState { mut chain, mut scraper, mut ctx } = state; + + // 1 because `TestState` starts at leaf 1. + let next_update = (1..BLOCKS_TO_SKIP).map(|_| next_leaf(&mut chain)).last().unwrap(); + + // Now we will add the same magic candidate at two different block heights. + // Check the `get_backed_and_included_magic_candidate_events` implementation. + let mut finalized_block_number = 1; + let expected_ancestry_len = BLOCKS_TO_SKIP - finalized_block_number as usize; + let overseer_fut = overseer_process_active_leaves_update( + &mut virtual_overseer, + &chain, + finalized_block_number, + expected_ancestry_len, + |block_num| { + if test_targets.contains(&block_num) { + get_backed_and_included_magic_candidate_events(block_num) + } else { + vec![] + } + }, + ); + join(process_active_leaves_update(ctx.sender(), &mut scraper, next_update), overseer_fut) + .await; + + // Finalize blocks to enforce pruning of scraped events. + // The magic candidate was added twice, so it shouldn't be removed if we finalize two more blocks.
+ finalized_block_number = test_targets.first().expect("there are two block nums") + + DISPUTE_CANDIDATE_LIFETIME_AFTER_FINALIZATION; + process_finalized_block(&mut scraper, &finalized_block_number); + + let magic_candidate = make_candidate_receipt(get_magic_candidate_hash()); + assert!(scraper.is_candidate_backed(&magic_candidate.hash())); + assert!(scraper.is_candidate_included(&magic_candidate.hash())); + + // On the next finalization the magic candidate should be removed + finalized_block_number += 1; + process_finalized_block(&mut scraper, &finalized_block_number); + + assert!(!scraper.is_candidate_backed(&magic_candidate.hash())); + assert!(!scraper.is_candidate_included(&magic_candidate.hash())); + }); +} + +#[test] +fn inclusions_per_candidate_properly_adds_and_prunes() { + const TEST_TARGET_BLOCK_NUMBER: BlockNumber = 2; + const TEST_TARGET_BLOCK_NUMBER_2: BlockNumber = 3; + + // How many blocks should we skip before sending a leaf update. + const BLOCKS_TO_SKIP: usize = 4; + + futures::executor::block_on(async { + let (state, mut virtual_overseer) = TestState::new().await; + + let TestState { mut chain, mut scraper, mut ctx } = state; + + // 1 because `TestState` starts at leaf 1. + let next_update = (1..BLOCKS_TO_SKIP).map(|_| next_leaf(&mut chain)).last().unwrap(); + + let mut finalized_block_number = 1; + let expected_ancestry_len = BLOCKS_TO_SKIP - finalized_block_number as usize; + let overseer_fut = overseer_process_active_leaves_update( + &mut virtual_overseer, + &chain, + finalized_block_number, + expected_ancestry_len, + |block_num| { + if block_num == TEST_TARGET_BLOCK_NUMBER || block_num == TEST_TARGET_BLOCK_NUMBER_2 + { + get_backed_and_included_candidate_events(TEST_TARGET_BLOCK_NUMBER) + } else { + vec![] + } + }, + ); + join(process_active_leaves_update(ctx.sender(), &mut scraper, next_update), overseer_fut) + .await; + + let candidate = make_candidate_receipt(get_block_number_hash(TEST_TARGET_BLOCK_NUMBER)); + + // We included the same candidate at two different block heights. 
So both blocks in which + // the candidate is included are recorded. + assert_eq!( + scraper.get_blocks_including_candidate(&candidate.hash()), + Vec::from([ + (TEST_TARGET_BLOCK_NUMBER, get_block_number_hash(TEST_TARGET_BLOCK_NUMBER)), + (TEST_TARGET_BLOCK_NUMBER_2, get_block_number_hash(TEST_TARGET_BLOCK_NUMBER_2)) + ]) + ); + + // After `DISPUTE_CANDIDATE_LIFETIME_AFTER_FINALIZATION` blocks the earlier inclusion should be removed. + finalized_block_number = + TEST_TARGET_BLOCK_NUMBER + DISPUTE_CANDIDATE_LIFETIME_AFTER_FINALIZATION; + process_finalized_block(&mut scraper, &finalized_block_number); + + // The later inclusion should still be present, as we haven't exceeded its lifetime. + assert_eq!( + scraper.get_blocks_including_candidate(&candidate.hash()), + Vec::from([( + TEST_TARGET_BLOCK_NUMBER_2, + get_block_number_hash(TEST_TARGET_BLOCK_NUMBER_2) + )]) + ); + + finalized_block_number = + TEST_TARGET_BLOCK_NUMBER_2 + DISPUTE_CANDIDATE_LIFETIME_AFTER_FINALIZATION; + process_finalized_block(&mut scraper, &finalized_block_number); + + // Now both inclusions have exceeded their lifetimes after finalization and should be purged. + assert!(scraper.get_blocks_including_candidate(&candidate.hash()).is_empty()); + }); +} diff --git a/polkadot/node/core/dispute-coordinator/src/spam_slots.rs b/polkadot/node/core/dispute-coordinator/src/spam_slots.rs new file mode 100644 index 00000000..cdb9e6fb --- /dev/null +++ b/polkadot/node/core/dispute-coordinator/src/spam_slots.rs @@ -0,0 +1,135 @@ +// Copyright 2021 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see <http://www.gnu.org/licenses/>. + +use std::collections::{BTreeSet, HashMap}; + +use polkadot_primitives::{CandidateHash, SessionIndex, ValidatorIndex}; + +use crate::LOG_TARGET; + +/// Type used for counting potential spam votes. +type SpamCount = u32; + +/// How many unconfirmed disputes a validator is allowed to import (per session). +/// +/// Unconfirmed means: the node has not seen the candidate be included on any chain, it has not +/// cast a vote itself on that dispute, the dispute has not yet reached more than a third of the +/// validators' votes and the including relay chain block has not yet been finalized. +/// +/// The exact number of `MAX_SPAM_VOTES` is not that important here. It is important that the +/// number is low enough to not cause resource exhaustion (disk & memory) on the importing +/// validator, even if multiple validators fully make use of their assigned spam slots. +/// +/// Also if things are working properly, this number cannot really be too low either, as all +/// relevant disputes _should_ have been seen as included by enough validators. (Otherwise the +/// candidate would not have been available in the first place and could not have been included.) +/// So this is really just a fallback mechanism if things go terribly wrong.
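To pin down the accounting rules the doc comment describes before the constant and implementation below: one slot is taken per unconfirmed dispute per validator and session, a slot is incremented at most once per (candidate, validator) pair, and `clear` hands slots back once a dispute is confirmed or concluded. A sketch against the `SpamSlots` API defined below (illustrative only; in test builds `MAX_SPAM_VOTES == 1`):

    use polkadot_primitives::{CandidateHash, Hash, SessionIndex, ValidatorIndex};

    fn spam_slot_accounting_sketch() {
        let mut slots = SpamSlots::recover_from_state(UnconfirmedDisputes::new());
        let session: SessionIndex = 1;
        let validator = ValidatorIndex(0);
        let cand_a = CandidateHash(Hash::repeat_byte(0xaa));
        let cand_b = CandidateHash(Hash::repeat_byte(0xbb));

        // The first unconfirmed dispute occupies the validator's only slot.
        assert!(slots.add_unconfirmed(session, cand_a, validator));
        // With the slot exhausted, a further unconfirmed dispute from the
        // same validator is rejected as potential spam.
        assert!(!slots.add_unconfirmed(session, cand_b, validator));
        // Confirming (or concluding) the first dispute frees the slot again.
        slots.clear(&(session, cand_a));
        assert!(slots.add_unconfirmed(session, cand_b, validator));
    }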
+#[cfg(not(test))] +const MAX_SPAM_VOTES: SpamCount = 50; +#[cfg(test)] +const MAX_SPAM_VOTES: SpamCount = 1; + +/// Spam slots for raised disputes concerning unknown candidates. +pub struct SpamSlots { + /// Counts per validator and session. + /// + /// Must not exceed `MAX_SPAM_VOTES`. + slots: HashMap<(SessionIndex, ValidatorIndex), SpamCount>, + + /// All unconfirmed candidates we are aware of right now. + unconfirmed: UnconfirmedDisputes, +} + +/// Unconfirmed disputes to be passed at initialization. +pub type UnconfirmedDisputes = HashMap<(SessionIndex, CandidateHash), BTreeSet>; + +impl SpamSlots { + /// Recover `SpamSlots` from state on startup. + /// + /// Initialize based on already existing active disputes. + pub fn recover_from_state(unconfirmed_disputes: UnconfirmedDisputes) -> Self { + let mut slots: HashMap<(SessionIndex, ValidatorIndex), SpamCount> = HashMap::new(); + for ((session, _), validators) in unconfirmed_disputes.iter() { + for validator in validators { + let spam_vote_count = slots.entry((*session, *validator)).or_default(); + *spam_vote_count += 1; + if *spam_vote_count > MAX_SPAM_VOTES { + gum::debug!( + target: LOG_TARGET, + ?session, + ?validator, + count = ?spam_vote_count, + "Import exceeded spam slot for validator" + ); + } + } + } + + Self { slots, unconfirmed: unconfirmed_disputes } + } + + /// Increase a "voting invalid" validator's spam slot. + /// + /// This function should get called for any validator's invalidity vote for any not yet + /// confirmed dispute. + /// + /// Returns: `true` if validator still had vacant spam slots, `false` otherwise. + pub fn add_unconfirmed( + &mut self, + session: SessionIndex, + candidate: CandidateHash, + validator: ValidatorIndex, + ) -> bool { + let spam_vote_count = self.slots.entry((session, validator)).or_default(); + if *spam_vote_count >= MAX_SPAM_VOTES { + return false + } + let validators = self.unconfirmed.entry((session, candidate)).or_default(); + + if validators.insert(validator) { + // We only increment spam slots once per candidate, as each validator has to provide an + // opposing vote for sending out its own vote. Therefore, receiving multiple votes for + // a single candidate is expected and should not get punished here. + *spam_vote_count += 1; + } + + true + } + + /// Clear out spam slots for a given candidate in a session. + /// + /// This effectively reduces the spam slot count for all validators participating in a dispute + /// for that candidate. You should call this function once a dispute became obsolete or got + /// confirmed and thus votes for it should no longer be treated as potential spam. + pub fn clear(&mut self, key: &(SessionIndex, CandidateHash)) { + if let Some(validators) = self.unconfirmed.remove(key) { + let (session, _) = key; + for validator in validators { + if let Some(spam_vote_count) = self.slots.remove(&(*session, validator)) { + let new = spam_vote_count - 1; + if new > 0 { + self.slots.insert((*session, validator), new); + } + } + } + } + } + /// Prune all spam slots for sessions older than the given index. 
+ pub fn prune_old(&mut self, oldest_index: SessionIndex) { + self.unconfirmed.retain(|(session, _), _| *session >= oldest_index); + self.slots.retain(|(session, _), _| *session >= oldest_index); + } +} diff --git a/polkadot/node/core/dispute-coordinator/src/status.rs b/polkadot/node/core/dispute-coordinator/src/status.rs new file mode 100644 index 00000000..a74a02f2 --- /dev/null +++ b/polkadot/node/core/dispute-coordinator/src/status.rs @@ -0,0 +1,58 @@ +// Copyright 2021 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see <http://www.gnu.org/licenses/>. + +use polkadot_node_primitives::{dispute_is_inactive, DisputeStatus, Timestamp}; +use polkadot_primitives::{CandidateHash, SessionIndex}; +use std::time::{SystemTime, UNIX_EPOCH}; + +use crate::LOG_TARGET; + +/// Get active disputes as an iterator, preserving their `DisputeStatus`. +pub fn get_active_with_status( + recent_disputes: impl Iterator<Item = ((SessionIndex, CandidateHash), DisputeStatus)>, + now: Timestamp, +) -> impl Iterator<Item = ((SessionIndex, CandidateHash), DisputeStatus)> { + recent_disputes.filter(move |(_, status)| !dispute_is_inactive(status, &now)) +} + +pub trait Clock: Send + Sync { + fn now(&self) -> Timestamp; +} + +pub struct SystemClock; + +impl Clock for SystemClock { + fn now(&self) -> Timestamp { + // `SystemTime` is notoriously non-monotonic, so our timers might not work + // exactly as expected. + // + // Regardless, disputes are considered active based on an order of minutes, + // so a few seconds of slippage in either direction shouldn't affect the + // amount of work the node is doing significantly. + match SystemTime::now().duration_since(UNIX_EPOCH) { + Ok(d) => d.as_secs(), + Err(e) => { + gum::warn!( + target: LOG_TARGET, + err = ?e, + "Current time is before unix epoch. Validation will not work correctly." + ); + + 0 + }, + } + } +} diff --git a/polkadot/node/core/dispute-coordinator/src/tests.rs b/polkadot/node/core/dispute-coordinator/src/tests.rs new file mode 100644 index 00000000..7d724324 --- /dev/null +++ b/polkadot/node/core/dispute-coordinator/src/tests.rs @@ -0,0 +1,3432 @@ +// Copyright 2021 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
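The tests below drive dispute activity through the `Clock` trait from `status.rs` rather than real time. As a reference for how `get_active_with_status` interacts with `ACTIVE_DURATION_SECS`, here is a small sketch (assuming the `DisputeStatus::ConcludedFor(Timestamp)` variant from `polkadot-node-primitives`):

    use polkadot_node_primitives::{DisputeStatus, Timestamp, ACTIVE_DURATION_SECS};
    use polkadot_primitives::{CandidateHash, Hash};

    fn active_dispute_filtering_sketch() {
        let now: Timestamp = 1_000;
        let disputes = vec![
            // Still being voted on: always considered active.
            ((1, CandidateHash(Hash::repeat_byte(0x01))), DisputeStatus::Active),
            // Concluded more than `ACTIVE_DURATION_SECS` ago: filtered out.
            ((1, CandidateHash(Hash::repeat_byte(0x02))),
                DisputeStatus::ConcludedFor(now - ACTIVE_DURATION_SECS - 1)),
            // Concluded recently: still inside the active window.
            ((1, CandidateHash(Hash::repeat_byte(0x03))), DisputeStatus::ConcludedFor(now - 1)),
        ];
        let active: Vec<_> =
            crate::status::get_active_with_status(disputes.into_iter(), now).collect();
        assert_eq!(active.len(), 2);
    }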
+ +use std::{ + collections::HashMap, + sync::{ + atomic::{AtomicU64, Ordering as AtomicOrdering}, + Arc, + }, + time::Duration, +}; + +use assert_matches::assert_matches; +use futures::{ + channel::oneshot, + future::{self, BoxFuture}, +}; + +use polkadot_node_subsystem_util::database::Database; + +use polkadot_node_primitives::{ + DisputeMessage, DisputeStatus, SignedDisputeStatement, SignedFullStatement, Statement, +}; +use polkadot_node_subsystem::{ + messages::{ + ApprovalVotingMessage, ChainApiMessage, ChainSelectionMessage, DisputeCoordinatorMessage, + DisputeDistributionMessage, ImportStatementsResult, + }, + overseer::FromOrchestra, + OverseerSignal, +}; + +use polkadot_node_subsystem_util::TimeoutExt; +use sc_keystore::LocalKeystore; +use sp_application_crypto::AppKey; +use sp_core::{sr25519::Pair, testing::TaskExecutor, Pair as PairT}; +use sp_keyring::Sr25519Keyring; +use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr}; + +use ::test_helpers::{dummy_candidate_receipt_bad_sig, dummy_digest, dummy_hash}; +use polkadot_node_primitives::{Timestamp, ACTIVE_DURATION_SECS}; +use polkadot_node_subsystem::{ + jaeger, + messages::{AllMessages, BlockDescription, RuntimeApiMessage, RuntimeApiRequest}, + ActivatedLeaf, ActiveLeavesUpdate, LeafStatus, +}; +use polkadot_node_subsystem_test_helpers::{ + make_buffered_subsystem_context, TestSubsystemContextHandle, +}; +use polkadot_primitives::{ + ApprovalVote, BlockNumber, CandidateCommitments, CandidateEvent, CandidateHash, + CandidateReceipt, CoreIndex, DisputeStatement, GroupIndex, Hash, HeadData, Header, IndexedVec, + MultiDisputeStatementSet, ScrapedOnChainVotes, SessionIndex, SessionInfo, SigningContext, + ValidDisputeStatementKind, ValidatorId, ValidatorIndex, ValidatorSignature, +}; + +use crate::{ + backend::Backend, + metrics::Metrics, + participation::{participation_full_happy_path, participation_missing_availability}, + status::Clock, + Config, DisputeCoordinatorSubsystem, +}; + +use super::db::v1::DbBackend; + +const TEST_TIMEOUT: Duration = Duration::from_secs(2); + +// sets up a keystore with the given keyring accounts. +fn make_keystore(seeds: impl Iterator) -> LocalKeystore { + let store = LocalKeystore::in_memory(); + + for s in seeds { + store + .sr25519_generate_new(polkadot_primitives::PARACHAIN_KEY_TYPE_ID, Some(&s)) + .unwrap(); + } + + store +} + +type VirtualOverseer = TestSubsystemContextHandle; + +const OVERSEER_RECEIVE_TIMEOUT: Duration = Duration::from_secs(2); + +async fn overseer_recv(virtual_overseer: &mut VirtualOverseer) -> AllMessages { + virtual_overseer + .recv() + .timeout(OVERSEER_RECEIVE_TIMEOUT) + .await + .expect("overseer `recv` timed out") +} + +enum VoteType { + Backing, + Explicit, +} + +/// Helper to condense repeated code that creates vote pairs, one valid and one +/// invalid. Optionally the valid vote of the pair can be made a backing vote. 
+async fn generate_opposing_votes_pair( + test_state: &TestState, + valid_voter_idx: ValidatorIndex, + invalid_voter_idx: ValidatorIndex, + candidate_hash: CandidateHash, + session: SessionIndex, + valid_vote_type: VoteType, +) -> (SignedDisputeStatement, SignedDisputeStatement) { + let valid_vote = match valid_vote_type { + VoteType::Backing => + test_state + .issue_backing_statement_with_index(valid_voter_idx, candidate_hash, session) + .await, + VoteType::Explicit => + test_state + .issue_explicit_statement_with_index(valid_voter_idx, candidate_hash, session, true) + .await, + }; + let invalid_vote = test_state + .issue_explicit_statement_with_index(invalid_voter_idx, candidate_hash, session, false) + .await; + + (valid_vote, invalid_vote) +} + +#[derive(Clone)] +struct MockClock { + time: Arc, +} + +impl Default for MockClock { + fn default() -> Self { + MockClock { time: Arc::new(AtomicU64::default()) } + } +} + +impl Clock for MockClock { + fn now(&self) -> Timestamp { + self.time.load(AtomicOrdering::SeqCst) + } +} + +impl MockClock { + fn set(&self, to: Timestamp) { + self.time.store(to, AtomicOrdering::SeqCst) + } +} + +struct TestState { + validators: Vec, + validator_public: IndexedVec, + validator_groups: IndexedVec>, + master_keystore: Arc, + subsystem_keystore: Arc, + db: Arc, + config: Config, + clock: MockClock, + headers: HashMap, + block_num_to_header: HashMap, + last_block: Hash, + // last session the subsystem knows about. + known_session: Option, +} + +impl Default for TestState { + fn default() -> TestState { + let p1 = Pair::from_string("//Polka", None).unwrap(); + let p2 = Pair::from_string("//Dot", None).unwrap(); + let p3 = Pair::from_string("//Kusama", None).unwrap(); + let validators = vec![ + (Sr25519Keyring::Alice.pair(), Sr25519Keyring::Alice.to_seed()), + (Sr25519Keyring::Bob.pair(), Sr25519Keyring::Bob.to_seed()), + (Sr25519Keyring::Charlie.pair(), Sr25519Keyring::Charlie.to_seed()), + (Sr25519Keyring::Dave.pair(), Sr25519Keyring::Dave.to_seed()), + (Sr25519Keyring::Eve.pair(), Sr25519Keyring::Eve.to_seed()), + (Sr25519Keyring::One.pair(), Sr25519Keyring::One.to_seed()), + (Sr25519Keyring::Ferdie.pair(), Sr25519Keyring::Ferdie.to_seed()), + // Two more keys needed so disputes are not confirmed already with only 3 statements. 
+ (p1, "//Polka".into()), + (p2, "//Dot".into()), + (p3, "//Kusama".into()), + ]; + + let validator_public = validators + .clone() + .into_iter() + .map(|k| ValidatorId::from(k.0.public())) + .collect(); + + let validator_groups = IndexedVec::>::from(vec![ + vec![ValidatorIndex(0), ValidatorIndex(1)], + vec![ValidatorIndex(2), ValidatorIndex(3)], + vec![ValidatorIndex(4), ValidatorIndex(5), ValidatorIndex(6)], + ]); + + let master_keystore = make_keystore(validators.iter().map(|v| v.1.clone())).into(); + let subsystem_keystore = + make_keystore(vec![Sr25519Keyring::Alice.to_seed()].into_iter()).into(); + + let db = kvdb_memorydb::create(1); + let db = polkadot_node_subsystem_util::database::kvdb_impl::DbAdapter::new(db, &[]); + let db = Arc::new(db); + let config = Config { col_dispute_data: 0, col_session_data: 1 }; + + let genesis_header = Header { + parent_hash: Hash::zero(), + number: 0, + digest: dummy_digest(), + state_root: dummy_hash(), + extrinsics_root: dummy_hash(), + }; + let last_block = genesis_header.hash(); + + let mut headers = HashMap::new(); + let _ = headers.insert(last_block, genesis_header.clone()); + let mut block_num_to_header = HashMap::new(); + let _ = block_num_to_header.insert(genesis_header.number, last_block); + + TestState { + validators: validators.into_iter().map(|(pair, _)| pair).collect(), + validator_public, + validator_groups, + master_keystore, + subsystem_keystore, + db, + config, + clock: MockClock::default(), + headers, + block_num_to_header, + last_block, + known_session: None, + } + } +} + +impl TestState { + async fn activate_leaf_at_session( + &mut self, + virtual_overseer: &mut VirtualOverseer, + session: SessionIndex, + block_number: BlockNumber, + candidate_events: Vec, + ) { + assert!(block_number > 0); + + let block_header = Header { + parent_hash: self.last_block, + number: block_number, + digest: dummy_digest(), + state_root: dummy_hash(), + extrinsics_root: dummy_hash(), + }; + let block_hash = block_header.hash(); + + let _ = self.headers.insert(block_hash, block_header.clone()); + let _ = self.block_num_to_header.insert(block_header.number, block_hash); + self.last_block = block_hash; + + gum::debug!(?block_number, "Activating block in activate_leaf_at_session."); + virtual_overseer + .send(FromOrchestra::Signal(OverseerSignal::ActiveLeaves( + ActiveLeavesUpdate::start_work(ActivatedLeaf { + hash: block_hash, + span: Arc::new(jaeger::Span::Disabled), + number: block_number, + status: LeafStatus::Fresh, + }), + ))) + .await; + + self.handle_sync_queries( + virtual_overseer, + block_hash, + block_number, + session, + candidate_events, + ) + .await; + } + + /// Returns any sent `DisputeMessage`s. 
+ async fn handle_sync_queries( + &mut self, + virtual_overseer: &mut VirtualOverseer, + block_hash: Hash, + block_number: BlockNumber, + session: SessionIndex, + candidate_events: Vec, + ) -> Vec { + // Order of messages is not fixed (different on initializing): + #[derive(Debug)] + struct FinishedSteps { + got_session_information: bool, + got_scraping_information: bool, + } + + impl FinishedSteps { + fn new() -> Self { + Self { got_session_information: false, got_scraping_information: false } + } + fn is_done(&self) -> bool { + self.got_session_information && self.got_scraping_information + } + } + + let mut finished_steps = FinishedSteps::new(); + let mut sent_disputes = Vec::new(); + + while !finished_steps.is_done() { + let recv = overseer_recv(virtual_overseer).await; + match recv { + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + h, + RuntimeApiRequest::SessionIndexForChild(tx), + )) => { + assert!( + !finished_steps.got_session_information, + "session infos already retrieved" + ); + finished_steps.got_session_information = true; + assert_eq!(h, block_hash); + let _ = tx.send(Ok(session)); + + // Queries for fetching earliest unfinalized block session. See `RollingSessionWindow`. + if self.known_session.is_none() { + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::ChainApi(ChainApiMessage::FinalizedBlockNumber( + s_tx, + )) => { + let _ = s_tx.send(Ok(block_number)); + } + ); + + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::ChainApi(ChainApiMessage::FinalizedBlockHash( + number, + s_tx, + )) => { + assert_eq!(block_number, number); + let _ = s_tx.send(Ok(Some(block_hash))); + } + ); + + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + h, + RuntimeApiRequest::SessionIndexForChild(s_tx), + )) => { + assert_eq!(h, block_hash); + let _ = s_tx.send(Ok(session)); + } + ); + } + + // No queries, if subsystem knows about this session already. + if self.known_session == Some(session) { + continue + } + self.known_session = Some(session); + + loop { + // answer session info queries until the current session is reached. + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + h, + RuntimeApiRequest::SessionInfo(session_index, tx), + )) => { + assert_eq!(h, block_hash); + + let _ = tx.send(Ok(Some(self.session_info()))); + if session_index == session { break } + } + ); + } + }, + AllMessages::ChainApi(ChainApiMessage::FinalizedBlockNumber(tx)) => { + assert!( + !finished_steps.got_scraping_information, + "Scraping info was already retrieved!" 
+ );
+ finished_steps.got_scraping_information = true;
+ tx.send(Ok(0)).unwrap();
+ },
+ AllMessages::ChainApi(ChainApiMessage::BlockNumber(hash, tx)) => {
+ let block_num = self.headers.get(&hash).map(|header| header.number);
+ tx.send(Ok(block_num)).unwrap();
+ },
+ AllMessages::DisputeDistribution(DisputeDistributionMessage::SendDispute(msg)) => {
+ sent_disputes.push(msg);
+ },
+ AllMessages::RuntimeApi(RuntimeApiMessage::Request(
+ _new_leaf,
+ RuntimeApiRequest::CandidateEvents(tx),
+ )) => {
+ tx.send(Ok(candidate_events.clone())).unwrap();
+ },
+ AllMessages::RuntimeApi(RuntimeApiMessage::Request(
+ _new_leaf,
+ RuntimeApiRequest::FetchOnChainVotes(tx),
+ )) => {
+ // Add some `BackedCandidates` or resolved disputes here as needed.
+ tx.send(Ok(Some(ScrapedOnChainVotes {
+ session,
+ backing_validators_per_candidate: Vec::default(),
+ disputes: MultiDisputeStatementSet::default(),
+ })))
+ .unwrap();
+ },
+ AllMessages::ChainApi(ChainApiMessage::Ancestors { hash, k, response_channel }) => {
+ let target_header = self
+ .headers
+ .get(&hash)
+ .expect("The function is called for this block so it should exist");
+ let mut response = Vec::new();
+ for i in target_header.number.saturating_sub(k as u32)..target_header.number {
+ response.push(
+ self.block_num_to_header
+ .get(&i)
+ .expect("headers and block_num_to_header should always be in sync")
+ .clone(),
+ );
+ }
+ let _ = response_channel.send(Ok(response));
+ },
+ msg => {
+ panic!("Received unexpected message in `handle_sync_queries`: {:?}", msg);
+ },
+ }
+ }
+ return sent_disputes
+ }
+
+ async fn handle_resume_sync(
+ &mut self,
+ virtual_overseer: &mut VirtualOverseer,
+ session: SessionIndex,
+ ) -> Vec<DisputeMessage> {
+ self.handle_resume_sync_with_events(virtual_overseer, session, Vec::new()).await
+ }
+
+ async fn handle_resume_sync_with_events(
+ &mut self,
+ virtual_overseer: &mut VirtualOverseer,
+ session: SessionIndex,
+ mut initial_events: Vec<CandidateEvent>,
+ ) -> Vec<DisputeMessage> {
+ let leaves: Vec<Hash> = self.headers.keys().cloned().collect();
+ let mut messages = Vec::new();
+ for (n, leaf) in leaves.iter().enumerate() {
+ gum::debug!(
+ block_number = ?n,
+ "Activating block in handle resume sync."
+ );
+ virtual_overseer
+ .send(FromOrchestra::Signal(OverseerSignal::ActiveLeaves(
+ ActiveLeavesUpdate::start_work(ActivatedLeaf {
+ hash: *leaf,
+ number: n as u32,
+ span: Arc::new(jaeger::Span::Disabled),
+ status: LeafStatus::Fresh,
+ }),
+ )))
+ .await;
+
+ let events = if n == 1 { std::mem::take(&mut initial_events) } else { Vec::new() };
+
+ let mut new_messages = self
+ .handle_sync_queries(virtual_overseer, *leaf, n as BlockNumber, session, events)
+ .await;
+ messages.append(&mut new_messages);
+ }
+ messages
+ }
+
+ fn session_info(&self) -> SessionInfo {
+ let discovery_keys = self.validators.iter().map(|k| <_>::from(k.public())).collect();
+
+ let assignment_keys = self.validators.iter().map(|k| <_>::from(k.public())).collect();
+
+ SessionInfo {
+ validators: self.validator_public.clone(),
+ discovery_keys,
+ assignment_keys,
+ validator_groups: self.validator_groups.clone(),
+ n_cores: self.validator_groups.len() as _,
+ zeroth_delay_tranche_width: 0,
+ relay_vrf_modulo_samples: 1,
+ n_delay_tranches: 100,
+ no_show_slots: 1,
+ needed_approvals: 10,
+ active_validator_indices: Vec::new(),
+ dispute_period: 6,
+ random_seed: [0u8; 32],
+ }
+ }
+
+ async fn issue_explicit_statement_with_index(
+ &self,
+ index: ValidatorIndex,
+ candidate_hash: CandidateHash,
+ session: SessionIndex,
+ valid: bool,
+ ) -> SignedDisputeStatement {
+ let public = self.validator_public.get(index).unwrap().clone();
+
+ let keystore = self.master_keystore.clone() as SyncCryptoStorePtr;
+
+ SignedDisputeStatement::sign_explicit(&keystore, valid, candidate_hash, session, public)
+ .await
+ .unwrap()
+ .unwrap()
+ }
+
+ async fn issue_backing_statement_with_index(
+ &self,
+ index: ValidatorIndex,
+ candidate_hash: CandidateHash,
+ session: SessionIndex,
+ ) -> SignedDisputeStatement {
+ let keystore = self.master_keystore.clone() as SyncCryptoStorePtr;
+ let validator_id = self.validators[index.0 as usize].public().into();
+ let context =
+ SigningContext { session_index: session, parent_hash: Hash::repeat_byte(0xac) };
+
+ let statement = SignedFullStatement::sign(
+ &keystore,
+ Statement::Valid(candidate_hash),
+ &context,
+ index,
+ &validator_id,
+ )
+ .await
+ .unwrap()
+ .unwrap()
+ .into_unchecked();
+
+ SignedDisputeStatement::from_backing_statement(&statement, context, validator_id).unwrap()
+ }
+
+ fn issue_approval_vote_with_index(
+ &self,
+ index: ValidatorIndex,
+ candidate_hash: CandidateHash,
+ session: SessionIndex,
+ ) -> SignedDisputeStatement {
+ let keystore = self.master_keystore.clone() as SyncCryptoStorePtr;
+ let validator_id = self.validators[index.0 as usize].public();
+
+ let payload = ApprovalVote(candidate_hash).signing_payload(session);
+ let signature = SyncCryptoStore::sign_with(
+ &*keystore,
+ ValidatorId::ID,
+ &validator_id.into(),
+ &payload[..],
+ )
+ .ok()
+ .flatten()
+ .unwrap();
+
+ SignedDisputeStatement::new_unchecked_from_trusted_source(
+ DisputeStatement::Valid(ValidDisputeStatementKind::ApprovalChecking),
+ candidate_hash,
+ session,
+ validator_id.into(),
+ signature.try_into().unwrap(),
+ )
+ }
+
+ fn resume<F>(mut self, test: F) -> Self
+ where
+ F: FnOnce(TestState, VirtualOverseer) -> BoxFuture<'static, TestState>,
+ {
+ self.known_session = None;
+ let (ctx, ctx_handle) = make_buffered_subsystem_context(TaskExecutor::new(), 1);
+ let subsystem = DisputeCoordinatorSubsystem::new(
+ self.db.clone(),
+ self.config.clone(),
+ self.subsystem_keystore.clone(),
+ Metrics::default(),
+ );
+ let backend =
+ DbBackend::new(self.db.clone(),
self.config.column_config(), Metrics::default());
+ let subsystem_task = subsystem.run(ctx, backend, Box::new(self.clock.clone()));
+ let test_task = test(self, ctx_handle);
+
+ let (_, state) = futures::executor::block_on(future::join(subsystem_task, test_task));
+ state
+ }
+}
+
+fn test_harness<F>(test: F) -> TestState
+where
+ F: FnOnce(TestState, VirtualOverseer) -> BoxFuture<'static, TestState>,
+{
+ let mut test_state = TestState::default();
+
+ // Add two more blocks after the genesis (which is created in `default()`)
+ let h1 = Header {
+ parent_hash: test_state.last_block.clone(),
+ number: 1,
+ digest: dummy_digest(),
+ state_root: dummy_hash(),
+ extrinsics_root: dummy_hash(),
+ };
+ let h1_hash = h1.hash();
+ test_state.headers.insert(h1_hash.clone(), h1);
+ test_state.block_num_to_header.insert(1, h1_hash.clone());
+ test_state.last_block = h1_hash;
+
+ let h2 = Header {
+ parent_hash: test_state.last_block.clone(),
+ number: 2,
+ digest: dummy_digest(),
+ state_root: dummy_hash(),
+ extrinsics_root: dummy_hash(),
+ };
+ let h2_hash = h2.hash();
+ test_state.headers.insert(h2_hash.clone(), h2);
+ test_state.block_num_to_header.insert(2, h2_hash.clone());
+ test_state.last_block = h2_hash;
+
+ test_state.resume(test)
+}
+
+/// Handle participation messages.
+async fn participation_with_distribution(
+ virtual_overseer: &mut VirtualOverseer,
+ candidate_hash: &CandidateHash,
+ expected_commitments_hash: Hash,
+) {
+ participation_full_happy_path(virtual_overseer, expected_commitments_hash).await;
+ assert_matches!(
+ overseer_recv(virtual_overseer).await,
+ AllMessages::DisputeDistribution(
+ DisputeDistributionMessage::SendDispute(msg)
+ ) => {
+ assert_eq!(&msg.candidate_receipt().hash(), candidate_hash);
+ }
+ );
+}
+
+fn make_valid_candidate_receipt() -> CandidateReceipt {
+ let mut candidate_receipt = dummy_candidate_receipt_bad_sig(dummy_hash(), dummy_hash());
+ candidate_receipt.commitments_hash = CandidateCommitments::default().hash();
+ candidate_receipt
+}
+
+fn make_invalid_candidate_receipt() -> CandidateReceipt {
+ dummy_candidate_receipt_bad_sig(Default::default(), Some(Default::default()))
+}
+
+// Generate a `CandidateBacked` event from a `CandidateReceipt`. The rest is dummy data.
+fn make_candidate_backed_event(candidate_receipt: CandidateReceipt) -> CandidateEvent {
+ CandidateEvent::CandidateBacked(
+ candidate_receipt,
+ HeadData(Vec::new()),
+ CoreIndex(0),
+ GroupIndex(0),
+ )
+}
+
+// Generate a `CandidateIncluded` event from a `CandidateReceipt`. The rest is dummy data.
+fn make_candidate_included_event(candidate_receipt: CandidateReceipt) -> CandidateEvent {
+ CandidateEvent::CandidateIncluded(
+ candidate_receipt,
+ HeadData(Vec::new()),
+ CoreIndex(0),
+ GroupIndex(0),
+ )
+}
+
+/// Handle request for approval votes:
+pub async fn handle_approval_vote_request(
+ ctx_handle: &mut VirtualOverseer,
+ expected_hash: &CandidateHash,
+ votes_to_send: HashMap<ValidatorIndex, ValidatorSignature>,
+) {
+ assert_matches!(
+ ctx_handle.recv().await,
+ AllMessages::ApprovalVoting(
+ ApprovalVotingMessage::GetApprovalSignaturesForCandidate(hash, tx)
+ ) => {
+ assert_eq!(&hash, expected_hash);
+ tx.send(votes_to_send).unwrap();
+ },
+ "overseer did not receive `GetApprovalSignaturesForCandidate` message.",
+ );
+}
+
+/// Handle block number request. In the context of these tests this message is required for
+/// handling comparator creation for enqueuing participations.
+async fn handle_get_block_number(ctx_handle: &mut VirtualOverseer, test_state: &TestState) {
+ assert_matches!(
+ ctx_handle.recv().await,
+ AllMessages::ChainApi(
+ ChainApiMessage::BlockNumber(hash, tx)) => {
+ tx.send(Ok(test_state.headers.get(&hash).map(|r| r.number))).unwrap();
+ }
+ )
+}
+
+#[test]
+fn too_many_unconfirmed_statements_are_considered_spam() {
+ test_harness(|mut test_state, mut virtual_overseer| {
+ Box::pin(async move {
+ let session = 1;
+
+ test_state.handle_resume_sync(&mut virtual_overseer, session).await;
+
+ let candidate_receipt1 = make_valid_candidate_receipt();
+ let candidate_hash1 = candidate_receipt1.hash();
+ let candidate_receipt2 = make_invalid_candidate_receipt();
+ let candidate_hash2 = candidate_receipt2.hash();
+
+ test_state
+ .activate_leaf_at_session(&mut virtual_overseer, session, 1, Vec::new())
+ .await;
+
+ let (valid_vote1, invalid_vote1) = generate_opposing_votes_pair(
+ &test_state,
+ ValidatorIndex(3),
+ ValidatorIndex(1),
+ candidate_hash1,
+ session,
+ VoteType::Backing,
+ )
+ .await;
+
+ let (valid_vote2, invalid_vote2) = generate_opposing_votes_pair(
+ &test_state,
+ ValidatorIndex(3),
+ ValidatorIndex(1),
+ candidate_hash2,
+ session,
+ VoteType::Backing,
+ )
+ .await;
+
+ gum::trace!("Before sending `ImportStatements`");
+ virtual_overseer
+ .send(FromOrchestra::Communication {
+ msg: DisputeCoordinatorMessage::ImportStatements {
+ candidate_receipt: candidate_receipt1.clone(),
+ session,
+ statements: vec![
+ (valid_vote1, ValidatorIndex(3)),
+ (invalid_vote1, ValidatorIndex(1)),
+ ],
+ pending_confirmation: None,
+ },
+ })
+ .await;
+ gum::trace!("After sending `ImportStatements`");
+
+ handle_approval_vote_request(&mut virtual_overseer, &candidate_hash1, HashMap::new())
+ .await;
+
+ // Participation has to fail here, otherwise the dispute will be confirmed. However,
+ // participation won't happen at all because the dispute is neither backed nor confirmed,
+ // nor is the candidate included. In other words: we'll refrain from participation.
+
+ {
+ let (tx, rx) = oneshot::channel();
+ virtual_overseer
+ .send(FromOrchestra::Communication {
+ msg: DisputeCoordinatorMessage::ActiveDisputes(tx),
+ })
+ .await;
+
+ assert_eq!(
+ rx.await.unwrap(),
+ vec![(session, candidate_hash1, DisputeStatus::Active)]
+ );
+
+ let (tx, rx) = oneshot::channel();
+ virtual_overseer
+ .send(FromOrchestra::Communication {
+ msg: DisputeCoordinatorMessage::QueryCandidateVotes(
+ vec![(session, candidate_hash1)],
+ tx,
+ ),
+ })
+ .await;
+
+ let (_, _, votes) = rx.await.unwrap().get(0).unwrap().clone();
+ assert_eq!(votes.valid.raw().len(), 1);
+ assert_eq!(votes.invalid.len(), 1);
+ }
+
+ let (pending_confirmation, confirmation_rx) = oneshot::channel();
+ virtual_overseer
+ .send(FromOrchestra::Communication {
+ msg: DisputeCoordinatorMessage::ImportStatements {
+ candidate_receipt: candidate_receipt2.clone(),
+ session,
+ statements: vec![
+ (valid_vote2, ValidatorIndex(3)),
+ (invalid_vote2, ValidatorIndex(1)),
+ ],
+ pending_confirmation: Some(pending_confirmation),
+ },
+ })
+ .await;
+
+ handle_approval_vote_request(&mut virtual_overseer, &candidate_hash2, HashMap::new())
+ .await;
+
+ {
+ let (tx, rx) = oneshot::channel();
+ virtual_overseer
+ .send(FromOrchestra::Communication {
+ msg: DisputeCoordinatorMessage::QueryCandidateVotes(
+ vec![(session, candidate_hash2)],
+ tx,
+ ),
+ })
+ .await;
+
+ assert_matches!(rx.await.unwrap().get(0), None);
+ }
+
+ // Result should be invalid, because it should be considered spam.
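+ // A hedged sketch of the accounting behind this (the authoritative logic lives
+ // in `spam_slots.rs` and the import path; the names below are illustrative):
+ //
+ //     // Unconfirmed disputes occupy one spam slot per (session, validator).
+ //     if potential_spam && !spam_slots.add_unconfirmed(session, candidate, index) {
+ //         return ImportStatementsResult::InvalidImport
+ //     }
+ //
+ // Validators 3 and 1 already occupy slots for the unconfirmed dispute on
+ // `candidate_hash1`, so this second unconfirmed import is rejected as spam.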
+ assert_matches!(confirmation_rx.await, Ok(ImportStatementsResult::InvalidImport));
+
+ virtual_overseer.send(FromOrchestra::Signal(OverseerSignal::Conclude)).await;
+
+ // No more messages expected:
+ assert!(virtual_overseer.try_recv().await.is_none());
+
+ test_state
+ })
+ });
+}
+
+#[test]
+fn approval_vote_import_works() {
+ test_harness(|mut test_state, mut virtual_overseer| {
+ Box::pin(async move {
+ let session = 1;
+
+ test_state.handle_resume_sync(&mut virtual_overseer, session).await;
+
+ let candidate_receipt1 = make_valid_candidate_receipt();
+ let candidate_hash1 = candidate_receipt1.hash();
+
+ test_state
+ .activate_leaf_at_session(&mut virtual_overseer, session, 1, Vec::new())
+ .await;
+
+ let (valid_vote1, invalid_vote1) = generate_opposing_votes_pair(
+ &test_state,
+ ValidatorIndex(3),
+ ValidatorIndex(1),
+ candidate_hash1,
+ session,
+ VoteType::Backing,
+ )
+ .await;
+
+ let approval_vote = test_state.issue_approval_vote_with_index(
+ ValidatorIndex(4),
+ candidate_hash1,
+ session,
+ );
+
+ gum::trace!("Before sending `ImportStatements`");
+ virtual_overseer
+ .send(FromOrchestra::Communication {
+ msg: DisputeCoordinatorMessage::ImportStatements {
+ candidate_receipt: candidate_receipt1.clone(),
+ session,
+ statements: vec![
+ (valid_vote1, ValidatorIndex(3)),
+ (invalid_vote1, ValidatorIndex(1)),
+ ],
+ pending_confirmation: None,
+ },
+ })
+ .await;
+ gum::trace!("After sending `ImportStatements`");
+
+ let approval_votes = [(ValidatorIndex(4), approval_vote.into_validator_signature())]
+ .into_iter()
+ .collect();
+
+ handle_approval_vote_request(&mut virtual_overseer, &candidate_hash1, approval_votes)
+ .await;
+
+ // Participation won't happen here because the dispute is neither backed nor confirmed,
+ // nor is the candidate included. In other words: we'll refrain from participation.
+
+ {
+ let (tx, rx) = oneshot::channel();
+ virtual_overseer
+ .send(FromOrchestra::Communication {
+ msg: DisputeCoordinatorMessage::ActiveDisputes(tx),
+ })
+ .await;
+
+ assert_eq!(
+ rx.await.unwrap(),
+ vec![(session, candidate_hash1, DisputeStatus::Active)]
+ );
+
+ let (tx, rx) = oneshot::channel();
+ virtual_overseer
+ .send(FromOrchestra::Communication {
+ msg: DisputeCoordinatorMessage::QueryCandidateVotes(
+ vec![(session, candidate_hash1)],
+ tx,
+ ),
+ })
+ .await;
+
+ let (_, _, votes) = rx.await.unwrap().get(0).unwrap().clone();
+ assert_eq!(votes.valid.raw().len(), 2);
+ assert!(
+ votes.valid.raw().get(&ValidatorIndex(4)).is_some(),
+ "Approval vote is missing!"
+ ); + assert_eq!(votes.invalid.len(), 1); + } + + virtual_overseer.send(FromOrchestra::Signal(OverseerSignal::Conclude)).await; + + // No more messages expected: + assert!(virtual_overseer.try_recv().await.is_none()); + + test_state + }) + }); +} + +#[test] +fn dispute_gets_confirmed_via_participation() { + test_harness(|mut test_state, mut virtual_overseer| { + Box::pin(async move { + let session = 1; + + test_state.handle_resume_sync(&mut virtual_overseer, session).await; + + let candidate_receipt1 = make_valid_candidate_receipt(); + let candidate_hash1 = candidate_receipt1.hash(); + let candidate_receipt2 = make_invalid_candidate_receipt(); + let candidate_hash2 = candidate_receipt2.hash(); + + test_state + .activate_leaf_at_session( + &mut virtual_overseer, + session, + 1, + vec![ + make_candidate_backed_event(candidate_receipt1.clone()), + make_candidate_backed_event(candidate_receipt2.clone()), + ], + ) + .await; + + let (valid_vote1, invalid_vote1) = generate_opposing_votes_pair( + &test_state, + ValidatorIndex(3), + ValidatorIndex(1), + candidate_hash1, + session, + VoteType::Explicit, + ) + .await; + + let (valid_vote2, invalid_vote2) = generate_opposing_votes_pair( + &test_state, + ValidatorIndex(3), + ValidatorIndex(1), + candidate_hash2, + session, + VoteType::Explicit, + ) + .await; + + virtual_overseer + .send(FromOrchestra::Communication { + msg: DisputeCoordinatorMessage::ImportStatements { + candidate_receipt: candidate_receipt1.clone(), + session, + statements: vec![ + (valid_vote1, ValidatorIndex(3)), + (invalid_vote1, ValidatorIndex(1)), + ], + pending_confirmation: None, + }, + }) + .await; + gum::debug!("After First import!"); + handle_approval_vote_request(&mut virtual_overseer, &candidate_hash1, HashMap::new()) + .await; + + participation_with_distribution( + &mut virtual_overseer, + &candidate_hash1, + candidate_receipt1.commitments_hash, + ) + .await; + gum::debug!("After Participation!"); + + { + let (tx, rx) = oneshot::channel(); + virtual_overseer + .send(FromOrchestra::Communication { + msg: DisputeCoordinatorMessage::ActiveDisputes(tx), + }) + .await; + + assert_eq!( + rx.await.unwrap(), + vec![(session, candidate_hash1, DisputeStatus::Active)] + ); + + let (tx, rx) = oneshot::channel(); + virtual_overseer + .send(FromOrchestra::Communication { + msg: DisputeCoordinatorMessage::QueryCandidateVotes( + vec![(session, candidate_hash1)], + tx, + ), + }) + .await; + + let (_, _, votes) = rx.await.unwrap().get(0).unwrap().clone(); + assert_eq!(votes.valid.raw().len(), 2); + assert_eq!(votes.invalid.len(), 1); + } + + let (pending_confirmation, confirmation_rx) = oneshot::channel(); + virtual_overseer + .send(FromOrchestra::Communication { + msg: DisputeCoordinatorMessage::ImportStatements { + candidate_receipt: candidate_receipt2.clone(), + session, + statements: vec![ + (valid_vote2, ValidatorIndex(3)), + (invalid_vote2, ValidatorIndex(1)), + ], + pending_confirmation: Some(pending_confirmation), + }, + }) + .await; + handle_approval_vote_request(&mut virtual_overseer, &candidate_hash2, HashMap::new()) + .await; + + participation_missing_availability(&mut virtual_overseer).await; + + { + let (tx, rx) = oneshot::channel(); + virtual_overseer + .send(FromOrchestra::Communication { + msg: DisputeCoordinatorMessage::QueryCandidateVotes( + vec![(session, candidate_hash2)], + tx, + ), + }) + .await; + + let (_, _, votes) = rx.await.unwrap().get(0).unwrap().clone(); + assert_eq!(votes.valid.raw().len(), 1); + assert_eq!(votes.invalid.len(), 1); + } + + // Result should 
be valid, because our node participated, so spam slots are cleared:
+ assert_matches!(confirmation_rx.await, Ok(ImportStatementsResult::ValidImport));
+
+ virtual_overseer.send(FromOrchestra::Signal(OverseerSignal::Conclude)).await;
+
+ // No more messages expected:
+ assert!(virtual_overseer.try_recv().await.is_none());
+
+ test_state
+ })
+ });
+}
+
+#[test]
+fn dispute_gets_confirmed_at_byzantine_threshold() {
+ test_harness(|mut test_state, mut virtual_overseer| {
+ Box::pin(async move {
+ let session = 1;
+
+ test_state.handle_resume_sync(&mut virtual_overseer, session).await;
+
+ let candidate_receipt1 = make_valid_candidate_receipt();
+ let candidate_hash1 = candidate_receipt1.hash();
+ let candidate_receipt2 = make_invalid_candidate_receipt();
+ let candidate_hash2 = candidate_receipt2.hash();
+
+ test_state
+ .activate_leaf_at_session(&mut virtual_overseer, session, 1, Vec::new())
+ .await;
+
+ let (valid_vote1, invalid_vote1) = generate_opposing_votes_pair(
+ &test_state,
+ ValidatorIndex(3),
+ ValidatorIndex(1),
+ candidate_hash1,
+ session,
+ VoteType::Explicit,
+ )
+ .await;
+
+ let (valid_vote1a, invalid_vote1a) = generate_opposing_votes_pair(
+ &test_state,
+ ValidatorIndex(4),
+ ValidatorIndex(5),
+ candidate_hash1,
+ session,
+ VoteType::Explicit,
+ )
+ .await;
+
+ let (valid_vote2, invalid_vote2) = generate_opposing_votes_pair(
+ &test_state,
+ ValidatorIndex(3),
+ ValidatorIndex(1),
+ candidate_hash2,
+ session,
+ VoteType::Explicit,
+ )
+ .await;
+
+ virtual_overseer
+ .send(FromOrchestra::Communication {
+ msg: DisputeCoordinatorMessage::ImportStatements {
+ candidate_receipt: candidate_receipt1.clone(),
+ session,
+ statements: vec![
+ (valid_vote1, ValidatorIndex(3)),
+ (invalid_vote1, ValidatorIndex(1)),
+ (valid_vote1a, ValidatorIndex(4)),
+ (invalid_vote1a, ValidatorIndex(5)),
+ ],
+ pending_confirmation: None,
+ },
+ })
+ .await;
+ handle_approval_vote_request(&mut virtual_overseer, &candidate_hash1, HashMap::new())
+ .await;
+
+ // Participation won't happen here because the dispute is neither backed nor confirmed,
+ // nor is the candidate included. In other words: we'll refrain from participation.
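+ //
+ // Back-of-the-envelope for this fixture (hedged; `byzantine_threshold` is the
+ // helper from `polkadot_primitives`):
+ //
+ //     let n_validators = 7;
+ //     let f = (n_validators - 1) / 3; // byzantine_threshold(7) == 2
+ //
+ // The import above carried four distinct voters, i.e. more than `f`, which is
+ // why the dispute shows up as `Confirmed` below and occupies no spam slots.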
+
+ {
+ let (tx, rx) = oneshot::channel();
+ virtual_overseer
+ .send(FromOrchestra::Communication {
+ msg: DisputeCoordinatorMessage::ActiveDisputes(tx),
+ })
+ .await;
+
+ assert_eq!(
+ rx.await.unwrap(),
+ vec![(session, candidate_hash1, DisputeStatus::Confirmed)]
+ );
+
+ let (tx, rx) = oneshot::channel();
+ virtual_overseer
+ .send(FromOrchestra::Communication {
+ msg: DisputeCoordinatorMessage::QueryCandidateVotes(
+ vec![(session, candidate_hash1)],
+ tx,
+ ),
+ })
+ .await;
+
+ let (_, _, votes) = rx.await.unwrap().get(0).unwrap().clone();
+ assert_eq!(votes.valid.raw().len(), 2);
+ assert_eq!(votes.invalid.len(), 2);
+ }
+
+ let (pending_confirmation, confirmation_rx) = oneshot::channel();
+ virtual_overseer
+ .send(FromOrchestra::Communication {
+ msg: DisputeCoordinatorMessage::ImportStatements {
+ candidate_receipt: candidate_receipt2.clone(),
+ session,
+ statements: vec![
+ (valid_vote2, ValidatorIndex(3)),
+ (invalid_vote2, ValidatorIndex(1)),
+ ],
+ pending_confirmation: Some(pending_confirmation),
+ },
+ })
+ .await;
+ handle_approval_vote_request(&mut virtual_overseer, &candidate_hash2, HashMap::new())
+ .await;
+
+ participation_missing_availability(&mut virtual_overseer).await;
+
+ {
+ let (tx, rx) = oneshot::channel();
+ virtual_overseer
+ .send(FromOrchestra::Communication {
+ msg: DisputeCoordinatorMessage::QueryCandidateVotes(
+ vec![(session, candidate_hash2)],
+ tx,
+ ),
+ })
+ .await;
+
+ let (_, _, votes) = rx.await.unwrap().get(0).unwrap().clone();
+ assert_eq!(votes.valid.raw().len(), 1);
+ assert_eq!(votes.invalid.len(), 1);
+ }
+
+ // Result should be valid, because the byzantine threshold was reached in the first
+ // import, so spam slots are cleared:
+ assert_matches!(confirmation_rx.await, Ok(ImportStatementsResult::ValidImport));
+
+ virtual_overseer.send(FromOrchestra::Signal(OverseerSignal::Conclude)).await;
+
+ // No more messages expected:
+ assert!(virtual_overseer.try_recv().await.is_none());
+
+ test_state
+ })
+ });
+}
+
+#[test]
+fn backing_statements_import_works_and_no_spam() {
+ test_harness(|mut test_state, mut virtual_overseer| {
+ Box::pin(async move {
+ let session = 1;
+
+ test_state.handle_resume_sync(&mut virtual_overseer, session).await;
+
+ let candidate_receipt = make_valid_candidate_receipt();
+ let candidate_hash = candidate_receipt.hash();
+
+ test_state
+ .activate_leaf_at_session(&mut virtual_overseer, session, 1, Vec::new())
+ .await;
+
+ let valid_vote1 = test_state
+ .issue_backing_statement_with_index(ValidatorIndex(3), candidate_hash, session)
+ .await;
+
+ let valid_vote2 = test_state
+ .issue_backing_statement_with_index(ValidatorIndex(4), candidate_hash, session)
+ .await;
+
+ let (pending_confirmation, confirmation_rx) = oneshot::channel();
+ virtual_overseer
+ .send(FromOrchestra::Communication {
+ msg: DisputeCoordinatorMessage::ImportStatements {
+ candidate_receipt: candidate_receipt.clone(),
+ session,
+ statements: vec![
+ (valid_vote1, ValidatorIndex(3)),
+ (valid_vote2, ValidatorIndex(4)),
+ ],
+ pending_confirmation: Some(pending_confirmation),
+ },
+ })
+ .await;
+ assert_matches!(confirmation_rx.await, Ok(ImportStatementsResult::ValidImport));
+
+ {
+ // Just backing votes - we should not have any active disputes now.
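+ // A dispute needs votes on *both* sides; as a minimal, illustrative predicate:
+ //
+ //     fn is_disputed(valid_votes: usize, invalid_votes: usize) -> bool {
+ //         valid_votes > 0 && invalid_votes > 0
+ //     }
+ //
+ // Backing statements only ever contribute to the `valid` side, so importing
+ // them must leave `ActiveDisputes` empty: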
+ let (tx, rx) = oneshot::channel(); + virtual_overseer + .send(FromOrchestra::Communication { + msg: DisputeCoordinatorMessage::ActiveDisputes(tx), + }) + .await; + + assert!(rx.await.unwrap().is_empty()); + + let (tx, rx) = oneshot::channel(); + virtual_overseer + .send(FromOrchestra::Communication { + msg: DisputeCoordinatorMessage::QueryCandidateVotes( + vec![(session, candidate_hash)], + tx, + ), + }) + .await; + + let (_, _, votes) = rx.await.unwrap().get(0).unwrap().clone(); + assert_eq!(votes.valid.raw().len(), 2); + assert_eq!(votes.invalid.len(), 0); + } + + let candidate_receipt = make_invalid_candidate_receipt(); + let candidate_hash = candidate_receipt.hash(); + + let valid_vote1 = test_state + .issue_backing_statement_with_index(ValidatorIndex(3), candidate_hash, session) + .await; + + let valid_vote2 = test_state + .issue_backing_statement_with_index(ValidatorIndex(4), candidate_hash, session) + .await; + + test_state + .activate_leaf_at_session( + &mut virtual_overseer, + session, + 1, + vec![make_candidate_backed_event(candidate_receipt.clone())], + ) + .await; + + let (pending_confirmation, confirmation_rx) = oneshot::channel(); + // Backing vote import should not have accounted to spam slots, so this should succeed + // as well: + virtual_overseer + .send(FromOrchestra::Communication { + msg: DisputeCoordinatorMessage::ImportStatements { + candidate_receipt: candidate_receipt.clone(), + session, + statements: vec![ + (valid_vote1, ValidatorIndex(3)), + (valid_vote2, ValidatorIndex(4)), + ], + pending_confirmation: Some(pending_confirmation), + }, + }) + .await; + + // Import should be valid, as spam slots were not filled + assert_matches!(confirmation_rx.await, Ok(ImportStatementsResult::ValidImport)); + + virtual_overseer.send(FromOrchestra::Signal(OverseerSignal::Conclude)).await; + + // No more messages expected: + assert!(virtual_overseer.try_recv().await.is_none()); + + test_state + }) + }); +} + +#[test] +fn conflicting_votes_lead_to_dispute_participation() { + test_harness(|mut test_state, mut virtual_overseer| { + Box::pin(async move { + let session = 1; + + test_state.handle_resume_sync(&mut virtual_overseer, session).await; + + let candidate_receipt = make_valid_candidate_receipt(); + let candidate_hash = candidate_receipt.hash(); + + test_state + .activate_leaf_at_session( + &mut virtual_overseer, + session, + 1, + vec![make_candidate_backed_event(candidate_receipt.clone())], + ) + .await; + + let (valid_vote, invalid_vote) = generate_opposing_votes_pair( + &test_state, + ValidatorIndex(3), + ValidatorIndex(1), + candidate_hash, + session, + VoteType::Explicit, + ) + .await; + + let invalid_vote_2 = test_state + .issue_explicit_statement_with_index( + ValidatorIndex(2), + candidate_hash, + session, + false, + ) + .await; + + virtual_overseer + .send(FromOrchestra::Communication { + msg: DisputeCoordinatorMessage::ImportStatements { + candidate_receipt: candidate_receipt.clone(), + session, + statements: vec![ + (valid_vote, ValidatorIndex(3)), + (invalid_vote, ValidatorIndex(1)), + ], + pending_confirmation: None, + }, + }) + .await; + handle_approval_vote_request(&mut virtual_overseer, &candidate_hash, HashMap::new()) + .await; + + participation_with_distribution( + &mut virtual_overseer, + &candidate_hash, + candidate_receipt.commitments_hash, + ) + .await; + + { + let (tx, rx) = oneshot::channel(); + virtual_overseer + .send(FromOrchestra::Communication { + msg: DisputeCoordinatorMessage::ActiveDisputes(tx), + }) + .await; + + assert_eq!( + 
rx.await.unwrap(), + vec![(session, candidate_hash, DisputeStatus::Active)] + ); + + let (tx, rx) = oneshot::channel(); + virtual_overseer + .send(FromOrchestra::Communication { + msg: DisputeCoordinatorMessage::QueryCandidateVotes( + vec![(session, candidate_hash)], + tx, + ), + }) + .await; + + let (_, _, votes) = rx.await.unwrap().get(0).unwrap().clone(); + assert_eq!(votes.valid.raw().len(), 2); + assert_eq!(votes.invalid.len(), 1); + } + + virtual_overseer + .send(FromOrchestra::Communication { + msg: DisputeCoordinatorMessage::ImportStatements { + candidate_receipt: candidate_receipt.clone(), + session, + statements: vec![(invalid_vote_2, ValidatorIndex(2))], + pending_confirmation: None, + }, + }) + .await; + + { + let (tx, rx) = oneshot::channel(); + virtual_overseer + .send(FromOrchestra::Communication { + msg: DisputeCoordinatorMessage::QueryCandidateVotes( + vec![(session, candidate_hash)], + tx, + ), + }) + .await; + + let (_, _, votes) = rx.await.unwrap().get(0).unwrap().clone(); + assert_eq!(votes.valid.raw().len(), 2); + assert_eq!(votes.invalid.len(), 2); + } + + virtual_overseer.send(FromOrchestra::Signal(OverseerSignal::Conclude)).await; + + // This confirms that the second vote doesn't lead to participation again. + assert!(virtual_overseer.try_recv().await.is_none()); + + test_state + }) + }); +} + +#[test] +fn positive_votes_dont_trigger_participation() { + test_harness(|mut test_state, mut virtual_overseer| { + Box::pin(async move { + let session = 1; + + test_state.handle_resume_sync(&mut virtual_overseer, session).await; + + let candidate_receipt = make_valid_candidate_receipt(); + let candidate_hash = candidate_receipt.hash(); + + test_state + .activate_leaf_at_session( + &mut virtual_overseer, + session, + 1, + vec![make_candidate_backed_event(candidate_receipt.clone())], + ) + .await; + + let valid_vote = test_state + .issue_explicit_statement_with_index( + ValidatorIndex(2), + candidate_hash, + session, + true, + ) + .await; + + let valid_vote_2 = test_state + .issue_explicit_statement_with_index( + ValidatorIndex(1), + candidate_hash, + session, + true, + ) + .await; + + virtual_overseer + .send(FromOrchestra::Communication { + msg: DisputeCoordinatorMessage::ImportStatements { + candidate_receipt: candidate_receipt.clone(), + session, + statements: vec![(valid_vote, ValidatorIndex(2))], + pending_confirmation: None, + }, + }) + .await; + + { + let (tx, rx) = oneshot::channel(); + virtual_overseer + .send(FromOrchestra::Communication { + msg: DisputeCoordinatorMessage::ActiveDisputes(tx), + }) + .await; + + assert!(rx.await.unwrap().is_empty()); + + let (tx, rx) = oneshot::channel(); + virtual_overseer + .send(FromOrchestra::Communication { + msg: DisputeCoordinatorMessage::QueryCandidateVotes( + vec![(session, candidate_hash)], + tx, + ), + }) + .await; + + let (_, _, votes) = rx.await.unwrap().get(0).unwrap().clone(); + assert_eq!(votes.valid.raw().len(), 1); + assert!(votes.invalid.is_empty()); + } + + virtual_overseer + .send(FromOrchestra::Communication { + msg: DisputeCoordinatorMessage::ImportStatements { + candidate_receipt: candidate_receipt.clone(), + session, + statements: vec![(valid_vote_2, ValidatorIndex(1))], + pending_confirmation: None, + }, + }) + .await; + + { + let (tx, rx) = oneshot::channel(); + virtual_overseer + .send(FromOrchestra::Communication { + msg: DisputeCoordinatorMessage::ActiveDisputes(tx), + }) + .await; + + assert!(rx.await.unwrap().is_empty()); + + let (tx, rx) = oneshot::channel(); + virtual_overseer + 
.send(FromOrchestra::Communication { + msg: DisputeCoordinatorMessage::QueryCandidateVotes( + vec![(session, candidate_hash)], + tx, + ), + }) + .await; + + let (_, _, votes) = rx.await.unwrap().get(0).unwrap().clone(); + assert_eq!(votes.valid.raw().len(), 2); + assert!(votes.invalid.is_empty()); + } + + virtual_overseer.send(FromOrchestra::Signal(OverseerSignal::Conclude)).await; + + // This confirms that no participation request is made. + assert!(virtual_overseer.try_recv().await.is_none()); + + test_state + }) + }); +} + +#[test] +fn wrong_validator_index_is_ignored() { + test_harness(|mut test_state, mut virtual_overseer| { + Box::pin(async move { + let session = 1; + + test_state.handle_resume_sync(&mut virtual_overseer, session).await; + + let candidate_receipt = make_valid_candidate_receipt(); + let candidate_hash = candidate_receipt.hash(); + + test_state + .activate_leaf_at_session(&mut virtual_overseer, session, 1, Vec::new()) + .await; + + let (valid_vote, invalid_vote) = generate_opposing_votes_pair( + &test_state, + ValidatorIndex(2), + ValidatorIndex(1), + candidate_hash, + session, + VoteType::Explicit, + ) + .await; + + virtual_overseer + .send(FromOrchestra::Communication { + msg: DisputeCoordinatorMessage::ImportStatements { + candidate_receipt: candidate_receipt.clone(), + session, + statements: vec![ + (valid_vote, ValidatorIndex(1)), + (invalid_vote, ValidatorIndex(2)), + ], + pending_confirmation: None, + }, + }) + .await; + + { + let (tx, rx) = oneshot::channel(); + virtual_overseer + .send(FromOrchestra::Communication { + msg: DisputeCoordinatorMessage::ActiveDisputes(tx), + }) + .await; + + assert!(rx.await.unwrap().is_empty()); + + let (tx, rx) = oneshot::channel(); + virtual_overseer + .send(FromOrchestra::Communication { + msg: DisputeCoordinatorMessage::QueryCandidateVotes( + vec![(session, candidate_hash)], + tx, + ), + }) + .await; + + assert_matches!(rx.await.unwrap().get(0), None); + } + + virtual_overseer.send(FromOrchestra::Signal(OverseerSignal::Conclude)).await; + + // This confirms that no participation request is made. 
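+ // Sketch of why nothing was recorded (hedged; the real check sits in the
+ // statement import path): each signature is verified against the session key
+ // of the *claimed* `ValidatorIndex`, roughly:
+ //
+ //     let key = &session_info.validators[claimed_index]; // illustrative lookup
+ //     if statement.check_signature(key).is_err() { /* discard the vote */ }
+ //
+ // The votes above were signed by validators 2 and 1 but submitted under each
+ // other's indices, so both checks fail and no dispute is created.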
+ assert!(virtual_overseer.try_recv().await.is_none()); + + test_state + }) + }); +} + +#[test] +fn finality_votes_ignore_disputed_candidates() { + test_harness(|mut test_state, mut virtual_overseer| { + Box::pin(async move { + let session = 1; + + test_state.handle_resume_sync(&mut virtual_overseer, session).await; + + let candidate_receipt = make_valid_candidate_receipt(); + let candidate_hash = candidate_receipt.hash(); + + test_state + .activate_leaf_at_session( + &mut virtual_overseer, + session, + 1, + vec![make_candidate_backed_event(candidate_receipt.clone())], + ) + .await; + + let (valid_vote, invalid_vote) = generate_opposing_votes_pair( + &test_state, + ValidatorIndex(2), + ValidatorIndex(1), + candidate_hash, + session, + VoteType::Explicit, + ) + .await; + + virtual_overseer + .send(FromOrchestra::Communication { + msg: DisputeCoordinatorMessage::ImportStatements { + candidate_receipt: candidate_receipt.clone(), + session, + statements: vec![ + (valid_vote, ValidatorIndex(2)), + (invalid_vote, ValidatorIndex(1)), + ], + pending_confirmation: None, + }, + }) + .await; + handle_approval_vote_request(&mut virtual_overseer, &candidate_hash, HashMap::new()) + .await; + + participation_with_distribution( + &mut virtual_overseer, + &candidate_hash, + candidate_receipt.commitments_hash, + ) + .await; + + { + let (tx, rx) = oneshot::channel(); + + let base_block = Hash::repeat_byte(0x0f); + let block_hash_a = Hash::repeat_byte(0x0a); + let block_hash_b = Hash::repeat_byte(0x0b); + + virtual_overseer + .send(FromOrchestra::Communication { + msg: DisputeCoordinatorMessage::DetermineUndisputedChain { + base: (10, base_block), + block_descriptions: vec![BlockDescription { + block_hash: block_hash_a, + session, + candidates: vec![candidate_hash], + }], + tx, + }, + }) + .await; + + assert_eq!(rx.await.unwrap(), (10, base_block)); + + let (tx, rx) = oneshot::channel(); + virtual_overseer + .send(FromOrchestra::Communication { + msg: DisputeCoordinatorMessage::DetermineUndisputedChain { + base: (10, base_block), + block_descriptions: vec![ + BlockDescription { + block_hash: block_hash_a, + session, + candidates: vec![], + }, + BlockDescription { + block_hash: block_hash_b, + session, + candidates: vec![candidate_hash], + }, + ], + tx, + }, + }) + .await; + + assert_eq!(rx.await.unwrap(), (11, block_hash_a)); + } + + virtual_overseer.send(FromOrchestra::Signal(OverseerSignal::Conclude)).await; + assert!(virtual_overseer.try_recv().await.is_none()); + + test_state + }) + }); +} + +#[test] +fn supermajority_valid_dispute_may_be_finalized() { + test_harness(|mut test_state, mut virtual_overseer| { + Box::pin(async move { + let session = 1; + + test_state.handle_resume_sync(&mut virtual_overseer, session).await; + + let candidate_receipt = make_valid_candidate_receipt(); + let candidate_hash = candidate_receipt.hash(); + + test_state + .activate_leaf_at_session( + &mut virtual_overseer, + session, + 1, + vec![make_candidate_backed_event(candidate_receipt.clone())], + ) + .await; + + let supermajority_threshold = + polkadot_primitives::supermajority_threshold(test_state.validators.len()); + + let (valid_vote, invalid_vote) = generate_opposing_votes_pair( + &test_state, + ValidatorIndex(2), + ValidatorIndex(1), + candidate_hash, + session, + VoteType::Explicit, + ) + .await; + + virtual_overseer + .send(FromOrchestra::Communication { + msg: DisputeCoordinatorMessage::ImportStatements { + candidate_receipt: candidate_receipt.clone(), + session, + statements: vec![ + (valid_vote, ValidatorIndex(2)), 
+ (invalid_vote, ValidatorIndex(1)), + ], + pending_confirmation: None, + }, + }) + .await; + handle_approval_vote_request(&mut virtual_overseer, &candidate_hash, HashMap::new()) + .await; + + participation_with_distribution( + &mut virtual_overseer, + &candidate_hash, + candidate_receipt.commitments_hash, + ) + .await; + + let mut statements = Vec::new(); + for i in (0_u32..supermajority_threshold as u32 - 1).map(|i| i + 3) { + let vote = test_state + .issue_explicit_statement_with_index( + ValidatorIndex(i), + candidate_hash, + session, + true, + ) + .await; + + statements.push((vote, ValidatorIndex(i as _))); + } + + virtual_overseer + .send(FromOrchestra::Communication { + msg: DisputeCoordinatorMessage::ImportStatements { + candidate_receipt: candidate_receipt.clone(), + session, + statements, + pending_confirmation: None, + }, + }) + .await; + handle_approval_vote_request(&mut virtual_overseer, &candidate_hash, HashMap::new()) + .await; + + { + let (tx, rx) = oneshot::channel(); + + let base_hash = Hash::repeat_byte(0x0f); + let block_hash_a = Hash::repeat_byte(0x0a); + let block_hash_b = Hash::repeat_byte(0x0b); + + virtual_overseer + .send(FromOrchestra::Communication { + msg: DisputeCoordinatorMessage::DetermineUndisputedChain { + base: (10, base_hash), + block_descriptions: vec![BlockDescription { + block_hash: block_hash_a, + session, + candidates: vec![candidate_hash], + }], + tx, + }, + }) + .await; + + assert_eq!(rx.await.unwrap(), (11, block_hash_a)); + + let (tx, rx) = oneshot::channel(); + virtual_overseer + .send(FromOrchestra::Communication { + msg: DisputeCoordinatorMessage::DetermineUndisputedChain { + base: (10, base_hash), + block_descriptions: vec![ + BlockDescription { + block_hash: block_hash_a, + session, + candidates: vec![], + }, + BlockDescription { + block_hash: block_hash_b, + session, + candidates: vec![candidate_hash], + }, + ], + tx, + }, + }) + .await; + + assert_eq!(rx.await.unwrap(), (12, block_hash_b)); + } + + virtual_overseer.send(FromOrchestra::Signal(OverseerSignal::Conclude)).await; + assert!(virtual_overseer.try_recv().await.is_none()); + + test_state + }) + }); +} + +#[test] +fn concluded_supermajority_for_non_active_after_time() { + test_harness(|mut test_state, mut virtual_overseer| { + Box::pin(async move { + let session = 1; + + test_state.handle_resume_sync(&mut virtual_overseer, session).await; + + let candidate_receipt = make_valid_candidate_receipt(); + let candidate_hash = candidate_receipt.hash(); + + test_state + .activate_leaf_at_session( + &mut virtual_overseer, + session, + 1, + vec![make_candidate_backed_event(candidate_receipt.clone())], + ) + .await; + + let supermajority_threshold = + polkadot_primitives::supermajority_threshold(test_state.validators.len()); + + let (valid_vote, invalid_vote) = generate_opposing_votes_pair( + &test_state, + ValidatorIndex(2), + ValidatorIndex(1), + candidate_hash, + session, + VoteType::Explicit, + ) + .await; + + virtual_overseer + .send(FromOrchestra::Communication { + msg: DisputeCoordinatorMessage::ImportStatements { + candidate_receipt: candidate_receipt.clone(), + session, + statements: vec![ + (valid_vote, ValidatorIndex(2)), + (invalid_vote, ValidatorIndex(1)), + ], + pending_confirmation: None, + }, + }) + .await; + handle_approval_vote_request(&mut virtual_overseer, &candidate_hash, HashMap::new()) + .await; + + participation_with_distribution( + &mut virtual_overseer, + &candidate_hash, + candidate_receipt.commitments_hash, + ) + .await; + + let mut statements = Vec::new(); + // -2: 
one for the already imported vote and one for the local vote (which is valid).
+ for i in (0_u32..supermajority_threshold as u32 - 2).map(|i| i + 3) {
+ let vote = test_state
+ .issue_explicit_statement_with_index(
+ ValidatorIndex(i),
+ candidate_hash,
+ session,
+ true,
+ )
+ .await;
+
+ statements.push((vote, ValidatorIndex(i as _)));
+ }
+
+ virtual_overseer
+ .send(FromOrchestra::Communication {
+ msg: DisputeCoordinatorMessage::ImportStatements {
+ candidate_receipt: candidate_receipt.clone(),
+ session,
+ statements,
+ pending_confirmation: None,
+ },
+ })
+ .await;
+ handle_approval_vote_request(&mut virtual_overseer, &candidate_hash, HashMap::new())
+ .await;
+
+ test_state.clock.set(ACTIVE_DURATION_SECS + 1);
+
+ {
+ let (tx, rx) = oneshot::channel();
+
+ virtual_overseer
+ .send(FromOrchestra::Communication {
+ msg: DisputeCoordinatorMessage::ActiveDisputes(tx),
+ })
+ .await;
+
+ assert!(rx.await.unwrap().is_empty());
+
+ let (tx, rx) = oneshot::channel();
+
+ virtual_overseer
+ .send(FromOrchestra::Communication {
+ msg: DisputeCoordinatorMessage::RecentDisputes(tx),
+ })
+ .await;
+
+ assert_eq!(rx.await.unwrap().len(), 1);
+ }
+
+ virtual_overseer.send(FromOrchestra::Signal(OverseerSignal::Conclude)).await;
+ assert!(virtual_overseer.try_recv().await.is_none());
+
+ test_state
+ })
+ });
+}
+
+#[test]
+fn concluded_supermajority_against_non_active_after_time() {
+ test_harness(|mut test_state, mut virtual_overseer| {
+ Box::pin(async move {
+ let session = 1;
+
+ test_state.handle_resume_sync(&mut virtual_overseer, session).await;
+
+ let candidate_receipt = make_invalid_candidate_receipt();
+
+ let candidate_hash = candidate_receipt.hash();
+
+ test_state
+ .activate_leaf_at_session(
+ &mut virtual_overseer,
+ session,
+ 1,
+ vec![make_candidate_backed_event(candidate_receipt.clone())],
+ )
+ .await;
+
+ let supermajority_threshold =
+ polkadot_primitives::supermajority_threshold(test_state.validators.len());
+
+ let (valid_vote, invalid_vote) = generate_opposing_votes_pair(
+ &test_state,
+ ValidatorIndex(2),
+ ValidatorIndex(1),
+ candidate_hash,
+ session,
+ VoteType::Explicit,
+ )
+ .await;
+
+ let (pending_confirmation, confirmation_rx) = oneshot::channel();
+ virtual_overseer
+ .send(FromOrchestra::Communication {
+ msg: DisputeCoordinatorMessage::ImportStatements {
+ candidate_receipt: candidate_receipt.clone(),
+ session,
+ statements: vec![
+ (valid_vote, ValidatorIndex(2)),
+ (invalid_vote, ValidatorIndex(1)),
+ ],
+ pending_confirmation: Some(pending_confirmation),
+ },
+ })
+ .await;
+ handle_approval_vote_request(&mut virtual_overseer, &candidate_hash, HashMap::new())
+ .await;
+ assert_matches!(confirmation_rx.await.unwrap(),
+ ImportStatementsResult::ValidImport => {}
+ );
+
+ // Use a different expected commitments hash to ensure the candidate validation returns invalid.
+ participation_with_distribution(
+ &mut virtual_overseer,
+ &candidate_hash,
+ CandidateCommitments::default().hash(),
+ )
+ .await;
+
+ let mut statements = Vec::new();
+ // Minus 2, because of the local vote and one previously imported invalid vote.
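+ // Concretely for this fixture (hedged; `supermajority_threshold` is the helper
+ // from `polkadot_primitives`):
+ //
+ //     let n_validators = 7;
+ //     let threshold = n_validators - (n_validators - 1) / 3; // == 5
+ //
+ // So 5 - 2 = 3 further `invalid` votes (from validators 3, 4 and 5 below) let
+ // the dispute conclude against the candidate.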
+ for i in (0_u32..supermajority_threshold as u32 - 2).map(|i| i + 3) {
+ let vote = test_state
+ .issue_explicit_statement_with_index(
+ ValidatorIndex(i),
+ candidate_hash,
+ session,
+ false,
+ )
+ .await;
+
+ statements.push((vote, ValidatorIndex(i as _)));
+ }
+
+ virtual_overseer
+ .send(FromOrchestra::Communication {
+ msg: DisputeCoordinatorMessage::ImportStatements {
+ candidate_receipt: candidate_receipt.clone(),
+ session,
+ statements,
+ pending_confirmation: None,
+ },
+ })
+ .await;
+ handle_approval_vote_request(&mut virtual_overseer, &candidate_hash, HashMap::new())
+ .await;
+
+ test_state.clock.set(ACTIVE_DURATION_SECS + 1);
+
+ {
+ let (tx, rx) = oneshot::channel();
+
+ virtual_overseer
+ .send(FromOrchestra::Communication {
+ msg: DisputeCoordinatorMessage::ActiveDisputes(tx),
+ })
+ .await;
+
+ assert!(rx.await.unwrap().is_empty());
+ let (tx, rx) = oneshot::channel();
+
+ virtual_overseer
+ .send(FromOrchestra::Communication {
+ msg: DisputeCoordinatorMessage::RecentDisputes(tx),
+ })
+ .await;
+
+ assert_eq!(rx.await.unwrap().len(), 1);
+ }
+
+ virtual_overseer.send(FromOrchestra::Signal(OverseerSignal::Conclude)).await;
+ assert_matches!(
+ virtual_overseer.try_recv().await,
+ None => {}
+ );
+
+ test_state
+ })
+ });
+}
+
+#[test]
+fn resume_dispute_without_local_statement() {
+ sp_tracing::init_for_tests();
+ let session = 1;
+
+ test_harness(|mut test_state, mut virtual_overseer| {
+ Box::pin(async move {
+ test_state.handle_resume_sync(&mut virtual_overseer, session).await;
+
+ let candidate_receipt = make_valid_candidate_receipt();
+ let candidate_hash = candidate_receipt.hash();
+
+ test_state
+ .activate_leaf_at_session(&mut virtual_overseer, session, 1, Vec::new())
+ .await;
+
+ let (valid_vote, invalid_vote) = generate_opposing_votes_pair(
+ &test_state,
+ ValidatorIndex(1),
+ ValidatorIndex(2),
+ candidate_hash,
+ session,
+ VoteType::Explicit,
+ )
+ .await;
+
+ let (pending_confirmation, confirmation_rx) = oneshot::channel();
+ virtual_overseer
+ .send(FromOrchestra::Communication {
+ msg: DisputeCoordinatorMessage::ImportStatements {
+ candidate_receipt: candidate_receipt.clone(),
+ session,
+ statements: vec![
+ (valid_vote, ValidatorIndex(1)),
+ (invalid_vote, ValidatorIndex(2)),
+ ],
+ pending_confirmation: Some(pending_confirmation),
+ },
+ })
+ .await;
+ handle_approval_vote_request(&mut virtual_overseer, &candidate_hash, HashMap::new())
+ .await;
+
+ // Participation won't happen here because the dispute is neither backed nor confirmed,
+ // nor is the candidate included. In other words: we'll refrain from participation.
+ assert_eq!(confirmation_rx.await, Ok(ImportStatementsResult::ValidImport));
+
+ {
+ let (tx, rx) = oneshot::channel();
+
+ virtual_overseer
+ .send(FromOrchestra::Communication {
+ msg: DisputeCoordinatorMessage::ActiveDisputes(tx),
+ })
+ .await;
+
+ assert_eq!(rx.await.unwrap().len(), 1);
+ }
+
+ virtual_overseer.send(FromOrchestra::Signal(OverseerSignal::Conclude)).await;
+ assert!(virtual_overseer.try_recv().await.is_none());
+
+ test_state
+ })
+ })
+ // Alice should send a DisputeParticipationMessage::Participate on restart since she has no
+ // local statement for the active dispute.
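+ // (On restart the coordinator replays known disputes from the DB; for an active
+ // dispute without a local statement it is expected to queue participation once
+ // the candidate turns out to be backed or included, which the resumed session
+ // below exercises.)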
+ .resume(|mut test_state, mut virtual_overseer| { + Box::pin(async move { + let candidate_receipt = make_valid_candidate_receipt(); + // Candidate is now backed: + let dispute_messages = test_state + .handle_resume_sync_with_events( + &mut virtual_overseer, + session, + vec![make_candidate_backed_event(candidate_receipt.clone())], + ) + .await; + + assert_eq!(dispute_messages.len(), 0, "We don't expect any messages sent here."); + + let candidate_receipt = make_valid_candidate_receipt(); + let candidate_hash = candidate_receipt.hash(); + + participation_with_distribution( + &mut virtual_overseer, + &candidate_hash, + candidate_receipt.commitments_hash, + ) + .await; + + let mut statements = Vec::new(); + // Getting votes for supermajority. Should already have two valid votes. + for i in vec![3, 4, 5, 6, 7] { + let vote = test_state + .issue_explicit_statement_with_index( + ValidatorIndex(i), + candidate_hash, + session, + true, + ) + .await; + + statements.push((vote, ValidatorIndex(i as _))); + } + + virtual_overseer + .send(FromOrchestra::Communication { + msg: DisputeCoordinatorMessage::ImportStatements { + candidate_receipt: candidate_receipt.clone(), + session, + statements, + pending_confirmation: None, + }, + }) + .await; + handle_approval_vote_request(&mut virtual_overseer, &candidate_hash, HashMap::new()) + .await; + + // Advance the clock far enough so that the concluded dispute will be omitted from an + // ActiveDisputes query. + test_state.clock.set(test_state.clock.now() + ACTIVE_DURATION_SECS + 1); + + { + let (tx, rx) = oneshot::channel(); + + virtual_overseer + .send(FromOrchestra::Communication { + msg: DisputeCoordinatorMessage::ActiveDisputes(tx), + }) + .await; + + assert!(rx.await.unwrap().is_empty()); + } + + virtual_overseer.send(FromOrchestra::Signal(OverseerSignal::Conclude)).await; + assert!(virtual_overseer.try_recv().await.is_none()); + + test_state + }) + }); +} + +#[test] +fn resume_dispute_with_local_statement() { + sp_tracing::init_for_tests(); + let session = 1; + + test_harness(|mut test_state, mut virtual_overseer| { + Box::pin(async move { + test_state.handle_resume_sync(&mut virtual_overseer, session).await; + + let candidate_receipt = make_valid_candidate_receipt(); + let candidate_hash = candidate_receipt.hash(); + + test_state + .activate_leaf_at_session( + &mut virtual_overseer, + session, + 1, + vec![make_candidate_backed_event(candidate_receipt.clone())], + ) + .await; + + let local_valid_vote = test_state + .issue_explicit_statement_with_index( + ValidatorIndex(0), + candidate_hash, + session, + true, + ) + .await; + + let (valid_vote, invalid_vote) = generate_opposing_votes_pair( + &test_state, + ValidatorIndex(1), + ValidatorIndex(2), + candidate_hash, + session, + VoteType::Explicit, + ) + .await; + + let (pending_confirmation, confirmation_rx) = oneshot::channel(); + virtual_overseer + .send(FromOrchestra::Communication { + msg: DisputeCoordinatorMessage::ImportStatements { + candidate_receipt: candidate_receipt.clone(), + session, + statements: vec![ + (local_valid_vote, ValidatorIndex(0)), + (valid_vote, ValidatorIndex(1)), + (invalid_vote, ValidatorIndex(2)), + ], + pending_confirmation: Some(pending_confirmation), + }, + }) + .await; + + handle_approval_vote_request(&mut virtual_overseer, &candidate_hash, HashMap::new()) + .await; + + assert_eq!(confirmation_rx.await, Ok(ImportStatementsResult::ValidImport)); + + { + let (tx, rx) = oneshot::channel(); + + virtual_overseer + .send(FromOrchestra::Communication { + msg: 
DisputeCoordinatorMessage::ActiveDisputes(tx),
+ })
+ .await;
+
+ assert_eq!(rx.await.unwrap().len(), 1);
+ }
+
+ virtual_overseer.send(FromOrchestra::Signal(OverseerSignal::Conclude)).await;
+ assert!(virtual_overseer.try_recv().await.is_none());
+
+ test_state
+ })
+ })
+ // Alice should not send a DisputeParticipationMessage::Participate on restart since she has a
+ // local statement for the active dispute; instead she should try to (re-)send her vote.
+ .resume(|mut test_state, mut virtual_overseer| {
+ let candidate_receipt = make_valid_candidate_receipt();
+ Box::pin(async move {
+ let messages = test_state
+ .handle_resume_sync_with_events(
+ &mut virtual_overseer,
+ session,
+ vec![make_candidate_backed_event(candidate_receipt.clone())],
+ )
+ .await;
+
+ assert_eq!(messages.len(), 1, "A message should have gone out.");
+
+ // Assert that the subsystem is not sending participation messages, because we issued a local statement.
+ assert!(virtual_overseer.recv().timeout(TEST_TIMEOUT).await.is_none());
+
+ virtual_overseer.send(FromOrchestra::Signal(OverseerSignal::Conclude)).await;
+ assert!(virtual_overseer.try_recv().await.is_none());
+
+ test_state
+ })
+ });
+}
+
+#[test]
+fn resume_dispute_without_local_statement_or_local_key() {
+ let session = 1;
+ let mut test_state = TestState::default();
+ test_state.subsystem_keystore =
+ make_keystore(vec![Sr25519Keyring::Two.to_seed()].into_iter()).into();
+ test_state
+ .resume(|mut test_state, mut virtual_overseer| {
+ Box::pin(async move {
+ test_state.handle_resume_sync(&mut virtual_overseer, session).await;
+
+ let candidate_receipt = make_valid_candidate_receipt();
+ let candidate_hash = candidate_receipt.hash();
+
+ test_state
+ .activate_leaf_at_session(
+ &mut virtual_overseer,
+ session,
+ 1,
+ vec![make_candidate_included_event(candidate_receipt.clone())],
+ )
+ .await;
+
+ let (valid_vote, invalid_vote) = generate_opposing_votes_pair(
+ &test_state,
+ ValidatorIndex(1),
+ ValidatorIndex(2),
+ candidate_hash,
+ session,
+ VoteType::Explicit,
+ )
+ .await;
+
+ let (pending_confirmation, confirmation_rx) = oneshot::channel();
+ virtual_overseer
+ .send(FromOrchestra::Communication {
+ msg: DisputeCoordinatorMessage::ImportStatements {
+ candidate_receipt: candidate_receipt.clone(),
+ session,
+ statements: vec![
+ (valid_vote, ValidatorIndex(1)),
+ (invalid_vote, ValidatorIndex(2)),
+ ],
+ pending_confirmation: Some(pending_confirmation),
+ },
+ })
+ .await;
+ handle_approval_vote_request(
+ &mut virtual_overseer,
+ &candidate_hash,
+ HashMap::new(),
+ )
+ .await;
+
+ assert_eq!(confirmation_rx.await, Ok(ImportStatementsResult::ValidImport));
+
+ {
+ let (tx, rx) = oneshot::channel();
+
+ virtual_overseer
+ .send(FromOrchestra::Communication {
+ msg: DisputeCoordinatorMessage::ActiveDisputes(tx),
+ })
+ .await;
+
+ assert_eq!(rx.await.unwrap().len(), 1);
+ }
+
+ virtual_overseer.send(FromOrchestra::Signal(OverseerSignal::Conclude)).await;
+ assert_matches!(
+ virtual_overseer.try_recv().await,
+ None => {}
+ );
+
+ test_state
+ })
+ })
+ // Two should not send a DisputeParticipationMessage::Participate on restart, since Two is
+ // not a validator in that dispute.
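+ // (With no matching key in `subsystem_keystore`, our node has nothing it could
+ // sign, so restoring state must not queue any participation either.)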
+ .resume(|mut test_state, mut virtual_overseer| {
+ Box::pin(async move {
+ test_state.handle_resume_sync(&mut virtual_overseer, session).await;
+
+ // Assert that the subsystem is not sending participation messages, since our node
+ // is not a validator in this dispute.
+ assert!(virtual_overseer.recv().timeout(TEST_TIMEOUT).await.is_none());
+
+ virtual_overseer.send(FromOrchestra::Signal(OverseerSignal::Conclude)).await;
+ assert!(virtual_overseer.try_recv().await.is_none());
+
+ test_state
+ })
+ });
+}
+
+#[test]
+fn issue_valid_local_statement_does_cause_distribution_but_not_duplicate_participation() {
+ issue_local_statement_does_cause_distribution_but_not_duplicate_participation(true);
+}
+
+#[test]
+fn issue_invalid_local_statement_does_cause_distribution_but_not_duplicate_participation() {
+ issue_local_statement_does_cause_distribution_but_not_duplicate_participation(false);
+}
+
+fn issue_local_statement_does_cause_distribution_but_not_duplicate_participation(validity: bool) {
+ test_harness(|mut test_state, mut virtual_overseer| {
+ Box::pin(async move {
+ let session = 1;
+
+ test_state.handle_resume_sync(&mut virtual_overseer, session).await;
+
+ let candidate_receipt = make_valid_candidate_receipt();
+ let candidate_hash = candidate_receipt.hash();
+
+ test_state
+ .activate_leaf_at_session(&mut virtual_overseer, session, 1, Vec::new())
+ .await;
+
+ let other_vote = test_state
+ .issue_explicit_statement_with_index(
+ ValidatorIndex(1),
+ candidate_hash,
+ session,
+ !validity,
+ )
+ .await;
+
+ let (pending_confirmation, confirmation_rx) = oneshot::channel();
+ virtual_overseer
+ .send(FromOrchestra::Communication {
+ msg: DisputeCoordinatorMessage::ImportStatements {
+ candidate_receipt: candidate_receipt.clone(),
+ session,
+ statements: vec![(other_vote, ValidatorIndex(1))],
+ pending_confirmation: Some(pending_confirmation),
+ },
+ })
+ .await;
+
+ assert_eq!(confirmation_rx.await, Ok(ImportStatementsResult::ValidImport));
+
+ // Initiate dispute locally:
+ virtual_overseer
+ .send(FromOrchestra::Communication {
+ msg: DisputeCoordinatorMessage::IssueLocalStatement(
+ session,
+ candidate_hash,
+ candidate_receipt.clone(),
+ validity,
+ ),
+ })
+ .await;
+
+ // Dispute distribution should get notified now:
+ assert_matches!(
+ overseer_recv(&mut virtual_overseer).await,
+ AllMessages::DisputeDistribution(
+ DisputeDistributionMessage::SendDispute(msg)
+ ) => {
+ assert_eq!(msg.session_index(), session);
+ assert_eq!(msg.candidate_receipt(), &candidate_receipt);
+ }
+ );
+
+ handle_approval_vote_request(&mut virtual_overseer, &candidate_hash, HashMap::new())
+ .await;
+
+ // Make sure we won't participate:
+ assert!(virtual_overseer.recv().timeout(TEST_TIMEOUT).await.is_none());
+
+ virtual_overseer.send(FromOrchestra::Signal(OverseerSignal::Conclude)).await;
+ assert!(virtual_overseer.try_recv().await.is_none());
+
+ test_state
+ })
+ });
+}
+
+#[test]
+fn own_approval_vote_gets_distributed_on_dispute() {
+ test_harness(|mut test_state, mut virtual_overseer| {
+ Box::pin(async move {
+ let session = 1;
+
+ test_state.handle_resume_sync(&mut virtual_overseer, session).await;
+
+ let candidate_receipt = make_valid_candidate_receipt();
+ let candidate_hash = candidate_receipt.hash();
+
+ test_state
+ .activate_leaf_at_session(&mut virtual_overseer, session, 1, Vec::new())
+ .await;
+
+ let statement = test_state.issue_approval_vote_with_index(
+ ValidatorIndex(0),
+ candidate_hash,
+ session,
+ );
+
+ // Import our approval vote:
+ virtual_overseer
+ .send(FromOrchestra::Communication {
+ msg:
DisputeCoordinatorMessage::ImportStatements { + candidate_receipt: candidate_receipt.clone(), + session, + statements: vec![(statement, ValidatorIndex(0))], + pending_confirmation: None, + }, + }) + .await; + + // Trigger dispute: + let (valid_vote, invalid_vote) = generate_opposing_votes_pair( + &test_state, + ValidatorIndex(2), + ValidatorIndex(1), + candidate_hash, + session, + VoteType::Explicit, + ) + .await; + + let (pending_confirmation, confirmation_rx) = oneshot::channel(); + virtual_overseer + .send(FromOrchestra::Communication { + msg: DisputeCoordinatorMessage::ImportStatements { + candidate_receipt: candidate_receipt.clone(), + session, + statements: vec![ + (invalid_vote, ValidatorIndex(1)), + (valid_vote, ValidatorIndex(2)), + ], + pending_confirmation: Some(pending_confirmation), + }, + }) + .await; + handle_approval_vote_request(&mut virtual_overseer, &candidate_hash, HashMap::new()) + .await; + + assert_eq!(confirmation_rx.await, Ok(ImportStatementsResult::ValidImport)); + + // Dispute distribution should get notified now (without participation, as we already + // have an approval vote): + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::DisputeDistribution( + DisputeDistributionMessage::SendDispute(msg) + ) => { + assert_eq!(msg.session_index(), session); + assert_eq!(msg.candidate_receipt(), &candidate_receipt); + } + ); + + // No participation should occur: + assert_matches!(virtual_overseer.recv().timeout(TEST_TIMEOUT).await, None); + + virtual_overseer.send(FromOrchestra::Signal(OverseerSignal::Conclude)).await; + assert!(virtual_overseer.try_recv().await.is_none()); + + test_state + }) + }); +} + +#[test] +fn negative_issue_local_statement_only_triggers_import() { + test_harness(|mut test_state, mut virtual_overseer| { + Box::pin(async move { + let session = 1; + + test_state.handle_resume_sync(&mut virtual_overseer, session).await; + + let candidate_receipt = make_invalid_candidate_receipt(); + let candidate_hash = candidate_receipt.hash(); + + test_state + .activate_leaf_at_session(&mut virtual_overseer, session, 1, Vec::new()) + .await; + + virtual_overseer + .send(FromOrchestra::Communication { + msg: DisputeCoordinatorMessage::IssueLocalStatement( + session, + candidate_hash, + candidate_receipt.clone(), + false, + ), + }) + .await; + + // Assert that subsystem is not participating. 
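+            // (a timed-out `recv`, i.e. `None`, means no participation request was sent)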
+ assert!(virtual_overseer.recv().timeout(TEST_TIMEOUT).await.is_none()); + + virtual_overseer.send(FromOrchestra::Signal(OverseerSignal::Conclude)).await; + assert!(virtual_overseer.try_recv().await.is_none()); + + let backend = DbBackend::new( + test_state.db.clone(), + test_state.config.column_config(), + Metrics::default(), + ); + + let votes = backend.load_candidate_votes(session, &candidate_hash).unwrap().unwrap(); + assert_eq!(votes.invalid.len(), 1); + assert_eq!(votes.valid.len(), 0); + + let disputes = backend.load_recent_disputes().unwrap(); + assert_eq!(disputes, None); + + test_state + }) + }); +} + +#[test] +fn redundant_votes_ignored() { + test_harness(|mut test_state, mut virtual_overseer| { + Box::pin(async move { + let session = 1; + + test_state.handle_resume_sync(&mut virtual_overseer, session).await; + + let candidate_receipt = make_valid_candidate_receipt(); + let candidate_hash = candidate_receipt.hash(); + + test_state + .activate_leaf_at_session(&mut virtual_overseer, session, 1, Vec::new()) + .await; + + let valid_vote = test_state + .issue_backing_statement_with_index(ValidatorIndex(1), candidate_hash, session) + .await; + + let valid_vote_2 = test_state + .issue_backing_statement_with_index(ValidatorIndex(1), candidate_hash, session) + .await; + + assert!(valid_vote.validator_signature() != valid_vote_2.validator_signature()); + + let (tx, rx) = oneshot::channel(); + virtual_overseer + .send(FromOrchestra::Communication { + msg: DisputeCoordinatorMessage::ImportStatements { + candidate_receipt: candidate_receipt.clone(), + session, + statements: vec![(valid_vote.clone(), ValidatorIndex(1))], + pending_confirmation: Some(tx), + }, + }) + .await; + + rx.await.unwrap(); + + let (tx, rx) = oneshot::channel(); + virtual_overseer + .send(FromOrchestra::Communication { + msg: DisputeCoordinatorMessage::ImportStatements { + candidate_receipt: candidate_receipt.clone(), + session, + statements: vec![(valid_vote_2, ValidatorIndex(1))], + pending_confirmation: Some(tx), + }, + }) + .await; + + rx.await.unwrap(); + + let backend = DbBackend::new( + test_state.db.clone(), + test_state.config.column_config(), + Metrics::default(), + ); + + let votes = backend.load_candidate_votes(session, &candidate_hash).unwrap().unwrap(); + assert_eq!(votes.invalid.len(), 0); + assert_eq!(votes.valid.len(), 1); + assert_eq!(&votes.valid[0].2, valid_vote.validator_signature()); + + virtual_overseer.send(FromOrchestra::Signal(OverseerSignal::Conclude)).await; + assert!(virtual_overseer.try_recv().await.is_none()); + + test_state + }) + }); +} + +#[test] +/// Make sure no disputes are recorded when there are no opposing votes, even if we reached supermajority. 
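+/// A dispute needs votes on both sides; backing votes alone must not open one,
+/// no matter how many of them there are.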
+fn no_onesided_disputes() { + test_harness(|mut test_state, mut virtual_overseer| { + Box::pin(async move { + let session = 1; + + test_state.handle_resume_sync(&mut virtual_overseer, session).await; + + let candidate_receipt = make_valid_candidate_receipt(); + let candidate_hash = candidate_receipt.hash(); + test_state + .activate_leaf_at_session(&mut virtual_overseer, session, 1, Vec::new()) + .await; + + let mut statements = Vec::new(); + for index in 1..10 { + statements.push(( + test_state + .issue_backing_statement_with_index( + ValidatorIndex(index), + candidate_hash, + session, + ) + .await, + ValidatorIndex(index), + )); + } + + let (pending_confirmation, confirmation_rx) = oneshot::channel(); + virtual_overseer + .send(FromOrchestra::Communication { + msg: DisputeCoordinatorMessage::ImportStatements { + candidate_receipt: candidate_receipt.clone(), + session, + statements, + pending_confirmation: Some(pending_confirmation), + }, + }) + .await; + assert_matches!(confirmation_rx.await, Ok(ImportStatementsResult::ValidImport)); + + // We should not have any active disputes now. + let (tx, rx) = oneshot::channel(); + virtual_overseer + .send(FromOrchestra::Communication { + msg: DisputeCoordinatorMessage::ActiveDisputes(tx), + }) + .await; + + assert!(rx.await.unwrap().is_empty()); + + virtual_overseer.send(FromOrchestra::Signal(OverseerSignal::Conclude)).await; + + // No more messages expected: + assert!(virtual_overseer.try_recv().await.is_none()); + + test_state + }) + }); +} + +#[test] +fn refrain_from_participation() { + test_harness(|mut test_state, mut virtual_overseer| { + Box::pin(async move { + let session = 1; + + test_state.handle_resume_sync(&mut virtual_overseer, session).await; + + let candidate_receipt = make_valid_candidate_receipt(); + let candidate_hash = candidate_receipt.hash(); + + // activate leaf - no backing/included event + test_state + .activate_leaf_at_session(&mut virtual_overseer, session, 1, Vec::new()) + .await; + + // generate two votes + let (valid_vote, invalid_vote) = generate_opposing_votes_pair( + &test_state, + ValidatorIndex(1), + ValidatorIndex(2), + candidate_hash, + session, + VoteType::Explicit, + ) + .await; + + virtual_overseer + .send(FromOrchestra::Communication { + msg: DisputeCoordinatorMessage::ImportStatements { + candidate_receipt: candidate_receipt.clone(), + session, + statements: vec![ + (valid_vote, ValidatorIndex(1)), + (invalid_vote, ValidatorIndex(2)), + ], + pending_confirmation: None, + }, + }) + .await; + + handle_approval_vote_request(&mut virtual_overseer, &candidate_hash, HashMap::new()) + .await; + + { + let (tx, rx) = oneshot::channel(); + virtual_overseer + .send(FromOrchestra::Communication { + msg: DisputeCoordinatorMessage::ActiveDisputes(tx), + }) + .await; + + assert_eq!(rx.await.unwrap().len(), 1); + + let (tx, rx) = oneshot::channel(); + virtual_overseer + .send(FromOrchestra::Communication { + msg: DisputeCoordinatorMessage::QueryCandidateVotes( + vec![(session, candidate_hash)], + tx, + ), + }) + .await; + + let (_, _, votes) = rx.await.unwrap().get(0).unwrap().clone(); + assert_eq!(votes.valid.raw().len(), 1); + assert_eq!(votes.invalid.len(), 1); + } + + // activate leaf - no backing event + test_state + .activate_leaf_at_session(&mut virtual_overseer, session, 1, Vec::new()) + .await; + + virtual_overseer.send(FromOrchestra::Signal(OverseerSignal::Conclude)).await; + + // confirm that no participation request is made. 
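+            // (`try_recv` returning `None` after `Conclude` shows nothing else was queued,
+            // so no participation request can have been made)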
+            assert!(virtual_overseer.try_recv().await.is_none());
+
+            test_state
+        })
+    });
+}
+
+/// We don't have a separate `participation_for_backed_candidates` test because most of the other tests (e.g.
+/// `dispute_gets_confirmed_via_participation`, `backing_statements_import_works_and_no_spam`) use
+/// a candidate backing event to trigger participation. If they pass, that case works.
+#[test]
+fn participation_for_included_candidates() {
+    test_harness(|mut test_state, mut virtual_overseer| {
+        Box::pin(async move {
+            let session = 1;
+
+            test_state.handle_resume_sync(&mut virtual_overseer, session).await;
+
+            let candidate_receipt = make_valid_candidate_receipt();
+            let candidate_hash = candidate_receipt.hash();
+
+            // activate leaf - with candidate included event
+            test_state
+                .activate_leaf_at_session(
+                    &mut virtual_overseer,
+                    session,
+                    1,
+                    vec![make_candidate_included_event(candidate_receipt.clone())],
+                )
+                .await;
+
+            // generate two votes
+            let (valid_vote, invalid_vote) = generate_opposing_votes_pair(
+                &test_state,
+                ValidatorIndex(1),
+                ValidatorIndex(2),
+                candidate_hash,
+                session,
+                VoteType::Explicit,
+            )
+            .await;
+
+            virtual_overseer
+                .send(FromOrchestra::Communication {
+                    msg: DisputeCoordinatorMessage::ImportStatements {
+                        candidate_receipt: candidate_receipt.clone(),
+                        session,
+                        statements: vec![
+                            (valid_vote, ValidatorIndex(1)),
+                            (invalid_vote, ValidatorIndex(2)),
+                        ],
+                        pending_confirmation: None,
+                    },
+                })
+                .await;
+
+            handle_approval_vote_request(&mut virtual_overseer, &candidate_hash, HashMap::new())
+                .await;
+
+            participation_with_distribution(
+                &mut virtual_overseer,
+                &candidate_hash,
+                candidate_receipt.commitments_hash,
+            )
+            .await;
+
+            {
+                let (tx, rx) = oneshot::channel();
+                virtual_overseer
+                    .send(FromOrchestra::Communication {
+                        msg: DisputeCoordinatorMessage::ActiveDisputes(tx),
+                    })
+                    .await;
+
+                assert_eq!(rx.await.unwrap().len(), 1);
+
+                // check if we have participated (cast a vote)
+                let (tx, rx) = oneshot::channel();
+                virtual_overseer
+                    .send(FromOrchestra::Communication {
+                        msg: DisputeCoordinatorMessage::QueryCandidateVotes(
+                            vec![(session, candidate_hash)],
+                            tx,
+                        ),
+                    })
+                    .await;
+
+                let (_, _, votes) = rx.await.unwrap().get(0).unwrap().clone();
+                assert_eq!(votes.valid.raw().len(), 2); // 2 => we have participated
+                assert_eq!(votes.invalid.len(), 1);
+            }
+
+            virtual_overseer.send(FromOrchestra::Signal(OverseerSignal::Conclude)).await;
+
+            test_state
+        })
+    });
+}
+
+/// Shows that importing backing votes when a backing event is being processed
+/// results in participation.
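+/// (Step 1 of the test body shows the counterpart: without a backing, confirmation
+/// or inclusion event, the imported dispute votes alone do not trigger participation.)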
+#[test]
+fn local_participation_in_dispute_for_backed_candidate() {
+    test_harness(|mut test_state, mut virtual_overseer| {
+        Box::pin(async move {
+            let session = 1;
+
+            test_state.handle_resume_sync(&mut virtual_overseer, session).await;
+
+            let candidate_receipt = make_valid_candidate_receipt();
+            let candidate_hash = candidate_receipt.hash();
+
+            // Step 1: Show that we don't participate when not backed, confirmed, or included
+
+            // activate leaf - without candidate backed event
+            test_state
+                .activate_leaf_at_session(&mut virtual_overseer, session, 1, vec![])
+                .await;
+
+            // generate two votes
+            let (valid_vote, invalid_vote) = generate_opposing_votes_pair(
+                &test_state,
+                ValidatorIndex(1),
+                ValidatorIndex(2),
+                candidate_hash,
+                session,
+                VoteType::Explicit,
+            )
+            .await;
+
+            virtual_overseer
+                .send(FromOrchestra::Communication {
+                    msg: DisputeCoordinatorMessage::ImportStatements {
+                        candidate_receipt: candidate_receipt.clone(),
+                        session,
+                        statements: vec![
+                            (valid_vote, ValidatorIndex(1)),
+                            (invalid_vote, ValidatorIndex(2)),
+                        ],
+                        pending_confirmation: None,
+                    },
+                })
+                .await;
+
+            handle_approval_vote_request(&mut virtual_overseer, &candidate_hash, HashMap::new())
+                .await;
+
+            assert_matches!(virtual_overseer.recv().timeout(TEST_TIMEOUT).await, None);
+
+            // Step 2: Show that once backing votes are processed we participate
+
+            // Activate leaf: With candidate backed event
+            test_state
+                .activate_leaf_at_session(
+                    &mut virtual_overseer,
+                    session,
+                    1,
+                    vec![make_candidate_backed_event(candidate_receipt.clone())],
+                )
+                .await;
+
+            let backing_valid = test_state
+                .issue_backing_statement_with_index(ValidatorIndex(3), candidate_hash, session)
+                .await;
+
+            virtual_overseer
+                .send(FromOrchestra::Communication {
+                    msg: DisputeCoordinatorMessage::ImportStatements {
+                        candidate_receipt: candidate_receipt.clone(),
+                        session,
+                        statements: vec![(backing_valid, ValidatorIndex(3))],
+                        pending_confirmation: None,
+                    },
+                })
+                .await;
+
+            participation_with_distribution(
+                &mut virtual_overseer,
+                &candidate_hash,
+                candidate_receipt.commitments_hash,
+            )
+            .await;
+
+            // Check for our 1 active dispute
+            let (tx, rx) = oneshot::channel();
+            virtual_overseer
+                .send(FromOrchestra::Communication {
+                    msg: DisputeCoordinatorMessage::ActiveDisputes(tx),
+                })
+                .await;
+
+            assert_eq!(rx.await.unwrap().len(), 1);
+
+            // check if we have participated (cast a vote)
+            let (tx, rx) = oneshot::channel();
+            virtual_overseer
+                .send(FromOrchestra::Communication {
+                    msg: DisputeCoordinatorMessage::QueryCandidateVotes(
+                        vec![(session, candidate_hash)],
+                        tx,
+                    ),
+                })
+                .await;
+
+            let (_, _, votes) = rx.await.unwrap().get(0).unwrap().clone();
+            assert_eq!(votes.valid.raw().len(), 3); // 3 => 1 initial vote, 1 backing vote, and our vote
+            assert_eq!(votes.invalid.len(), 1);
+
+            // Wrap up
+            virtual_overseer.send(FromOrchestra::Signal(OverseerSignal::Conclude)).await;
+
+            test_state
+        })
+    });
+}
+
+/// Shows that when a candidate_included event is scraped from the chain we
+/// reprioritize any participation requests pertaining to that candidate.
+/// This involves moving the request for this candidate from the best effort
+/// queue to the priority queue.
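+/// Requests in the priority queue are served before any best effort ones.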
+#[test]
+fn participation_requests_reprioritized_for_newly_included() {
+    test_harness(|mut test_state, mut virtual_overseer| {
+        Box::pin(async move {
+            let session = 1;
+            test_state.handle_resume_sync(&mut virtual_overseer, session).await;
+            let mut receipts: Vec<CandidateReceipt> = Vec::new();
+
+            // Generate all receipts
+            for repetition in 1..=3u8 {
+                // Building candidate receipts
+                let mut candidate_receipt = make_valid_candidate_receipt();
+                candidate_receipt.descriptor.pov_hash = Hash::from(
+                    [repetition; 32], // Altering this receipt so its hash will be changed
+                );
+                // Set consecutive parents (starting from zero). They will order the candidates for participation.
+                let parent_block_num: BlockNumber = repetition as BlockNumber - 1;
+                candidate_receipt.descriptor.relay_parent =
+                    test_state.block_num_to_header.get(&parent_block_num).unwrap().clone();
+                receipts.push(candidate_receipt.clone());
+            }
+
+            // Mark all candidates as backed, so their participation requests make it to best effort.
+            // These calls must all occur before including the candidates due to test overseer
+            // oddities.
+            let mut candidate_events = Vec::new();
+            for r in receipts.iter() {
+                candidate_events.push(make_candidate_backed_event(r.clone()))
+            }
+            test_state
+                .activate_leaf_at_session(&mut virtual_overseer, session, 1, candidate_events)
+                .await;
+
+            for (idx, candidate_receipt) in receipts.iter().enumerate() {
+                let candidate_hash = candidate_receipt.hash();
+
+                // Create votes for candidates
+                let (valid_vote, invalid_vote) = generate_opposing_votes_pair(
+                    &test_state,
+                    ValidatorIndex(1),
+                    ValidatorIndex(2),
+                    candidate_hash,
+                    session,
+                    VoteType::Explicit,
+                )
+                .await;
+
+                // Import votes for candidates
+                virtual_overseer
+                    .send(FromOrchestra::Communication {
+                        msg: DisputeCoordinatorMessage::ImportStatements {
+                            candidate_receipt: candidate_receipt.clone(),
+                            session,
+                            statements: vec![
+                                (valid_vote, ValidatorIndex(1)),
+                                (invalid_vote, ValidatorIndex(2)),
+                            ],
+                            pending_confirmation: None,
+                        },
+                    })
+                    .await;
+
+                // Handle corresponding messages to unblock import:
+                // we need to handle `ApprovalVotingMessage::GetApprovalSignaturesForCandidate` for import
+                handle_approval_vote_request(
+                    &mut virtual_overseer,
+                    &candidate_hash,
+                    HashMap::new(),
+                )
+                .await;
+
+                // We'll trigger participation for the first `MAX_PARALLEL_PARTICIPATIONS` candidates.
+                // The rest will be queued => we need to handle `ChainApiMessage::BlockNumber` for them.
+                if idx >= crate::participation::MAX_PARALLEL_PARTICIPATIONS {
+                    // We send the `idx` as parent block number, because it is used for ordering.
+                    // This way we get predictable ordering and participation.
+                    handle_get_block_number(&mut virtual_overseer, &test_state).await;
+                }
+            }
+
+            // Generate included event for one of the candidates here
+            test_state
+                .activate_leaf_at_session(
+                    &mut virtual_overseer,
+                    session,
+                    2,
+                    vec![make_candidate_included_event(
+                        receipts.last().expect("There is more than one candidate").clone(),
+                    )],
+                )
+                .await;
+
+            // NB: The checks below are a bit racy. In theory candidate 2 can be processed even
+            // before candidate 0, and this is okay. If any of the asserts in the
+            // `participation_with_distribution` calls below fail, rework
+            // `participation_with_distribution` to expect a set of commitment hashes instead of
+            // just one.
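+            // Expected processing order below: candidate 0 (its participation already
+            // started), then candidate 2 (reprioritized by the included event), then
+            // candidate 1 from the best effort queue.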
+ + // This is the candidate for which participation was started initially (`MAX_PARALLEL_PARTICIPATIONS` threshold was not yet hit) + participation_with_distribution( + &mut virtual_overseer, + &receipts.get(0).expect("There is more than one candidate").hash(), + receipts.first().expect("There is more than one candidate").commitments_hash, + ) + .await; + + // This one should have been prioritized + participation_with_distribution( + &mut virtual_overseer, + &receipts.get(2).expect("There is more than one candidate").hash(), + receipts.last().expect("There is more than one candidate").commitments_hash, + ) + .await; + + // And this is the last one + participation_with_distribution( + &mut virtual_overseer, + &receipts.get(1).expect("There is more than one candidate").hash(), + receipts.first().expect("There is more than one candidate").commitments_hash, + ) + .await; + + // Wrap up + virtual_overseer.send(FromOrchestra::Signal(OverseerSignal::Conclude)).await; + + test_state + }) + }); +} + +// When a dispute has concluded against a parachain block candidate we want to notify +// the chain selection subsystem. Then chain selection can revert the relay parents of +// the disputed candidate and mark all descendants as non-viable. This direct +// notification saves time compared to letting chain selection learn about a dispute +// conclusion from an on chain revert log. +#[test] +fn informs_chain_selection_when_dispute_concluded_against() { + test_harness(|mut test_state, mut virtual_overseer| { + Box::pin(async move { + let session = 1; + + test_state.handle_resume_sync(&mut virtual_overseer, session).await; + + let candidate_receipt = make_invalid_candidate_receipt(); + let parent_1_number = 1; + let parent_2_number = 2; + + let candidate_hash = candidate_receipt.hash(); + + // Including test candidate in 2 different parent blocks + let block_1_header = Header { + parent_hash: test_state.last_block, + number: parent_1_number, + digest: dummy_digest(), + state_root: dummy_hash(), + extrinsics_root: dummy_hash(), + }; + let parent_1_hash = block_1_header.hash(); + + test_state + .activate_leaf_at_session( + &mut virtual_overseer, + session, + parent_1_number, + vec![make_candidate_included_event(candidate_receipt.clone())], + ) + .await; + + let block_2_header = Header { + parent_hash: test_state.last_block, + number: parent_2_number, + digest: dummy_digest(), + state_root: dummy_hash(), + extrinsics_root: dummy_hash(), + }; + let parent_2_hash = block_2_header.hash(); + + test_state + .activate_leaf_at_session( + &mut virtual_overseer, + session, + parent_2_number, + vec![make_candidate_included_event(candidate_receipt.clone())], + ) + .await; + + let supermajority_threshold = + polkadot_primitives::supermajority_threshold(test_state.validators.len()); + + let (valid_vote, invalid_vote) = generate_opposing_votes_pair( + &test_state, + ValidatorIndex(2), + ValidatorIndex(1), + candidate_hash, + session, + VoteType::Explicit, + ) + .await; + + let (pending_confirmation, confirmation_rx) = oneshot::channel(); + virtual_overseer + .send(FromOrchestra::Communication { + msg: DisputeCoordinatorMessage::ImportStatements { + candidate_receipt: candidate_receipt.clone(), + session, + statements: vec![ + (valid_vote, ValidatorIndex(2)), + (invalid_vote, ValidatorIndex(1)), + ], + pending_confirmation: Some(pending_confirmation), + }, + }) + .await; + handle_approval_vote_request(&mut virtual_overseer, &candidate_hash, HashMap::new()) + .await; + assert_matches!(confirmation_rx.await.unwrap(), + 
ImportStatementsResult::ValidImport => {} + ); + + // Use a different expected commitments hash to ensure the candidate validation returns invalid. + participation_with_distribution( + &mut virtual_overseer, + &candidate_hash, + CandidateCommitments::default().hash(), + ) + .await; + + let mut statements = Vec::new(); + // minus 2, because of local vote and one previously imported invalid vote. + for i in (0_u32..supermajority_threshold as u32 - 2).map(|i| i + 3) { + let vote = test_state + .issue_explicit_statement_with_index( + ValidatorIndex(i), + candidate_hash, + session, + false, + ) + .await; + + statements.push((vote, ValidatorIndex(i as _))); + } + + virtual_overseer + .send(FromOrchestra::Communication { + msg: DisputeCoordinatorMessage::ImportStatements { + candidate_receipt: candidate_receipt.clone(), + session, + statements, + pending_confirmation: None, + }, + }) + .await; + handle_approval_vote_request(&mut virtual_overseer, &candidate_hash, HashMap::new()) + .await; + + // Checking that concluded dispute has signaled the reversion of all parent blocks. + assert_matches!( + virtual_overseer.recv().await, + AllMessages::ChainSelection( + ChainSelectionMessage::RevertBlocks(revert_set) + ) => { + assert!(revert_set.contains(&(parent_1_number, parent_1_hash))); + assert!(revert_set.contains(&(parent_2_number, parent_2_hash))); + }, + "Overseer did not receive `ChainSelectionMessage::RevertBlocks` message" + ); + + // Wrap up + virtual_overseer.send(FromOrchestra::Signal(OverseerSignal::Conclude)).await; + assert_matches!( + virtual_overseer.try_recv().await, + None => {} + ); + + test_state + }) + }); +}
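
Reviewer note on the reprioritization tests above: `participation_requests_reprioritized_for_newly_included` drives the behavior through the full overseer harness, which can obscure the underlying idea. The sketch below models just that idea in isolation: participation requests wait in a best effort queue ordered by relay-parent block number, a scraped inclusion event promotes the matching request to a priority queue, and priority entries are always dequeued first. This is a minimal, self-contained illustration; `ParticipationQueues`, `Request`, and the tuple keys are simplified stand-ins invented for this sketch, not the actual types from `participation/queues/mod.rs`.

use std::collections::BTreeMap;

type BlockNumber = u32;
type CandidateHash = [u8; 32];

/// Simplified stand-in for a queued participation request.
#[derive(Debug, Clone)]
struct Request {
    candidate: CandidateHash,
}

/// Two queues, both ordered by the relay parent's block number (older parents
/// are served first), mirroring "queued as best effort, promoted on inclusion".
#[derive(Default)]
struct ParticipationQueues {
    best_effort: BTreeMap<(BlockNumber, CandidateHash), Request>,
    priority: BTreeMap<(BlockNumber, CandidateHash), Request>,
}

impl ParticipationQueues {
    /// A backed-but-not-included candidate only qualifies for best effort.
    fn queue_best_effort(&mut self, parent_block: BlockNumber, req: Request) {
        self.best_effort.insert((parent_block, req.candidate), req);
    }

    /// Called when a `CandidateIncluded` event is scraped from the chain:
    /// move the matching request (if still queued) into the priority queue.
    fn reprioritize(&mut self, candidate: &CandidateHash) {
        let key = self.best_effort.keys().find(|(_, c)| c == candidate).copied();
        if let Some(key) = key {
            if let Some(req) = self.best_effort.remove(&key) {
                self.priority.insert(key, req);
            }
        }
    }

    /// Priority requests are always served before best-effort ones.
    fn dequeue(&mut self) -> Option<Request> {
        if let Some(&key) = self.priority.keys().next() {
            return self.priority.remove(&key)
        }
        let key = self.best_effort.keys().next().copied()?;
        self.best_effort.remove(&key)
    }
}

fn main() {
    let mut queues = ParticipationQueues::default();
    // Two candidates queued best effort; ordering follows the parent block number.
    queues.queue_best_effort(1, Request { candidate: [1; 32] });
    queues.queue_best_effort(2, Request { candidate: [2; 32] });
    // The second candidate gets included, so its request is reprioritized
    // and served first, just like candidate 2 in the test above.
    queues.reprioritize(&[2; 32]);
    assert_eq!(queues.dequeue().unwrap().candidate, [2; 32]);
    assert_eq!(queues.dequeue().unwrap().candidate, [1; 32]);
}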