Commit
Merge remote-tracking branch 'origin/master' into anton/8466-add_known_address-never-cleaned-up
parity-processbot committed Jan 13, 2023
2 parents e231f4b + 2bfc1dd commit 69d4959
Showing 124 changed files with 1,947 additions and 2,150 deletions.
46 changes: 42 additions & 4 deletions .gitlab-ci.yml
@@ -48,23 +48,39 @@ variables:
   CARGO_INCREMENTAL: 0
   DOCKER_OS: "debian:stretch"
   ARCH: "x86_64"
-  CI_IMAGE: "paritytech/ci-linux:production"
+  CI_IMAGE: "paritytech/ci-linux@sha256:9140bc3c843a8b12a3bcf6f5886346536092795bbadfd7f1836362cb28dfcc71"
   BUILDAH_IMAGE: "quay.io/buildah/stable:v1.27"
   RUSTY_CACHIER_SINGLE_BRANCH: master
   RUSTY_CACHIER_DONT_OPERATE_ON_MAIN_BRANCH: "true"
   RUSTY_CACHIER_COMPRESSION_METHOD: zstd
   NEXTEST_FAILURE_OUTPUT: immediate-final
   NEXTEST_SUCCESS_OUTPUT: final
   ZOMBIENET_IMAGE: "docker.io/paritytech/zombienet:v1.3.22"
 
-default:
+.shared-default: &shared-default
   retry:
     max: 2
     when:
       - runner_system_failure
       - unknown_failure
       - api_failure
-  interruptible: true
   cache: {}
 
+.default-pipeline-definitions:
+  default:
+    <<: *shared-default
+    interruptible: true
+
+.crate-publishing-pipeline-definitions:
+  default:
+    <<: *shared-default
+    # The crate-publishing pipeline defaults to `interruptible: false` so that we'll be able to
+    # reach and run the publishing jobs despite the "Auto-cancel redundant pipelines" CI setting.
+    # The setting is relevant because the crate-publishing pipeline runs on `master`, thus future
+    # pipelines on `master` (e.g. created for new commits or other schedules) might unintentionally
+    # cancel the publishing jobs or their dependencies before we get to actually publish the crates.
+    interruptible: false
+
 .collect-artifacts:
   artifacts:
     name: "${CI_JOB_NAME}_${CI_COMMIT_REF_NAME}"
@@ -213,9 +229,15 @@ default:
     # this job runs only on nightly pipeline with the mentioned variable, against `master` branch
     - if: $CI_COMMIT_REF_NAME == "master" && $CI_PIPELINE_SOURCE == "schedule" && $PIPELINE == "nightly"
 
+.crate-publishing-pipeline:
+  rules:
+    - if: $CI_COMMIT_REF_NAME != "master"
+      when: never
+
 .scheduled-crate-publishing-pipeline:
   rules:
-    - if: $CI_COMMIT_REF_NAME == "master" && $CI_PIPELINE_SOURCE == "schedule" && $PIPELINE == "automatic-crate-publishing"
+    - !reference [.crate-publishing-pipeline, rules]
+    - if: $CI_PIPELINE_SOURCE == "schedule" && $PIPELINE == "automatic-crate-publishing"
 
 .crates-publishing-template:
   stage: test
@@ -263,6 +285,22 @@ include:
   - scripts/ci/gitlab/pipeline/publish.yml
   # zombienet jobs
   - scripts/ci/gitlab/pipeline/zombienet.yml
+  # The crate-publishing pipeline requires a customized `interruptible` configuration. Unfortunately
+  # `interruptible` can't currently be dynamically set based on variables as per:
+  # - https://gitlab.com/gitlab-org/gitlab/-/issues/38349
+  # - https://gitlab.com/gitlab-org/gitlab/-/issues/194023
+  # Thus we work around that limitation by using conditional includes.
+  # For crate-publishing pipelines: run it with defaults + `interruptible: false`. The WHOLE
+  # pipeline is made uninterruptible to ensure that test jobs also get a chance to run to
+  # completion, because the publishing jobs depend on them AS INTENDED: crates should not be
+  # published before their source code is checked.
+  - local: scripts/ci/gitlab/crate-publishing-pipeline.yml
+    rules:
+      - if: $PIPELINE == "automatic-crate-publishing"
+  # For normal pipelines: run it with defaults + `interruptible: true`
+  - local: scripts/ci/gitlab/default-pipeline.yml
+    rules:
+      - if: $PIPELINE != "automatic-crate-publishing"
 
 #### stage: deploy
 
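The two files pulled in above are not part of this diff. Given the `.default-pipeline-definitions` and `.crate-publishing-pipeline-definitions` anchors introduced earlier, each include plausibly just materializes the matching `default` section via GitLab's `!reference` tag — a sketch of assumed contents, not the verbatim files:

# scripts/ci/gitlab/default-pipeline.yml (assumed contents, not in this diff)
default: !reference [.default-pipeline-definitions, default]

# scripts/ci/gitlab/crate-publishing-pipeline.yml (assumed contents, not in this diff)
default: !reference [.crate-publishing-pipeline-definitions, default]

Because the two `include` rules test `$PIPELINE` with complementary conditions, exactly one of the files is included per pipeline, which is how the pipeline-wide `interruptible` default gets selected despite the GitLab limitation linked above.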
16 changes: 8 additions & 8 deletions Cargo.lock

Some generated files are not rendered by default.

3 changes: 2 additions & 1 deletion bin/node-template/runtime/Cargo.toml
@@ -51,7 +51,7 @@ frame-system-benchmarking = { version = "4.0.0-dev", default-features = false, p
 pallet-template = { version = "4.0.0-dev", default-features = false, path = "../pallets/template" }
 
 [build-dependencies]
-substrate-wasm-builder = { version = "5.0.0-dev", path = "../../../utils/wasm-builder" }
+substrate-wasm-builder = { version = "5.0.0-dev", path = "../../../utils/wasm-builder", optional = true }
 
 [features]
 default = ["std"]
@@ -86,6 +86,7 @@ std = [
 	"sp-std/std",
 	"sp-transaction-pool/std",
 	"sp-version/std",
+	"substrate-wasm-builder",
 ]
 runtime-benchmarks = [
 	"frame-benchmarking/runtime-benchmarks",
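Together, the two hunks above apply the standard optional-dependency pattern: a build dependency marked `optional = true` is compiled only when some feature activates it. Reduced to its essentials, with hypothetical crate names:

[build-dependencies]
# Not compiled unless a feature turns it on.
wasm-builder = { version = "1.0", optional = true }

[features]
default = ["std"]
# The default native build enables the builder; a wasm build run with
# `--no-default-features` never compiles it.
std = ["wasm-builder"]

The payoff shows up in `build.rs` below: the wasm build of the runtime no longer pulls in `substrate-wasm-builder` or its dependency tree at all.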
15 changes: 8 additions & 7 deletions bin/node-template/runtime/build.rs
@@ -1,9 +1,10 @@
-use substrate_wasm_builder::WasmBuilder;
-
 fn main() {
-	WasmBuilder::new()
-		.with_current_project()
-		.export_heap_base()
-		.import_memory()
-		.build()
+	#[cfg(feature = "std")]
+	{
+		substrate_wasm_builder::WasmBuilder::new()
+			.with_current_project()
+			.export_heap_base()
+			.import_memory()
+			.build();
+	}
 }
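The `#[cfg(feature = "std")]` gate works because Cargo compiles a package's build script with the same feature set as the package itself, so in a `--no-default-features` (wasm) build `main` reduces to a no-op. For illustration only — not what this commit does — a build script can reach the same decision at run time via the `CARGO_FEATURE_*` environment variables Cargo sets for every enabled feature:

// build.rs — hypothetical run-time variant of the cfg-gate used above.
fn main() {
    // Cargo exports CARGO_FEATURE_<NAME> for each feature enabled on the
    // package this script belongs to; CARGO_FEATURE_STD mirrors `std`.
    if std::env::var_os("CARGO_FEATURE_STD").is_some() {
        // The wasm-builder invocation would go here.
    }
}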
3 changes: 2 additions & 1 deletion bin/node/runtime/Cargo.toml
@@ -113,7 +113,7 @@ pallet-vesting = { version = "4.0.0-dev", default-features = false, path = "../.
 pallet-whitelist = { version = "4.0.0-dev", default-features = false, path = "../../../frame/whitelist" }
 
 [build-dependencies]
-substrate-wasm-builder = { version = "5.0.0-dev", path = "../../../utils/wasm-builder" }
+substrate-wasm-builder = { version = "5.0.0-dev", path = "../../../utils/wasm-builder", optional = true }
 
 [features]
 default = ["std"]
@@ -205,6 +205,7 @@ std = [
 	"sp-io/std",
 	"pallet-child-bounties/std",
 	"pallet-alliance/std",
+	"substrate-wasm-builder",
 ]
 runtime-benchmarks = [
 	"frame-benchmarking/runtime-benchmarks",
15 changes: 8 additions & 7 deletions bin/node/runtime/build.rs
@@ -15,12 +15,13 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-use substrate_wasm_builder::WasmBuilder;
-
 fn main() {
-	WasmBuilder::new()
-		.with_current_project()
-		.export_heap_base()
-		.import_memory()
-		.build()
+	#[cfg(feature = "std")]
+	{
+		substrate_wasm_builder::WasmBuilder::new()
+			.with_current_project()
+			.export_heap_base()
+			.import_memory()
+			.build();
+	}
 }
4 changes: 2 additions & 2 deletions client/beefy/src/worker.rs
@@ -498,7 +498,7 @@ where
 					warn!(target: "beefy", "🥩 Buffer vote dropped for round: {:?}", block_num)
 				}
 			} else {
-				error!(target: "beefy", "🥩 Buffer justification dropped for round: {:?}.", block_num);
+				warn!(target: "beefy", "🥩 Buffer vote dropped for round: {:?}.", block_num);
 			}
 		},
 		RoundAction::Drop => (),
@@ -528,7 +528,7 @@ where
 				if self.pending_justifications.len() < MAX_BUFFERED_JUSTIFICATIONS {
 					self.pending_justifications.entry(block_num).or_insert(justification);
 				} else {
-					error!(target: "beefy", "🥩 Buffer justification dropped for round: {:?}.", block_num);
+					warn!(target: "beefy", "🥩 Buffer justification dropped for round: {:?}.", block_num);
 				}
 			},
 			RoundAction::Drop => (),
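Both call sites are downgraded from `error!` to `warn!` (and the first message is corrected from "justification" to "vote"), since hitting the buffer bound is recoverable: the node can still obtain the data later. The surrounding logic is a simple bounded-map insert — roughly, with names and the `u64` key assumed for illustration:

use std::collections::BTreeMap;

/// Keep at most `max` pending entries, preserving the first one seen per
/// round; report `false` instead of failing when the buffer is full.
fn buffer_justification<J>(
    pending: &mut BTreeMap<u64, J>,
    max: usize,
    block_num: u64,
    justification: J,
) -> bool {
    if pending.len() < max {
        pending.entry(block_num).or_insert(justification);
        true
    } else {
        false // caller emits the warn! and drops the entry
    }
}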
51 changes: 29 additions & 22 deletions client/consensus/babe/src/lib.rs
@@ -111,7 +111,7 @@ use sp_api::{ApiExt, ProvideRuntimeApi};
 use sp_application_crypto::AppKey;
 use sp_block_builder::BlockBuilder as BlockBuilderApi;
 use sp_blockchain::{
-	Backend as _, Error as ClientError, ForkBackend, HeaderBackend, HeaderMetadata,
+	Backend as _, BlockStatus, Error as ClientError, ForkBackend, HeaderBackend, HeaderMetadata,
 	Result as ClientResult,
 };
 use sp_consensus::{
@@ -1133,11 +1133,17 @@ where
 		let hash = block.header.hash();
 		let parent_hash = *block.header.parent_hash();
 
-		if block.with_state() {
-			// When importing whole state we don't calculate epoch descriptor, but rather
-			// read it from the state after import. We also skip all verifications
-			// because there's no parent state and we trust the sync module to verify
-			// that the state is correct and finalized.
+		let info = self.client.info();
+		let number = *block.header.number();
+
+		if info.block_gap.map_or(false, |(s, e)| s <= number && number <= e) || block.with_state() {
+			// Verification for imported blocks is skipped in two cases:
+			// 1. When importing blocks below the last finalized block during network initial
+			//    synchronization.
+			// 2. When importing whole state we don't calculate epoch descriptor, but rather
+			//    read it from the state after import. We also skip all verifications
+			//    because there's no parent state and we trust the sync module to verify
+			//    that the state is correct and finalized.
 			return Ok((block, Default::default()))
 		}
 
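`block_gap` (from `client.info()`) is the inclusive range of historic block numbers the backend still needs to back-fill below the finalized head, e.g. after a warp sync; blocks inside it are already finalized upstream, so full verification can be skipped. A minimal standalone version of the containment check, with the block number type assumed to be `u64` for illustration:

/// `block_gap` is `Some((start, end))` while blocks in that inclusive range
/// still have to be downloaded, and `None` once the gap is closed.
fn in_block_gap(block_gap: Option<(u64, u64)>, number: u64) -> bool {
    block_gap.map_or(false, |(start, end)| start <= number && number <= end)
}

fn main() {
    assert!(in_block_gap(Some((100, 200)), 100)); // boundaries are inclusive
    assert!(in_block_gap(Some((100, 200)), 200));
    assert!(!in_block_gap(Some((100, 200)), 201));
    assert!(!in_block_gap(None, 150)); // no gap, nothing to skip
}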
@@ -1399,18 +1405,24 @@
 	) -> Result<ImportResult, Self::Error> {
 		let hash = block.post_hash();
 		let number = *block.header.number();
+		let info = self.client.info();
 
-		// early exit if block already in chain, otherwise the check for
-		// epoch changes will error when trying to re-import an epoch change
-		match self.client.status(hash) {
-			Ok(sp_blockchain::BlockStatus::InChain) => {
-				// When re-importing existing block strip away intermediates.
-				let _ = block.remove_intermediate::<BabeIntermediate<Block>>(INTERMEDIATE_KEY);
-				block.fork_choice = Some(ForkChoiceStrategy::Custom(false));
-				return self.inner.import_block(block, new_cache).await.map_err(Into::into)
-			},
-			Ok(sp_blockchain::BlockStatus::Unknown) => {},
-			Err(e) => return Err(ConsensusError::ClientImport(e.to_string())),
-		}
+		let block_status = self
+			.client
+			.status(hash)
+			.map_err(|e| ConsensusError::ClientImport(e.to_string()))?;
+
+		// Skip babe logic if block already in chain or importing blocks during initial sync,
+		// otherwise the check for epoch changes will error because trying to re-import an
+		// epoch change or because of missing epoch data in the tree, respectively.
+		if info.block_gap.map_or(false, |(s, e)| s <= number && number <= e) ||
+			block_status == BlockStatus::InChain
+		{
+			// When re-importing existing block strip away intermediates.
+			// In case of initial sync intermediates should not be present...
+			let _ = block.remove_intermediate::<BabeIntermediate<Block>>(INTERMEDIATE_KEY);
+			block.fork_choice = Some(ForkChoiceStrategy::Custom(false));
+			return self.inner.import_block(block, new_cache).await.map_err(Into::into)
+		}
 
 		if block.with_state() {
@@ -1506,8 +1518,6 @@ where
 			)),
 		}
 
-		let info = self.client.info();
-
 		if let Some(next_epoch_descriptor) = next_epoch_digest {
 			old_epoch_changes = Some((*epoch_changes).clone());
 
@@ -1701,9 +1711,6 @@ where
 	Client: HeaderBackend<Block> + HeaderMetadata<Block, Error = sp_blockchain::Error>,
 {
 	let info = client.info();
-	if info.block_gap.is_none() {
-		epoch_changes.clear_gap();
-	}
 
 	let finalized_slot = {
 		let finalized_header = client