diff --git a/Cargo.lock b/Cargo.lock index 05386203452c..77a71df95913 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4360,7 +4360,7 @@ dependencies = [ "async-trait", "cumulus-primitives-core", "futures", - "jsonrpsee-core", + "jsonrpsee-core 0.23.2", "parity-scale-codec", "polkadot-overseer", "sc-client-api", @@ -4415,7 +4415,7 @@ dependencies = [ "either", "futures", "futures-timer", - "jsonrpsee", + "jsonrpsee 0.23.2", "parity-scale-codec", "pin-project", "polkadot-overseer", @@ -4561,7 +4561,7 @@ dependencies = [ "frame-system", "frame-system-rpc-runtime-api", "futures", - "jsonrpsee", + "jsonrpsee 0.23.2", "pallet-timestamp", "pallet-transaction-payment", "parachains-common", @@ -5983,7 +5983,7 @@ version = "0.35.0" dependencies = [ "futures", "indicatif", - "jsonrpsee", + "jsonrpsee 0.23.2", "log", "parity-scale-codec", "serde", @@ -6501,6 +6501,19 @@ version = "0.3.1" source = "registry+/~https://github.com/rust-lang/crates.io-index" checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" +[[package]] +name = "globset" +version = "0.4.13" +source = "registry+/~https://github.com/rust-lang/crates.io-index" +checksum = "759c97c1e17c55525b57192c06a267cda0ac5210b222d6b82189a2338fa1c13d" +dependencies = [ + "aho-corasick", + "bstr", + "fnv", + "log", + "regex", +] + [[package]] name = "gloo-timers" version = "0.2.6" @@ -7397,17 +7410,28 @@ dependencies = [ "serde_json", ] +[[package]] +name = "jsonrpsee" +version = "0.16.3" +source = "registry+/~https://github.com/rust-lang/crates.io-index" +checksum = "367a292944c07385839818bb71c8d76611138e2dedb0677d035b8da21d29c78b" +dependencies = [ + "jsonrpsee-core 0.16.3", + "jsonrpsee-server 0.16.3", + "jsonrpsee-types 0.16.3", +] + [[package]] name = "jsonrpsee" version = "0.23.2" source = "registry+/~https://github.com/rust-lang/crates.io-index" checksum = "62b089779ad7f80768693755a031cc14a7766aba707cbe886674e3f79e9b7e47" dependencies = [ - "jsonrpsee-core", + "jsonrpsee-core 0.23.2", "jsonrpsee-http-client", "jsonrpsee-proc-macros", - "jsonrpsee-server", - "jsonrpsee-types", + "jsonrpsee-server 0.23.2", + "jsonrpsee-types 0.23.2", "jsonrpsee-ws-client", "tokio", "tracing", @@ -7422,7 +7446,7 @@ dependencies = [ "base64 0.22.1", "futures-util", "http 1.1.0", - "jsonrpsee-core", + "jsonrpsee-core 0.23.2", "pin-project", "rustls 0.23.10", "rustls-pki-types", @@ -7436,6 +7460,32 @@ dependencies = [ "url", ] +[[package]] +name = "jsonrpsee-core" +version = "0.16.3" +source = "registry+/~https://github.com/rust-lang/crates.io-index" +checksum = "2b5dde66c53d6dcdc8caea1874a45632ec0fcf5b437789f1e45766a1512ce803" +dependencies = [ + "anyhow", + "arrayvec 0.7.4", + "async-trait", + "beef", + "futures-channel", + "futures-util", + "globset", + "hyper 0.14.29", + "jsonrpsee-types 0.16.3", + "parking_lot 0.12.3", + "rand", + "rustc-hash", + "serde", + "serde_json", + "soketto 0.7.1", + "thiserror", + "tokio", + "tracing", +] + [[package]] name = "jsonrpsee-core" version = "0.23.2" @@ -7451,7 +7501,7 @@ dependencies = [ "http 1.1.0", "http-body 1.0.0", "http-body-util", - "jsonrpsee-types", + "jsonrpsee-types 0.23.2", "parking_lot 0.12.3", "pin-project", "rand", @@ -7476,8 +7526,8 @@ dependencies = [ "hyper 1.3.1", "hyper-rustls 0.27.2", "hyper-util", - "jsonrpsee-core", - "jsonrpsee-types", + "jsonrpsee-core 0.23.2", + "jsonrpsee-types 0.23.2", "rustls 0.23.10", "rustls-platform-verifier", "serde", @@ -7502,6 +7552,28 @@ dependencies = [ "syn 2.0.61", ] +[[package]] +name = "jsonrpsee-server" +version = "0.16.3" +source = 
"registry+/~https://github.com/rust-lang/crates.io-index" +checksum = "cf4d945a6008c9b03db3354fb3c83ee02d2faa9f2e755ec1dfb69c3551b8f4ba" +dependencies = [ + "futures-channel", + "futures-util", + "http 0.2.9", + "hyper 0.14.29", + "jsonrpsee-core 0.16.3", + "jsonrpsee-types 0.16.3", + "serde", + "serde_json", + "soketto 0.7.1", + "tokio", + "tokio-stream", + "tokio-util", + "tower", + "tracing", +] + [[package]] name = "jsonrpsee-server" version = "0.23.2" @@ -7515,8 +7587,8 @@ dependencies = [ "http-body-util", "hyper 1.3.1", "hyper-util", - "jsonrpsee-core", - "jsonrpsee-types", + "jsonrpsee-core 0.23.2", + "jsonrpsee-types 0.23.2", "pin-project", "route-recognizer", "serde", @@ -7530,6 +7602,20 @@ dependencies = [ "tracing", ] +[[package]] +name = "jsonrpsee-types" +version = "0.16.3" +source = "registry+/~https://github.com/rust-lang/crates.io-index" +checksum = "245ba8e5aa633dd1c1e4fae72bce06e71f42d34c14a2767c6b4d173b57bee5e5" +dependencies = [ + "anyhow", + "beef", + "serde", + "serde_json", + "thiserror", + "tracing", +] + [[package]] name = "jsonrpsee-types" version = "0.23.2" @@ -7551,8 +7637,8 @@ checksum = "1c28759775f5cb2f1ea9667672d3fe2b0e701d1f4b7b67954e60afe7fd058b5e" dependencies = [ "http 1.1.0", "jsonrpsee-client-transport", - "jsonrpsee-core", - "jsonrpsee-types", + "jsonrpsee-core 0.23.2", + "jsonrpsee-types 0.23.2", "url", ] @@ -8733,7 +8819,7 @@ dependencies = [ "docify", "futures", "futures-timer", - "jsonrpsee", + "jsonrpsee 0.23.2", "minimal-template-runtime", "polkadot-sdk-frame", "sc-basic-authorship", @@ -8851,7 +8937,7 @@ dependencies = [ name = "mmr-rpc" version = "28.0.0" dependencies = [ - "jsonrpsee", + "jsonrpsee 0.23.2", "parity-scale-codec", "serde", "serde_json", @@ -9265,7 +9351,7 @@ dependencies = [ name = "node-rpc" version = "3.0.0-dev" dependencies = [ - "jsonrpsee", + "jsonrpsee 0.23.2", "mmr-rpc", "node-primitives", "pallet-transaction-payment-rpc", @@ -9306,6 +9392,91 @@ dependencies = [ "kitchensink-runtime", ] +[[package]] +name = "node-sassafras" +version = "0.3.4-dev" +dependencies = [ + "clap 4.5.11", + "frame-benchmarking", + "frame-benchmarking-cli", + "frame-system", + "futures", + "jsonrpsee 0.16.3", + "node-sassafras-runtime", + "pallet-transaction-payment", + "pallet-transaction-payment-rpc", + "sc-basic-authorship", + "sc-cli", + "sc-client-api", + "sc-consensus", + "sc-consensus-grandpa", + "sc-consensus-sassafras", + "sc-consensus-slots", + "sc-executor", + "sc-keystore", + "sc-network", + "sc-offchain", + "sc-rpc", + "sc-rpc-api", + "sc-service", + "sc-telemetry", + "sc-transaction-pool", + "sc-transaction-pool-api", + "serde_json", + "sp-api", + "sp-block-builder", + "sp-blockchain", + "sp-consensus", + "sp-consensus-grandpa", + "sp-consensus-sassafras", + "sp-consensus-slots", + "sp-core", + "sp-crypto-ec-utils 0.10.0", + "sp-inherents", + "sp-io", + "sp-keyring", + "sp-runtime", + "sp-timestamp", + "substrate-build-script-utils", + "substrate-frame-rpc-system", +] + +[[package]] +name = "node-sassafras-runtime" +version = "0.3.4-dev" +dependencies = [ + "frame-benchmarking", + "frame-executive", + "frame-support", + "frame-system", + "frame-system-benchmarking", + "frame-system-rpc-runtime-api", + "pallet-balances", + "pallet-grandpa", + "pallet-sassafras", + "pallet-session", + "pallet-sudo", + "pallet-timestamp", + "pallet-transaction-payment", + "pallet-transaction-payment-rpc-runtime-api", + "parity-scale-codec", + "scale-info", + "sp-api", + "sp-block-builder", + "sp-consensus-sassafras", + "sp-core", + 
"sp-genesis-builder", + "sp-inherents", + "sp-offchain", + "sp-runtime", + "sp-session", + "sp-std 14.0.0", + "sp-storage 19.0.0", + "sp-transaction-pool", + "sp-version", + "substrate-wasm-builder", +] + [[package]] name = "node-template-release" version = "3.0.0" @@ -11484,10 +11655,12 @@ name = "pallet-sassafras" version = "0.3.5-dev" dependencies = [ "array-bytes", + "env_logger 0.11.3", "frame-benchmarking", "frame-support", "frame-system", "log", + "pallet-session", "parity-scale-codec", "scale-info", "sp-consensus-sassafras", @@ -11788,7 +11961,7 @@ dependencies = [ name = "pallet-transaction-payment-rpc" version = "30.0.0" dependencies = [ - "jsonrpsee", + "jsonrpsee 0.23.2", "pallet-transaction-payment-rpc-runtime-api", "parity-scale-codec", "sp-api", @@ -12047,7 +12220,7 @@ dependencies = [ "frame-benchmarking", "frame-benchmarking-cli", "futures", - "jsonrpsee", + "jsonrpsee 0.23.2", "log", "pallet-transaction-payment-rpc", "parachain-template-runtime", @@ -13878,7 +14051,7 @@ dependencies = [ "futures", "glutton-westend-runtime", "hex-literal", - "jsonrpsee", + "jsonrpsee 0.23.2", "log", "nix 0.28.0", "pallet-transaction-payment", @@ -13996,7 +14169,7 @@ dependencies = [ name = "polkadot-rpc" version = "7.0.0" dependencies = [ - "jsonrpsee", + "jsonrpsee 0.23.2", "mmr-rpc", "pallet-transaction-payment-rpc", "polkadot-primitives", @@ -16174,7 +16347,7 @@ dependencies = [ "finality-relay", "frame-support", "futures", - "jsonrpsee", + "jsonrpsee 0.23.2", "log", "num-traits", "pallet-transaction-payment", @@ -17332,7 +17505,7 @@ name = "sc-consensus-babe-rpc" version = "0.34.0" dependencies = [ "futures", - "jsonrpsee", + "jsonrpsee 0.23.2", "sc-consensus", "sc-consensus-babe", "sc-consensus-epochs", @@ -17404,7 +17577,7 @@ name = "sc-consensus-beefy-rpc" version = "13.0.0" dependencies = [ "futures", - "jsonrpsee", + "jsonrpsee 0.23.2", "log", "parity-scale-codec", "parking_lot 0.12.3", @@ -17489,7 +17662,7 @@ version = "0.19.0" dependencies = [ "finality-grandpa", "futures", - "jsonrpsee", + "jsonrpsee 0.23.2", "log", "parity-scale-codec", "sc-block-builder", @@ -17515,7 +17688,7 @@ dependencies = [ "async-trait", "futures", "futures-timer", - "jsonrpsee", + "jsonrpsee 0.23.2", "log", "parity-scale-codec", "sc-basic-authorship", @@ -17569,6 +17742,45 @@ dependencies = [ "thiserror", ] +[[package]] +name = "sc-consensus-sassafras" +version = "0.3.4-dev" +dependencies = [ + "async-trait", + "env_logger 0.10.1", + "fork-tree", + "futures", + "log", + "parity-scale-codec", + "parking_lot 0.12.3", + "sc-block-builder", + "sc-client-api", + "sc-consensus", + "sc-consensus-epochs", + "sc-consensus-slots", + "sc-keystore", + "sc-network-test", + "sc-telemetry", + "sc-transaction-pool-api", + "sp-api", + "sp-application-crypto", + "sp-block-builder", + "sp-blockchain", + "sp-consensus", + "sp-consensus-sassafras", + "sp-consensus-slots", + "sp-core", + "sp-inherents", + "sp-keyring", + "sp-keystore", + "sp-runtime", + "sp-timestamp", + "substrate-prometheus-endpoint", + "substrate-test-runtime-client", + "thiserror", + "tokio", +] + [[package]] name = "sc-consensus-slots" version = "0.33.0" @@ -18044,7 +18256,7 @@ dependencies = [ "assert_matches", "env_logger 0.11.3", "futures", - "jsonrpsee", + "jsonrpsee 0.23.2", "log", "parity-scale-codec", "parking_lot 0.12.3", @@ -18083,7 +18295,7 @@ dependencies = [ name = "sc-rpc-api" version = "0.33.0" dependencies = [ - "jsonrpsee", + "jsonrpsee 0.23.2", "parity-scale-codec", "sc-chain-spec", "sc-mixnet", @@ -18109,7 +18321,7 @@ dependencies = [ 
"http-body-util", "hyper 1.3.1", "ip_network", - "jsonrpsee", + "jsonrpsee 0.23.2", "log", "serde", "serde_json", @@ -18128,7 +18340,7 @@ dependencies = [ "futures", "futures-util", "hex", - "jsonrpsee", + "jsonrpsee 0.23.2", "log", "parity-scale-codec", "parking_lot 0.12.3", @@ -18182,7 +18394,7 @@ dependencies = [ "exit-future", "futures", "futures-timer", - "jsonrpsee", + "jsonrpsee 0.23.2", "log", "parity-scale-codec", "parking_lot 0.12.3", @@ -18320,7 +18532,7 @@ dependencies = [ name = "sc-sync-state-rpc" version = "0.34.0" dependencies = [ - "jsonrpsee", + "jsonrpsee 0.23.2", "parity-scale-codec", "sc-chain-spec", "sc-client-api", @@ -19704,6 +19916,7 @@ dependencies = [ "base64 0.13.1", "bytes", "futures", + "http 0.2.9", "httparse", "log", "rand", @@ -19735,7 +19948,7 @@ dependencies = [ "frame-metadata-hash-extension", "frame-system", "futures", - "jsonrpsee", + "jsonrpsee 0.23.2", "pallet-transaction-payment", "pallet-transaction-payment-rpc", "sc-basic-authorship", @@ -20077,6 +20290,7 @@ dependencies = [ "sp-application-crypto", "sp-consensus-slots", "sp-core", + "sp-inherents", "sp-runtime", ] @@ -20936,7 +21150,7 @@ dependencies = [ "clap_complete", "criterion", "futures", - "jsonrpsee", + "jsonrpsee 0.23.2", "kitchensink-runtime", "log", "nix 0.28.0", @@ -21284,7 +21498,7 @@ version = "29.0.0" dependencies = [ "frame-support", "frame-system", - "jsonrpsee", + "jsonrpsee 0.23.2", "parity-scale-codec", "sc-rpc-api", "scale-info", @@ -21303,7 +21517,7 @@ dependencies = [ "docify", "frame-system-rpc-runtime-api", "futures", - "jsonrpsee", + "jsonrpsee 0.23.2", "log", "parity-scale-codec", "sc-rpc-api", @@ -21380,7 +21594,7 @@ name = "substrate-rpc-client" version = "0.33.0" dependencies = [ "async-trait", - "jsonrpsee", + "jsonrpsee 0.23.2", "log", "sc-rpc-api", "serde", @@ -21393,7 +21607,7 @@ dependencies = [ name = "substrate-state-trie-migration-rpc" version = "27.0.0" dependencies = [ - "jsonrpsee", + "jsonrpsee 0.23.2", "parity-scale-codec", "sc-client-api", "sc-rpc-api", @@ -21446,6 +21660,7 @@ dependencies = [ "log", "pallet-babe", "pallet-balances", + "pallet-sassafras", "pallet-timestamp", "parity-scale-codec", "sc-block-builder", @@ -21463,6 +21678,7 @@ dependencies = [ "sp-consensus-aura", "sp-consensus-babe", "sp-consensus-grandpa", + "sp-consensus-sassafras", "sp-core", "sp-crypto-hashing", "sp-externalities 0.25.0", @@ -21496,6 +21712,7 @@ dependencies = [ "sp-blockchain", "sp-consensus", "sp-core", + "sp-crypto-ec-utils 0.10.0", "sp-runtime", "substrate-test-client", "substrate-test-runtime", diff --git a/Cargo.toml b/Cargo.toml index e07ec97be3a2..6434a659b156 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -235,6 +235,8 @@ members = [ "polkadot/xcm/xcm-simulator", "polkadot/xcm/xcm-simulator/example", "polkadot/xcm/xcm-simulator/fuzzer", + "substrate/bin/node-sassafras/node", + "substrate/bin/node-sassafras/runtime", "substrate/bin/node/bench", "substrate/bin/node/cli", "substrate/bin/node/inspect", @@ -263,6 +265,7 @@ members = [ "substrate/client/consensus/grandpa/rpc", "substrate/client/consensus/manual-seal", "substrate/client/consensus/pow", + "substrate/client/consensus/sassafras", "substrate/client/consensus/slots", "substrate/client/db", "substrate/client/executor", @@ -1431,3 +1434,17 @@ wasmi = { opt-level = 3 } x25519-dalek = { opt-level = 3 } yamux = { opt-level = 3 } zeroize = { opt-level = 3 } + +# [patch."/~https://github.com/w3f/ring-vrf"] +# # bandersnatch_vrfs = { git = "/~https://github.com/davxy/ring-vrf", branch = "davxy-patch" } +# 
bandersnatch_vrfs = { path = "../../w3f/ring-vrf/bandersnatch_vrfs" } + +# [patch."/~https://github.com/w3f/ring-proof"] +# # ring = { git = "/~https://github.com/davxy/ring-proof", branch = "davxy-patch" } +# # common = { git = "/~https://github.com/davxy/ring-proof", branch = "davxy-patch" } +# ring = { path = "../../w3f/ring-proof/ring" } +# common = { path = "../../w3f/ring-proof/common" } + +# [patch."/~https://github.com/w3f/fflonk"] +# # fflonk = { git = "/~https://github.com/davxy/fflonk", branch = "davxy-patch" } +# fflonk = { path = "../../w3f/fflonk" } diff --git a/benchmark.sh b/benchmark.sh new file mode 100755 index 000000000000..d1871a43fdf7 --- /dev/null +++ b/benchmark.sh @@ -0,0 +1,38 @@ +#!/bin/bash + +binary="./target/release/node-sassafras" + +steps=20 +repeat=3 + +export RUST_LOG="sassafras=debug" + +pallet='pallet_sassafras' + +extrinsic=$1 + +if [[ $extrinsic == "" ]]; then + list=$($binary benchmark pallet --list | grep $pallet | cut -d ',' -f 2) + + echo "Usage: $0 <extrinsic>" + echo "" + echo "Available benchmarks:" + for bench in $list; do + echo "- $bench" + done + echo "- all" + exit +fi + +if [[ $extrinsic == "all" ]]; then + extrinsic='*' +fi + +$binary benchmark pallet \ + --chain dev \ + --pallet $pallet \ + --extrinsic "$extrinsic" \ + --steps $steps \ + --repeat $repeat \ + --output weights.rs \ + --template substrate/.maintain/frame-weight-template.hbs diff --git a/substrate/bin/node-sassafras/.editorconfig b/substrate/bin/node-sassafras/.editorconfig new file mode 100644 index 000000000000..5adac74ca24b --- /dev/null +++ b/substrate/bin/node-sassafras/.editorconfig @@ -0,0 +1,16 @@ +root = true + +[*] +indent_style=space +indent_size=2 +tab_width=2 +end_of_line=lf +charset=utf-8 +trim_trailing_whitespace=true +insert_final_newline = true + +[*.{rs,toml}] +indent_style=tab +indent_size=tab +tab_width=4 +max_line_length=100 diff --git a/substrate/bin/node-sassafras/node/Cargo.toml b/substrate/bin/node-sassafras/node/Cargo.toml new file mode 100644 index 000000000000..b2962e3ec113 --- /dev/null +++ b/substrate/bin/node-sassafras/node/Cargo.toml @@ -0,0 +1,84 @@ +[package] +name = "node-sassafras" +version = "0.3.4-dev" +authors = ["Davide Galassi", "Parity Technologies"] +description = "Node testbed for Sassafras consensus."
+homepage = "https://substrate.io/" +edition = "2021" +license = "Unlicense" +publish = false +build = "build.rs" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[[bin]] +name = "node-sassafras" + +[dependencies] +clap = { version = "4.0.9", features = ["derive"] } +futures = { version = "0.3.21", features = ["thread-pool"] } +serde_json = "1.0.108" + +sc-cli = { path = "../../../client/cli" } +sp-core = { path = "../../../primitives/core" } +sc-executor = { path = "../../../client/executor" } +sc-network = { path = "../../../client/network" } +sc-service = { path = "../../../client/service" } +sc-telemetry = { path = "../../../client/telemetry" } +sc-keystore = { path = "../../../client/keystore" } +sc-transaction-pool = { path = "../../../client/transaction-pool" } +sc-transaction-pool-api = { path = "../../../client/transaction-pool/api" } +sc-offchain = { path = "../../../client/offchain" } +sc-consensus-sassafras = { path = "../../../client/consensus/sassafras" } +sp-consensus-sassafras = { path = "../../../primitives/consensus/sassafras" } +sp-consensus = { path = "../../../primitives/consensus/common" } +sc-consensus = { path = "../../../client/consensus/common" } +sc-consensus-grandpa = { path = "../../../client/consensus/grandpa" } +sp-consensus-grandpa = { path = "../../../primitives/consensus/grandpa" } +sc-client-api = { path = "../../../client/api" } +sp-runtime = { path = "../../../primitives/runtime" } +sp-io = { path = "../../../primitives/io" } +sp-timestamp = { path = "../../../primitives/timestamp" } +sp-inherents = { path = "../../../primitives/inherents" } +sp-keyring = { path = "../../../primitives/keyring" } +frame-system = { path = "../../../frame/system" } +pallet-transaction-payment = { path = "../../../frame/transaction-payment" } +sp-crypto-ec-utils = { path = "../../../primitives/crypto/ec-utils", features = ["bls12-381", "ed-on-bls12-381-bandersnatch"] } +sp-consensus-slots = { path = "../../../primitives/consensus/slots" } +sc-consensus-slots = { path = "../../../client/consensus/slots" } + +# These dependencies are used for the node template's RPCs +jsonrpsee = { version = "0.16.2", features = ["server"] } +sc-rpc = { path = "../../../client/rpc" } +sp-api = { path = "../../../primitives/api" } +sc-rpc-api = { path = "../../../client/rpc-api" } +sp-blockchain = { path = "../../../primitives/blockchain" } +sp-block-builder = { path = "../../../primitives/block-builder" } +sc-basic-authorship = { path = "../../../client/basic-authorship" } +substrate-frame-rpc-system = { path = "../../../utils/frame/rpc/system" } +pallet-transaction-payment-rpc = { path = "../../../frame/transaction-payment/rpc/" } + +# These dependencies are used for runtime benchmarking +frame-benchmarking = { path = "../../../frame/benchmarking" } +frame-benchmarking-cli = { path = "../../../utils/frame/benchmarking-cli" } + +# Local Dependencies +node-sassafras-runtime = { path = "../runtime" } + +[build-dependencies] +substrate-build-script-utils = { path = "../../../utils/build-script-utils" } + +[features] +default = [] +runtime-benchmarks = [ + "frame-benchmarking-cli/runtime-benchmarks", + "frame-benchmarking/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "node-sassafras-runtime/runtime-benchmarks", + "sc-service/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", +] +use-session-pallet = [ + "node-sassafras-runtime/use-session-pallet", +] diff --git a/substrate/bin/node-sassafras/node/build.rs b/substrate/bin/node-sassafras/node/build.rs new 
file mode 100644 index 000000000000..e3bfe3116bf2 --- /dev/null +++ b/substrate/bin/node-sassafras/node/build.rs @@ -0,0 +1,7 @@ +use substrate_build_script_utils::{generate_cargo_keys, rerun_if_git_head_changed}; + +fn main() { + generate_cargo_keys(); + + rerun_if_git_head_changed(); +} diff --git a/substrate/bin/node-sassafras/node/src/chain_spec.rs b/substrate/bin/node-sassafras/node/src/chain_spec.rs new file mode 100644 index 000000000000..faed562835e4 --- /dev/null +++ b/substrate/bin/node-sassafras/node/src/chain_spec.rs @@ -0,0 +1,162 @@ +#[cfg(feature = "use-session-pallet")] +use node_sassafras_runtime::SessionKeys; +use node_sassafras_runtime::{AccountId, RuntimeGenesisConfig, Signature, WASM_BINARY}; +use sc_service::ChainType; +use sp_consensus_grandpa::AuthorityId as GrandpaId; +use sp_consensus_sassafras::AuthorityId as SassafrasId; +use sp_core::{sr25519, Pair, Public}; +use sp_runtime::traits::{IdentifyAccount, Verify}; + +// Genesis constants for Sassafras parameters configuration. +const SASSAFRAS_TICKETS_MAX_ATTEMPTS_NUMBER: u32 = 8; +const SASSAFRAS_TICKETS_REDUNDANCY_FACTOR: u32 = 1; + +/// Specialized `ChainSpec`. This is a specialization of the general Substrate ChainSpec type. +/// Ec-utils host functions required to construct the test `RingContext` instance. +pub type ChainSpec = sc_service::GenericChainSpec< + RuntimeGenesisConfig, + Option<()>, + ( + sp_crypto_ec_utils::bls12_381::host_calls::HostFunctions, + sp_crypto_ec_utils::ed_on_bls12_381_bandersnatch::host_calls::HostFunctions, + ), +>; + +/// Generate a crypto pair from seed. +pub fn get_from_seed<TPublic: Public>(seed: &str) -> <TPublic::Pair as Pair>::Public { + TPublic::Pair::from_string(&format!("//{}", seed), None) + .expect("static values are valid; qed") + .public() +} + +type AccountPublic = <Signature as Verify>::Signer; + +/// Generate an account id from seed. +pub fn get_account_id_from_seed<TPublic: Public>(seed: &str) -> AccountId +where + AccountPublic: From<<TPublic::Pair as Pair>::Public>, +{ + AccountPublic::from(get_from_seed::<TPublic>(seed)).into_account() +} + +/// Generate authority account id and keys from seed.
+pub fn authority_keys_from_seed(seed: &str) -> (AccountId, SassafrasId, GrandpaId) { + ( + get_account_id_from_seed::<sr25519::Public>(seed), + get_from_seed::<SassafrasId>(seed), + get_from_seed::<GrandpaId>(seed), + ) +} + +pub fn development_config() -> Result<ChainSpec, String> { + Ok(ChainSpec::builder( + WASM_BINARY.ok_or_else(|| "Development wasm not available".to_string())?, + None, + ) + .with_name("Development") + .with_id("dev") + .with_chain_type(ChainType::Development) + .with_genesis_config_patch(testnet_genesis( + vec![authority_keys_from_seed("Alice")], + get_account_id_from_seed::<sr25519::Public>("Alice"), + vec![ + get_account_id_from_seed::<sr25519::Public>("Alice"), + get_account_id_from_seed::<sr25519::Public>("Bob"), + get_account_id_from_seed::<sr25519::Public>("Alice//stash"), + get_account_id_from_seed::<sr25519::Public>("Bob//stash"), + ], + )) + .build()) +} + +pub fn local_testnet_config() -> Result<ChainSpec, String> { + Ok(ChainSpec::builder( + WASM_BINARY.ok_or_else(|| "Development wasm not available".to_string())?, + None, + ) + .with_name("Local Testnet") + .with_id("local_testnet") + .with_chain_type(ChainType::Local) + .with_genesis_config_patch(testnet_genesis( + // Initial PoA authorities + vec![authority_keys_from_seed("Alice"), authority_keys_from_seed("Bob")], + // Sudo account + get_account_id_from_seed::<sr25519::Public>("Alice"), + // Pre-funded accounts + vec![ + get_account_id_from_seed::<sr25519::Public>("Alice"), + get_account_id_from_seed::<sr25519::Public>("Bob"), + get_account_id_from_seed::<sr25519::Public>("Charlie"), + get_account_id_from_seed::<sr25519::Public>("Dave"), + get_account_id_from_seed::<sr25519::Public>("Eve"), + get_account_id_from_seed::<sr25519::Public>("Ferdie"), + get_account_id_from_seed::<sr25519::Public>("Alice//stash"), + get_account_id_from_seed::<sr25519::Public>("Bob//stash"), + get_account_id_from_seed::<sr25519::Public>("Charlie//stash"), + get_account_id_from_seed::<sr25519::Public>("Dave//stash"), + get_account_id_from_seed::<sr25519::Public>("Eve//stash"), + get_account_id_from_seed::<sr25519::Public>("Ferdie//stash"), + ], + )) + .build()) +} + +#[cfg(feature = "use-session-pallet")] +fn testnet_genesis( + initial_authorities: Vec<(AccountId, SassafrasId, GrandpaId)>, + root_key: AccountId, + endowed_accounts: Vec<AccountId>, +) -> serde_json::Value { + serde_json::json!({ + "balances": { + "balances": endowed_accounts.iter().cloned().map(|k| (k, 1u64 << 60)).collect::<Vec<_>>(), + }, + "sassafras": { + "epochConfig": sp_consensus_sassafras::EpochConfiguration { + attempts_number: SASSAFRAS_TICKETS_MAX_ATTEMPTS_NUMBER, + redundancy_factor: SASSAFRAS_TICKETS_REDUNDANCY_FACTOR, + }, + }, + "session": { + "keys": initial_authorities + .iter() + .map(|x| { + ( + x.0.clone(), + x.0.clone(), + SessionKeys { sassafras: x.1.clone(), grandpa: x.2.clone() }, + ) + }) + .collect::<Vec<_>>(), + }, + "sudo": { + "key": Some(root_key), + }, + }) +} + +#[cfg(not(feature = "use-session-pallet"))] +fn testnet_genesis( + initial_authorities: Vec<(AccountId, SassafrasId, GrandpaId)>, + root_key: AccountId, + endowed_accounts: Vec<AccountId>, +) -> serde_json::Value { + serde_json::json!({ + "balances": { + "balances": endowed_accounts.iter().cloned().map(|k| (k, 1u64 << 60)).collect::<Vec<_>>(), + }, + "sassafras": { + "authorities": initial_authorities.iter().map(|x| x.1.clone()).collect::<Vec<_>>(), + "epochConfig": sp_consensus_sassafras::EpochConfiguration { + attempts_number: SASSAFRAS_TICKETS_MAX_ATTEMPTS_NUMBER, + redundancy_factor: SASSAFRAS_TICKETS_REDUNDANCY_FACTOR, + }, + }, + "grandpa": { + "authorities": initial_authorities.iter().map(|x| (x.2.clone(), 1)).collect::<Vec<_>>(), + }, + "sudo": { + "key": Some(root_key), + }, + }) +} diff --git a/substrate/bin/node-sassafras/node/src/cli.rs b/substrate/bin/node-sassafras/node/src/cli.rs new file mode 100644 index 000000000000..5bc6c9b102aa --- /dev/null +++
b/substrate/bin/node-sassafras/node/src/cli.rs @@ -0,0 +1,45 @@ +use sc_cli::RunCmd; + +#[derive(Debug, clap::Parser)] +pub struct Cli { + #[clap(subcommand)] + pub subcommand: Option<Subcommand>, + + #[clap(flatten)] + pub run: RunCmd, +} + +#[derive(Debug, clap::Subcommand)] +pub enum Subcommand { + /// Key management cli utilities + #[clap(subcommand)] + Key(sc_cli::KeySubcommand), + + /// Build a chain specification. + BuildSpec(sc_cli::BuildSpecCmd), + + /// Validate blocks. + CheckBlock(sc_cli::CheckBlockCmd), + + /// Export blocks. + ExportBlocks(sc_cli::ExportBlocksCmd), + + /// Export the state of a given block into a chain spec. + ExportState(sc_cli::ExportStateCmd), + + /// Import blocks. + ImportBlocks(sc_cli::ImportBlocksCmd), + + /// Remove the whole chain. + PurgeChain(sc_cli::PurgeChainCmd), + + /// Revert the chain to a previous state. + Revert(sc_cli::RevertCmd), + + /// Sub-commands concerned with benchmarking. + #[clap(subcommand)] + Benchmark(frame_benchmarking_cli::BenchmarkCmd), + + /// Db meta columns information. + ChainInfo(sc_cli::ChainInfoCmd), +} diff --git a/substrate/bin/node-sassafras/node/src/command.rs b/substrate/bin/node-sassafras/node/src/command.rs new file mode 100644 index 000000000000..12e562d8c38e --- /dev/null +++ b/substrate/bin/node-sassafras/node/src/command.rs @@ -0,0 +1,142 @@ +use crate::{ + chain_spec, + cli::{Cli, Subcommand}, + service, +}; +use frame_benchmarking_cli::BenchmarkCmd; +use node_sassafras_runtime::Block; +use sc_cli::SubstrateCli; +use sc_service::PartialComponents; + +impl SubstrateCli for Cli { + fn impl_name() -> String { + "Sassafras Node".into() + } + + fn impl_version() -> String { + env!("SUBSTRATE_CLI_IMPL_VERSION").into() + } + + fn description() -> String { + env!("CARGO_PKG_DESCRIPTION").into() + } + + fn author() -> String { + env!("CARGO_PKG_AUTHORS").into() + } + + fn support_url() -> String { + "support.anonymous.an".into() + } + + fn copyright_start_year() -> i32 { + 2023 + } + + fn load_spec(&self, id: &str) -> Result<Box<dyn sc_service::ChainSpec>, String> { + Ok(match id { + "dev" => Box::new(chain_spec::development_config()?), + "" | "local" => Box::new(chain_spec::local_testnet_config()?), + path => + Box::new(chain_spec::ChainSpec::from_json_file(std::path::PathBuf::from(path))?), + }) + } +} + +/// Parse and run command line arguments +pub fn run() -> sc_cli::Result<()> { + let cli = Cli::from_args(); + + match &cli.subcommand { + Some(Subcommand::Key(cmd)) => cmd.run(&cli), + Some(Subcommand::BuildSpec(cmd)) => { + let runner = cli.create_runner(cmd)?; + runner.sync_run(|config| cmd.run(config.chain_spec, config.network)) + }, + Some(Subcommand::CheckBlock(cmd)) => { + let runner = cli.create_runner(cmd)?; + runner.async_run(|config| { + let PartialComponents { client, task_manager, import_queue, .. } = + service::new_partial(&config)?; + Ok((cmd.run(client, import_queue), task_manager)) + }) + }, + Some(Subcommand::ExportBlocks(cmd)) => { + let runner = cli.create_runner(cmd)?; + runner.async_run(|config| { + let PartialComponents { client, task_manager, .. } = service::new_partial(&config)?; + Ok((cmd.run(client, config.database), task_manager)) + }) + }, + Some(Subcommand::ExportState(cmd)) => { + let runner = cli.create_runner(cmd)?; + runner.async_run(|config| { + let PartialComponents { client, task_manager, ..
} = service::new_partial(&config)?; + Ok((cmd.run(client, config.chain_spec), task_manager)) + }) + }, + Some(Subcommand::ImportBlocks(cmd)) => { + let runner = cli.create_runner(cmd)?; + runner.async_run(|config| { + let PartialComponents { client, task_manager, import_queue, .. } = + service::new_partial(&config)?; + Ok((cmd.run(client, import_queue), task_manager)) + }) + }, + Some(Subcommand::PurgeChain(cmd)) => { + let runner = cli.create_runner(cmd)?; + runner.sync_run(|config| cmd.run(config.database)) + }, + Some(Subcommand::Revert(cmd)) => { + let runner = cli.create_runner(cmd)?; + runner.async_run(|config| { + let PartialComponents { client, task_manager, backend, .. } = + service::new_partial(&config)?; + let aux_revert = Box::new(|client, backend, blocks| { + sc_consensus_sassafras::revert(backend, blocks)?; + sc_consensus_grandpa::revert(client, blocks)?; + Ok(()) + }); + Ok((cmd.run(client, backend, Some(aux_revert)), task_manager)) + }) + }, + Some(Subcommand::Benchmark(cmd)) => { + let runner = cli.create_runner(cmd)?; + + runner.sync_run(|config| { + // This switch needs to be in the client, since the client decides + // which sub-commands it wants to support. + match cmd { + BenchmarkCmd::Pallet(cmd) => { + if !cfg!(feature = "runtime-benchmarks") { + return Err( + "Runtime benchmarking wasn't enabled when building the node. \ + You can enable it with `--features runtime-benchmarks`." + .into(), + ) + } + + cmd.run::<Block, ()>(config) + }, + _ => { + eprintln!("Not implemented..."); + Ok(()) + }, + } + }) + }, + Some(Subcommand::ChainInfo(cmd)) => { + let runner = cli.create_runner(cmd)?; + runner.sync_run(|config| cmd.run::<Block>(&config)) + }, + None => { + let runner = cli.create_runner(&cli.run)?; + runner.run_node_until_exit(|config| async move { + service::new_full(config).map_err(sc_cli::Error::Service) + }) + }, + } +} diff --git a/substrate/bin/node-sassafras/node/src/main.rs b/substrate/bin/node-sassafras/node/src/main.rs new file mode 100644 index 000000000000..4449d28b9fa4 --- /dev/null +++ b/substrate/bin/node-sassafras/node/src/main.rs @@ -0,0 +1,13 @@ +//! Substrate Node Template CLI library. +#![warn(missing_docs)] + +mod chain_spec; +#[macro_use] +mod service; +mod cli; +mod command; +mod rpc; + +fn main() -> sc_cli::Result<()> { + command::run() +} diff --git a/substrate/bin/node-sassafras/node/src/rpc.rs b/substrate/bin/node-sassafras/node/src/rpc.rs new file mode 100644 index 000000000000..72c7b3d69ba1 --- /dev/null +++ b/substrate/bin/node-sassafras/node/src/rpc.rs @@ -0,0 +1,57 @@ +//! A collection of node-specific RPC methods. +//! Substrate provides the `sc-rpc` crate, which defines the core RPC layer +//! used by Substrate nodes. This file extends those RPC definitions with +//! capabilities that are specific to this project's runtime configuration. + +#![warn(missing_docs)] + +use std::sync::Arc; + +use jsonrpsee::RpcModule; +use node_sassafras_runtime::{opaque::Block, AccountId, Balance, Nonce}; +use sc_transaction_pool_api::TransactionPool; +use sp_api::ProvideRuntimeApi; +use sp_block_builder::BlockBuilder; +use sp_blockchain::{Error as BlockChainError, HeaderBackend, HeaderMetadata}; + +pub use sc_rpc_api::DenyUnsafe; + +/// Full client dependencies. +pub struct FullDeps<C, P> { + /// The client instance to use. + pub client: Arc<C>, + /// Transaction pool instance. + pub pool: Arc<P>, + /// Whether to deny unsafe calls + pub deny_unsafe: DenyUnsafe, +} + +/// Instantiate all full RPC extensions. +pub fn create_full<C, P>( + deps: FullDeps<C, P>, +) -> Result<RpcModule<()>, Box<dyn std::error::Error + Send + Sync>> +where + C: ProvideRuntimeApi<Block>, + C: HeaderBackend<Block> + HeaderMetadata<Block, Error = BlockChainError> + 'static, + C: Send + Sync + 'static, + C::Api: substrate_frame_rpc_system::AccountNonceApi<Block, AccountId, Nonce>, + C::Api: pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi<Block, Balance>, + C::Api: BlockBuilder<Block>, + P: TransactionPool + 'static, +{ + use pallet_transaction_payment_rpc::{TransactionPayment, TransactionPaymentApiServer}; + use substrate_frame_rpc_system::{System, SystemApiServer}; + + let mut module = RpcModule::new(()); + let FullDeps { client, pool, deny_unsafe } = deps; + + module.merge(System::new(client.clone(), pool.clone(), deny_unsafe).into_rpc())?; + module.merge(TransactionPayment::new(client).into_rpc())?; + + // Extend this RPC with a custom API by using the following syntax. + // `YourRpcStruct` should have a reference to a client, which is needed + // to call into the runtime. + // `module.merge(YourRpcTrait::into_rpc(YourRpcStruct::new(ReferenceToClient, ...)))?;` + + Ok(module) +} diff --git a/substrate/bin/node-sassafras/node/src/service.rs b/substrate/bin/node-sassafras/node/src/service.rs new file mode 100644 index 000000000000..52537ca0c9df --- /dev/null +++ b/substrate/bin/node-sassafras/node/src/service.rs @@ -0,0 +1,326 @@ +//! Service and ServiceFactory implementation. Specialized wrapper over substrate service. + +use futures::FutureExt; +use node_sassafras_runtime::{self, opaque::Block, RuntimeApi}; +use sc_client_api::{Backend, BlockBackend}; +use sc_consensus_grandpa::SharedVoterState; +use sc_service::{error::Error as ServiceError, Configuration, TaskManager, WarpSyncParams}; +use sc_telemetry::{Telemetry, TelemetryWorker}; +use sc_transaction_pool_api::OffchainTransactionPoolFactory; +use std::{sync::Arc, time::Duration}; + +/// Only enable the benchmarking host functions when we actually want to benchmark. +#[cfg(feature = "runtime-benchmarks")] +type HostFunctions = ( + sp_io::SubstrateHostFunctions, + sp_crypto_ec_utils::bls12_381::host_calls::HostFunctions, + sp_crypto_ec_utils::ed_on_bls12_381_bandersnatch::host_calls::HostFunctions, + frame_benchmarking::benchmarking::HostFunctions, +); +/// Otherwise we use the default Substrate host functions plus the ec-utils host calls. +#[cfg(not(feature = "runtime-benchmarks"))] +type HostFunctions = ( + sp_io::SubstrateHostFunctions, + sp_crypto_ec_utils::bls12_381::host_calls::HostFunctions, + sp_crypto_ec_utils::ed_on_bls12_381_bandersnatch::host_calls::HostFunctions, +); + +pub(crate) type FullClient = + sc_service::TFullClient<Block, RuntimeApi, sc_executor::WasmExecutor<HostFunctions>>; +type FullBackend = sc_service::TFullBackend<Block>; +type FullSelectChain = sc_consensus::LongestChain<FullBackend, Block>; + +type FullGrandpaBlockImport = + sc_consensus_grandpa::GrandpaBlockImport<FullBackend, Block, FullClient, FullSelectChain>; + +/// The minimum period of blocks on which justifications will be +/// imported and generated.
+const GRANDPA_JUSTIFICATION_PERIOD: u32 = 512; + +fn create_inherent_data_providers() -> impl sp_inherents::CreateInherentDataProviders< + Block, + (), + InherentDataProviders = impl sc_consensus_slots::InherentDataProviderExt, +> { + |_block_hash, _extra_args| async move { + let slot_duration_ms = node_sassafras_runtime::SLOT_DURATION_IN_MILLISECONDS; + let slot_duration = sp_consensus_slots::SlotDuration::from_millis(slot_duration_ms); + let timestamp = sp_timestamp::InherentDataProvider::from_system_time(); + // TODO: implement a shared sc-consensus-slots::InherentDataProvider + // (shared between babe and sassafras) + let slot = + sc_consensus_sassafras::InherentDataProvider::from_timestamp(*timestamp, slot_duration); + Ok((slot, timestamp)) + } +} + +pub fn new_partial( + config: &Configuration, +) -> Result< + sc_service::PartialComponents< + FullClient, + FullBackend, + FullSelectChain, + sc_consensus::DefaultImportQueue<Block>, + sc_transaction_pool::FullPool<Block, FullClient>, + ( + sc_consensus_sassafras::SassafrasBlockImport<Block, FullClient, FullGrandpaBlockImport>, + sc_consensus_sassafras::SassafrasLink<Block>, + sc_consensus_grandpa::LinkHalf<Block, FullClient, FullSelectChain>, + Option<Telemetry>, + ), + >, + ServiceError, +> { + let telemetry = config + .telemetry_endpoints + .clone() + .filter(|x| !x.is_empty()) + .map(|endpoints| -> Result<_, sc_telemetry::Error> { + let worker = TelemetryWorker::new(16)?; + let telemetry = worker.handle().new_telemetry(endpoints); + Ok((worker, telemetry)) + }) + .transpose()?; + + let executor = sc_service::new_wasm_executor::<HostFunctions>(&config); + + let (client, backend, keystore_container, task_manager) = + sc_service::new_full_parts::<Block, RuntimeApi, _>( + config, + telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()), + executor, + )?; + let client = Arc::new(client); + + let telemetry = telemetry.map(|(worker, telemetry)| { + task_manager.spawn_handle().spawn("telemetry", None, worker.run()); + telemetry + }); + + let select_chain = sc_consensus::LongestChain::new(backend.clone()); + + let transaction_pool = sc_transaction_pool::BasicPool::new_full( + config.transaction_pool.clone(), + config.role.is_authority().into(), + config.prometheus_registry(), + task_manager.spawn_essential_handle(), + client.clone(), + ); + + let (grandpa_block_import, grandpa_link) = sc_consensus_grandpa::block_import( + client.clone(), + GRANDPA_JUSTIFICATION_PERIOD, + &client, + select_chain.clone(), + telemetry.as_ref().map(|x| x.handle()), + )?; + + let justification_import = grandpa_block_import.clone(); + + let (sassafras_block_import, sassafras_link) = sc_consensus_sassafras::block_import( + sc_consensus_sassafras::finalized_configuration(&*client)?, + grandpa_block_import, + client.clone(), + )?; + + let import_queue = sc_consensus_sassafras::import_queue( + sassafras_link.clone(), + sassafras_block_import.clone(), + Some(Box::new(justification_import)), + client.clone(), + select_chain.clone(), + create_inherent_data_providers(), + &task_manager.spawn_essential_handle(), + config.prometheus_registry(), + telemetry.as_ref().map(|x| x.handle()), + )?; + + Ok(sc_service::PartialComponents { + client, + backend, + task_manager, + import_queue, + keystore_container, + select_chain, + transaction_pool, + other: (sassafras_block_import, sassafras_link, grandpa_link, telemetry), + }) +} + +/// Builds a new service for a full client.
+pub fn new_full(config: Configuration) -> Result<TaskManager, ServiceError> { + let sc_service::PartialComponents { + client, + backend, + mut task_manager, + import_queue, + keystore_container, + select_chain, + transaction_pool, + other: (block_import, sassafras_link, grandpa_link, mut telemetry), + } = new_partial(&config)?; + + let mut net_config = sc_network::config::FullNetworkConfiguration::new(&config.network); + + let grandpa_protocol_name = sc_consensus_grandpa::protocol_standard_name( + &client.block_hash(0).ok().flatten().expect("Genesis block exists; qed"), + &config.chain_spec, + ); + let (grandpa_protocol_config, grandpa_notification_service) = + sc_consensus_grandpa::grandpa_peers_set_config(grandpa_protocol_name.clone()); + net_config.add_notification_protocol(grandpa_protocol_config); + + let warp_sync = Arc::new(sc_consensus_grandpa::warp_proof::NetworkProvider::new( + backend.clone(), + grandpa_link.shared_authority_set().clone(), + Vec::default(), + )); + + let (network, system_rpc_tx, tx_handler_controller, network_starter, sync_service) = + sc_service::build_network(sc_service::BuildNetworkParams { + config: &config, + net_config, + client: client.clone(), + transaction_pool: transaction_pool.clone(), + spawn_handle: task_manager.spawn_handle(), + import_queue, + block_announce_validator_builder: None, + warp_sync_params: Some(WarpSyncParams::WithProvider(warp_sync)), + block_relay: None, + })?; + + if config.offchain_worker.enabled { + task_manager.spawn_handle().spawn( + "offchain-workers-runner", + "offchain-worker", + sc_offchain::OffchainWorkers::new(sc_offchain::OffchainWorkerOptions { + runtime_api_provider: client.clone(), + is_validator: config.role.is_authority(), + keystore: Some(keystore_container.keystore()), + offchain_db: backend.offchain_storage(), + transaction_pool: Some(OffchainTransactionPoolFactory::new( + transaction_pool.clone(), + )), + network_provider: network.clone(), + enable_http_requests: true, + custom_extensions: |_| vec![], + }) + .run(client.clone(), task_manager.spawn_handle()) + .boxed(), + ); + } + + let role = config.role.clone(); + let force_authoring = config.force_authoring; + let name = config.network.node_name.clone(); + let enable_grandpa = !config.disable_grandpa; + let prometheus_registry = config.prometheus_registry().cloned(); + + let rpc_extensions_builder = { + let client = client.clone(); + let pool = transaction_pool.clone(); + + Box::new(move |deny_unsafe, _| { + let deps = + crate::rpc::FullDeps { client: client.clone(), pool: pool.clone(), deny_unsafe }; + crate::rpc::create_full(deps).map_err(Into::into) + }) + }; + + sc_service::spawn_tasks(sc_service::SpawnTasksParams { + network: network.clone(), + client: client.clone(), + keystore: keystore_container.keystore(), + task_manager: &mut task_manager, + transaction_pool: transaction_pool.clone(), + rpc_builder: rpc_extensions_builder, + backend, + system_rpc_tx, + tx_handler_controller, + sync_service: sync_service.clone(), + config, + telemetry: telemetry.as_mut(), + })?; + + if role.is_authority() { + let proposer = sc_basic_authorship::ProposerFactory::new( + task_manager.spawn_handle(), + client.clone(), + transaction_pool.clone(), + prometheus_registry.as_ref(), + telemetry.as_ref().map(|x| x.handle()), + ); + + let sassafras_params = sc_consensus_sassafras::SassafrasWorkerParams { + client: client.clone(), + keystore: keystore_container.keystore(), + select_chain, + env: proposer, + block_import, + sassafras_link, + sync_oracle: sync_service.clone(), + justification_sync_link:
sync_service.clone(), + force_authoring, + create_inherent_data_providers: create_inherent_data_providers(), + offchain_tx_pool_factory: OffchainTransactionPoolFactory::new(transaction_pool.clone()), + }; + + let sassafras = sc_consensus_sassafras::start_sassafras(sassafras_params)?; + + // the Sassafras authoring task is considered essential, i.e. if it + // fails we take down the service with it. + task_manager.spawn_essential_handle().spawn_blocking( + "sassafras", + Some("block-authoring"), + sassafras, + ); + } + + if enable_grandpa { + // if the node isn't actively participating in consensus then it doesn't + // need a keystore, regardless of which protocol we use below. + let keystore = role.is_authority().then(|| keystore_container.keystore()); + + let grandpa_config = sc_consensus_grandpa::Config { + gossip_duration: Duration::from_millis(333), + justification_generation_period: GRANDPA_JUSTIFICATION_PERIOD, + name: Some(name), + observer_enabled: false, + keystore, + local_role: role, + telemetry: telemetry.as_ref().map(|x| x.handle()), + protocol_name: grandpa_protocol_name, + }; + + // start the full GRANDPA voter + // NOTE: non-authorities could run the GRANDPA observer protocol, but at + // this point the full voter should provide better guarantees of block + // and vote data availability than the observer. The observer has not + // been tested extensively yet and having most nodes in a network run it + // could lead to finality stalls. + let grandpa_params = sc_consensus_grandpa::GrandpaParams { + config: grandpa_config, + link: grandpa_link, + network, + sync: Arc::new(sync_service), + notification_service: grandpa_notification_service, + voting_rule: sc_consensus_grandpa::VotingRulesBuilder::default().build(), + prometheus_registry, + shared_voter_state: SharedVoterState::empty(), + telemetry: telemetry.as_ref().map(|x| x.handle()), + offchain_tx_pool_factory: OffchainTransactionPoolFactory::new(transaction_pool), + }; + + // the GRANDPA voter task is considered infallible, i.e. + // if it fails we take down the service with it. + task_manager.spawn_essential_handle().spawn_blocking( + "grandpa-voter", + None, + sc_consensus_grandpa::run_grandpa_voter(grandpa_params)?, + ); + } + + network_starter.start_network(); + Ok(task_manager) +} diff --git a/substrate/bin/node-sassafras/runtime/Cargo.toml b/substrate/bin/node-sassafras/runtime/Cargo.toml new file mode 100644 index 000000000000..ddb56b36fab7 --- /dev/null +++ b/substrate/bin/node-sassafras/runtime/Cargo.toml @@ -0,0 +1,98 @@ +[package] +name = "node-sassafras-runtime" +version = "0.3.4-dev" +authors = ["Davide Galassi", "Parity Technologies"] +description = "Runtime testbed for Sassafras consensus."
+homepage = "https://substrate.io/" +edition = "2021" +license = "Unlicense" +publish = false + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } + +pallet-sassafras = { path = "../../../frame/sassafras", default-features = false, features = ["construct-dummy-ring-context"] } +pallet-balances = { path = "../../../frame/balances", default-features = false } +pallet-session = { path = "../../../frame/session", default-features = false } +frame-support = { path = "../../../frame/support", default-features = false } +pallet-grandpa = { path = "../../../frame/grandpa", default-features = false } +pallet-sudo = { path = "../../../frame/sudo", default-features = false } +frame-system = { path = "../../../frame/system", default-features = false } +pallet-timestamp = { path = "../../../frame/timestamp", default-features = false } +pallet-transaction-payment = { path = "../../../frame/transaction-payment", default-features = false } +frame-executive = { path = "../../../frame/executive", default-features = false } +sp-api = { path = "../../../primitives/api", default-features = false } +sp-block-builder = { path = "../../../primitives/block-builder", default-features = false } +sp-consensus-sassafras = { path = "../../../primitives/consensus/sassafras", default-features = false } +sp-core = { path = "../../../primitives/core", default-features = false } +sp-inherents = { path = "../../../primitives/inherents", default-features = false } +sp-offchain = { path = "../../../primitives/offchain", default-features = false } +sp-runtime = { path = "../../../primitives/runtime", default-features = false } +sp-session = { path = "../../../primitives/session", default-features = false } +sp-std = { path = "../../../primitives/std", default-features = false } +sp-storage = { path = "../../../primitives/storage", default-features = false } +sp-transaction-pool = { path = "../../../primitives/transaction-pool", default-features = false } +sp-version = { path = "../../../primitives/version", default-features = false } +sp-genesis-builder = { path = "../../../primitives/genesis-builder", default-features = false } + +# Used for the node template's RPCs +frame-system-rpc-runtime-api = { path = "../../../frame/system/rpc/runtime-api/", default-features = false } +pallet-transaction-payment-rpc-runtime-api = { path = "../../../frame/transaction-payment/rpc/runtime-api/", default-features = false } + +# Used for runtime benchmarking +frame-benchmarking = { path = "../../../frame/benchmarking", optional = true, default-features = false } +frame-system-benchmarking = { path = "../../../frame/system/benchmarking", optional = true, default-features = false } + +[build-dependencies] +substrate-wasm-builder = { path = "../../../utils/wasm-builder" } + +[features] +default = ["std"] +std = [ + "codec/std", + "frame-benchmarking/std", + "frame-executive/std", + "frame-support/std", + "frame-system-benchmarking/std", + "frame-system-rpc-runtime-api/std", + "frame-system/std", + "pallet-balances/std", + "pallet-grandpa/std", + "pallet-sassafras/std", + "pallet-session/std", + "pallet-sudo/std", + "pallet-timestamp/std", + "pallet-transaction-payment-rpc-runtime-api/std", + "pallet-transaction-payment/std", + "scale-info/std", + "sp-api/std", + "sp-block-builder/std", + "sp-consensus-sassafras/std", 
+ "sp-core/std", + "sp-genesis-builder/std", + "sp-inherents/std", + "sp-offchain/std", + "sp-runtime/std", + "sp-session/std", + "sp-std/std", + "sp-storage/std", + "sp-transaction-pool/std", + "sp-version/std", +] +runtime-benchmarks = [ + "frame-benchmarking/runtime-benchmarks", + "frame-support/runtime-benchmarks", + "frame-system-benchmarking/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "pallet-balances/runtime-benchmarks", + "pallet-grandpa/runtime-benchmarks", + "pallet-sassafras/runtime-benchmarks", + "pallet-sudo/runtime-benchmarks", + "pallet-timestamp/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", +] +use-session-pallet = ["pallet-sassafras/session-pallet-support"] diff --git a/substrate/bin/node-sassafras/runtime/build.rs b/substrate/bin/node-sassafras/runtime/build.rs new file mode 100644 index 000000000000..9b53d2457dff --- /dev/null +++ b/substrate/bin/node-sassafras/runtime/build.rs @@ -0,0 +1,9 @@ +use substrate_wasm_builder::WasmBuilder; + +fn main() { + WasmBuilder::new() + .with_current_project() + .export_heap_base() + .import_memory() + .build() +} diff --git a/substrate/bin/node-sassafras/runtime/src/lib.rs b/substrate/bin/node-sassafras/runtime/src/lib.rs new file mode 100644 index 000000000000..c01864fd996e --- /dev/null +++ b/substrate/bin/node-sassafras/runtime/src/lib.rs @@ -0,0 +1,569 @@ +#![cfg_attr(not(feature = "std"), no_std)] +// `construct_runtime!` does a lot of recursion and requires us to increase the limit to 256. +#![recursion_limit = "256"] + +// Make the WASM binary available. +#[cfg(feature = "std")] +include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); + +use sp_api::impl_runtime_apis; +use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; +#[cfg(feature = "use-session-pallet")] +use sp_runtime::traits::OpaqueKeys; +use sp_runtime::{ + create_runtime_str, generic, impl_opaque_keys, + traits::{AccountIdLookup, BlakeTwo256, Block as BlockT, IdentifyAccount, NumberFor, Verify}, + transaction_validity::{TransactionSource, TransactionValidity}, + ApplyExtrinsicResult, MultiSignature, Perbill, +}; +use sp_std::prelude::*; +#[cfg(feature = "std")] +use sp_version::NativeVersion; +use sp_version::RuntimeVersion; + +use pallet_grandpa::{ + fg_primitives, AuthorityId as GrandpaId, AuthorityList as GrandpaAuthorityList, +}; +use pallet_transaction_payment::CurrencyAdapter; + +use frame_support::{ + construct_runtime, genesis_builder_helper, parameter_types, + traits::{ConstU128, ConstU32, ConstU64, ConstU8}, + weights::{ + constants::{RocksDbWeight, WEIGHT_REF_TIME_PER_SECOND}, + IdentityFee, Weight, + }, +}; + +/// An index to a block. +pub type BlockNumber = u32; + +/// Alias to 512-bit hash when used in the context of a transaction signature on the chain. +pub type Signature = MultiSignature; + +/// A hash of some data used by the chain. +pub type Hash = sp_core::H256; + +/// Block header type as expected by this runtime. +pub type Header = generic::Header; + +/// The SignedExtension to the basic transaction logic. +pub type SignedExtra = ( + frame_system::CheckNonZeroSender, + frame_system::CheckSpecVersion, + frame_system::CheckTxVersion, + frame_system::CheckGenesis, + frame_system::CheckEra, + frame_system::CheckNonce, + frame_system::CheckWeight, + pallet_transaction_payment::ChargeTransactionPayment, +); + +/// Unchecked extrinsic type as expected by this runtime. +pub type UncheckedExtrinsic = + generic::UncheckedExtrinsic; + +/// Block type as expected by this runtime. 
+pub type Block = generic::Block<Header, UncheckedExtrinsic>; + +/// Some way of identifying an account on the chain. We intentionally make it equivalent +/// to the public key of our transaction signing scheme. +pub type AccountId = <<Signature as Verify>::Signer as IdentifyAccount>::AccountId; + +/// The address format for describing accounts. +pub type Address = sp_runtime::MultiAddress<AccountId, ()>; + +/// Balance of an account. +pub type Balance = u128; + +/// Index of a transaction in the chain. +pub type Nonce = u32; + +/// The payload being signed in transactions. +pub type SignedPayload = generic::SignedPayload<RuntimeCall, SignedExtra>; + +/// Executive: handles dispatch to the various modules. +pub type Executive = frame_executive::Executive< + Runtime, + Block, + frame_system::ChainContext<Runtime>, + Runtime, + AllPalletsWithSystem, +>; + +/// Opaque types. These are used by the CLI to instantiate machinery that doesn't need to know +/// the specifics of the runtime. They can then be made to be agnostic over specific formats +/// of data like extrinsics, allowing for them to continue syncing the network through upgrades +/// to even the core data structures. +pub mod opaque { + use super::*; + + pub use sp_runtime::OpaqueExtrinsic as UncheckedExtrinsic; + /// Opaque block header type. + pub type Header = generic::Header<BlockNumber, BlakeTwo256>; + /// Opaque block type. + pub type Block = generic::Block<Header, UncheckedExtrinsic>; + /// Opaque block identifier type. + pub type BlockId = generic::BlockId<Block>; +} + +impl_opaque_keys! { + pub struct SessionKeys { + pub sassafras: Sassafras, + pub grandpa: Grandpa, + } +} + +#[sp_version::runtime_version] +pub const VERSION: RuntimeVersion = RuntimeVersion { + spec_name: create_runtime_str!("node-sassafras"), + impl_name: create_runtime_str!("node-sassafras"), + authoring_version: 1, + spec_version: 100, + impl_version: 1, + apis: RUNTIME_API_VERSIONS, + transaction_version: 1, + state_version: 1, +}; + +/// Sassafras slot duration in milliseconds. +pub const SLOT_DURATION_IN_MILLISECONDS: u64 = 3000; + +/// Sassafras epoch duration in slots. +pub const EPOCH_DURATION_IN_SLOTS: u32 = 10; + +/// Max authorities for both Sassafras and Grandpa. +pub const MAX_AUTHORITIES: u32 = 32; + +/// The version information used to identify this runtime when compiled natively. +#[cfg(feature = "std")] +pub fn native_version() -> NativeVersion { + NativeVersion { runtime_version: VERSION, can_author_with: Default::default() } +} + +// Required to send unsigned transactions from the Sassafras pallet. +// TODO @davxy: doesn't grandpa require the same thing? +impl<C> frame_system::offchain::SendTransactionTypes<C> for Runtime +where + RuntimeCall: From<C>, +{ + type Extrinsic = UncheckedExtrinsic; + type OverarchingCall = RuntimeCall; +} + +const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75); + +parameter_types! { + pub const BlockHashCount: BlockNumber = 2400; + pub const Version: RuntimeVersion = VERSION; + /// We allow for 2 seconds of compute with a 3 second average block time. + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::with_sensible_defaults( + Weight::from_parts(2u64 * WEIGHT_REF_TIME_PER_SECOND, u64::MAX), + NORMAL_DISPATCH_RATIO, + ); + pub BlockLength: frame_system::limits::BlockLength = frame_system::limits::BlockLength + ::max_with_normal_ratio(5 * 1024 * 1024, NORMAL_DISPATCH_RATIO); + pub const SS58Prefix: u8 = 42; +} + +// Configure FRAME pallets to include in runtime.
+ +impl frame_system::Config for Runtime { + type BaseCallFilter = frame_support::traits::Everything; + type Block = Block; + type BlockWeights = BlockWeights; + type BlockLength = BlockLength; + type AccountId = AccountId; + type RuntimeCall = RuntimeCall; + type Lookup = AccountIdLookup<AccountId, ()>; + type Nonce = Nonce; + type Hash = Hash; + type Hashing = BlakeTwo256; + type RuntimeEvent = RuntimeEvent; + type RuntimeOrigin = RuntimeOrigin; + type BlockHashCount = BlockHashCount; + type DbWeight = RocksDbWeight; + type Version = Version; + type PalletInfo = PalletInfo; + type OnNewAccount = (); + type OnKilledAccount = (); + type AccountData = pallet_balances::AccountData<Balance>; + type SystemWeightInfo = (); + type SS58Prefix = SS58Prefix; + type OnSetCode = (); + type MaxConsumers = frame_support::traits::ConstU32<16>; +} + +impl pallet_sassafras::Config for Runtime { + type EpochLength = ConstU32<EPOCH_DURATION_IN_SLOTS>; + type MaxAuthorities = ConstU32<MAX_AUTHORITIES>; + type WeightInfo = pallet_sassafras::weights::SubstrateWeight<Runtime>; + #[cfg(feature = "use-session-pallet")] + type EpochChangeTrigger = pallet_sassafras::EpochChangeExternalTrigger; + #[cfg(not(feature = "use-session-pallet"))] + type EpochChangeTrigger = pallet_sassafras::EpochChangeInternalTrigger; +} + +impl pallet_grandpa::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type WeightInfo = (); + type MaxAuthorities = ConstU32<MAX_AUTHORITIES>; + type MaxNominators = ConstU32<0>; + type MaxSetIdSessionEntries = ConstU64<0>; + type KeyOwnerProof = sp_core::Void; + type EquivocationReportSystem = (); +} + +impl pallet_timestamp::Config for Runtime { + type Moment = u64; + type OnTimestampSet = (); + type MinimumPeriod = ConstU64<{ SLOT_DURATION_IN_MILLISECONDS / 2 }>; + type WeightInfo = (); +} + +impl pallet_balances::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type MaxLocks = ConstU32<50>; + type MaxReserves = (); + type ReserveIdentifier = [u8; 8]; + type Balance = Balance; + type DustRemoval = (); + type ExistentialDeposit = ConstU128<500>; + type AccountStore = System; + type WeightInfo = pallet_balances::weights::SubstrateWeight<Runtime>; + type FreezeIdentifier = (); + type MaxFreezes = (); + type RuntimeHoldReason = (); + type RuntimeFreezeReason = (); + type MaxHolds = (); +} + +impl pallet_transaction_payment::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type OnChargeTransaction = CurrencyAdapter<Balances, ()>; + type OperationalFeeMultiplier = ConstU8<5>; + type WeightToFee = IdentityFee<Balance>; + type LengthToFee = IdentityFee<Balance>; + type FeeMultiplierUpdate = (); +} + +impl pallet_sudo::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type RuntimeCall = RuntimeCall; + type WeightInfo = pallet_sudo::weights::SubstrateWeight<Runtime>; +} + +#[cfg(feature = "use-session-pallet")] +impl pallet_session::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type ValidatorId = <Self as frame_system::Config>::AccountId; + type ValidatorIdOf = (); //pallet_staking::StashOf; + type ShouldEndSession = Sassafras; + type NextSessionRotation = Sassafras; + type SessionManager = (); //pallet_session::historical::NoteHistoricalRoot; + type SessionHandler = <SessionKeys as OpaqueKeys>::KeyTypeIdProviders; + type Keys = SessionKeys; + type WeightInfo = pallet_session::weights::SubstrateWeight<Runtime>; +} + +// Create a runtime using the session pallet +#[cfg(feature = "use-session-pallet")] +construct_runtime!( + pub enum Runtime + { + System: frame_system, + Timestamp: pallet_timestamp, + Sassafras: pallet_sassafras, + Grandpa: pallet_grandpa, + Balances: pallet_balances, + TransactionPayment: pallet_transaction_payment, + Sudo: pallet_sudo, + Session:
pallet_session, + } +); + +// Create a runtime NOT using session pallet +#[cfg(not(feature = "use-session-pallet"))] +construct_runtime!( + pub enum Runtime + { + System: frame_system, + Timestamp: pallet_timestamp, + Sassafras: pallet_sassafras, + Grandpa: pallet_grandpa, + Balances: pallet_balances, + TransactionPayment: pallet_transaction_payment, + Sudo: pallet_sudo, + } +); + +#[cfg(feature = "runtime-benchmarks")] +#[macro_use] +extern crate frame_benchmarking; + +#[cfg(feature = "runtime-benchmarks")] +mod benches { + define_benchmarks!( + [frame_benchmarking, BaselineBench::] + [frame_system, SystemBench::] + [pallet_balances, Balances] + [pallet_timestamp, Timestamp] + [pallet_grandpa, Grandpa] + [pallet_sassafras, Sassafras] + ); +} + +impl_runtime_apis! { + impl sp_api::Core for Runtime { + fn version() -> RuntimeVersion { + VERSION + } + + fn execute_block(block: Block) { + Executive::execute_block(block); + } + + fn initialize_block(header: &::Header) { + Executive::initialize_block(header) + } + } + + impl sp_api::Metadata for Runtime { + fn metadata() -> OpaqueMetadata { + OpaqueMetadata::new(Runtime::metadata().into()) + } + + fn metadata_at_version(version: u32) -> Option { + Runtime::metadata_at_version(version) + } + + fn metadata_versions() -> sp_std::vec::Vec { + Runtime::metadata_versions() + } + } + + impl sp_block_builder::BlockBuilder for Runtime { + fn apply_extrinsic(extrinsic: ::Extrinsic) -> ApplyExtrinsicResult { + Executive::apply_extrinsic(extrinsic) + } + + fn finalize_block() -> ::Header { + Executive::finalize_block() + } + + fn inherent_extrinsics(data: sp_inherents::InherentData) -> Vec<::Extrinsic> { + data.create_extrinsics() + } + + fn check_inherents( + block: Block, + data: sp_inherents::InherentData, + ) -> sp_inherents::CheckInherentsResult { + data.check_extrinsics(&block) + } + } + + impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { + fn validate_transaction( + source: TransactionSource, + tx: ::Extrinsic, + block_hash: ::Hash, + ) -> TransactionValidity { + Executive::validate_transaction(source, tx, block_hash) + } + } + + impl sp_offchain::OffchainWorkerApi for Runtime { + fn offchain_worker(header: &::Header) { + Executive::offchain_worker(header) + } + } + + impl sp_consensus_sassafras::SassafrasApi for Runtime { + fn ring_context() -> Option { + Sassafras::ring_context() + } + + fn submit_tickets_unsigned_extrinsic( + tickets: Vec + ) -> bool { + Sassafras::submit_tickets_unsigned_extrinsic(tickets) + } + + fn slot_ticket_id(slot: sp_consensus_sassafras::Slot) -> Option { + Sassafras::slot_ticket_id(slot) + } + + fn slot_ticket(slot: sp_consensus_sassafras::Slot) -> Option<(sp_consensus_sassafras::TicketId, sp_consensus_sassafras::TicketBody)> { + Sassafras::slot_ticket(slot) + } + + fn current_epoch() -> sp_consensus_sassafras::Epoch { + Sassafras::current_epoch() + } + + fn next_epoch() -> sp_consensus_sassafras::Epoch { + Sassafras::next_epoch() + } + + fn generate_key_ownership_proof( + _authority_id: sp_consensus_sassafras::AuthorityId, + ) -> Option { + None + } + + fn submit_report_equivocation_unsigned_extrinsic( + equivocation_proof: sp_consensus_sassafras::EquivocationProof<::Header>, + _key_owner_proof: sp_consensus_sassafras::OpaqueKeyOwnershipProof, + ) -> bool { + //let key_owner_proof = key_owner_proof.decode()?; + Sassafras::submit_unsigned_equivocation_report(equivocation_proof) + } + } + + impl sp_session::SessionKeys for Runtime { + fn generate_session_keys(seed: Option>) -> Vec { + 
SessionKeys::generate(seed) + } + + fn decode_session_keys(encoded: Vec) -> Option, KeyTypeId)>> { + SessionKeys::decode_into_raw_public_keys(&encoded) + } + } + + impl fg_primitives::GrandpaApi for Runtime { + fn grandpa_authorities() -> GrandpaAuthorityList { + Grandpa::grandpa_authorities() + } + + fn current_set_id() -> fg_primitives::SetId { + Grandpa::current_set_id() + } + + fn submit_report_equivocation_unsigned_extrinsic( + _equivocation_proof: fg_primitives::EquivocationProof< + ::Hash, + NumberFor, + >, + _key_owner_proof: fg_primitives::OpaqueKeyOwnershipProof, + ) -> Option<()> { + None + } + + fn generate_key_ownership_proof( + _set_id: fg_primitives::SetId, + _authority_id: GrandpaId, + ) -> Option { + // NOTE: this is the only implementation possible since we've + // defined our key owner proof type as a bottom type (i.e. a type + // with no values). + None + } + } + + impl frame_system_rpc_runtime_api::AccountNonceApi for Runtime { + fn account_nonce(account: AccountId) -> Nonce { + System::account_nonce(account) + } + } + + impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi for Runtime { + fn query_info( + uxt: ::Extrinsic, + len: u32, + ) -> pallet_transaction_payment_rpc_runtime_api::RuntimeDispatchInfo { + TransactionPayment::query_info(uxt, len) + } + + fn query_fee_details( + uxt: ::Extrinsic, + len: u32, + ) -> pallet_transaction_payment::FeeDetails { + TransactionPayment::query_fee_details(uxt, len) + } + + fn query_weight_to_fee(weight: Weight) -> Balance { + TransactionPayment::weight_to_fee(weight) + } + + fn query_length_to_fee(length: u32) -> Balance { + TransactionPayment::length_to_fee(length) + } + } + + impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentCallApi for Runtime { + fn query_call_info( + call: RuntimeCall, + len: u32, + ) -> pallet_transaction_payment::RuntimeDispatchInfo { + TransactionPayment::query_call_info(call, len) + } + + fn query_call_fee_details( + call: RuntimeCall, + len: u32, + ) -> pallet_transaction_payment::FeeDetails { + TransactionPayment::query_call_fee_details(call, len) + } + + fn query_weight_to_fee(weight: Weight) -> Balance { + TransactionPayment::weight_to_fee(weight) + } + + fn query_length_to_fee(length: u32) -> Balance { + TransactionPayment::length_to_fee(length) + } + } + + impl sp_genesis_builder::GenesisBuilder for Runtime { + fn create_default_config() -> Vec { + genesis_builder_helper::create_default_config::() + } + + fn build_config(config: Vec) -> sp_genesis_builder::Result { + genesis_builder_helper::build_config::(config) + } + } + + #[cfg(feature = "runtime-benchmarks")] + impl frame_benchmarking::Benchmark for Runtime { + fn benchmark_metadata(extra: bool) -> ( + Vec, + Vec, + ) { + use frame_benchmarking::{baseline, Benchmarking, BenchmarkList}; + use frame_support::traits::StorageInfoTrait; + use frame_system_benchmarking::Pallet as SystemBench; + use baseline::Pallet as BaselineBench; + + let mut list = Vec::::new(); + list_benchmarks!(list, extra); + + let storage_info = AllPalletsWithSystem::storage_info(); + + (list, storage_info) + } + + fn dispatch_benchmark( + config: frame_benchmarking::BenchmarkConfig + ) -> Result, sp_runtime::RuntimeString> { + use frame_benchmarking::{baseline, Benchmarking, BenchmarkBatch}; + use sp_storage::TrackedStorageKey; + use frame_system_benchmarking::Pallet as SystemBench; + use baseline::Pallet as BaselineBench; + + impl frame_system_benchmarking::Config for Runtime {} + impl baseline::Config for Runtime {} + + use 
frame_support::traits::WhitelistedStorageKeys; + let whitelist: Vec = AllPalletsWithSystem::whitelisted_storage_keys(); + + let mut batches = Vec::::new(); + let params = (&config, &whitelist); + add_benchmarks!(params, batches); + + Ok(batches) + } + } +} diff --git a/substrate/client/consensus/babe/src/tests.rs b/substrate/client/consensus/babe/src/tests.rs index 6f805188b9a4..5782709d6721 100644 --- a/substrate/client/consensus/babe/src/tests.rs +++ b/substrate/client/consensus/babe/src/tests.rs @@ -33,7 +33,6 @@ use sp_consensus_babe::{ inherents::InherentDataProvider, make_vrf_sign_data, AllowedSlots, AuthorityId, AuthorityPair, Slot, }; -use sp_consensus_slots::SlotDuration; use sp_core::crypto::Pair; use sp_keyring::Sr25519Keyring; use sp_keystore::{testing::MemoryKeystore, Keystore}; @@ -66,8 +65,6 @@ type Mutator = Arc; type BabeBlockImport = PanickingBlockImport>>; -const SLOT_DURATION_MS: u64 = 1000; - #[derive(Clone)] struct DummyFactory { client: Arc, @@ -255,14 +252,15 @@ impl TestNetFactory for BabeTestNet { let (_, longest_chain) = TestClientBuilder::new().build_with_longest_chain(); + let slot_duration = data.link.config.slot_duration(); TestVerifier { inner: BabeVerifier { client: client.clone(), select_chain: longest_chain, - create_inherent_data_providers: Box::new(|_, _| async { + create_inherent_data_providers: Box::new(move |_, _| async move { let slot = InherentDataProvider::from_timestamp_and_slot_duration( Timestamp::current(), - SlotDuration::from_millis(SLOT_DURATION_MS), + slot_duration, ); Ok((slot,)) }), @@ -1016,7 +1014,7 @@ async fn obsolete_blocks_aux_data_cleanup() { let data = peer.data.as_ref().expect("babe link set up during initialization"); let client = peer.client().as_client(); - // Register the handler (as done by `babe_start`) + // Register the handler (as done by Babe's `block_import` method) let client_clone = client.clone(); let on_finality = move |summary: &FinalityNotification| { aux_storage_cleanup(client_clone.as_ref(), summary) diff --git a/substrate/client/consensus/sassafras/Cargo.toml b/substrate/client/consensus/sassafras/Cargo.toml new file mode 100644 index 000000000000..37229dea56d6 --- /dev/null +++ b/substrate/client/consensus/sassafras/Cargo.toml @@ -0,0 +1,52 @@ +[package] +name = "sc-consensus-sassafras" +version = "0.3.4-dev" +authors = ["Parity Technologies "] +description = "Sassafras consensus algorithm for substrate" +edition = "2021" +license = "Apache 2.0" +homepage = "https://substrate.io" +repository = "/~https://github.com/paritytech/substrate/" +documentation = "https://docs.rs/sc-consensus-sassafras" +readme = "README.md" +publish = false + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +async-trait = "0.1.50" +scale-codec = { package = "parity-scale-codec", version = "3.0.0", features = ["derive"] } +futures = "0.3.21" +log = "0.4.16" +parking_lot = "0.12.0" +thiserror = "1.0" +fork-tree = { path = "../../../utils/fork-tree" } +prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus" } +sc-client-api = { path = "../../api" } +sc-consensus = { path = "../../../client/consensus/common" } +sc-consensus-epochs = { path = "../epochs" } +sc-consensus-slots = { path = "../slots" } +sc-telemetry = { path = "../../telemetry" } +sc-transaction-pool-api = { path = "../../transaction-pool/api" } +sp-api = { path = "../../../primitives/api" } +sp-application-crypto = { path = "../../../primitives/application-crypto" } +sp-block-builder = { path 
= "../../../primitives/block-builder" } +sp-blockchain = { path = "../../../primitives/blockchain" } +sp-consensus = { path = "../../../primitives/consensus/common" } +sp-consensus-sassafras = { path = "../../../primitives/consensus/sassafras" } +sp-consensus-slots = { path = "../../../primitives/consensus/slots" } +sp-core = { path = "../../../primitives/core" } +sp-inherents = { path = "../../../primitives/inherents" } +sp-keystore = { path = "../../../primitives/keystore" } +sp-runtime = { path = "../../../primitives/runtime" } +sp-timestamp = { path = "../../../primitives/timestamp" } + +[dev-dependencies] +sc-block-builder = { path = "../../block-builder" } +sc-keystore = { path = "../../keystore" } +sc-network-test = { path = "../../network/test" } +sp-keyring = { path = "../../../primitives/keyring" } +substrate-test-runtime-client = { path = "../../../test-utils/runtime/client" } +tokio = "1.22.0" +env_logger = "0.10.1" diff --git a/substrate/client/consensus/sassafras/README.md b/substrate/client/consensus/sassafras/README.md new file mode 100644 index 000000000000..3ff3e2c8ab9c --- /dev/null +++ b/substrate/client/consensus/sassafras/README.md @@ -0,0 +1,8 @@ +Client module for SASSAFRAS consensus + +- Tracking issue: /~https://github.com/paritytech/polkadot-sdk/issues/41 +- Protocol RFC proposal: /~https://github.com/polkadot-fellows/RFCs/pull/26 + +# ⚠️ WARNING ⚠️ + +The crate interfaces and structures are experimental and may be subject to changes. diff --git a/substrate/client/consensus/sassafras/src/authorship.rs b/substrate/client/consensus/sassafras/src/authorship.rs new file mode 100644 index 000000000000..38d40680cfd4 --- /dev/null +++ b/substrate/client/consensus/sassafras/src/authorship.rs @@ -0,0 +1,636 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Types and functions related to authority selection and slot claiming. + +use super::*; + +use sc_transaction_pool_api::OffchainTransactionPoolFactory; +use sp_consensus_sassafras::{ + digests::SlotClaim, ticket_id_threshold, vrf::RingContext, AuthorityId, Slot, TicketBody, + TicketClaim, TicketEnvelope, TicketId, +}; +use sp_core::{ed25519::Pair as EphemeralPair, hashing::blake2_64, ByteArray}; +use std::pin::Pin; + +/// Get secondary authority index for the given epoch and slot. +pub(crate) fn secondary_authority_index(slot: Slot, epoch: &Epoch) -> AuthorityIndex { + // TODO @davxy twox -> blake2 + let hash = u64::from_le_bytes((epoch.randomness, slot).using_encoded(blake2_64)); + (hash % epoch.authorities.len() as u64) as AuthorityIndex +} + +/// Try to claim an epoch slot. +/// If ticket is `None`, then the slot should be claimed using the fallback mechanism. 
+pub(crate) fn claim_slot(
+	slot: Slot,
+	epoch: &mut Epoch,
+	maybe_ticket: Option<(TicketId, TicketBody)>,
+	keystore: &KeystorePtr,
+) -> Option<(SlotClaim, AuthorityId)> {
+	if epoch.authorities.is_empty() {
+		return None
+	}
+
+	let mut vrf_sign_data = vrf::slot_claim_sign_data(&epoch.randomness, slot, epoch.index);
+
+	let (authority_idx, ticket_claim) = match maybe_ticket {
+		Some((ticket_id, ticket_body)) => {
+			debug!(target: LOG_TARGET, "[TRY PRIMARY] slot: {}, tkt: {:032x}", slot, ticket_id);
+
+			// TODO @davxy
+			// If we lose the secret cache then, to know whether we are the ticket owner, it
+			// looks like we need to regenerate the ticket-id using all our keys and check
+			// whether the output matches the on-chain one.
+			let (authority_idx, ticket_secret) = epoch.tickets_aux.remove(&ticket_id)?;
+			debug!(
+				target: LOG_TARGET,
+				"    got ticket: authority: {}, attempt: {}",
+				authority_idx,
+				ticket_body.attempt_idx
+			);
+
+			vrf_sign_data.push_transcript_data(&ticket_body.encode());
+
+			let reveal_vrf_input =
+				vrf::revealed_key_input(&epoch.randomness, ticket_body.attempt_idx, epoch.index);
+			vrf_sign_data
+				.push_vrf_input(reveal_vrf_input)
+				.expect("Sign data has enough space; qed");
+
+			// Sign some data using the erased key to prove our ownership
+			let data = vrf_sign_data.challenge::<32>();
+			let erased_pair = EphemeralPair::from_seed(&ticket_secret.seed);
+			let erased_signature = erased_pair.sign(&data);
+
+			let claim = TicketClaim { erased_signature };
+			(authority_idx, Some(claim))
+		},
+		None => {
+			debug!(target: LOG_TARGET, "[TRY SECONDARY] slot: {}", slot);
+			(secondary_authority_index(slot, epoch), None)
+		},
+	};
+
+	let authority_id = epoch.authorities.get(authority_idx as usize)?;
+
+	let vrf_signature = keystore
+		.bandersnatch_vrf_sign(AuthorityId::ID, authority_id.as_ref(), &vrf_sign_data)
+		.ok()
+		.flatten()?;
+
+	let claim = SlotClaim { authority_idx, slot, vrf_signature, ticket_claim };
+
+	Some((claim, authority_id.clone()))
+}
+
+/// Generate the tickets for the given epoch.
+///
+/// The tickets' additional information will be stored within the `Epoch` structure.
+/// The additional information will be used later during the epoch to claim slots.
+fn generate_epoch_tickets(
+	epoch: &mut Epoch,
+	keystore: &KeystorePtr,
+	ring_ctx: &RingContext,
+) -> Vec {
+	let mut tickets = Vec::new();
+
+	let threshold = ticket_id_threshold(
+		epoch.config.redundancy_factor,
+		epoch.length,
+		epoch.config.attempts_number,
+		epoch.authorities.len() as u32,
+	);
+	debug!(target: LOG_TARGET, "Generating tickets for epoch {} @ slot {}", epoch.index, epoch.start);
+	trace!(target: LOG_TARGET, "    threshold: {:032x}", threshold);
+
+	// We need a list of raw unwrapped keys
+	let pks: Vec<_> = epoch.authorities.iter().map(|a| *a.as_ref()).collect();
+
+	let tickets_aux = &mut epoch.tickets_aux;
+	let epoch = &epoch.inner;
+
+	for (authority_idx, authority_id) in epoch.authorities.iter().enumerate() {
+		if !keystore.has_keys(&[(authority_id.to_raw_vec(), AuthorityId::ID)]) {
+			continue
+		}
+
+		trace!(target: LOG_TARGET, "Generating ring prover key...");
+		let prover = ring_ctx.prover(&pks, authority_idx).unwrap();
+		trace!(target: LOG_TARGET, "    ...done");
+
+		let make_ticket = |attempt_idx| {
+			// Ticket id and threshold check.
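+			// A candidate is retained only if its VRF-derived ticket id falls below the
+			// per-epoch `threshold` computed above (a function of the redundancy factor,
+			// epoch length, attempts number and authorities count).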
+ let ticket_id_input = vrf::ticket_id_input(&epoch.randomness, attempt_idx, epoch.index); + let ticket_id_output = keystore + .bandersnatch_vrf_output(AuthorityId::ID, authority_id.as_ref(), &ticket_id_input) + .ok()??; + let ticket_id = vrf::make_ticket_id(&ticket_id_input, &ticket_id_output); + if ticket_id >= threshold { + return None + } + + // Erased key. + let (erased_pair, erased_seed) = EphemeralPair::generate(); + let erased_public = erased_pair.public(); + + // Revealed key. + let revealed_input = + vrf::revealed_key_input(&epoch.randomness, attempt_idx, epoch.index); + let revealed_output = keystore + .bandersnatch_vrf_output(AuthorityId::ID, authority_id.as_ref(), &revealed_input) + .ok()??; + let revealed_seed = vrf::make_revealed_key_seed(&revealed_input, &revealed_output); + let revealed_public = EphemeralPair::from_seed(&revealed_seed).public(); + + let body = TicketBody { attempt_idx, erased_public, revealed_public }; + + let sign_data = vrf::ticket_body_sign_data(&body, ticket_id_input); + + trace!(target: LOG_TARGET, "Forging ring proof for {:032x} (attempt: {})", ticket_id, attempt_idx); + let signature = keystore + .bandersnatch_ring_vrf_sign( + AuthorityId::ID, + authority_id.as_ref(), + &sign_data, + &prover, + ) + .ok()??; + trace!(target: LOG_TARGET, " ...done"); + + debug_assert_eq!(ticket_id_output, signature.outputs[0]); + + let ticket_envelope = TicketEnvelope { body, signature }; + let ticket_secret = TicketSecret { attempt_idx, seed: erased_seed }; + Some((ticket_id, ticket_envelope, ticket_secret)) + }; + + for attempt in 0..epoch.config.attempts_number { + if let Some((ticket_id, ticket_envelope, ticket_secret)) = make_ticket(attempt) { + tickets.push(ticket_envelope); + tickets_aux.insert(ticket_id, (authority_idx as u32, ticket_secret)); + } + } + } + + tickets +} + +struct SlotWorker { + client: Arc, + block_import: I, + env: E, + sync_oracle: SO, + justification_sync_link: L, + force_authoring: bool, + keystore: KeystorePtr, + epoch_changes: SharedEpochChanges, + slot_notification_sinks: SlotNotificationSinks, + genesis_config: Epoch, +} + +#[async_trait::async_trait] +impl sc_consensus_slots::SimpleSlotWorker + for SlotWorker +where + B: BlockT, + C: ProvideRuntimeApi + HeaderBackend + HeaderMetadata, + C::Api: SassafrasApi, + E: Environment + Send + Sync, + E::Proposer: Proposer, + I: BlockImport + Send + Sync + 'static, + SO: SyncOracle + Send + Clone + Sync, + L: sc_consensus::JustificationSyncLink, + ER: std::error::Error + Send + 'static, +{ + type Claim = (SlotClaim, AuthorityId); + type SyncOracle = SO; + type JustificationSyncLink = L; + type CreateProposer = + Pin> + Send + 'static>>; + type Proposer = E::Proposer; + type BlockImport = I; + type AuxData = ViableEpochDescriptor, Epoch>; + + fn logging_target(&self) -> &'static str { + LOG_TARGET + } + + fn block_import(&mut self) -> &mut Self::BlockImport { + &mut self.block_import + } + + fn aux_data(&self, parent: &B::Header, slot: Slot) -> Result { + self.epoch_changes + .shared_data() + .epoch_descriptor_for_child_of( + descendent_query(&*self.client), + &parent.hash(), + *parent.number(), + slot, + ) + .map_err(|e| ConsensusError::ChainLookup(e.to_string()))? 
+ .ok_or(ConsensusError::InvalidAuthoritiesSet) + } + + fn authorities_len(&self, epoch_descriptor: &Self::AuxData) -> Option { + self.epoch_changes + .shared_data() + .viable_epoch(epoch_descriptor, |slot| Epoch::genesis(&self.genesis_config, slot)) + .map(|epoch| epoch.as_ref().authorities.len()) + } + + async fn claim_slot( + &mut self, + parent_header: &B::Header, + slot: Slot, + epoch_descriptor: &ViableEpochDescriptor, Epoch>, + ) -> Option { + // Get the next slot ticket from the runtime. + let maybe_ticket = + self.client.runtime_api().slot_ticket(parent_header.hash(), slot).ok()?; + + let mut epoch_changes = self.epoch_changes.shared_data_locked(); + let mut epoch = epoch_changes.viable_epoch_mut(epoch_descriptor, |slot| { + Epoch::genesis(&self.genesis_config, slot) + })?; + + let claim = authorship::claim_slot(slot, &mut epoch.as_mut(), maybe_ticket, &self.keystore); + if claim.is_some() { + debug!(target: LOG_TARGET, "Claimed slot {}", slot); + } + claim + } + + fn notify_slot( + &self, + _parent_header: &B::Header, + slot: Slot, + epoch_descriptor: &ViableEpochDescriptor, Epoch>, + ) { + let sinks = &mut self.slot_notification_sinks.lock(); + sinks.retain_mut(|sink| match sink.try_send((slot, epoch_descriptor.clone())) { + Ok(()) => true, + Err(e) => + if e.is_full() { + warn!(target: LOG_TARGET, "Trying to notify a slot but the channel is full"); + true + } else { + false + }, + }); + } + + fn pre_digest_data(&self, _slot: Slot, claim: &Self::Claim) -> Vec { + vec![DigestItem::from(&claim.0)] + } + + async fn block_import_params( + &self, + header: B::Header, + header_hash: &B::Hash, + body: Vec, + storage_changes: StorageChanges, + (_, public): Self::Claim, + epoch_descriptor: Self::AuxData, + ) -> Result, ConsensusError> { + let signature = self + .keystore + .bandersnatch_sign( + ::ID, + public.as_ref(), + header_hash.as_ref(), + ) + .map_err(|e| ConsensusError::CannotSign(format!("{}. Key {:?}", e, public)))? + .map(|sig| AuthoritySignature::from(sig)) + .ok_or_else(|| { + ConsensusError::CannotSign(format!( + "Could not find key in keystore. Key {:?}", + public + )) + })?; + + let mut block = BlockImportParams::new(BlockOrigin::Own, header); + block.post_digests.push(DigestItem::from(&signature)); + block.body = Some(body); + block.state_action = + StateAction::ApplyChanges(sc_consensus::StorageChanges::Changes(storage_changes)); + block + .insert_intermediate(INTERMEDIATE_KEY, SassafrasIntermediate:: { epoch_descriptor }); + + Ok(block) + } + + fn force_authoring(&self) -> bool { + self.force_authoring + } + + fn should_backoff(&self, _slot: Slot, _chain_head: &B::Header) -> bool { + // TODO @davxy + false + } + + fn sync_oracle(&mut self) -> &mut Self::SyncOracle { + &mut self.sync_oracle + } + + fn justification_sync_link(&mut self) -> &mut Self::JustificationSyncLink { + &mut self.justification_sync_link + } + + fn proposer(&mut self, block: &B::Header) -> Self::CreateProposer { + Box::pin( + self.env + .init(block) + .map_err(|e| ConsensusError::ClientImport(format!("{:?}", e))), + ) + } + + fn telemetry(&self) -> Option { + // TODO @davxy + None + } + + fn proposing_remaining_duration(&self, slot_info: &SlotInfo) -> Duration { + let parent_slot = find_slot_claim::(&slot_info.chain_head).ok().map(|d| d.slot); + + // TODO @davxy : clarify this field. 
In Sassafras this is part of 'self'
+		let block_proposal_slot_portion = sc_consensus_slots::SlotProportion::new(0.5);
+
+		sc_consensus_slots::proposing_remaining_duration(
+			parent_slot,
+			slot_info,
+			&block_proposal_slot_portion,
+			None,
+			sc_consensus_slots::SlotLenienceType::Exponential,
+			self.logging_target(),
+		)
+	}
+}
+
+/// Authoring tickets generation worker.
+///
+/// Listens on the client's import notification stream for blocks which contain new epoch
+/// information, that is, blocks that signal the beginning of a new epoch.
+///
+/// A new epoch announcement triggers the generation of tickets for the next epoch.
+///
+/// The tickets generated by the worker are saved within the epoch changes tree
+/// and are volatile.
+async fn start_tickets_worker(
+	client: Arc,
+	keystore: KeystorePtr,
+	epoch_changes: SharedEpochChanges,
+	select_chain: SC,
+	offchain_tx_pool_factory: OffchainTransactionPoolFactory,
+) where
+	B: BlockT,
+	C: BlockchainEvents + ProvideRuntimeApi,
+	C::Api: SassafrasApi,
+	SC: SelectChain + 'static,
+{
+	let mut notifications = client.import_notification_stream();
+
+	// TODO @davxy: it would be a lot smarter to wait on `finality_notification_stream`.
+	// That way we prevent producing tickets on potentially multiple forks.
+	// Furthermore, as these notifications are not 100% reliable, it may be a good
+	// idea to detect epoch changes without checking for the next epoch digest.
+	// As a fallback, it may be sufficient to play with the known epoch length and the
+	// slot number in the `SlotClaim` digest.
+
+	while let Some(notification) = notifications.next().await {
+		let epoch_desc = match find_next_epoch_digest::(&notification.header) {
+			Ok(Some(epoch_desc)) => epoch_desc,
+			Err(err) => {
+				warn!(target: LOG_TARGET, "Error fetching next epoch digest: {}", err);
+				continue
+			},
+			_ => continue,
+		};
+
+		trace!(target: LOG_TARGET, "Announcement: {:x?}", epoch_desc);
+
+		let number = *notification.header.number();
+		let position = if number == One::one() {
+			EpochIdentifierPosition::Genesis1
+		} else {
+			EpochIdentifierPosition::Regular
+		};
+		let epoch_identifier = EpochIdentifier { position, hash: notification.hash, number };
+
+		let mut epoch = match epoch_changes.shared_data().epoch(&epoch_identifier).cloned() {
+			Some(epoch) => epoch,
+			None => {
+				warn!(
+					target: LOG_TARGET,
+					"Unexpected missing epoch data for {:?}", epoch_identifier
+				);
+				continue
+			},
+		};
+
+		// Get the best block on which we will publish the tickets.
+		let best_hash = match select_chain.best_chain().await {
+			Ok(header) => header.hash(),
+			Err(err) => {
+				error!(target: LOG_TARGET, "Error fetching best chain block id: {}", err);
+				continue
+			},
+		};
+
+		let ring_ctx = match client.runtime_api().ring_context(best_hash) {
+			Ok(Some(ctx)) => ctx,
+			Ok(None) => {
+				info!(target: LOG_TARGET, "Ring context not initialized yet");
+				continue
+			},
+			Err(err) => {
+				error!(target: LOG_TARGET, "Unable to read ring context: {}", err);
+				continue
+			},
+		};
+
+		let tickets = generate_epoch_tickets(&mut epoch, &keystore, &ring_ctx);
+		if tickets.is_empty() {
+			continue
+		}
+
+		// Register the offchain tx pool to be able to use it from the runtime.
+		let mut runtime_api = client.runtime_api();
+		runtime_api
+			.register_extension(offchain_tx_pool_factory.offchain_transaction_pool(best_hash));
+
+		let err = match runtime_api.submit_tickets_unsigned_extrinsic(best_hash, tickets) {
+			Err(err) => Some(err.to_string()),
+			Ok(false) => Some("Unknown reason".to_string()),
+			_ => None,
+		};
+
+		match err {
+			None => {
+				// Cache the ticket secrets in the epoch changes tree
+				// TODO @davxy: use the keystore
+				epoch_changes
+					.shared_data()
+					.epoch_mut(&epoch_identifier)
+					.map(|target_epoch| target_epoch.tickets_aux = epoch.tickets_aux);
+			},
+			Some(err) => {
+				error!(target: LOG_TARGET, "Unable to submit tickets: {}", err);
+			},
+		}
+	}
+}
+
+/// Worker for Sassafras which implements `Future`. This must be polled.
+pub struct SassafrasWorker {
+	inner: Pin + Send + 'static>>,
+	slot_notification_sinks: SlotNotificationSinks,
+}
+
+impl SassafrasWorker {
+	/// Return an event stream of notifications for when a new slot happens, and the
+	/// corresponding epoch descriptor.
+	pub fn slot_notification_stream(
+		&self,
+	) -> Receiver<(Slot, ViableEpochDescriptor, Epoch>)> {
+		const CHANNEL_BUFFER_SIZE: usize = 1024;
+
+		let (sink, stream) = channel(CHANNEL_BUFFER_SIZE);
+		self.slot_notification_sinks.lock().push(sink);
+		stream
+	}
+}
+
+impl Future for SassafrasWorker {
+	type Output = ();
+
+	fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll {
+		self.inner.as_mut().poll(cx)
+	}
+}
+
+/// Slot notification sinks.
+type SlotNotificationSinks = Arc<
+	Mutex::Hash, NumberFor, Epoch>)>>>,
+>;
+
+/// Parameters for Sassafras.
+pub struct SassafrasWorkerParams {
+	/// The client to use.
+	pub client: Arc,
+	/// The keystore that manages the keys of the node.
+	pub keystore: KeystorePtr,
+	/// The chain selection strategy.
+	pub select_chain: SC,
+	/// The environment we are producing blocks for.
+	pub env: EN,
+	/// The underlying block-import object to supply our produced blocks to.
+	/// This must be a `SassafrasBlockImport` or a wrapper of it, otherwise
+	/// critical consensus logic will be omitted.
+	pub block_import: I,
+	/// A sync oracle.
+	pub sync_oracle: SO,
+	/// Hook into the sync module to control the justification sync process.
+	pub justification_sync_link: L,
+	/// Something that can create the inherent data providers.
+	pub create_inherent_data_providers: CIDP,
+	/// Force authoring of blocks even if we are offline.
+	pub force_authoring: bool,
+	/// State shared between import queue and authoring worker.
+	pub sassafras_link: SassafrasLink,
+	/// The offchain transaction pool factory used for tickets submission.
+	pub offchain_tx_pool_factory: OffchainTransactionPoolFactory,
+}
+
+/// Start the Sassafras worker.
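+///
+/// Returns a [`SassafrasWorker`] future which drives both the slot-claiming worker and
+/// the tickets generation worker; the caller is responsible for polling it, e.g. by
+/// spawning it on the node's task executor.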
+pub fn start_sassafras( + SassafrasWorkerParams { + client, + keystore, + select_chain, + env, + block_import, + sync_oracle, + justification_sync_link, + create_inherent_data_providers, + force_authoring, + sassafras_link, + offchain_tx_pool_factory, + }: SassafrasWorkerParams, +) -> Result, ConsensusError> +where + B: BlockT, + C: ProvideRuntimeApi + + ProvideUncles + + BlockchainEvents + + HeaderBackend + + HeaderMetadata + + Send + + Sync + + 'static, + C::Api: SassafrasApi, + SC: SelectChain + 'static, + EN: Environment + Send + Sync + 'static, + EN::Proposer: Proposer, + I: BlockImport + Send + Sync + 'static, + SO: SyncOracle + Send + Sync + Clone + 'static, + L: sc_consensus::JustificationSyncLink + 'static, + CIDP: CreateInherentDataProviders + Send + Sync + 'static, + CIDP::InherentDataProviders: InherentDataProviderExt + Send, + ER: std::error::Error + Send + From + From + 'static, +{ + info!(target: LOG_TARGET, "🍁 Starting authorship worker"); + + let slot_notification_sinks = Arc::new(Mutex::new(Vec::new())); + + let slot_worker = SlotWorker { + client: client.clone(), + block_import, + env, + sync_oracle: sync_oracle.clone(), + justification_sync_link, + force_authoring, + keystore: keystore.clone(), + epoch_changes: sassafras_link.epoch_changes.clone(), + slot_notification_sinks: slot_notification_sinks.clone(), + genesis_config: sassafras_link.genesis_config.clone(), + }; + + let slot_worker = sc_consensus_slots::start_slot_worker( + // TODO FIXME @davxy slot duration should be passed as part of the worker params + //sassafras_link.genesis_config.slot_duration, + sp_consensus_slots::SlotDuration::from_millis(3000), + select_chain.clone(), + sc_consensus_slots::SimpleSlotWorkerToSlotWorker(slot_worker), + sync_oracle, + create_inherent_data_providers, + ); + + let tickets_worker = start_tickets_worker( + client.clone(), + keystore, + sassafras_link.epoch_changes.clone(), + select_chain, + offchain_tx_pool_factory, + ); + + let inner = future::select(Box::pin(slot_worker), Box::pin(tickets_worker)); + + Ok(SassafrasWorker { inner: Box::pin(inner.map(|_| ())), slot_notification_sinks }) +} diff --git a/substrate/client/consensus/sassafras/src/aux_schema.rs b/substrate/client/consensus/sassafras/src/aux_schema.rs new file mode 100644 index 000000000000..8353f34aa60a --- /dev/null +++ b/substrate/client/consensus/sassafras/src/aux_schema.rs @@ -0,0 +1,172 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Schema for auxiliary data persistence. +//! +//! 
TODO @davxy : RENAME FROM aux_schema.rs => aux_data.rs
+
+use std::{collections::HashSet, sync::Arc};
+
+use scale_codec::{Decode, Encode};
+
+use sc_client_api::backend::AuxStore;
+use sc_consensus_epochs::{EpochChangesFor, SharedEpochChanges};
+
+use sc_client_api::{blockchain::Backend as _, Backend as BackendT};
+use sp_blockchain::{Error as ClientError, HeaderBackend, HeaderMetadata, Result as ClientResult};
+use sp_consensus_sassafras::SassafrasBlockWeight;
+use sp_runtime::traits::{Block as BlockT, NumberFor, SaturatedConversion, Zero};
+
+use crate::Epoch;
+
+const SASSAFRAS_EPOCH_CHANGES_KEY: &[u8] = b"sassafras_epoch_changes";
+
+/// The aux storage key used to store the block weight of the given block hash.
+fn block_weight_key(block_hash: H) -> Vec {
+	(b"sassafras_block_weight", block_hash).encode()
+}
+
+fn load_decode(backend: &B, key: &[u8]) -> ClientResult>
+where
+	B: AuxStore,
+	T: Decode,
+{
+	match backend.get_aux(key)? {
+		Some(t) => T::decode(&mut &t[..]).map(Some).map_err(|e| {
+			ClientError::Backend(format!("Sassafras db is corrupted, Decode error: {}", e))
+		}),
+		None => Ok(None),
+	}
+}
+
+/// Update the epoch changes to persist after a change.
+pub fn write_epoch_changes(
+	epoch_changes: &EpochChangesFor,
+	write_aux: F,
+) -> R
+where
+	F: FnOnce(&[(&'static [u8], &[u8])]) -> R,
+{
+	epoch_changes.using_encoded(|s| write_aux(&[(SASSAFRAS_EPOCH_CHANGES_KEY, s)]))
+}
+
+/// Load or initialize persistent epoch change data from backend.
+pub fn load_epoch_changes(
+	backend: &AS,
+) -> ClientResult> {
+	let maybe_epoch_changes =
+		load_decode::<_, EpochChangesFor>(backend, SASSAFRAS_EPOCH_CHANGES_KEY)?;
+
+	let epoch_changes = SharedEpochChanges::::new(
+		maybe_epoch_changes.unwrap_or_else(|| EpochChangesFor::::default()),
+	);
+
+	// Rebalance the tree after deserialization. This isn't strictly necessary
+	// since the tree is now rebalanced on every update operation. But since the
+	// tree wasn't rebalanced initially, it's useful to temporarily leave this here
+	// to avoid having to wait for an import before the tree is rebalanced.
+	epoch_changes.shared_data().rebalance();
+
+	Ok(epoch_changes)
+}
+
+/// Write the cumulative chain-weight of a block to aux storage.
+pub fn write_block_weight(
+	block_hash: H,
+	block_weight: SassafrasBlockWeight,
+	write_aux: F,
+) -> R
+where
+	F: FnOnce(&[(Vec, &[u8])]) -> R,
+{
+	let key = block_weight_key(block_hash);
+	block_weight.using_encoded(|s| write_aux(&[(key, s)]))
+}
+
+/// Load the cumulative chain-weight associated with a block.
+pub fn load_block_weight(
+	backend: &B,
+	block_hash: H,
+) -> ClientResult> {
+	load_decode(backend, block_weight_key(block_hash).as_slice())
+}
+
+/// Reverts protocol aux data from the best block to at most the last finalized block.
+///
+/// Epoch-changes and block weights announced after the revert point are removed.
+pub fn revert(backend: Arc, blocks: NumberFor) -> ClientResult<()>
+where
+	Block: BlockT,
+	Backend: BackendT,
+{
+	let blockchain = backend.blockchain();
+	let best_number = blockchain.info().best_number;
+	let finalized = blockchain.info().finalized_number;
+
+	let revertible = blocks.min(best_number - finalized);
+	if revertible == Zero::zero() {
+		return Ok(())
+	}
+
+	let revert_up_to_number = best_number - revertible;
+	let revert_up_to_hash = blockchain.hash(revert_up_to_number)?.ok_or(ClientError::Backend(
+		format!("Unexpected hash lookup failure for block number: {}", revert_up_to_number),
+	))?;
+
+	// Revert epoch changes tree.
+
+	let epoch_changes = load_epoch_changes::(&*backend)?;
+	let mut epoch_changes = epoch_changes.shared_data();
+
+	if revert_up_to_number == Zero::zero() {
+		// Special case, no epoch changes data were present on genesis.
+		*epoch_changes = EpochChangesFor::::new();
+	} else {
+		let descendent_query = sc_consensus_epochs::descendent_query(blockchain);
+		epoch_changes.revert(descendent_query, revert_up_to_hash, revert_up_to_number);
+	}
+
+	// Remove block weights added after the revert point.
+
+	let mut weight_keys = HashSet::with_capacity(revertible.saturated_into());
+
+	let leaves = backend.blockchain().leaves()?.into_iter().filter(|&leaf| {
+		sp_blockchain::tree_route(blockchain, revert_up_to_hash, leaf)
+			.map(|route| route.retracted().is_empty())
+			.unwrap_or_default()
+	});
+
+	for mut hash in leaves {
+		loop {
+			let meta = blockchain.header_metadata(hash)?;
+			if meta.number <= revert_up_to_number || !weight_keys.insert(block_weight_key(hash)) {
+				// We've reached the revert point or an already processed branch, stop here.
+				break
+			}
+			hash = meta.parent;
+		}
+	}
+
+	let weight_keys: Vec<_> = weight_keys.iter().map(|val| val.as_slice()).collect();
+
+	// Write epoch changes and remove weights in one shot.
+	write_epoch_changes::(&epoch_changes, |values| {
+		AuxStore::insert_aux(&*backend, values, weight_keys.iter())
+	})
+}
diff --git a/substrate/client/consensus/sassafras/src/block_import.rs b/substrate/client/consensus/sassafras/src/block_import.rs
new file mode 100644
index 000000000000..7524ce0020f3
--- /dev/null
+++ b/substrate/client/consensus/sassafras/src/block_import.rs
@@ -0,0 +1,530 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2022 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see .
+
+//! Types and functions related to block import.
+
+use super::*;
+use sc_client_api::{AuxDataOperations, FinalityNotification, PreCommitActions};
+use sp_blockchain::BlockStatus;
+
+/// Block-import handler for Sassafras.
+///
+/// This scans each imported block for epoch change announcements. The announcements are
+/// tracked in a tree (of all forks), and the import logic validates all epoch change
+/// transitions, i.e. whether a given epoch change is expected or whether it is missing.
+///
+/// The epoch change tree should be pruned as blocks are finalized.
+pub struct SassafrasBlockImport {
+	inner: I,
+	client: Arc,
+	epoch_changes: SharedEpochChanges,
+	genesis_config: Epoch,
+}
+
+impl Clone for SassafrasBlockImport {
+	fn clone(&self) -> Self {
+		SassafrasBlockImport {
+			inner: self.inner.clone(),
+			client: self.client.clone(),
+			epoch_changes: self.epoch_changes.clone(),
+			genesis_config: self.genesis_config.clone(),
+		}
+	}
+}
+
+fn aux_storage_cleanup(
+	_client: &C,
+	_notification: &FinalityNotification,
+) -> AuxDataOperations
+where
+	B: BlockT,
+	C: HeaderMetadata + HeaderBackend,
+{
+	// TODO @davxy
+	Default::default()
+}
+
+impl SassafrasBlockImport
+where
+	C: AuxStore
+		+ HeaderBackend
+		+ HeaderMetadata
+		+ PreCommitActions
+		+ 'static,
+{
+	/// Constructor.
+	pub fn new(
+		inner: I,
+		client: Arc,
+		epoch_changes: SharedEpochChanges,
+		genesis_config: Epoch,
+	) -> Self {
+		let client_weak = Arc::downgrade(&client);
+		let on_finality = move |notification: &FinalityNotification| {
+			if let Some(client) = client_weak.upgrade() {
+				aux_storage_cleanup(client.as_ref(), notification)
+			} else {
+				Default::default()
+			}
+		};
+		client.register_finality_action(Box::new(on_finality));
+
+		SassafrasBlockImport { inner, client, epoch_changes, genesis_config }
+	}
+}
+
+struct RecoverableEpochChanges {
+	old_epoch_changes: EpochChangesFor,
+	weak_lock: sc_consensus::shared_data::SharedDataLockedUpgradable>,
+}
+
+impl RecoverableEpochChanges {
+	fn rollback(mut self) {
+		*self.weak_lock.upgrade() = self.old_epoch_changes;
+	}
+}
+
+impl SassafrasBlockImport
+where
+	C: AuxStore + HeaderBackend + HeaderMetadata,
+{
+	// The fork choice rule is that we pick the heaviest chain (i.e. the one with more
+	// blocks built using the primary mechanism); if there's a tie we go with the
+	// longest chain.
+	fn is_new_best(
+		&self,
+		curr_weight: u32,
+		curr_number: NumberFor,
+		parent_hash: B::Hash,
+	) -> Result {
+		let info = self.client.info();
+
+		let new_best = if info.best_hash == parent_hash {
+			true
+		} else {
+			let best_weight = aux_schema::load_block_weight(&*self.client, &info.best_hash)
+				.map_err(|e| ConsensusError::ChainLookup(e.to_string()))?
+				.ok_or_else(|| {
+					ConsensusError::ChainLookup("No block weight for best header.".into())
+				})?;
+			curr_weight > best_weight ||
+				(curr_weight == best_weight && curr_number > info.best_number)
+		};
+
+		Ok(new_best)
+	}
+
+	fn import_epoch(
+		&mut self,
+		viable_epoch_desc: ViableEpochDescriptor, Epoch>,
+		next_epoch_desc: NextEpochDescriptor,
+		slot: Slot,
+		number: NumberFor,
+		hash: B::Hash,
+		parent_hash: B::Hash,
+		verbose: bool,
+		auxiliary: &mut Vec<(Vec, Option>)>,
+	) -> Result, ConsensusError> {
+		let mut epoch_changes = self.epoch_changes.shared_data_locked();
+
+		let log_level = if verbose { log::Level::Debug } else { log::Level::Info };
+
+		let mut viable_epoch = epoch_changes
+			.viable_epoch(&viable_epoch_desc, |slot| Epoch::genesis(&self.genesis_config, slot))
+			.ok_or_else(|| {
+				ConsensusError::ClientImport(Error::::FetchEpoch(parent_hash).into())
+			})?
+			.into_cloned();
+
+		if viable_epoch.as_ref().end_slot() <= slot {
+			// Some epochs must have been skipped as our current slot fits outside the
+			// current epoch. We will figure out which is the first skipped epoch and we
+			// will partially re-use its data for this "recovery" epoch.
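+			// Worked example (hypothetical numbers): with an epoch length of 100 slots,
+			// an epoch with index 10 starting at slot 1000 and a block landing at slot
+			// 1250, `skipped_epochs = (1250 - 1000) / 100 = 2`, so the recovery epoch
+			// below gets index 12 and start slot 1200.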
+			let epoch_data = viable_epoch.as_mut();
+			let skipped_epochs = (*slot - *epoch_data.start) / epoch_data.length as u64;
+			let original_epoch_idx = epoch_data.index;
+
+			// NOTE: notice that we are only updating a local copy of the `Epoch`; this
+			// makes it so that when we insert the next epoch into `EpochChanges` below
+			// (after incrementing it), it will use the correct epoch index and start slot.
+			// We do not update the original epoch that may be reused because there may be
+			// some other forks where the epoch isn't skipped.
+			// Not updating the original epoch works because when we search the tree for
+			// which epoch to use for a given slot, we will search in-depth with the
+			// predicate `epoch.start_slot <= slot` which will still match correctly without
+			// requiring `start_slot` to be updated to the correct value.
+			epoch_data.index += skipped_epochs;
+			epoch_data.start =
+				Slot::from(*epoch_data.start + skipped_epochs * epoch_data.length as u64);
+			warn!(
+				target: LOG_TARGET,
+				"Epoch(s) skipped from {} to {}",
+				original_epoch_idx,
+				epoch_data.index
+			);
+		}
+
+		log!(
+			target: LOG_TARGET,
+			log_level,
+			"New epoch {} launching at block {} (block slot {} >= start slot {}).",
+			viable_epoch.as_ref().index,
+			hash,
+			slot,
+			viable_epoch.as_ref().start,
+		);
+
+		let next_epoch = viable_epoch.increment(next_epoch_desc);
+
+		log!(
+			target: LOG_TARGET,
+			log_level,
+			"Next epoch starts at slot {}",
+			next_epoch.as_ref().start,
+		);
+
+		let old_epoch_changes = (*epoch_changes).clone();
+
+		// Prune the tree of epochs not part of the finalized chain or
+		// that are not live anymore, and then track the given epoch change
+		// in the tree.
+		// NOTE: it is important that these operations are done in this
+		// order, otherwise if pruning after import the `is_descendent_of`
+		// used by pruning may not know about the block that is being
+		// imported.
+		let prune_and_import = || {
+			prune_finalized(self.client.clone(), &mut epoch_changes)?;
+
+			epoch_changes
+				.import(descendent_query(&*self.client), hash, number, parent_hash, next_epoch)
+				.map_err(|e| {
+					ConsensusError::ClientImport(format!("Error importing epoch changes: {}", e))
+				})?;
+
+			Ok(())
+		};
+
+		if let Err(e) = prune_and_import() {
+			warn!(target: LOG_TARGET, "Failed to launch next epoch: {}", e);
+			*epoch_changes = old_epoch_changes;
+			return Err(e)
+		}
+
+		aux_schema::write_epoch_changes::(&*epoch_changes, |insert| {
+			auxiliary.extend(insert.iter().map(|(k, v)| (k.to_vec(), Some(v.to_vec()))))
+		});
+
+		Ok(RecoverableEpochChanges { old_epoch_changes, weak_lock: epoch_changes.release_mutex() })
+	}
+}
+
+impl SassafrasBlockImport
+where
+	Block: BlockT,
+	Inner: BlockImport + Send + Sync,
+	Inner::Error: Into,
+	Client: HeaderBackend
+		+ HeaderMetadata
+		+ AuxStore
+		+ ProvideRuntimeApi
+		+ Send
+		+ Sync,
+	Client::Api: SassafrasApi + ApiExt,
+{
+	/// Import whole state after a warp sync.
+	///
+	/// This function makes multiple transactions to the DB. If one of them fails we may
+	/// end up in an inconsistent state and have to resync.
+	async fn import_state(
+		&mut self,
+		mut block: BlockImportParams,
+	) -> Result {
+		let hash = block.post_hash();
+		let parent_hash = *block.header.parent_hash();
+		let number = *block.header.number();
+
+		// Check for the unit tag.
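+		// (The unit `()` intermediate is assumed to be attached by the verifier to
+		// state-import blocks in place of a full `SassafrasIntermediate`, mirroring the
+		// analogous warp-sync handling in BABE.)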
+		block.remove_intermediate::<()>(INTERMEDIATE_KEY)?;
+
+		// Import as best
+		block.fork_choice = Some(ForkChoiceStrategy::Custom(true));
+
+		// Reset block weight
+		aux_schema::write_block_weight(hash, 0, |values| {
+			block
+				.auxiliary
+				.extend(values.iter().map(|(k, v)| (k.to_vec(), Some(v.to_vec()))))
+		});
+
+		// First make the client import the state
+		let aux = match self.inner.import_block(block).await {
+			Ok(ImportResult::Imported(aux)) => aux,
+			Ok(r) =>
+				return Err(ConsensusError::ClientImport(format!(
+					"Unexpected import result: {:?}",
+					r
+				))),
+			Err(e) => return Err(e.into()),
+		};
+
+		// Read epoch info from the imported state
+		let curr_epoch = self.client.runtime_api().current_epoch(hash).map_err(|e| {
+			ConsensusError::ClientImport(sassafras_err::(Error::RuntimeApi(e)).into())
+		})?;
+		let next_epoch = self.client.runtime_api().next_epoch(hash).map_err(|e| {
+			ConsensusError::ClientImport(sassafras_err::(Error::RuntimeApi(e)).into())
+		})?;
+
+		let mut epoch_changes = self.epoch_changes.shared_data();
+		epoch_changes.reset(parent_hash, hash, number, curr_epoch.into(), next_epoch.into());
+
+		aux_schema::write_epoch_changes::(&*epoch_changes, |insert| {
+			self.client.insert_aux(insert, [])
+		})
+		.map_err(|e| ConsensusError::ClientImport(e.to_string()))?;
+
+		Ok(ImportResult::Imported(aux))
+	}
+}
+
+#[async_trait::async_trait]
+impl BlockImport for SassafrasBlockImport
+where
+	Block: BlockT,
+	Inner: BlockImport + Send + Sync,
+	Inner::Error: Into,
+	Client: HeaderBackend
+		+ HeaderMetadata
+		+ AuxStore
+		+ ProvideRuntimeApi
+		+ Send
+		+ Sync,
+	Client::Api: SassafrasApi + ApiExt,
+{
+	type Error = ConsensusError;
+
+	async fn import_block(
+		&mut self,
+		mut block: BlockImportParams,
+	) -> Result {
+		let hash = block.post_hash();
+		let number = *block.header.number();
+		let info = self.client.info();
+
+		let block_status = self
+			.client
+			.status(hash)
+			.map_err(|e| ConsensusError::ClientImport(e.to_string()))?;
+
+		// Skip protocol-specific logic if the block is already on-chain or if we are
+		// importing blocks during initial sync; otherwise the check for epoch changes
+		// will error, either because we would try to re-import an epoch change entry or
+		// because of missing epoch data in the tree, respectively.
+		if info.block_gap.map_or(false, |(s, e)| s <= number && number <= e) ||
+			block_status == BlockStatus::InChain
+		{
+			// When re-importing an existing block, strip away the intermediates.
+			// In case of initial sync, intermediates should not be present...
+			let _ = block.remove_intermediate::>(INTERMEDIATE_KEY);
+			block.fork_choice = Some(ForkChoiceStrategy::Custom(false));
+			return self.inner.import_block(block).await.map_err(Into::into)
+		}
+
+		if block.with_state() {
+			return self.import_state(block).await
+		}
+
+		let viable_epoch_desc = block
+			.remove_intermediate::>(INTERMEDIATE_KEY)?
+			.epoch_descriptor;
+
+		let claim = find_slot_claim::(&block.header)
+			.map_err(|e| ConsensusError::ClientImport(e.into()))?;
+		let slot = claim.slot;
+
+		let parent_hash = *block.header.parent_hash();
+		let parent_header = self
+			.client
+			.header(parent_hash)
+			.map_err(|e| ConsensusError::ChainLookup(e.to_string()))?
+ .ok_or_else(|| { + ConsensusError::ChainLookup( + sassafras_err(Error::::ParentUnavailable(parent_hash, hash)).into(), + ) + })?; + let parent_slot = find_slot_claim::(&parent_header) + .map(|claim| claim.slot) + .map_err(|e| ConsensusError::ClientImport(e.into()))?; + + // Make sure that slot number is strictly increasing + if slot <= parent_slot { + return Err(ConsensusError::ClientImport( + sassafras_err(Error::::SlotMustIncrease(parent_slot, slot)).into(), + )) + } + + // Check if there's any epoch change expected to happen at this slot. + // `epoch` is the epoch to verify the block under, and `first_in_epoch` is true + // if this is the first block in its chain for that epoch. + + let first_in_epoch = parent_slot < viable_epoch_desc.start_slot(); + + let next_epoch_digest = find_next_epoch_digest::(&block.header) + .map_err(|e| ConsensusError::ClientImport(e.to_string()))?; + + match (first_in_epoch, next_epoch_digest.is_some()) { + (true, false) => + return Err(ConsensusError::ClientImport( + sassafras_err(Error::::ExpectedEpochChange(hash, slot)).into(), + )), + (false, true) => + return Err(ConsensusError::ClientImport( + sassafras_err(Error::::UnexpectedEpochChange).into(), + )), + _ => (), + } + + // Compute the total weight of the chain, including the imported block. + + let parent_weight = aux_schema::load_block_weight(&*self.client, parent_hash) + .map_err(|e| ConsensusError::ClientImport(e.to_string()))? + .or_else(|| (*parent_header.number() == Zero::zero()).then(|| 0)) + .ok_or_else(|| { + ConsensusError::ClientImport( + sassafras_err(Error::::ParentBlockNoAssociatedWeight(hash)).into(), + ) + })?; + + let total_weight = parent_weight + claim.ticket_claim.is_some() as u32; + + aux_schema::write_block_weight(hash, total_weight, |values| { + block + .auxiliary + .extend(values.iter().map(|(k, v)| (k.to_vec(), Some(v.to_vec())))) + }); + + // If there's a pending epoch we'll try to update all the involved data while + // saving the previous epoch changes as well. In this way we can revert it if + // there's any error. + let epoch_changes_data = next_epoch_digest + .map(|next_epoch_desc| { + self.import_epoch( + viable_epoch_desc, + next_epoch_desc, + slot, + number, + hash, + parent_hash, + block.origin != BlockOrigin::NetworkInitialSync, + &mut block.auxiliary, + ) + }) + .transpose()?; + + // The fork choice rule is intentionally changed within the context of the + // epoch changes lock to avoid annoying race conditions on what is the current + // best block. That is, the best may be changed by the inner block import. + let is_new_best = self.is_new_best(total_weight, number, parent_hash)?; + block.fork_choice = Some(ForkChoiceStrategy::Custom(is_new_best)); + + let import_result = self.inner.import_block(block).await; + + // Revert to the original epoch changes in case there's an error + // importing the block + // TODO @davxy: shouldn't we check for Ok(Imported(_))? + if import_result.is_err() { + if let Some(data) = epoch_changes_data { + data.rollback(); + } + } + + import_result.map_err(Into::into) + } + + async fn check_block( + &mut self, + block: BlockCheckParams, + ) -> Result { + self.inner.check_block(block).await.map_err(Into::into) + } +} + +/// Gets the best finalized block and its slot, and prunes the given epoch tree. 
+fn prune_finalized(
+	client: Arc,
+	epoch_changes: &mut EpochChangesFor,
+) -> Result<(), ConsensusError>
+where
+	B: BlockT,
+	C: HeaderBackend + HeaderMetadata,
+{
+	let info = client.info();
+
+	let finalized_slot = {
+		let finalized_header = client
+			.header(info.finalized_hash)
+			.map_err(|e| ConsensusError::ClientImport(e.to_string()))?
+			.expect("finalized headers must exist in storage; qed");
+
+		find_slot_claim::(&finalized_header)
+			.expect("valid block headers have a slot-claim; qed")
+			.slot
+	};
+
+	epoch_changes
+		.prune_finalized(
+			descendent_query(&*client),
+			&info.finalized_hash,
+			info.finalized_number,
+			finalized_slot,
+		)
+		.map_err(|e| ConsensusError::ClientImport(e.to_string()))?;
+
+	Ok(())
+}
+
+/// Produce a Sassafras block-import object to be used later on in the construction of
+/// an import-queue.
+///
+/// Also returns a link object used to correctly instantiate the import queue
+/// and authoring worker.
+pub fn block_import(
+	genesis_config: Epoch,
+	inner_block_import: I,
+	client: Arc,
+) -> ClientResult<(SassafrasBlockImport, SassafrasLink)>
+where
+	C: AuxStore
+		+ HeaderBackend
+		+ HeaderMetadata
+		+ PreCommitActions
+		+ 'static,
+{
+	let epoch_changes = aux_schema::load_epoch_changes::(&*client)?;
+
+	prune_finalized(client.clone(), &mut epoch_changes.shared_data())?;
+
+	let link = SassafrasLink {
+		epoch_changes: epoch_changes.clone(),
+		genesis_config: genesis_config.clone(),
+	};
+
+	let block_import =
+		SassafrasBlockImport::new(inner_block_import, client, epoch_changes, genesis_config);
+
+	Ok((block_import, link))
+}
diff --git a/substrate/client/consensus/sassafras/src/inherents.rs b/substrate/client/consensus/sassafras/src/inherents.rs
new file mode 100644
index 000000000000..bfcf7bfbc6a1
--- /dev/null
+++ b/substrate/client/consensus/sassafras/src/inherents.rs
@@ -0,0 +1,86 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Sassafras inherents structures and helpers.
+
+use sp_inherents::{Error, InherentData, InherentIdentifier};
+use std::ops::Deref;
+
+/// Inherent identifier.
+pub const INHERENT_IDENTIFIER: InherentIdentifier = *b"sassslot";
+
+/// The type of inherent.
+pub type InherentType = sp_consensus_slots::Slot;
+
+/// Auxiliary trait to extract inherent data.
+pub trait SassafrasInherentData {
+	/// Get inherent data.
+	fn sassafras_get_inherent_data(&self) -> Result, Error>;
+	/// Put inherent data.
+	fn sassafras_put_inherent_data(&mut self, data: &InherentType) -> Result<(), Error>;
+	/// Replace inherent data.
+	fn sassafras_replace_inherent_data(&mut self, data: &InherentType);
+}
+
+impl SassafrasInherentData for InherentData {
+	fn sassafras_get_inherent_data(&self) -> Result, Error> {
+		self.get_data(&INHERENT_IDENTIFIER)
+	}
+
+	fn sassafras_put_inherent_data(&mut self, data: &InherentType) -> Result<(), Error> {
+		self.put_data(INHERENT_IDENTIFIER, data)
+	}
+
+	fn sassafras_replace_inherent_data(&mut self, data: &InherentType) {
+		self.replace_data(INHERENT_IDENTIFIER, data);
+	}
+}
+
+// TODO: this can be shared and stored in sc-consensus-slots...
+/// Provides the slot inherent data.
+pub struct InherentDataProvider(InherentType);
+
+impl InherentDataProvider {
+	/// Create new inherent data provider from the given `slot`.
+	pub fn new(slot: InherentType) -> Self {
+		Self(slot)
+	}
+
+	/// Creates the inherent data provider by calculating the slot from the given
+	/// `timestamp` and `slot_duration`.
+	pub fn from_timestamp(
+		timestamp: sp_timestamp::Timestamp,
+		slot_duration: sp_consensus_slots::SlotDuration,
+	) -> Self {
+		Self(InherentType::from_timestamp(timestamp, slot_duration))
+	}
+}
+
+impl Deref for InherentDataProvider {
+	type Target = InherentType;
+
+	fn deref(&self) -> &Self::Target {
+		&self.0
+	}
+}
+
+#[async_trait::async_trait]
+impl sp_inherents::InherentDataProvider for InherentDataProvider {
+	async fn provide_inherent_data(&self, inherent_data: &mut InherentData) -> Result<(), Error> {
+		inherent_data.sassafras_put_inherent_data(&self.0)
+	}
+}
diff --git a/substrate/client/consensus/sassafras/src/lib.rs b/substrate/client/consensus/sassafras/src/lib.rs
new file mode 100644
index 000000000000..d20e4f3ad3ae
--- /dev/null
+++ b/substrate/client/consensus/sassafras/src/lib.rs
@@ -0,0 +1,400 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2022 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see .
+
+//! # Sassafras
+//!
diff --git a/substrate/client/consensus/sassafras/src/lib.rs b/substrate/client/consensus/sassafras/src/lib.rs
new file mode 100644
index 000000000000..d20e4f3ad3ae
--- /dev/null
+++ b/substrate/client/consensus/sassafras/src/lib.rs
@@ -0,0 +1,400 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2022 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+//! # Sassafras
+//!
+//! TODO @davxy: docs
+
+// TODO @davxy: remove this
+//#![deny(warnings)]
+#![forbid(unsafe_code, missing_docs)]
+
+use std::{
+    collections::BTreeMap,
+    future::Future,
+    sync::Arc,
+    task::{Context, Poll},
+    time::Duration,
+};
+
+use futures::{
+    channel::mpsc::{channel, Receiver, Sender},
+    prelude::*,
+};
+use log::{debug, error, info, log, trace, warn};
+use parking_lot::Mutex;
+use prometheus_endpoint::Registry;
+use scale_codec::{Decode, Encode};
+
+use sc_client_api::{backend::AuxStore, BlockchainEvents, ProvideUncles, UsageProvider};
+use sc_consensus::{
+    block_import::{
+        BlockCheckParams, BlockImport, BlockImportParams, ForkChoiceStrategy, ImportResult,
+        StateAction,
+    },
+    import_queue::{BasicQueue, BoxJustificationImport, DefaultImportQueue},
+    Verifier,
+};
+use sc_consensus_epochs::{
+    descendent_query, Epoch as EpochT, EpochChangesFor, EpochIdentifier, EpochIdentifierPosition,
+    SharedEpochChanges, ViableEpochDescriptor,
+};
+use sc_consensus_slots::{CheckedHeader, InherentDataProviderExt, SlotInfo, StorageChanges};
+use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_DEBUG, CONSENSUS_TRACE};
+use sp_api::{ApiExt, ProvideRuntimeApi};
+use sp_application_crypto::AppCrypto;
+use sp_block_builder::BlockBuilder as BlockBuilderApi;
+use sp_blockchain::{Error as ClientError, HeaderBackend, HeaderMetadata, Result as ClientResult};
+use sp_consensus::{
+    BlockOrigin, Environment, Error as ConsensusError, Proposer, SelectChain, SyncOracle,
+};
+use sp_consensus_slots::Slot;
+use sp_core::Pair;
+use sp_inherents::{CreateInherentDataProviders, InherentData, InherentDataProvider as _};
+use sp_keystore::KeystorePtr;
+use sp_runtime::{
+    generic::OpaqueDigestItemId,
+    traits::{Block as BlockT, Header, NumberFor, One, Zero},
+    DigestItem,
+};
+
+// Re-export some primitives.
+pub use sp_consensus_sassafras::{
+    digests::{ConsensusLog, NextEpochDescriptor, SlotClaim},
+    vrf, AuthorityId, AuthorityIndex, AuthorityPair, AuthoritySignature, EpochConfiguration,
+    SassafrasApi, TicketBody, TicketClaim, TicketEnvelope, TicketId, RANDOMNESS_LENGTH,
+    SASSAFRAS_ENGINE_ID,
+};
+
+mod authorship;
+mod aux_schema;
+mod block_import;
+mod inherents;
+#[cfg(test)]
+mod tests;
+mod verification;
+
+// Export core components.
+pub use authorship::{start_sassafras, SassafrasWorker, SassafrasWorkerParams};
+pub use aux_schema::revert;
+pub use block_import::{block_import, SassafrasBlockImport};
+pub use inherents::{InherentDataProvider, InherentType};
+pub use verification::SassafrasVerifier;
+
+const LOG_TARGET: &str = "sassafras 🌳";
+
+/// Intermediate key for the Sassafras engine.
+pub const INTERMEDIATE_KEY: &[u8] = b"sass1";
+
+/// Errors encountered by the Sassafras routines.
+#[derive(Debug, thiserror::Error)]
+pub enum Error {
+    /// Multiple slot claim digests
+    #[error("Multiple slot-claim digests")]
+    MultipleSlotClaimDigests,
+    /// Missing slot claim digest
+    #[error("No slot-claim digest found")]
+    MissingSlotClaimDigest,
+    /// Multiple epoch change digests
+    #[error("Multiple epoch change digests")]
+    MultipleEpochChangeDigests,
+    /// Could not fetch epoch
+    #[error("Could not fetch epoch at {0:?}")]
+    FetchEpoch(B::Hash),
+    /// Header rejected: too far in the future
+    #[error("Header {0:?} rejected: too far in the future")]
+    TooFarInFuture(B::Hash),
+    /// Parent unavailable. Cannot import
+    #[error("Parent ({0}) of {1} unavailable.
Cannot import")] + ParentUnavailable(B::Hash, B::Hash), + /// Slot number must increase + #[error("Slot number must increase: parent slot: {0}, this slot: {1}")] + SlotMustIncrease(Slot, Slot), + /// Header has a bad seal + #[error("Header {0:?} has a bad seal")] + HeaderBadSeal(B::Hash), + /// Header is unsealed + #[error("Header {0:?} is unsealed")] + HeaderUnsealed(B::Hash), + /// Slot author not found + #[error("Slot author not found")] + SlotAuthorNotFound, + /// Bad signature + #[error("Bad signature on {0:?}")] + BadSignature(B::Hash), + /// VRF verification failed + #[error("VRF verification failed")] + VrfVerificationFailed, + /// Missing VRF output entry in the signature + #[error("Missing signed VRF output")] + MissingSignedVrfOutput, + /// Mismatch during verification of reveal public + #[error("Reveal public mismatch")] + RevealPublicMismatch, + /// Unexpected authoring mechanism + #[error("Unexpected authoring mechanism")] + UnexpectedAuthoringMechanism, + /// Could not fetch parent header + #[error("Could not fetch parent header: {0}")] + FetchParentHeader(sp_blockchain::Error), + /// Expected epoch change to happen. + #[error("Expected epoch change to happen at {0:?}, s{1}")] + ExpectedEpochChange(B::Hash, Slot), + /// Unexpected epoch change + #[error("Unexpected epoch change")] + UnexpectedEpochChange, + /// Parent block has no associated weight + #[error("Parent block of {0} has no associated weight")] + ParentBlockNoAssociatedWeight(B::Hash), + /// Check inherents error + #[error("Checking inherents failed: {0}")] + CheckInherents(sp_inherents::Error), + /// Unhandled check inherents error + #[error("Checking inherents unhandled error: {}", String::from_utf8_lossy(.0))] + CheckInherentsUnhandled(sp_inherents::InherentIdentifier), + /// Create inherents error. + #[error("Creating inherents failed: {0}")] + CreateInherents(sp_inherents::Error), + /// Client error + #[error(transparent)] + Client(sp_blockchain::Error), + /// Runtime Api error. + #[error(transparent)] + RuntimeApi(sp_api::ApiError), + /// Fork tree error + #[error(transparent)] + ForkTree(Box>), +} + +impl From> for String { + fn from(error: Error) -> String { + error.to_string() + } +} + +// Convenience function for error logging +fn sassafras_err(err: Error) -> Error { + error!(target: LOG_TARGET, "{}", err); + err +} + +/// Secret seed +#[derive(Debug, Clone, Encode, Decode, PartialEq)] +pub struct TicketSecret { + /// Attempt index + pub(crate) attempt_idx: u32, + /// Secret seed + pub(crate) seed: [u8; 32], +} + +/// Primitive epoch newtype. 
+#[derive(Debug, Clone, Encode, Decode, PartialEq)]
+pub struct Epoch {
+    pub(crate) inner: sp_consensus_sassafras::Epoch,
+    pub(crate) tickets_aux: BTreeMap,
+}
+
+use std::ops::{Deref, DerefMut};
+
+impl Deref for Epoch {
+    type Target = sp_consensus_sassafras::Epoch;
+
+    fn deref(&self) -> &Self::Target {
+        &self.inner
+    }
+}
+
+impl DerefMut for Epoch {
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        &mut self.inner
+    }
+}
+
+impl From for Epoch {
+    fn from(epoch: sp_consensus_sassafras::Epoch) -> Self {
+        Epoch { inner: epoch, tickets_aux: Default::default() }
+    }
+}
+
+impl EpochT for Epoch {
+    type NextEpochDescriptor = NextEpochDescriptor;
+    type Slot = Slot;
+
+    fn increment(&self, descriptor: NextEpochDescriptor) -> Epoch {
+        sp_consensus_sassafras::Epoch {
+            index: self.index + 1,
+            start: self.start + self.length as u64,
+            length: self.length,
+            authorities: descriptor.authorities,
+            randomness: descriptor.randomness,
+            config: descriptor.config.unwrap_or(self.config),
+        }
+        .into()
+    }
+
+    fn start_slot(&self) -> Slot {
+        self.start
+    }
+
+    fn end_slot(&self) -> Slot {
+        self.start + self.length as u64
+    }
+}
+
+impl Epoch {
+    /// Create the genesis epoch (epoch #0). This is defined to start at the slot of
+    /// the first block, so that has to be provided.
+    pub fn genesis(config: &Epoch, slot: Slot) -> Epoch {
+        let mut epoch = config.clone();
+        epoch.index = 0;
+        epoch.start = slot;
+        epoch
+    }
+}
+
+/// Read the protocol configuration from the blockchain state corresponding
+/// to the last finalized block.
+pub fn finalized_configuration(client: &C) -> ClientResult
+where
+    B: BlockT,
+    C: ProvideRuntimeApi + UsageProvider,
+    C::Api: SassafrasApi,
+{
+    let info = client.usage_info().chain;
+    let hash = info.finalized_state.map(|(hash, _)| hash).unwrap_or_else(|| {
+        debug!(target: LOG_TARGET, "Reading config from genesis");
+        info.genesis_hash
+    });
+
+    let epoch = client.runtime_api().current_epoch(hash)?;
+    Ok(epoch.into())
+}
+
+/// Intermediate value passed to block importer from authoring or validation logic.
+pub struct SassafrasIntermediate {
+    /// The epoch descriptor.
+    pub epoch_descriptor: ViableEpochDescriptor, Epoch>,
+}
+
+/// Extract the Sassafras slot claim from the given header.
+///
+/// The slot-claim digest is mandatory; the function returns `Err` if none is found.
+fn find_slot_claim(header: &B::Header) -> Result> {
+    if header.number().is_zero() {
+        // The genesis block doesn't contain a slot-claim, so let's generate a
+        // dummy one just to not break any invariant in the rest of the code.
+        use sp_core::crypto::VrfSecret;
+        let pair = sp_consensus_sassafras::AuthorityPair::from_seed(&[0u8; 32]);
+        let data = vrf::slot_claim_sign_data(&Default::default(), 0.into(), 0);
+        return Ok(SlotClaim {
+            authority_idx: 0,
+            slot: 0.into(),
+            ticket_claim: None,
+            vrf_signature: pair.as_ref().vrf_sign(&data),
+        })
+    }
+
+    let mut claim: Option<_> = None;
+    for log in header.digest().logs() {
+        match (log.try_into(), claim.is_some()) {
+            (Ok(_), true) => return Err(sassafras_err(Error::MultipleSlotClaimDigests)),
+            (Err(_), _) => trace!(target: LOG_TARGET, "Ignoring digest not meant for us"),
+            (Ok(c), false) => claim = Some(c),
+        }
+    }
+    claim.ok_or_else(|| sassafras_err(Error::MissingSlotClaimDigest))
+}
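The epoch arithmetic used by the `EpochT` implementation above is purely additive; a tiny self-contained sketch (field and type names simplified, not the crate's actual types):

#[derive(Clone, Copy, Debug, PartialEq)]
struct EpochInfo {
    index: u64,
    start: u64,  // first slot of the epoch
    length: u64, // duration in slots
}

impl EpochInfo {
    // Mirrors `EpochT::increment`: the next epoch begins exactly where the
    // current one ends, with the same length.
    fn increment(self) -> EpochInfo {
        EpochInfo { index: self.index + 1, start: self.start + self.length, ..self }
    }
    // Mirrors `end_slot`: the end is exclusive.
    fn end_slot(self) -> u64 {
        self.start + self.length
    }
}

fn main() {
    let e0 = EpochInfo { index: 0, start: 1, length: 6 };
    let e1 = e0.increment();
    assert_eq!((e1.index, e1.start), (1, 7));
    assert_eq!(e0.end_slot(), e1.start); // epochs tile the slot axis with no gaps
}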
+/// Extract the Sassafras epoch change digest from the given header, if it exists.
+fn find_next_epoch_digest(
+    header: &B::Header,
+) -> Result, Error> {
+    let mut epoch_digest: Option<_> = None;
+    for log in header.digest().logs() {
+        let log = log.try_to::(OpaqueDigestItemId::Consensus(&SASSAFRAS_ENGINE_ID));
+        match (log, epoch_digest.is_some()) {
+            (Some(ConsensusLog::NextEpochData(_)), true) =>
+                return Err(sassafras_err(Error::MultipleEpochChangeDigests)),
+            (Some(ConsensusLog::NextEpochData(epoch)), false) => epoch_digest = Some(epoch),
+            _ => trace!(target: LOG_TARGET, "Ignoring digest not meant for us"),
+        }
+    }
+
+    Ok(epoch_digest)
+}
+
+/// State that must be shared between the import queue and the authoring logic.
+#[derive(Clone)]
+pub struct SassafrasLink {
+    /// Epoch changes tree
+    epoch_changes: SharedEpochChanges,
+    /// Startup configuration. Read from the runtime at the last finalized block.
+    genesis_config: Epoch,
+}
+
+impl SassafrasLink {
+    /// Get the config of this link.
+    pub fn genesis_config(&self) -> &Epoch {
+        &self.genesis_config
+    }
+}
+
+/// Start an import queue for the Sassafras consensus algorithm.
+///
+/// This method returns the import queue. The `sassafras_link` argument carries the
+/// epoch-changes tree and the genesis configuration that must also be passed to the
+/// block authoring logic; it is produced by [`block_import`], which additionally
+/// prunes the epoch-changes tree at the finalized block on startup.
+///
+/// The block import object provided must be the `SassafrasBlockImport` or a wrapper of it,
+/// otherwise crucial import logic will be omitted.
+pub fn import_queue(
+    sassafras_link: SassafrasLink,
+    block_import: BI,
+    justification_import: Option>,
+    client: Arc,
+    select_chain: SelectChain,
+    create_inherent_data_providers: CIDP,
+    spawner: &impl sp_core::traits::SpawnEssentialNamed,
+    registry: Option<&Registry>,
+    telemetry: Option,
+) -> ClientResult>
+where
+    Client: ProvideRuntimeApi
+        + HeaderBackend
+        + HeaderMetadata
+        + AuxStore
+        + Send
+        + Sync
+        + 'static,
+    Client::Api: BlockBuilderApi + SassafrasApi + ApiExt,
+    BI: BlockImport + Send + Sync + 'static,
+    SelectChain: sp_consensus::SelectChain + 'static,
+    CIDP: CreateInherentDataProviders + Send + Sync + 'static,
+    CIDP::InherentDataProviders: InherentDataProviderExt + Send + Sync,
+{
+    let verifier = SassafrasVerifier::new(
+        client,
+        select_chain,
+        create_inherent_data_providers,
+        sassafras_link.epoch_changes,
+        sassafras_link.genesis_config,
+        telemetry,
+    );
+
+    Ok(BasicQueue::new(verifier, Box::new(block_import), justification_import, spawner, registry))
+}
diff --git a/substrate/client/consensus/sassafras/src/tests.rs b/substrate/client/consensus/sassafras/src/tests.rs
new file mode 100644
index 000000000000..5f416f234fbe
--- /dev/null
+++ b/substrate/client/consensus/sassafras/src/tests.rs
@@ -0,0 +1,937 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2022 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+//! Sassafras client tests
+
+// TODO @davxy
+// Missing tests
+// - verify block claimed via primary method
+// - tests using tickets to claim slots. Current tests just don't register any on-chain tickets.
+use super::*;
+
+use futures::executor::block_on;
+use std::sync::Arc;
+
+use sc_block_builder::BlockBuilderBuilder;
+use sc_client_api::Finalizer;
+use sc_consensus::{BlockImport, BoxJustificationImport};
+use sc_network_test::*;
+use sc_transaction_pool_api::{OffchainTransactionPoolFactory, RejectAllTxPool};
+use sp_application_crypto::key_types::SASSAFRAS;
+use sp_blockchain::Error as TestError;
+use sp_consensus::{DisableProofRecording, NoNetwork as DummyOracle, Proposal};
+use sp_consensus_sassafras::{EphemeralPublic, SlotDuration};
+use sp_core::crypto::UncheckedFrom;
+use sp_keyring::BandersnatchKeyring as Keyring;
+use sp_keystore::{testing::MemoryKeystore, Keystore};
+use sp_runtime::{Digest, DigestItem};
+use sp_timestamp::Timestamp;
+
+use substrate_test_runtime_client::{runtime::Block as TestBlock, Backend as TestBackend};
+
+// Specialization of generic structures for the test context.
+
+type TestHeader = ::Header;
+
+type TestClient = substrate_test_runtime_client::client::Client<
+    TestBackend,
+    substrate_test_runtime_client::ExecutorDispatch,
+    TestBlock,
+    substrate_test_runtime_client::runtime::RuntimeApi,
+>;
+
+type TestSelectChain =
+    substrate_test_runtime_client::LongestChain;
+
+type TestBlockImportParams = BlockImportParams;
+
+type TestViableEpochDescriptor = sc_consensus_epochs::ViableEpochDescriptor;
+
+// Monomorphization of Sassafras structures for the test context.
+
+type SassafrasIntermediate = crate::SassafrasIntermediate;
+
+type SassafrasBlockImport = crate::SassafrasBlockImport>;
+
+type SassafrasVerifier = crate::SassafrasVerifier<
+    TestBlock,
+    PeersFullClient,
+    TestSelectChain,
+    Box<
+        dyn CreateInherentDataProviders<
+            TestBlock,
+            (),
+            InherentDataProviders = (InherentDataProvider,),
+        >,
+    >,
+>;
+
+type SassafrasLink = crate::SassafrasLink;
+
+// Epoch length in slots
+const EPOCH_LENGTH: u32 = 6;
+// Slot duration in milliseconds
+const SLOT_DURATION: SlotDuration = SlotDuration::from_millis(1000_u64);
+
+struct TestProposer {
+    client: Arc,
+    parent_hash: Hash,
+}
+
+impl TestProposer {
+    fn propose_block(self, digest: Digest) -> TestBlock {
+        block_on(self.propose(InherentData::default(), digest, Duration::default(), None))
+            .expect("Proposing block")
+            .block
+    }
+}
+
+impl Proposer for TestProposer {
+    type Error = TestError;
+    type Proposal = future::Ready, Self::Error>>;
+    type ProofRecording = DisableProofRecording;
+    type Proof = ();
+
+    fn propose(
+        self,
+        _: InherentData,
+        inherent_digests: Digest,
+        _: Duration,
+        _: Option,
+    ) -> Self::Proposal {
+        let block_builder = BlockBuilderBuilder::new(&*self.client)
+            .on_parent_block(self.parent_hash)
+            .fetch_parent_block_number(&*self.client)
+            .unwrap()
+            .with_inherent_digests(inherent_digests)
+            .build()
+            .unwrap();
+
+        let block = match block_builder.build().map_err(|e| e.into()) {
+            Ok(b) => b.block,
+            Err(e) => return future::ready(Err(e)),
+        };
+
+        future::ready(Ok(Proposal { block, proof: (), storage_changes: Default::default() }))
+    }
+}
+
+struct TestContext {
+    client: Arc,
+    backend: Arc,
+    link: SassafrasLink,
+    block_import: SassafrasBlockImport,
+    verifier: SassafrasVerifier,
+    keystore: KeystorePtr,
+}
+
+fn create_test_verifier(
+    client: Arc,
+    link: &SassafrasLink,
+    config: Epoch,
+) -> SassafrasVerifier {
+    let create_inherent_data_providers = Box::new(move |_, _|
 async move {
+        let slot = InherentDataProvider::from_timestamp(Timestamp::current(), SLOT_DURATION);
+        Ok((slot,))
+    });
+
+    let (_, longest_chain) = TestClientBuilder::with_default_backend().build_with_longest_chain();
+
+    SassafrasVerifier::new(
+        client.clone(),
+        longest_chain,
+        create_inherent_data_providers,
+        link.epoch_changes.clone(),
+        config,
+        None,
+    )
+}
+
+fn create_test_block_import(
+    client: Arc,
+    config: Epoch,
+) -> (SassafrasBlockImport, SassafrasLink) {
+    crate::block_import(config, client.clone(), client.clone())
+        .expect("can initialize block-import")
+}
+
+fn create_test_keystore(authority: Keyring) -> KeystorePtr {
+    let keystore = MemoryKeystore::new();
+    keystore
+        .bandersnatch_generate_new(SASSAFRAS, Some(&authority.to_seed()))
+        .unwrap();
+    keystore.into()
+}
+
+fn create_test_epoch() -> Epoch {
+    sp_consensus_sassafras::Epoch {
+        index: 0,
+        start: 0.into(),
+        length: EPOCH_LENGTH,
+        randomness: [0; 32],
+        authorities: vec![
+            Keyring::Alice.public().into(),
+            Keyring::Bob.public().into(),
+            Keyring::Charlie.public().into(),
+        ],
+        config: EpochConfiguration { redundancy_factor: 1, attempts_number: 32 },
+    }
+    .into()
+}
+
+impl TestContext {
+    fn new() -> Self {
+        let (client, backend) = TestClientBuilder::with_default_backend().build_with_backend();
+        let client = Arc::new(client);
+
+        // Note: the configuration is loaded using the `TestClient` instance as the runtime-api
+        // provider. In practice this will use the values defined within the test runtime
+        // of the `substrate_test_runtime` crate.
+        let config = crate::finalized_configuration(&*client).expect("config available");
+
+        let (block_import, link) = create_test_block_import(client.clone(), config.clone());
+
+        // Create a keystore with the default testing key
+        let keystore = create_test_keystore(Keyring::Alice);
+
+        let verifier = create_test_verifier(client.clone(), &link, config.clone());
+
+        Self { client, backend, link, block_import, verifier, keystore }
+    }
+
+    // This is a somewhat hacky solution that lets us use `TestContext` as an `Environment`
+    // implementation.
+    fn new_with_pre_built_components(
+        client: Arc,
+        backend: Arc,
+        link: SassafrasLink,
+        block_import: SassafrasBlockImport,
+        keystore: KeystorePtr,
+    ) -> Self {
+        let verifier = create_test_verifier(client.clone(), &link, link.genesis_config.clone());
+        Self { client, backend, link, block_import, verifier, keystore }
+    }
+
+    fn import_block(&mut self, mut params: TestBlockImportParams) -> Result {
+        let post_hash = params.post_hash();
+
+        if params.post_digests.is_empty() {
+            // Assume that the seal has not been removed yet, and remove it here.
+            // NOTE: the digest may be empty because some tests intentionally clear
+            // the whole digest log.
+ if let Some(seal) = params.header.digest_mut().pop() { + params.post_digests.push(seal); + } + } + + block_on(self.block_import.import_block(params)).map(|ir| match ir { + ImportResult::Imported(_) => post_hash, + _ => panic!("Unexpected outcome"), + }) + } + + fn verify_block(&mut self, params: TestBlockImportParams) -> TestBlockImportParams { + block_on(self.verifier.verify(params)).unwrap() + } + + fn epoch_data(&self, parent_hash: &Hash, parent_number: u64, slot: Slot) -> Epoch { + self.link + .epoch_changes + .shared_data() + .epoch_data_for_child_of( + descendent_query(&*self.client), + &parent_hash, + parent_number, + slot, + |slot| Epoch::genesis(&self.link.genesis_config, slot), + ) + .unwrap() + .unwrap() + } + + fn epoch_descriptor( + &self, + parent_hash: &Hash, + parent_number: u64, + slot: Slot, + ) -> TestViableEpochDescriptor { + self.link + .epoch_changes + .shared_data() + .epoch_descriptor_for_child_of( + descendent_query(&*self.client), + &parent_hash, + parent_number, + slot, + ) + .unwrap() + .unwrap() + } + + // Propose a block + fn propose_block(&mut self, parent_hash: Hash, slot: Option) -> TestBlockImportParams { + let parent_header = self.client.header(parent_hash).unwrap().unwrap(); + let parent_number = *parent_header.number(); + + let public = self.keystore.bandersnatch_public_keys(SASSAFRAS)[0]; + + let proposer = block_on(self.init(&parent_header)).unwrap(); + + let slot = slot.unwrap_or_else(|| { + let parent_claim = find_slot_claim::(&parent_header).unwrap(); + parent_claim.slot + 1 + }); + + // TODO @davxy: maybe here we can use the epoch.randomness??? + let epoch = self.epoch_data(&parent_hash, parent_number, slot); + let sign_data = + vrf::slot_claim_sign_data(&self.link.genesis_config.randomness, slot, epoch.index); + let vrf_signature = self + .keystore + .bandersnatch_vrf_sign(SASSAFRAS, &public, &sign_data) + .unwrap() + .unwrap(); + + let claim = SlotClaim { slot, authority_idx: 0, vrf_signature, ticket_claim: None }; + let digest = sp_runtime::generic::Digest { logs: vec![DigestItem::from(&claim)] }; + + let mut block = proposer.propose_block(digest); + + let epoch_descriptor = self.epoch_descriptor(&parent_hash, parent_number, slot); + + // Sign the pre-sealed hash of the block and then add it to the digest. + let hash = block.header.hash(); + let signature: AuthoritySignature = self + .keystore + .bandersnatch_sign(SASSAFRAS, &public, hash.as_ref()) + .unwrap() + .unwrap() + .into(); + let seal = DigestItem::from(&signature); + block.header.digest_mut().push(seal); + + let mut params = BlockImportParams::new(BlockOrigin::Own, block.header); + params.fork_choice = Some(ForkChoiceStrategy::LongestChain); + params.body = Some(block.extrinsics); + params.insert_intermediate(INTERMEDIATE_KEY, SassafrasIntermediate { epoch_descriptor }); + + params + } + + // Propose and import a new block on top of the given parent. + // This skips verification. + fn propose_and_import_block(&mut self, parent_hash: Hash, slot: Option) -> Hash { + let params = self.propose_block(parent_hash, slot); + self.import_block(params).unwrap() + } + + // Propose and import n valid blocks that are built on top of the given parent. + // The proposer takes care of producing epoch change digests according to the epoch + // duration (which is set by the test runtime). 
+    fn propose_and_import_blocks(&mut self, mut parent_hash: Hash, n: usize) -> Vec {
+        let mut hashes = Vec::with_capacity(n);
+        for _ in 0..n {
+            let hash = self.propose_and_import_block(parent_hash, None);
+            hashes.push(hash);
+            parent_hash = hash;
+        }
+        hashes
+    }
+}
+
+// Check that the protocol config returned by the runtime interface is equal to the expected one
+#[test]
+fn tests_assumptions_sanity_check() {
+    let env = TestContext::new();
+    assert_eq!(env.link.genesis_config, create_test_epoch());
+    // The protocol needs at least two VRF IOs
+    assert!(sp_core::bandersnatch::vrf::MAX_VRF_IOS >= 2);
+}
+
+#[test]
+fn claim_secondary_slots_works() {
+    let mut epoch = create_test_epoch();
+    epoch.index = 1;
+    epoch.start = 6.into();
+    epoch.randomness = [2; 32];
+
+    let authorities = [Keyring::Alice, Keyring::Bob, Keyring::Charlie];
+
+    let mut assignments = vec![usize::MAX; epoch.length as usize];
+
+    for (auth_idx, auth_id) in authorities.iter().enumerate() {
+        let keystore = create_test_keystore(*auth_id);
+
+        for slot in 0..epoch.length as u64 {
+            if let Some((claim, auth_id2)) =
+                authorship::claim_slot(slot.into(), &mut epoch, None, &keystore)
+            {
+                assert_eq!(claim.authority_idx as usize, auth_idx);
+                assert_eq!(claim.slot, Slot::from(slot));
+                assert_eq!(claim.ticket_claim, None);
+                assert_eq!(auth_id.public(), auth_id2.into());
+
+                // Check that this slot has not been assigned before
+                assert_eq!(assignments[slot as usize], usize::MAX);
+                assignments[slot as usize] = auth_idx;
+            }
+        }
+    }
+    // Check that every slot has been assigned
+    assert!(assignments.iter().all(|v| *v != usize::MAX));
+    println!("secondary slots assignments: {:?}", assignments);
+}
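The property the test above checks is that the secondary method assigns every slot of the epoch to exactly one authority. An illustrative stand-in follows; the real `authorship` code also mixes the epoch randomness into the index computation, so plain modulo is an assumption made here only to show the shape of the guarantee:

fn secondary_index(slot: u64, num_authorities: u64) -> u64 {
    // Any deterministic slot -> authority map yields the "each slot assigned
    // exactly once" property; production code also folds in randomness.
    slot % num_authorities
}

fn main() {
    let assignments: Vec<u64> = (0..6).map(|s| secondary_index(s, 3)).collect();
    assert_eq!(assignments, vec![0, 1, 2, 0, 1, 2]);
}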
+
+#[test]
+fn claim_primary_slots_works() {
+    // Here the test is deterministic: if the node's epoch `tickets_aux` already holds
+    // the auxiliary information corresponding to the presented ticket, then claiming
+    // the slot should just return that auxiliary information.
+    let mut epoch = create_test_epoch();
+    epoch.randomness = [2; 32];
+    epoch.index = 1;
+    epoch.start = 6.into();
+
+    let keystore = create_test_keystore(Keyring::Alice);
+    let alice_authority_idx = 0_u32;
+
+    let ticket_id = 123;
+    let erased_public = EphemeralPublic::unchecked_from([0; 32]);
+    let revealed_public = erased_public;
+    let ticket_body = TicketBody { attempt_idx: 0, erased_public, revealed_public };
+    let ticket_secret = TicketSecret { attempt_idx: 0, seed: [0; 32] };
+
+    // Fail if we have the authority key in our keystore but no ticket aux data
+    // ticket-aux = None && authority-key = Some => claim = None
+
+    let claim = authorship::claim_slot(
+        0.into(),
+        &mut epoch,
+        Some((ticket_id, ticket_body.clone())),
+        &keystore,
+    );
+
+    assert!(claim.is_none());
+    assert!(epoch.tickets_aux.is_empty());
+
+    // Success if we have the ticket aux data and the authority key in our keystore
+    // ticket-aux = Some && authority-key = Some => claim = Some
+
+    epoch
+        .tickets_aux
+        .insert(ticket_id, (alice_authority_idx, ticket_secret.clone()));
+
+    let (claim, auth_id) = authorship::claim_slot(
+        0.into(),
+        &mut epoch,
+        Some((ticket_id, ticket_body.clone())),
+        &keystore,
+    )
+    .unwrap();
+
+    assert!(epoch.tickets_aux.is_empty());
+    assert_eq!(claim.authority_idx, alice_authority_idx);
+    assert_eq!(auth_id, Keyring::Alice.public().into());
+
+    // Fail if we have the ticket aux data but not the authority key in our keystore
+    // ticket-aux = Some && authority-key = None => claim = None
+
+    epoch.tickets_aux.insert(ticket_id, (alice_authority_idx + 1, ticket_secret));
+
+    let claim =
+        authorship::claim_slot(0.into(), &mut epoch, Some((ticket_id, ticket_body)), &keystore);
+    assert!(claim.is_none());
+    assert!(epoch.tickets_aux.is_empty());
+}
+
+#[test]
+fn import_rejects_block_without_slot_claim() {
+    let mut env = TestContext::new();
+
+    let mut import_params = env.propose_block(env.client.info().genesis_hash, Some(999.into()));
+    // Remove logs from the header
+    import_params.header.digest_mut().logs.clear();
+
+    let res = env.import_block(import_params);
+
+    assert_eq!(res.unwrap_err().to_string(), "Import failed: No slot-claim digest found");
+}
+
+#[test]
+fn import_rejects_block_with_unexpected_epoch_changes() {
+    let mut env = TestContext::new();
+
+    let hash1 = env.propose_and_import_block(env.client.info().genesis_hash, None);
+
+    let mut import_params = env.propose_block(hash1, None);
+    // Insert an epoch change announcement when it is not required.
+    let digest_data = ConsensusLog::NextEpochData(NextEpochDescriptor {
+        authorities: env.link.genesis_config.authorities.clone(),
+        randomness: env.link.genesis_config.randomness,
+        config: None,
+    })
+    .encode();
+    let digest_item = DigestItem::Consensus(SASSAFRAS_ENGINE_ID, digest_data);
+    let digest = import_params.header.digest_mut();
+    digest.logs.insert(digest.logs.len() - 1, digest_item);
+
+    let res = env.import_block(import_params);
+
+    assert_eq!(res.unwrap_err().to_string(), "Import failed: Unexpected epoch change");
+}
+
+#[test]
+fn import_rejects_block_with_missing_epoch_changes() {
+    let mut env = TestContext::new();
+
+    let blocks =
+        env.propose_and_import_blocks(env.client.info().genesis_hash, EPOCH_LENGTH as usize);
+
+    let mut import_params = env.propose_block(blocks[EPOCH_LENGTH as usize - 1], None);
+
+    let digest = import_params.header.digest_mut();
+    // Remove the epoch change announcement.
+    // (Implementation detail: it should be the second to last entry, just before the seal)
+    digest.logs.remove(digest.logs.len() - 2);
+
+    let res = env.import_block(import_params);
+
+    assert!(res
+        .unwrap_err()
+        .to_string()
+        .contains("Import failed: Expected epoch change to happen"));
+}
+
+#[test]
+fn importing_block_one_sets_genesis_epoch() {
+    let mut env = TestContext::new();
+
+    let block_hash = env.propose_and_import_block(env.client.info().genesis_hash, Some(999.into()));
+
+    let epoch_for_second_block = env.epoch_data(&block_hash, 1, 1000.into());
+    let genesis_epoch = Epoch::genesis(&env.link.genesis_config, 999.into());
+    assert_eq!(epoch_for_second_block, genesis_epoch);
+}
+
+#[test]
+fn allows_to_skip_epochs() {
+    // Test scenario.
+    // Epoch length: 6 slots
+    //
+    // Block# : [ 1  2  3  4  5  6 ][ 7  -  -  -  -  - ][ -  -  -  -  -  - ][ 8  ... ]
+    // Slot#  : [ 1  2  3  4  5  6 ][ 7  8  9 10 11 12 ][ 13 14 15 16 17 18 ][ 19 ... ]
+    // Epoch# : [        0         ][        1         ][     skipped      ][ 3  ... ]
+    //
+    // As a recovery strategy, a fallback epoch 3 is created by reusing part of the
+    // configuration created for epoch 2.
+    let mut env = TestContext::new();
+
+    let blocks = env.propose_and_import_blocks(env.client.info().genesis_hash, 7);
+
+    // First block after a skipped epoch (block #8 @ slot #19)
+    let block = env.propose_and_import_block(*blocks.last().unwrap(), Some(19.into()));
+
+    let epoch_changes = env.link.epoch_changes.shared_data();
+    let epochs: Vec<_> = epoch_changes.tree().iter().collect();
+    assert_eq!(epochs.len(), 3);
+    assert_eq!(*epochs[0].0, blocks[0]);
+    assert_eq!(*epochs[0].1, 1);
+    assert_eq!(*epochs[1].0, blocks[6]);
+    assert_eq!(*epochs[1].1, 7);
+    assert_eq!(*epochs[2].0, block);
+    assert_eq!(*epochs[2].1, 8);
+
+    // First block in E0 (B1) announces E0 (this is special)
+    let data = epoch_changes
+        .epoch(&EpochIdentifier {
+            position: EpochIdentifierPosition::Genesis0,
+            hash: blocks[0],
+            number: 1,
+        })
+        .unwrap();
+    assert_eq!(data.index, 0);
+    assert_eq!(data.start, Slot::from(1));
+
+    // First block in E0 (B1) also announces E1
+    let data = epoch_changes
+        .epoch(&EpochIdentifier {
+            position: EpochIdentifierPosition::Genesis1,
+            hash: blocks[0],
+            number: 1,
+        })
+        .unwrap();
+    assert_eq!(data.index, 1);
+    assert_eq!(data.start, Slot::from(7));
+
+    // First block in E1 (B7) announces E2
+    // NOTE: the config is reused by E3 without altering the epoch node values.
+    // This will break as soon as our assumptions about how fork-tree traversal works
+    // are not met anymore (this is a good thing)
+    let data = epoch_changes
+        .epoch(&EpochIdentifier {
+            position: EpochIdentifierPosition::Regular,
+            hash: blocks[6],
+            number: 7,
+        })
+        .unwrap();
+    assert_eq!(data.index, 2);
+    assert_eq!(data.start, Slot::from(13));
+
+    // First block in E3 (B8) announces E4.
+    let data = epoch_changes
+        .epoch(&EpochIdentifier {
+            position: EpochIdentifierPosition::Regular,
+            hash: block,
+            number: 8,
+        })
+        .unwrap();
+    assert_eq!(data.index, 4);
+    assert_eq!(data.start, Slot::from(25));
+}
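The slot/epoch bookkeeping behind this scenario is worth spelling out. With genesis slot 1 and epoch length 6, the epoch index is recovered from a slot by integer division, which is what makes a wholly empty epoch "skippable" (helper names below are hypothetical):

fn epoch_index(slot: u64, genesis_slot: u64, epoch_length: u64) -> u64 {
    (slot - genesis_slot) / epoch_length
}

fn epoch_start(index: u64, genesis_slot: u64, epoch_length: u64) -> u64 {
    genesis_slot + index * epoch_length
}

fn main() {
    // Block #8 lands at slot 19: that is epoch 3; epoch 2 had no blocks at all.
    assert_eq!(epoch_index(19, 1, 6), 3);
    // The epoch announced by that block (epoch 4) starts at slot 25, matching
    // the assertion in the test above.
    assert_eq!(epoch_start(4, 1, 6), 25);
}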
+
+#[test]
+fn finalization_prunes_epoch_changes_and_removes_weights() {
+    let mut env = TestContext::new();
+
+    let canon = env.propose_and_import_blocks(env.client.info().genesis_hash, 21);
+
+    let _fork1 = env.propose_and_import_blocks(canon[0], 10);
+    let _fork2 = env.propose_and_import_blocks(canon[7], 10);
+    let _fork3 = env.propose_and_import_blocks(canon[11], 8);
+
+    let epoch_changes = env.link.epoch_changes.clone();
+
+    // We should be tracking a total of 8 epochs in the fork tree
+    assert_eq!(epoch_changes.shared_data().tree().iter().count(), 8);
+    // And only one root
+    assert_eq!(epoch_changes.shared_data().tree().roots().count(), 1);
+
+    // Pre-finalize scenario.
+    //
+    // X(#y): a block (number y) announcing the next epoch data.
+    // Information for the epoch starting at block #19 is produced on three different forks
+    // at block #13.
+    //
+    // Finalize block #14
+    //
+    //                  *---------------- F(#13) --#18                            < fork #2
+    //                 /
+    // A(#1) ---- B(#7) ----#8----------#12---- C(#13) ---- D(#19) ------#21      < canon
+    //  \                        \
+    //   \                        *---- G(#13) ---- H(#19) ---#20                 < fork #3
+    //    \
+    //     *-----E(#7)---#11                                                      < fork #1
+
+    // Finalize block #14 so that on the next epoch change the tree is pruned
+    env.client.finalize_block(canon[13], None, true).unwrap();
+    let canon_tail = env.propose_and_import_blocks(*canon.last().unwrap(), 4);
+
+    // Post-finalize scenario.
+    //
+    // B(#7)------ C(#13) ---- D(#19) ------Z(#25)
+
+    let epoch_changes = epoch_changes.shared_data();
+    let epoch_changes: Vec<_> = epoch_changes.tree().iter().map(|(h, _, _)| *h).collect();
+
+    assert_eq!(epoch_changes, vec![canon[6], canon[12], canon[18], canon_tail[3]]);
+
+    // TODO @davxy
+    // todo!("Requires aux_storage_cleanup");
+}
+
+#[test]
+fn revert_prunes_epoch_changes_and_removes_weights() {
+    let mut env = TestContext::new();
+
+    let canon = env.propose_and_import_blocks(env.client.info().genesis_hash, 21);
+    let fork1 = env.propose_and_import_blocks(canon[0], 10);
+    let fork2 = env.propose_and_import_blocks(canon[7], 10);
+    let fork3 = env.propose_and_import_blocks(canon[11], 8);
+
+    let epoch_changes = env.link.epoch_changes.clone();
+
+    // We should be tracking a total of 8 epochs in the fork tree
+    assert_eq!(epoch_changes.shared_data().tree().iter().count(), 8);
+    // And only one root
+    assert_eq!(epoch_changes.shared_data().tree().roots().count(), 1);
+
+    // Pre-revert scenario.
+    //
+    // X(#y): a block (number y) announcing the next epoch data.
+    // Information for the epoch starting at block #19 is produced on three different forks
+    // at block #13.
+    // One branch starts before the revert point (epoch data should be maintained).
+    // One branch starts after the revert point (epoch data should be removed).
+    //
+    //                    *----------------- F(#13) --#18                         < fork #2
+    //                   /
+    // A(#1) ---- B(#7) ----#8----+-----#12----- C(#13) ---- D(#19) ------#21     < canon
+    //  \                  ^      \
+    //   \             revert      *---- G(#13) ---- H(#19) ---#20                < fork #3
+    //    \            to #10
+    //     *-----E(#7)---#11                                                      < fork #1
+
+    // Revert canon chain to block #10 (best(21) - 11)
+    crate::revert(env.backend.clone(), 11).unwrap();
+
+    // Post-revert expected scenario.
+    //
+    //                    *----------------- F(#13) --#18
+    //                   /
+    // A(#1) ---- B(#7) ----#8----#10
+    //  \
+    //   *------ E(#7)---#11
+
+    // Load and check epoch changes.
+ + let actual_nodes = aux_schema::load_epoch_changes::(&*env.client) + .unwrap() + .shared_data() + .tree() + .iter() + .map(|(h, _, _)| *h) + .collect::>(); + + let expected_nodes = vec![ + canon[0], // A + canon[6], // B + fork2[4], // F + fork1[5], // E + ]; + + assert_eq!(actual_nodes, expected_nodes); + + let weight_data_check = |hashes: &[Hash], expected: bool| { + hashes.iter().all(|hash| { + aux_schema::load_block_weight(&*env.client, hash).unwrap().is_some() == expected + }) + }; + assert!(weight_data_check(&canon[..10], true)); + assert!(weight_data_check(&canon[10..], false)); + assert!(weight_data_check(&fork1, true)); + assert!(weight_data_check(&fork2, true)); + assert!(weight_data_check(&fork3, false)); +} + +#[test] +fn revert_stops_at_last_finalized() { + let mut env = TestContext::new(); + + let canon = env.propose_and_import_blocks(env.client.info().genesis_hash, 3); + + // Finalize best block + env.client.finalize_block(canon[2], None, false).unwrap(); + + // Reverts canon chain down to last finalized block + crate::revert(env.backend.clone(), 100).expect("revert should work for baked test scenario"); + + let weight_data_check = |hashes: &[Hash], expected: bool| { + hashes.iter().all(|hash| { + aux_schema::load_block_weight(&*env.client, hash).unwrap().is_some() == expected + }) + }; + assert!(weight_data_check(&canon, true)); +} + +#[test] +fn verify_block_claimed_via_secondary_method() { + let mut env = TestContext::new(); + + let blocks = env.propose_and_import_blocks(env.client.info().genesis_hash, 7); + + let in_params = env.propose_block(blocks[6], Some(6.into())); + + let _out_params = env.verify_block(in_params); +} + +// //================================================================================================= +// // More complex tests involving communication between multiple nodes. +// // +// // These tests are performed via a specially crafted test network. +// // Closer to integration test than unit tests... 
+// //=================================================================================================
+
+impl Environment for TestContext {
+    type CreateProposer = future::Ready>;
+    type Proposer = TestProposer;
+    type Error = TestError;
+
+    fn init(&mut self, parent_header: &TestHeader) -> Self::CreateProposer {
+        future::ready(Ok(TestProposer {
+            client: self.client.clone(),
+            parent_hash: parent_header.hash(),
+        }))
+    }
+}
+
+struct PeerData {
+    link: SassafrasLink,
+    block_import: SassafrasBlockImport,
+}
+
+type SassafrasPeer = Peer, SassafrasBlockImport>;
+
+#[derive(Default)]
+struct SassafrasTestNet {
+    peers: Vec,
+}
+
+impl TestNetFactory for SassafrasTestNet {
+    type BlockImport = SassafrasBlockImport;
+    type Verifier = SassafrasVerifier;
+    type PeerData = Option;
+
+    fn make_block_import(
+        &self,
+        client: PeersClient,
+    ) -> (
+        BlockImportAdapter,
+        Option>,
+        Option,
+    ) {
+        let client = client.as_client();
+
+        let config = crate::finalized_configuration(&*client).expect("config available");
+        let (block_import, link) = create_test_block_import(client.clone(), config);
+
+        (BlockImportAdapter::new(block_import.clone()), None, Some(PeerData { link, block_import }))
+    }
+
+    fn make_verifier(&self, client: PeersClient, maybe_link: &Option) -> Self::Verifier {
+        let client = client.as_client();
+
+        let data = maybe_link.as_ref().expect("data provided to verifier instantiation");
+
+        let config = crate::finalized_configuration(&*client).expect("config available");
+        create_test_verifier(client.clone(), &data.link, config)
+    }
+
+    fn peer(&mut self, i: usize) -> &mut SassafrasPeer {
+        &mut self.peers[i]
+    }
+
+    fn peers(&self) -> &Vec {
+        &self.peers
+    }
+
+    fn peers_mut(&mut self) -> &mut Vec {
+        &mut self.peers
+    }
+
+    fn mut_peers)>(&mut self, closure: F) {
+        closure(&mut self.peers);
+    }
+}
+
+// Multiple nodes authoring and validating blocks
+#[tokio::test]
+async fn sassafras_network_progress() {
+    env_logger::init();
+    let net = SassafrasTestNet::new(3);
+    let net = Arc::new(Mutex::new(net));
+
+    let peers = [Keyring::Alice, Keyring::Bob, Keyring::Charlie];
+
+    let mut import_notifications = Vec::new();
+    let mut sassafras_workers = Vec::new();
+
+    for (peer_id, auth_id) in peers.iter().enumerate() {
+        let mut net = net.lock();
+        let peer = net.peer(peer_id);
+        let client = peer.client().as_client();
+        let backend = peer.client().as_backend();
+        let select_chain = peer.select_chain().expect("Full client has select_chain");
+        let keystore = create_test_keystore(*auth_id);
+        let data = peer.data.as_ref().expect("sassafras link set up during initialization");
+
+        let env = TestContext::new_with_pre_built_components(
+            client.clone(),
+            backend.clone(),
+            data.link.clone(),
+            data.block_import.clone(),
+            keystore.clone(),
+        );
+
+        // Run while the imported block number is less than five or we have not yet
+        // seen both a block produced by us and a block produced by another peer.
+        let mut got_own = false;
+        let mut got_other = false;
+        let import_futures = client
+            .import_notification_stream()
+            .take_while(move |n| {
+                future::ready(
+                    n.header.number() < &5 || {
+                        if n.origin == BlockOrigin::Own {
+                            got_own = true;
+                        } else {
+                            got_other = true;
+                        }
+                        !(got_own && got_other)
+                    },
+                )
+            })
+            .for_each(|_| future::ready(()));
+        import_notifications.push(import_futures);
+
+        let client_clone = client.clone();
+        let create_inherent_data_providers = Box::new(move |parent, _| {
+            // Get the slot of the parent header and just increase this slot.
+            //
+            // Below we run everything in one big future. If we used time-based slots,
+            // one node could import a block from another node and then try to build a
+            // block in the same slot, making this test fail.
+            let parent_header = client_clone.header(parent).ok().flatten().unwrap();
+            let slot = Slot::from(find_slot_claim::(&parent_header).unwrap().slot + 1);
+            async move { Ok((InherentDataProvider::new(slot),)) }
+        });
+        let sassafras_params = SassafrasWorkerParams {
+            client: client.clone(),
+            keystore,
+            select_chain,
+            env,
+            block_import: data.block_import.clone(),
+            sassafras_link: data.link.clone(),
+            sync_oracle: DummyOracle,
+            justification_sync_link: (),
+            force_authoring: false,
+            create_inherent_data_providers,
+            offchain_tx_pool_factory: OffchainTransactionPoolFactory::new(
+                RejectAllTxPool::default(),
+            ),
+        };
+        let sassafras_worker = start_sassafras(sassafras_params).unwrap();
+        sassafras_workers.push(sassafras_worker);
+    }
+
+    future::select(
+        futures::future::poll_fn(move |cx| {
+            let mut net = net.lock();
+            net.poll(cx);
+            net.peers().iter().for_each(|peer| {
+                peer.failed_verifications().iter().next().map(|(h, e)| {
+                    panic!("Verification failed for {:?}: {}", h, e);
+                });
+            });
+            Poll::<()>::Pending
+        }),
+        future::select(future::join_all(import_notifications), future::join_all(sassafras_workers)),
+    )
+    .await;
+}
diff --git a/substrate/client/consensus/sassafras/src/verification.rs b/substrate/client/consensus/sassafras/src/verification.rs
new file mode 100644
index 000000000000..ebb8473d5bb1
--- /dev/null
+++ b/substrate/client/consensus/sassafras/src/verification.rs
@@ -0,0 +1,469 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2022 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+//! Types and functions related to block verification.
+
+use super::*;
+use crate::inherents::SassafrasInherentData;
+use sp_core::{
+    crypto::{VrfPublic, Wraps},
+    ed25519::Pair as EphemeralPair,
+};
+
+// Allowed slot drift.
+const MAX_SLOT_DRIFT: u64 = 1;
+
+/// Verified information
+struct VerifiedHeaderInfo {
+    /// Authority identifier.
+    authority_id: AuthorityId,
+    /// Seal digest found within the header.
+    seal_digest: DigestItem,
+}
+
+/// Check that a header has been signed by the right key. If the slot is too far in
+/// the future, an error is returned. If successful, returns the pre-header
+/// and the digest item containing the seal.
+///
+/// The seal must be the last digest. Otherwise, the whole header is considered
+/// unsigned. This is required for security and must not be changed.
+///
+/// The given header can either be from a primary or secondary slot assignment,
+/// with each having different validation logic.
+fn check_header(
+    mut header: B::Header,
+    claim: &SlotClaim,
+    slot_now: Slot,
+    epoch: &Epoch,
+    origin: BlockOrigin,
+    maybe_ticket: Option<(TicketId, TicketBody)>,
+) -> Result, Error> {
+    // Check that the slot is not in the future, with some drift being allowed.
+    if claim.slot > slot_now + MAX_SLOT_DRIFT {
+        return Ok(CheckedHeader::Deferred(header, claim.slot))
+    }
+
+    let Some(authority_id) = epoch.authorities.get(claim.authority_idx as usize) else {
+        return Err(sassafras_err(Error::SlotAuthorNotFound))
+    };
+
+    // Check the header signature (aka the seal)
+
+    let seal_digest = header
+        .digest_mut()
+        .pop()
+        .ok_or_else(|| sassafras_err(Error::HeaderUnsealed(header.hash())))?;
+
+    let signature = AuthoritySignature::try_from(&seal_digest)
+        .map_err(|_| sassafras_err(Error::HeaderBadSeal(header.hash())))?;
+
+    let pre_hash = header.hash();
+    if !AuthorityPair::verify(&signature, &pre_hash, authority_id) {
+        return Err(sassafras_err(Error::BadSignature(pre_hash)))
+    }
+
+    // Optionally check ticket ownership
+
+    let mut sign_data = vrf::slot_claim_sign_data(&epoch.randomness, claim.slot, epoch.index);
+
+    match (&maybe_ticket, &claim.ticket_claim) {
+        (Some((_ticket_id, ticket_body)), ticket_claim) => {
+            debug!(target: LOG_TARGET, "checking primary");
+
+            sign_data.push_transcript_data(&ticket_body.encode());
+
+            // Revealed key check
+            let revealed_input =
+                vrf::revealed_key_input(&epoch.randomness, ticket_body.attempt_idx, epoch.index);
+            let revealed_output = claim
+                .vrf_signature
+                .outputs
+                .get(1)
+                .ok_or_else(|| sassafras_err(Error::MissingSignedVrfOutput))?;
+            let revealed_seed = vrf::make_revealed_key_seed(&revealed_input, &revealed_output);
+            let revealed_public = EphemeralPair::from_seed(&revealed_seed).public();
+            if revealed_public != ticket_body.revealed_public {
+                return Err(sassafras_err(Error::RevealPublicMismatch))
+            }
+            sign_data.push_vrf_input(revealed_input).expect("Can't fail; qed");
+
+            if let Some(ticket_claim) = ticket_claim {
+                // Optional check, increases some score...
+                let challenge = sign_data.challenge::<32>();
+                if !EphemeralPair::verify(
+                    &ticket_claim.erased_signature,
+                    &challenge,
+                    &ticket_body.erased_public,
+                ) {
+                    return Err(sassafras_err(Error::BadSignature(pre_hash)))
+                }
+            }
+        },
+        (None, None) => {
+            debug!(target: LOG_TARGET, "checking secondary");
+            let idx = authorship::secondary_authority_index(claim.slot, epoch);
+            if idx != claim.authority_idx {
+                error!(
+                    target: LOG_TARGET,
+                    "Bad secondary authority index (expected: {}, got {})",
+                    idx,
+                    claim.authority_idx
+                );
+                return Err(Error::SlotAuthorNotFound)
+            }
+        },
+        (None, Some(_)) =>
+            if origin != BlockOrigin::NetworkInitialSync {
+                warn!(target: LOG_TARGET, "Unexpected primary authoring mechanism");
+                return Err(Error::UnexpectedAuthoringMechanism)
+            },
+    }
+
+    // Check the per-slot vrf proof
+    if !authority_id.as_inner_ref().vrf_verify(&sign_data, &claim.vrf_signature) {
+        warn!(target: LOG_TARGET, ">>> VERIFICATION FAILED (pri = {})!!!", maybe_ticket.is_some());
+        return Err(sassafras_err(Error::VrfVerificationFailed))
+    }
+    warn!(target: LOG_TARGET, ">>> VERIFICATION OK (pri = {})!!!", maybe_ticket.is_some());
+
+    let info = VerifiedHeaderInfo { authority_id: authority_id.clone(), seal_digest };
+
+    Ok(CheckedHeader::Checked(header, info))
+}
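The seal handling in `check_header` follows a strict order that is easy to get wrong: pop the last digest first, then hash what remains, then verify. A schematic with stand-in types (closures play the role of the hashing and signature-verification primitives; nothing here is the crate's actual API):

struct Header {
    digest: Vec<Vec<u8>>,
}

fn check_seal(
    mut header: Header,
    hash: impl Fn(&Header) -> Vec<u8>,
    verify: impl Fn(&[u8], &[u8]) -> bool, // (seal, pre-hash) -> valid?
) -> Result<Header, &'static str> {
    // The seal must be the LAST digest item; an unsealed header is rejected.
    let seal = header.digest.pop().ok_or("header is unsealed")?;
    // The signed message is the hash of the header WITHOUT the seal, so the
    // signature cannot cover itself.
    let pre_hash = hash(&header);
    if !verify(&seal, &pre_hash) {
        return Err("bad seal signature");
    }
    Ok(header)
}

fn main() {
    let header = Header { digest: vec![b"slot-claim".to_vec(), b"seal".to_vec()] };
    // Dummy primitives that accept everything; real code uses the authority key.
    let checked = check_seal(header, |_| b"pre-hash".to_vec(), |_seal, _msg| true);
    assert!(checked.is_ok());
}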
+
+/// A verifier for Sassafras blocks.
+pub struct SassafrasVerifier {
+    client: Arc,
+    select_chain: SelectChain,
+    create_inherent_data_providers: CIDP,
+    epoch_changes: SharedEpochChanges,
+    genesis_config: Epoch,
+    telemetry: Option,
+}
+
+impl SassafrasVerifier {
+    /// Constructor.
+    pub fn new(
+        client: Arc,
+        select_chain: SelectChain,
+        create_inherent_data_providers: CIDP,
+        epoch_changes: SharedEpochChanges,
+        genesis_config: Epoch,
+        telemetry: Option,
+    ) -> Self {
+        SassafrasVerifier {
+            client,
+            select_chain,
+            create_inherent_data_providers,
+            epoch_changes,
+            genesis_config,
+            telemetry,
+        }
+    }
+}
+
+impl SassafrasVerifier
+where
+    Block: BlockT,
+    Client: AuxStore + HeaderBackend + HeaderMetadata + ProvideRuntimeApi,
+    Client::Api: BlockBuilderApi + SassafrasApi,
+    SelectChain: sp_consensus::SelectChain,
+    CIDP: CreateInherentDataProviders,
+{
+    async fn check_inherents(
+        &self,
+        block: Block,
+        at_hash: Block::Hash,
+        inherent_data: InherentData,
+        create_inherent_data_providers: CIDP::InherentDataProviders,
+    ) -> Result<(), Error> {
+        let inherent_res = self
+            .client
+            .runtime_api()
+            .check_inherents(at_hash, block, inherent_data)
+            .map_err(Error::RuntimeApi)?;
+
+        if !inherent_res.ok() {
+            for (i, e) in inherent_res.into_errors() {
+                match create_inherent_data_providers.try_handle_error(&i, &e).await {
+                    Some(res) => res.map_err(|e| Error::CheckInherents(e))?,
+                    None => return Err(Error::CheckInherentsUnhandled(i)),
+                }
+            }
+        }
+
+        Ok(())
+    }
+
+    async fn check_and_report_equivocation(
+        &self,
+        slot_now: Slot,
+        slot: Slot,
+        header: &Block::Header,
+        author: &AuthorityId,
+        origin: &BlockOrigin,
+    ) -> Result<(), Error> {
+        // Don't report any equivocations during initial sync as they are most likely stale.
+        if *origin == BlockOrigin::NetworkInitialSync {
+            return Ok(())
+        }
+
+        // Check if authorship of this header is an equivocation and return a proof if so.
+        let equivocation_proof = match sc_consensus_slots::check_equivocation(
+            &*self.client,
+            slot_now,
+            slot,
+            header,
+            author,
+        )
+        .map_err(Error::Client)?
+        {
+            Some(proof) => proof,
+            None => return Ok(()),
+        };
+
+        info!(
+            target: LOG_TARGET,
+            "🌳 Slot author {:?} is equivocating at slot {} with headers {:?} and {:?}",
+            author,
+            slot,
+            equivocation_proof.first_header.hash(),
+            equivocation_proof.second_header.hash(),
+        );
+
+        // Get the best block on which we will build and send the equivocation report.
+        let best_hash = self
+            .select_chain
+            .best_chain()
+            .await
+            .map(|h| h.hash())
+            .map_err(|e| Error::Client(e.into()))?;
+
+        // Generate a key ownership proof. We start by trying to generate the key ownership proof
+        // at the parent of the equivocating header, this will make sure that proof generation is
+        // successful since it happens during the on-going session (i.e. session keys are available
+        // in the state to be able to generate the proof). This might fail if the equivocation
+        // happens on the first block of the session, in which case its parent would be on the
+        // previous session. If generation on the parent header fails we try with the best block as
+        // well.
+        let generate_key_owner_proof = |at_hash: Block::Hash| {
+            self.client
+                .runtime_api()
+                .generate_key_ownership_proof(at_hash, equivocation_proof.offender.clone())
+                .map_err(Error::RuntimeApi)
+        };
+
+        let parent_hash = *header.parent_hash();
+        let key_owner_proof = match generate_key_owner_proof(parent_hash)? {
+            Some(proof) => proof,
+            None => match generate_key_owner_proof(best_hash)?
 {
+                Some(proof) => proof,
+                None => {
+                    debug!(target: LOG_TARGET, "Equivocation offender is not part of the authority set.");
+                    return Ok(())
+                },
+            },
+        };
+
+        // Submit the equivocation report at the best block.
+        self.client
+            .runtime_api()
+            .submit_report_equivocation_unsigned_extrinsic(
+                best_hash,
+                equivocation_proof,
+                key_owner_proof,
+            )
+            .map_err(Error::RuntimeApi)?;
+
+        info!(target: LOG_TARGET, "Submitted equivocation report for author {:?}", author);
+
+        Ok(())
+    }
+}
+
+#[async_trait::async_trait]
+impl Verifier
+    for SassafrasVerifier
+where
+    Block: BlockT,
+    Client: HeaderMetadata
+        + HeaderBackend
+        + ProvideRuntimeApi
+        + Send
+        + Sync
+        + AuxStore,
+    Client::Api: BlockBuilderApi + SassafrasApi,
+    SelectChain: sp_consensus::SelectChain,
+    CIDP: CreateInherentDataProviders + Send + Sync,
+    CIDP::InherentDataProviders: InherentDataProviderExt + Send + Sync,
+{
+    async fn verify(
+        &mut self,
+        mut block: BlockImportParams,
+    ) -> Result, String> {
+        trace!(
+            target: LOG_TARGET,
+            "🌳 Verifying origin: {:?} header: {:?} justification(s): {:?} body: {:?}",
+            block.origin,
+            block.header,
+            block.justifications,
+            block.body,
+        );
+
+        if block.with_state() {
+            // When importing whole state we don't calculate the epoch descriptor, but rather
+            // read it from the state after import. We also skip all verifications
+            // because there's no parent state and we trust the sync module to verify
+            // that the state is correct and finalized.
+            // Just insert a tag to notify the `BlockImport` implementation that this
+            // is indeed a Sassafras block.
+            block.insert_intermediate(INTERMEDIATE_KEY, ());
+            return Ok(block)
+        }
+
+        let hash = block.header.hash();
+        let parent_hash = *block.header.parent_hash();
+
+        let create_inherent_data_providers = self
+            .create_inherent_data_providers
+            .create_inherent_data_providers(parent_hash, ())
+            .await
+            .map_err(|e| Error::::Client(sp_consensus::Error::from(e).into()))?;
+
+        let slot_now = create_inherent_data_providers.slot();
+
+        let parent_header_metadata = self
+            .client
+            .header_metadata(parent_hash)
+            .map_err(Error::::FetchParentHeader)?;
+
+        let claim = find_slot_claim::(&block.header)?;
+
+        let (checked_header, epoch_descriptor) = {
+            let epoch_changes = self.epoch_changes.shared_data();
+            let epoch_descriptor = epoch_changes
+                .epoch_descriptor_for_child_of(
+                    descendent_query(&*self.client),
+                    &parent_hash,
+                    parent_header_metadata.number,
+                    claim.slot,
+                )
+                .map_err(|e| Error::::ForkTree(Box::new(e)))?
+                .ok_or(Error::::FetchEpoch(parent_hash))?;
+            let viable_epoch = epoch_changes
+                .viable_epoch(&epoch_descriptor, |slot| Epoch::genesis(&self.genesis_config, slot))
+                .ok_or(Error::::FetchEpoch(parent_hash))?;
+
+            let maybe_ticket = self
+                .client
+                .runtime_api()
+                .slot_ticket(parent_hash, claim.slot)
+                .ok()
+                .unwrap_or_else(|| None);
+
+            let checked_header = check_header::(
+                block.header.clone(),
+                &claim,
+                slot_now,
+                viable_epoch.as_ref(),
+                block.origin,
+                maybe_ticket,
+            )?;
+
+            (checked_header, epoch_descriptor)
+        };
+
+        match checked_header {
+            CheckedHeader::Checked(pre_header, verified_info) => {
+                // The header is valid, but let's check whether something else was already
+                // proposed at the same slot by the given author. If so, we will
+                // report the equivocation to the runtime.
+ if let Err(err) = self + .check_and_report_equivocation( + slot_now, + claim.slot, + &block.header, + &verified_info.authority_id, + &block.origin, + ) + .await + { + warn!( + target: LOG_TARGET, + "Error checking/reporting equivocation: {}", err + ); + } + + // If the body is passed through, we need to use the runtime to check that the + // internally-set timestamp in the inherents actually matches the slot set in the + // seal. + if let Some(inner_body) = block.body { + let new_block = Block::new(pre_header.clone(), inner_body); + + if !block.state_action.skip_execution_checks() { + // TODO @davxy: some comments? + let mut inherent_data = create_inherent_data_providers + .create_inherent_data() + .await + .map_err(Error::::CreateInherents)?; + inherent_data.sassafras_replace_inherent_data(&claim.slot); + self.check_inherents( + new_block.clone(), + parent_hash, + inherent_data, + create_inherent_data_providers, + ) + .await?; + } + + let (_, inner_body) = new_block.deconstruct(); + block.body = Some(inner_body); + } + + trace!(target: LOG_TARGET, "Checked {:?}; importing.", pre_header); + telemetry!( + self.telemetry; + CONSENSUS_TRACE; + "sassafras.checked_and_importing"; + "pre_header" => ?pre_header, + ); + + block.header = pre_header; + block.post_hash = Some(hash); + block.post_digests.push(verified_info.seal_digest); + block.insert_intermediate( + INTERMEDIATE_KEY, + SassafrasIntermediate:: { epoch_descriptor }, + ); + + Ok(block) + }, + CheckedHeader::Deferred(a, b) => { + debug!(target: LOG_TARGET, "Checking {:?} failed; {:?}, {:?}.", hash, a, b); + telemetry!( + self.telemetry; + CONSENSUS_DEBUG; + "sassafras.header_too_far_in_future"; + "hash" => ?hash, "a" => ?a, "b" => ?b + ); + Err(Error::::TooFarInFuture(hash).into()) + }, + } + } +} diff --git a/substrate/client/consensus/slots/src/lib.rs b/substrate/client/consensus/slots/src/lib.rs index 7cdf90877dff..d5d4449fa998 100644 --- a/substrate/client/consensus/slots/src/lib.rs +++ b/substrate/client/consensus/slots/src/lib.rs @@ -209,6 +209,8 @@ pub trait SimpleSlotWorker { ) .map_err(|e| sp_consensus::Error::ClientImport(e.to_string())); + debug!(target: log_target, ">>>>>>> PROPOSAL TIME {}", proposing_remaining_duration.as_millis()); + let proposal = match futures::future::select( proposing, Delay::new(proposing_remaining_duration), diff --git a/substrate/frame/sassafras/Cargo.toml b/substrate/frame/sassafras/Cargo.toml index 7eb2bda96ffc..ba5c0696a3a9 100644 --- a/substrate/frame/sassafras/Cargo.toml +++ b/substrate/frame/sassafras/Cargo.toml @@ -1,11 +1,11 @@ [package] name = "pallet-sassafras" version = "0.3.5-dev" -authors = ["Parity Technologies "] +authors.workspace = true edition.workspace = true license = "Apache-2.0" homepage.workspace = true -repository = "/~https://github.com/paritytech/substrate/" +repository.workspace = true description = "Consensus extension module for Sassafras consensus." 
readme = "README.md" publish = false @@ -23,13 +23,16 @@ frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } log = { workspace = true } +pallet-session = { workspace = true, optional = true } + sp-consensus-sassafras = { features = ["serde"], workspace = true } +sp-core = { workspace = true } sp-io = { workspace = true } sp-runtime = { workspace = true } [dev-dependencies] array-bytes = { workspace = true, default-features = true } -sp-core = { workspace = true, default-features = true } +env_logger = { workspace = true } sp-crypto-hashing = { workspace = true, default-features = true } [features] @@ -40,8 +43,10 @@ std = [ "frame-support/std", "frame-system/std", "log/std", + "pallet-session?/std", "scale-info/std", "sp-consensus-sassafras/std", + "sp-core/std", "sp-io/std", "sp-runtime/std", ] @@ -54,8 +59,10 @@ runtime-benchmarks = [ try-runtime = [ "frame-support/try-runtime", "frame-system/try-runtime", + "pallet-session?/try-runtime", "sp-runtime/try-runtime", ] # Construct dummy ring context on genesis. # Mostly used for testing and development. construct-dummy-ring-context = [] +session-pallet-support = ["pallet-session"] diff --git a/substrate/frame/sassafras/src/benchmarking.rs b/substrate/frame/sassafras/src/benchmarking.rs index 2b2467c6f84d..e247e9612971 100644 --- a/substrate/frame/sassafras/src/benchmarking.rs +++ b/substrate/frame/sassafras/src/benchmarking.rs @@ -18,7 +18,7 @@ //! Benchmarks for the Sassafras pallet. use crate::*; -use sp_consensus_sassafras::{vrf::VrfSignature, EphemeralPublic, EpochConfiguration}; +use sp_consensus_sassafras::vrf::VrfSignature; use frame_benchmarking::v2::*; use frame_support::traits::Hooks; @@ -26,12 +26,13 @@ use frame_system::RawOrigin; const LOG_TARGET: &str = "sassafras::benchmark"; -const TICKETS_DATA: &[u8] = include_bytes!("data/25_tickets_100_auths.bin"); +// Pre-constructed tickets generated via the `generate_test_teckets` function +const TICKETS_DATA: &[u8] = include_bytes!("data/tickets.bin"); -fn make_dummy_vrf_signature() -> VrfSignature { +fn dummy_vrf_signature() -> VrfSignature { // This leverages our knowledge about serialized vrf signature structure. // Mostly to avoid to import all the bandersnatch primitive just for this test. - let buf = [ + const RAW_VRF_SIGNATURE: [u8; 99] = [ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -40,16 +41,13 @@ fn make_dummy_vrf_signature() -> VrfSignature { 0x18, 0xca, 0x07, 0x13, 0xc7, 0x4b, 0xa3, 0x9a, 0x97, 0xd3, 0x76, 0x8f, 0x0c, 0xbf, 0x2e, 0xd4, 0xf9, 0x3a, 0xae, 0xc1, 0x96, 0x2a, 0x64, 0x80, ]; - VrfSignature::decode(&mut &buf[..]).unwrap() + VrfSignature::decode(&mut &RAW_VRF_SIGNATURE[..]).unwrap() } #[benchmarks] mod benchmarks { use super::*; - // For first block (#1) we do some extra operation. - // But is a one shot operation, so we don't account for it here. 
- // We use 0, as it will be the path used by all the blocks with n != 1 #[benchmark] fn on_initialize() { let block_num = BlockNumberFor::<T>::from(0u32); @@ -57,14 +55,10 @@ mod benchmarks { let slot_claim = SlotClaim { authority_idx: 0, slot: Default::default(), - vrf_signature: make_dummy_vrf_signature(), - ticket_claim: None, + vrf_signature: dummy_vrf_signature(), }; frame_system::Pallet::<T>::deposit_log((&slot_claim).into()); - // We currently don't account for the potential weight added by the `on_finalize` - // incremental sorting of the tickets. - #[block] { // According to `Hooks` trait docs, `on_finalize` `Weight` should be bundled @@ -77,78 +71,44 @@ mod benchmarks { // Weight for the default internal epoch change trigger. // // Parameters: - // - `x`: number of authorities (1:100). - // - `y`: epoch length in slots (1000:5000) + // - `x`: number of authorities [1:100]. + // - `y`: number of tickets [100:1000]; // // This accounts for the worst case which includes: - // - load the full ring context. - // - recompute the ring verifier. - // - sorting the epoch tickets in one shot - // (here we account for the very unlucky scenario where we haven't done any sort work yet) - // - pending epoch change config. - // - // For this bench we assume a redundancy factor of 2 (suggested value to be used in prod). + // - recomputing the ring verifier key from a new authorities set. + // - picking all the tickets from the accumulator in one shot. #[benchmark] - fn enact_epoch_change(x: Linear<1, 100>, y: Linear<1000, 5000>) { + fn enact_epoch_change(x: Linear<1, 100>, y: Linear<100, 1000>) { let authorities_count = x as usize; - let epoch_length = y as u32; - let redundancy_factor = 2; + let accumulated_tickets = y as u32; - let unsorted_tickets_count = epoch_length * redundancy_factor; + let config = Pallet::<T>::protocol_config(); - let mut meta = TicketsMetadata { unsorted_tickets_count, tickets_count: [0, 0] }; - let config = EpochConfiguration { redundancy_factor, attempts_number: 32 }; + // Makes the epoch change legit + let post_init_cache = EphemeralData { + prev_slot: Slot::from(config.epoch_duration as u64 - 1), + block_randomness: Randomness::default(), + }; + TemporaryData::<T>::put(post_init_cache); + CurrentSlot::<T>::set(Slot::from(config.epoch_duration as u64)); - // Triggers ring verifier computation for `x` authorities - let mut raw_data = TICKETS_DATA; - let (authorities, _): (Vec<AuthorityId>, Vec<TicketEnvelope>) = - Decode::decode(&mut raw_data).expect("Failed to decode tickets buffer"); - let next_authorities: Vec<_> = authorities[..authorities_count].to_vec(); + // Force ring verifier key re-computation + let next_authorities: Vec<_> = + Authorities::<T>::get().into_iter().cycle().take(authorities_count).collect(); let next_authorities = WeakBoundedVec::force_from(next_authorities, None); NextAuthorities::<T>::set(next_authorities); - // Triggers JIT sorting tickets - (0..meta.unsorted_tickets_count) - .collect::<Vec<_>>() - .chunks(SEGMENT_MAX_SIZE as usize) - .enumerate() - .for_each(|(segment_id, chunk)| { - let segment = chunk - .iter() - .map(|i| { - let id_bytes = crate::hashing::blake2_128(&i.to_le_bytes()); - TicketId::from_le_bytes(id_bytes) - }) - .collect::<Vec<_>>(); - UnsortedSegments::<T>::insert( - segment_id as u32, - BoundedVec::truncate_from(segment), - ); - }); - - // Triggers some code related to config change (dummy values) - NextEpochConfig::<T>::set(Some(config)); - PendingEpochConfigChange::<T>::set(Some(config)); - - // Triggers the cleanup of the "just elapsed" epoch tickets (i.e.
the current one) - let epoch_tag = EpochIndex::::get() & 1; - meta.tickets_count[epoch_tag as usize] = epoch_length; - (0..epoch_length).for_each(|i| { - let id_bytes = crate::hashing::blake2_128(&i.to_le_bytes()); - let id = TicketId::from_le_bytes(id_bytes); - TicketsIds::::insert((epoch_tag as u8, i), id); - let body = TicketBody { - attempt_idx: i, - erased_public: EphemeralPublic::from([i as u8; 32]), - revealed_public: EphemeralPublic::from([i as u8; 32]), - }; - TicketsData::::set(id, Some(body)); + // Add tickets to the accumulator + (0..accumulated_tickets).for_each(|i| { + let mut id = TicketId([0xff; 32]); + id.0[..4].copy_from_slice(&i.to_be_bytes()[..]); + let body = TicketBody { id, attempt: 0, extra: Default::default() }; + TicketsAccumulator::::insert(TicketKey::from(id), &body); }); - TicketsMeta::::set(meta); - #[block] { + // Also account for the call typically done in case of epoch change Pallet::::should_end_epoch(BlockNumberFor::::from(3u32)); let next_authorities = Pallet::::next_authorities(); // Using a different set of authorities triggers the recomputation of ring verifier. @@ -157,55 +117,37 @@ mod benchmarks { } #[benchmark] - fn submit_tickets(x: Linear<1, 25>) { + fn submit_tickets(x: Linear<1, 16>) { let tickets_count = x as usize; let mut raw_data = TICKETS_DATA; - let (authorities, tickets): (Vec, Vec) = - Decode::decode(&mut raw_data).expect("Failed to decode tickets buffer"); - - log::debug!(target: LOG_TARGET, "PreBuiltTickets: {} tickets, {} authorities", tickets.len(), authorities.len()); - - // Set `NextRandomness` to the same value used for pre-built tickets - // (see `make_tickets_data` test). - NextRandomness::::set([0; 32]); - + let (randomness, authorities, tickets): ( + Randomness, + Vec, + Vec, + ) = Decode::decode(&mut raw_data).expect("Failed to decode tickets buffer"); + assert!(tickets.len() >= tickets_count); + + // Use the same values used for the pre-built tickets Pallet::::update_ring_verifier(&authorities); - - // Set next epoch config to accept all the tickets - let next_config = EpochConfiguration { attempts_number: 1, redundancy_factor: u32::MAX }; - NextEpochConfig::::set(Some(next_config)); - - // Use the authorities in the pre-build tickets - let authorities = WeakBoundedVec::force_from(authorities, None); - NextAuthorities::::set(authorities); + NextAuthorities::::set(WeakBoundedVec::force_from(authorities, None)); + let mut randomness_buf = RandomnessBuf::::get(); + randomness_buf[2] = randomness; + RandomnessBuf::::set(randomness_buf); let tickets = tickets[..tickets_count].to_vec(); let tickets = BoundedVec::truncate_from(tickets); - log::debug!(target: LOG_TARGET, "Submitting {} tickets", tickets_count); - #[extrinsic_call] submit_tickets(RawOrigin::None, tickets); } - #[benchmark] - fn plan_config_change() { - let config = EpochConfiguration { redundancy_factor: 1, attempts_number: 10 }; - - #[extrinsic_call] - plan_config_change(RawOrigin::Root, config); - } - // Construction of ring verifier #[benchmark] fn update_ring_verifier(x: Linear<1, 100>) { let authorities_count = x as usize; - - let mut raw_data = TICKETS_DATA; - let (authorities, _): (Vec, Vec) = - Decode::decode(&mut raw_data).expect("Failed to decode tickets buffer"); - let authorities: Vec<_> = authorities[..authorities_count].to_vec(); + let authorities: Vec<_> = + Authorities::::get().into_iter().cycle().take(authorities_count).collect(); #[block] { @@ -221,52 +163,7 @@ mod benchmarks { fn load_ring_context() { #[block] { - let _ring_ctx = 
RingContext::::get().unwrap(); - } - } - - // Tickets segments sorting function benchmark. - #[benchmark] - fn sort_segments(x: Linear<1, 100>) { - let segments_count = x as u32; - let tickets_count = segments_count * SEGMENT_MAX_SIZE; - - // Construct a bunch of dummy tickets - let tickets: Vec<_> = (0..tickets_count) - .map(|i| { - let body = TicketBody { - attempt_idx: i, - erased_public: EphemeralPublic::from([i as u8; 32]), - revealed_public: EphemeralPublic::from([i as u8; 32]), - }; - let id_bytes = crate::hashing::blake2_128(&i.to_le_bytes()); - let id = TicketId::from_le_bytes(id_bytes); - (id, body) - }) - .collect(); - - for (chunk_id, chunk) in tickets.chunks(SEGMENT_MAX_SIZE as usize).enumerate() { - let segment: Vec = chunk - .iter() - .map(|(id, body)| { - TicketsData::::set(id, Some(body.clone())); - *id - }) - .collect(); - let segment = BoundedVec::truncate_from(segment); - UnsortedSegments::::insert(chunk_id as u32, segment); - } - - // Update metadata - let mut meta = TicketsMeta::::get(); - meta.unsorted_tickets_count = tickets_count; - TicketsMeta::::set(meta); - - log::debug!(target: LOG_TARGET, "Before sort: {:?}", meta); - #[block] - { - Pallet::::sort_segments(u32::MAX, 0, &mut meta); + let _ = RingContext::::get().unwrap(); } - log::debug!(target: LOG_TARGET, "After sort: {:?}", meta); } } diff --git a/substrate/frame/sassafras/src/data/25_tickets_100_auths.bin b/substrate/frame/sassafras/src/data/25_tickets_100_auths.bin deleted file mode 100644 index 6e81f216455a..000000000000 Binary files a/substrate/frame/sassafras/src/data/25_tickets_100_auths.bin and /dev/null differ diff --git a/substrate/frame/sassafras/src/data/benchmark-results.md b/substrate/frame/sassafras/src/data/benchmark-results.md deleted file mode 100644 index 8682f96cbe5a..000000000000 --- a/substrate/frame/sassafras/src/data/benchmark-results.md +++ /dev/null @@ -1,99 +0,0 @@ -# Benchmarks High Level Results - -- **Ring size**: the actual number of validators for an epoch -- **Domain size**: a value which bounds the max size of the ring (max_ring_size = domain_size - 256) - -## Verify Submitted Tickets (extrinsic) - -`x` = Number of tickets - -### Domain=1024, Uncompressed (~ 13 ms + 11·x ms) - - Time ~= 13400 - + x 11390 - µs - -### Domain=1024, Compressed (~ 13 ms + 11·x ms) - - Time ~= 13120 - + x 11370 - µs - -### Domain=2048, Uncompressed (~ 26 ms + 11·x ms) - - Time ~= 26210 - + x 11440 - µs - -### Domain=2048, Compressed (~ 26 ms + 11·x ms) - - Time ~= 26250 - + x 11460 - µs - -### Conclusions - -- Verification doesn't depend on ring size as verification key is already constructed. -- The call is fast as far as the max number of tickets which can be submitted in one shot - is appropriately bounded. -- Currently, the bound is set equal epoch length, which iirc for Polkadot is 3600. - In this case if all the tickets are submitted in one shot timing is expected to be - ~39 seconds, which is not acceptable. TODO: find a sensible bound - ---- - -## Recompute Ring Verifier Key (on epoch change) - -`x` = Ring size - -### Domain=1024, Uncompressed (~ 50 ms) - - Time ~= 54070 - + x 98.53 - µs - -### Domain=1024, Compressed (~ 700 ms) - - Time ~= 733700 - + x 90.49 - µs - -### Domain=2048, Uncompressed (~ 100 ms) - - Time ~= 107700 - + x 108.5 - µs - -### Domain=2048, Compressed (~ 1.5 s) - - Time ~= 1462400 - + x 65.14 - µs - -### Conclusions - -- Here we load the full ring context data to recompute verification key for the epoch -- Ring size influence is marginal (e.g. 
for 1500 validators → ~98 ms to be added to the base time) -- This step is performed at most once per epoch (if validator set changes). -- Domain size for ring context influence the PoV size (see next paragraph) -- Decompression heavily influence timings (1.5sec vs 100ms for same domain size) - ---- - -## Ring Context Data Size - -### Domain=1024, Uncompressed - - 295412 bytes = ~ 300 KiB - -### Domain=1024, Compressed - - 147716 bytes = ~ 150 KiB - -### Domain=2048, Uncompressed - - 590324 bytes = ~ 590 KiB - -### Domain=2048, Compressed - - 295172 bytes = ~ 300 KiB diff --git a/substrate/frame/sassafras/src/data/tickets-sort.md b/substrate/frame/sassafras/src/data/tickets-sort.md deleted file mode 100644 index 64fc45e4fb00..000000000000 --- a/substrate/frame/sassafras/src/data/tickets-sort.md +++ /dev/null @@ -1,274 +0,0 @@ -# Segments Incremental Sorting Strategy Empirical Results - -Parameters: -- 128 segments -- segment max length 128 -- 32767 random tickets ids -- epoch length 3600 (== max tickets to keep) - -The table shows the comparison between the segments left in the unsorted segments buffer -and the number of new tickets which are added from the last segment to the sorted tickets -buffer (i.e. how many tickets we retain from the last processed segment) - -| Segments Left | Tickets Pushed | -|-----|-----| -| 255 | 128 | -| 254 | 128 | -| 253 | 128 | -| 252 | 128 | -| 251 | 128 | -| 250 | 128 | -| 249 | 128 | -| 248 | 128 | -| 247 | 128 | -| 246 | 128 | -| 245 | 128 | -| 244 | 128 | -| 243 | 128 | -| 242 | 128 | -| 241 | 128 | -| 240 | 128 | -| 239 | 128 | -| 238 | 128 | -| 237 | 128 | -| 236 | 128 | -| 235 | 128 | -| 234 | 128 | -| 233 | 128 | -| 232 | 128 | -| 231 | 128 | -| 230 | 128 | -| 229 | 128 | -| 228 | 128 | -| 227 | 128 | -| 226 | 126 | -| 225 | 117 | -| 224 | 120 | -| 223 | 110 | -| 222 | 110 | -| 221 | 102 | -| 220 | 107 | -| 219 | 96 | -| 218 | 105 | -| 217 | 92 | -| 216 | 91 | -| 215 | 85 | -| 214 | 84 | -| 213 | 88 | -| 212 | 77 | -| 211 | 86 | -| 210 | 73 | -| 209 | 73 | -| 208 | 81 | -| 207 | 83 | -| 206 | 70 | -| 205 | 84 | -| 204 | 71 | -| 203 | 63 | -| 202 | 60 | -| 201 | 53 | -| 200 | 73 | -| 199 | 55 | -| 198 | 65 | -| 197 | 62 | -| 196 | 55 | -| 195 | 63 | -| 194 | 61 | -| 193 | 48 | -| 192 | 67 | -| 191 | 61 | -| 190 | 55 | -| 189 | 49 | -| 188 | 60 | -| 187 | 49 | -| 186 | 51 | -| 185 | 53 | -| 184 | 47 | -| 183 | 51 | -| 182 | 51 | -| 181 | 53 | -| 180 | 42 | -| 179 | 43 | -| 178 | 48 | -| 177 | 46 | -| 176 | 39 | -| 175 | 54 | -| 174 | 39 | -| 173 | 44 | -| 172 | 51 | -| 171 | 49 | -| 170 | 48 | -| 169 | 48 | -| 168 | 41 | -| 167 | 39 | -| 166 | 41 | -| 165 | 40 | -| 164 | 43 | -| 163 | 53 | -| 162 | 51 | -| 161 | 36 | -| 160 | 45 | -| 159 | 40 | -| 158 | 29 | -| 157 | 37 | -| 156 | 31 | -| 155 | 38 | -| 154 | 31 | -| 153 | 38 | -| 152 | 39 | -| 151 | 30 | -| 150 | 37 | -| 149 | 42 | -| 148 | 35 | -| 147 | 33 | -| 146 | 35 | -| 145 | 37 | -| 144 | 38 | -| 143 | 31 | -| 142 | 38 | -| 141 | 38 | -| 140 | 27 | -| 139 | 31 | -| 138 | 25 | -| 137 | 31 | -| 136 | 26 | -| 135 | 30 | -| 134 | 31 | -| 133 | 37 | -| 132 | 29 | -| 131 | 24 | -| 130 | 31 | -| 129 | 34 | -| 128 | 31 | -| 127 | 28 | -| 126 | 28 | -| 125 | 19 | -| 124 | 27 | -| 123 | 29 | -| 122 | 36 | -| 121 | 32 | -| 120 | 29 | -| 119 | 28 | -| 118 | 33 | -| 117 | 18 | -| 116 | 28 | -| 115 | 27 | -| 114 | 28 | -| 113 | 21 | -| 112 | 23 | -| 111 | 19 | -| 110 | 21 | -| 109 | 20 | -| 108 | 26 | -| 107 | 23 | -| 106 | 30 | -| 105 | 31 | -| 104 | 19 | -| 103 | 25 | -| 102 | 23 | -| 101 | 29 | -| 100 | 
18 | -| 99 | 19 | -| 98 | 20 | -| 97 | 21 | -| 96 | 23 | -| 95 | 20 | -| 94 | 27 | -| 93 | 20 | -| 92 | 22 | -| 91 | 23 | -| 90 | 23 | -| 89 | 20 | -| 88 | 15 | -| 87 | 17 | -| 86 | 28 | -| 85 | 25 | -| 84 | 10 | -| 83 | 20 | -| 82 | 23 | -| 81 | 28 | -| 80 | 17 | -| 79 | 23 | -| 78 | 24 | -| 77 | 22 | -| 76 | 18 | -| 75 | 25 | -| 74 | 31 | -| 73 | 27 | -| 72 | 19 | -| 71 | 13 | -| 70 | 17 | -| 69 | 24 | -| 68 | 20 | -| 67 | 12 | -| 66 | 17 | -| 65 | 16 | -| 64 | 26 | -| 63 | 24 | -| 62 | 12 | -| 61 | 19 | -| 60 | 18 | -| 59 | 20 | -| 58 | 18 | -| 57 | 12 | -| 56 | 15 | -| 55 | 17 | -| 54 | 14 | -| 53 | 25 | -| 52 | 22 | -| 51 | 15 | -| 50 | 17 | -| 49 | 15 | -| 48 | 17 | -| 47 | 18 | -| 46 | 17 | -| 45 | 23 | -| 44 | 17 | -| 43 | 13 | -| 42 | 15 | -| 41 | 18 | -| 40 | 11 | -| 39 | 19 | -| 38 | 18 | -| 37 | 12 | -| 36 | 19 | -| 35 | 18 | -| 34 | 15 | -| 33 | 12 | -| 32 | 25 | -| 31 | 20 | -| 30 | 24 | -| 29 | 20 | -| 28 | 10 | -| 27 | 15 | -| 26 | 16 | -| 25 | 15 | -| 24 | 15 | -| 23 | 13 | -| 22 | 12 | -| 21 | 14 | -| 20 | 19 | -| 19 | 17 | -| 18 | 17 | -| 17 | 18 | -| 16 | 15 | -| 15 | 13 | -| 14 | 11 | -| 13 | 16 | -| 12 | 13 | -| 11 | 18 | -| 10 | 19 | -| 9 | 10 | -| 8 | 7 | -| 7 | 15 | -| 6 | 12 | -| 5 | 12 | -| 4 | 17 | -| 3 | 14 | -| 2 | 17 | -| 1 | 9 | -| 0 | 13 | - -# Graph of the same data - -![graph](tickets-sort.png) diff --git a/substrate/frame/sassafras/src/data/tickets-sort.png b/substrate/frame/sassafras/src/data/tickets-sort.png deleted file mode 100644 index b34ce3f37ba9..000000000000 Binary files a/substrate/frame/sassafras/src/data/tickets-sort.png and /dev/null differ diff --git a/substrate/frame/sassafras/src/data/tickets.bin b/substrate/frame/sassafras/src/data/tickets.bin new file mode 100644 index 000000000000..af6fd097b1b9 Binary files /dev/null and b/substrate/frame/sassafras/src/data/tickets.bin differ diff --git a/substrate/frame/sassafras/src/lib.rs b/substrate/frame/sassafras/src/lib.rs index 285758afbe6d..a781885b5cf8 100644 --- a/substrate/frame/sassafras/src/lib.rs +++ b/substrate/frame/sassafras/src/lib.rs @@ -21,28 +21,28 @@ //! is a constant-time block production protocol that aims to ensure that there is //! exactly one block produced with constant time intervals rather than multiple or none. //! -//! We run a lottery to distribute block production slots in an epoch and to fix the -//! order validators produce blocks in, by the beginning of an epoch. +//! We run a lottery to distribute block production slots for a *target* epoch and to fix +//! the order validators produce blocks in. //! -//! Each validator signs the same VRF input and publishes the output on-chain. This -//! value is their lottery ticket that can be validated against their public key. +//! Each validator signs some unbiasable VRF input and publishes the VRF output on-chain. +//! This value is their lottery ticket that can be eventually validated against their +//! public key. //! -//! We want to keep lottery winners secret, i.e. do not publish their public keys. -//! At the beginning of the epoch all the validators tickets are published but not -//! their public keys. +//! We want to keep lottery winners secret, i.e. do not disclose their public keys. +//! At the beginning of the *target* epoch all the validators tickets are published but +//! not the corresponding author public keys. //! -//! A valid tickets is validated when an honest validator reclaims it on block -//! production. +//! The association is revealed by the ticket's owner during block production when he will +//! 
claim his ticket, and thus the associated slot, by showing a proof which ships with the +//! produced block. //! -//! To prevent submission of fake tickets, resulting in empty slots, the validator -//! when submitting the ticket accompanies it with a SNARK of the statement: "Here's -//! my VRF output that has been generated using the given VRF input and my secret -//! key. I'm not telling you my keys, but my public key is among those of the -//! nominated validators", that is validated before the lottery. -//! -//! To anonymously publish the ticket to the chain a validator sends their tickets -//! to a random validator who later puts it on-chain as a transaction. +//! To prevent submission of invalid tickets, resulting in empty slots, the validator +//! when submitting a ticket accompanies it with a zk-SNARK of the statement: +//! "Here's my VRF output that has been generated using the given VRF input and my secret +//! key. I'm not telling you who I am, but my public key is among those of the nominated +//! validators for the target epoch". +#![allow(unused)] #![deny(warnings)] #![warn(unused_must_use, unsafe_code, unused_variables, unused_imports, missing_docs)] #![cfg_attr(not(feature = "std"), no_std)] @@ -55,27 +55,24 @@ use scale_info::TypeInfo; use alloc::vec::Vec; use frame_support::{ - dispatch::{DispatchResultWithPostInfo, Pays}, - traits::{Defensive, Get}, - weights::Weight, - BoundedVec, WeakBoundedVec, -}; -use frame_system::{ - offchain::{SendTransactionTypes, SubmitTransaction}, - pallet_prelude::BlockNumberFor, + dispatch::DispatchResult, traits::Get, weights::Weight, BoundedVec, WeakBoundedVec, }; +use frame_system::pallet_prelude::BlockNumberFor; use sp_consensus_sassafras::{ digests::{ConsensusLog, NextEpochDescriptor, SlotClaim}, - vrf, AuthorityId, Epoch, EpochConfiguration, Randomness, Slot, TicketBody, TicketEnvelope, - TicketId, RANDOMNESS_LENGTH, SASSAFRAS_ENGINE_ID, + vrf, AuthorityId, Configuration, Epoch, EquivocationProof, InherentError, InherentType, + Randomness, Slot, TicketBody, TicketEnvelope, TicketId, INHERENT_IDENTIFIER, RANDOMNESS_LENGTH, + SASSAFRAS_ENGINE_ID, }; use sp_io::hashing; use sp_runtime::{ generic::DigestItem, traits::{One, Zero}, - BoundToRuntimeAppPublic, + BoundToRuntimeAppPublic, Percent, }; +pub use pallet::*; + #[cfg(feature = "runtime-benchmarks")] mod benchmarking; #[cfg(all(feature = "std", test))] @@ -83,40 +80,83 @@ mod mock; #[cfg(all(feature = "std", test))] mod tests; +// To manage epoch changes via session pallet instead of the built-in method +// (`EpochChangeInternalTrigger`). +#[cfg(feature = "session-pallet-support")] +pub mod session; + pub mod weights; pub use weights::WeightInfo; -pub use pallet::*; - const LOG_TARGET: &str = "sassafras::runtime"; // Contextual string used by the VRF to generate per-block randomness. const RANDOMNESS_VRF_CONTEXT: &[u8] = b"SassafrasOnChainRandomness"; -// Max length for segments holding unsorted tickets. -const SEGMENT_MAX_SIZE: u32 = 128; +/// Randomness buffer. +pub type RandomnessBuffer = [Randomness; 4]; -/// Authorities bounded vector convenience type. +/// Number of tickets available for current and next epoch. +/// +/// These tickets are held by the [`Tickets`] storage map. +/// +/// Current counter index is computed as current epoch index modulo 2 +/// Next counter index is computed as the other entry. +pub type TicketsCounter = [u32; 2]; + +/// Ephemeral data constructed by `on_initialize` and destroyed by `on_finalize`. 
+/// +/// Contains some temporary data that may be useful later during code execution. +#[derive(Clone, PartialEq, Eq, Encode, Decode, MaxEncodedLen, TypeInfo)] +pub struct EphemeralData { + /// Previous block slot. + prev_slot: Slot, + /// Per block randomness to be deposited after block execution (on finalization). + block_randomness: Randomness, +} + +/// Key used for the tickets accumulator map. +/// +/// Ticket keys are constructed by taking the bitwise negation of the ticket identifier. +/// As the tickets accumulator sorts entries according to the key values from smaller +/// to larger, we end up with a sequence of tickets identifiers sorted from larger to +/// smaller. +/// +/// This strategy comes handy when we quickly need to check if a new ticket chunk has been +/// completely absorbed by the accumulator, when this is already full and without loading +/// the whole sequence in memory. +#[derive( + Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Encode, Decode, MaxEncodedLen, TypeInfo, +)] +pub struct TicketKey([u8; 32]); + +impl From for TicketKey { + fn from(mut value: TicketId) -> Self { + TicketKey(value.0.map(|b| !b)) + } +} + +/// Authorities sequence. pub type AuthoritiesVec = WeakBoundedVec::MaxAuthorities>; -/// Epoch length defined by the configuration. -pub type EpochLengthFor = ::EpochLength; +/// Tickets sequence. +pub type TicketsVec = BoundedVec::TicketsChunkLength>; -/// Tickets metadata. -#[derive(Debug, Default, PartialEq, Encode, Decode, TypeInfo, MaxEncodedLen, Clone, Copy)] -pub struct TicketsMetadata { - /// Number of outstanding next epoch tickets requiring to be sorted. - /// - /// These tickets are held by the [`UnsortedSegments`] storage map in segments - /// containing at most `SEGMENT_MAX_SIZE` items. - pub unsorted_tickets_count: u32, +trait EpochTag { + fn tag(&self) -> u8; + fn next_tag(&self) -> u8; +} - /// Number of tickets available for current and next epoch. - /// - /// These tickets are held by the [`TicketsIds`] storage map. - /// - /// The array entry to be used for the current epoch is computed as epoch index modulo 2. - pub tickets_count: [u32; 2], +impl EpochTag for u64 { + #[inline(always)] + fn tag(&self) -> u8 { + (self % 2) as u8 + } + + #[inline(always)] + fn next_tag(&self) -> u8 { + self.tag() ^ 1 + } } #[frame_support::pallet] @@ -131,15 +171,34 @@ pub mod pallet { /// Configuration parameters. #[pallet::config] - pub trait Config: frame_system::Config + SendTransactionTypes> { + pub trait Config: frame_system::Config { /// Amount of slots that each epoch should last. #[pallet::constant] - type EpochLength: Get; + type EpochDuration: Get; /// Max number of authorities allowed. #[pallet::constant] type MaxAuthorities: Get; + /// Redundancy factor + #[pallet::constant] + type RedundancyFactor: Get; + + /// Max attempts number + #[pallet::constant] + type AttemptsNumber: Get; + + /// Max number of tickets that can be submitted in one block. + #[pallet::constant] + type TicketsChunkLength: Get; + + /// Epoch lottery duration percent relative to the epoch `EpochDuration`. + /// + /// Tickets lottery starts with the start of an epoch. + /// When epoch lottery ends no more tickets are allowed to be submitted on-chain. + #[pallet::constant] + type LotteryDurationPercent: Get; + /// Epoch change trigger. /// /// Logic to be triggered on every block to query for whether an epoch has ended @@ -147,21 +206,32 @@ pub mod pallet { type EpochChangeTrigger: EpochChangeTrigger; /// Weight information for all calls of this pallet. 
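To make the `TicketKey` trick above concrete: negating every byte of a ticket id reverses lexicographic order, so a map whose iteration is ascending in key yields ticket ids in descending order, and the first entries drained from a full accumulator are the largest (worst) ids. A minimal standalone check (plain Rust, using a `BTreeMap` as a stand-in for the on-chain accumulator; not part of this diff):

```rust
use std::collections::BTreeMap;

/// Mirror of `TicketKey::from(TicketId)`: bitwise negation of every byte.
fn key_from_id(id: [u8; 32]) -> [u8; 32] {
    id.map(|b| !b)
}

fn main() {
    let small_id = [0x10u8; 32];
    let large_id = [0xf0u8; 32];

    // BTreeMap iterates in ascending key order, like the storage map.
    let mut accumulator = BTreeMap::new();
    accumulator.insert(key_from_id(small_id), small_id);
    accumulator.insert(key_from_id(large_id), large_id);

    // Ascending keys <=> descending ticket ids: the largest id comes first,
    // which is exactly the entry evicted first when the accumulator is full.
    let ids: Vec<_> = accumulator.values().copied().collect();
    assert_eq!(ids, vec![large_id, small_id]);
}
```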
- type WeightInfo: WeightInfo; + type WeightInfo: weights::WeightInfo; } /// Sassafras runtime errors. #[pallet::error] pub enum Error { - /// Submitted configuration is invalid. - InvalidConfiguration, + /// Tickets were found after the lottery is over. + TicketUnexpected, + /// Ticket identifier is too big. + TicketOverThreshold, + /// Bad ticket order. + TicketBadOrder, + /// Invalid ticket signature. + TicketBadProof, + /// Invalid ticket attempt number. + TicketBadAttempt, + /// Some submitted ticket has not been persisted because of its score. + TicketDropped, + /// Duplicate ticket. + TicketDuplicate, + /// Invalid VRF output. + TicketBadVrfOutput, + /// Uninitialized Ring Verifier + TicketVerifierNotInitialized, } - /// Current epoch index. - #[pallet::storage] - #[pallet::getter(fn epoch_index)] - pub type EpochIndex = StorageValue<_, u64, ValueQuery>; - /// Current epoch authorities. #[pallet::storage] #[pallet::getter(fn authorities)] @@ -172,113 +242,62 @@ pub mod pallet { #[pallet::getter(fn next_authorities)] pub type NextAuthorities = StorageValue<_, AuthoritiesVec, ValueQuery>; - /// First block slot number. - /// - /// As the slots may not be zero-based, we record the slot value for the fist block. - /// This allows to always compute relative indices for epochs and slots. - #[pallet::storage] - #[pallet::getter(fn genesis_slot)] - pub type GenesisSlot = StorageValue<_, Slot, ValueQuery>; - /// Current block slot number. #[pallet::storage] #[pallet::getter(fn current_slot)] pub type CurrentSlot = StorageValue<_, Slot, ValueQuery>; - /// Current epoch randomness. - #[pallet::storage] - #[pallet::getter(fn randomness)] - pub type CurrentRandomness = StorageValue<_, Randomness, ValueQuery>; - - /// Next epoch randomness. - #[pallet::storage] - #[pallet::getter(fn next_randomness)] - pub type NextRandomness = StorageValue<_, Randomness, ValueQuery>; - - /// Randomness accumulator. - /// - /// Excluded the first imported block, its value is updated on block finalization. - #[pallet::storage] - #[pallet::getter(fn randomness_accumulator)] - pub(crate) type RandomnessAccumulator = StorageValue<_, Randomness, ValueQuery>; - - /// The configuration for the current epoch. + /// Randomness buffer. #[pallet::storage] - #[pallet::getter(fn config)] - pub type EpochConfig = StorageValue<_, EpochConfiguration, ValueQuery>; + #[pallet::getter(fn randomness_buf)] + pub type RandomnessBuf = StorageValue<_, RandomnessBuffer, ValueQuery>; - /// The configuration for the next epoch. - #[pallet::storage] - #[pallet::getter(fn next_config)] - pub type NextEpochConfig = StorageValue<_, EpochConfiguration>; - - /// Pending epoch configuration change that will be set as `NextEpochConfig` when the next - /// epoch is enacted. - /// - /// In other words, a configuration change submitted during epoch N will be enacted on epoch - /// N+2. This is to maintain coherence for already submitted tickets for epoch N+1 that where - /// computed using configuration parameters stored for epoch N+1. + /// Tickets accumulator. #[pallet::storage] - pub type PendingEpochConfigChange = StorageValue<_, EpochConfiguration>; + #[pallet::getter(fn tickets_accumulator)] + pub type TicketsAccumulator = CountedStorageMap<_, Identity, TicketKey, TicketBody>; - /// Stored tickets metadata. + /// Tickets counters for the current and next epoch. 
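The `TicketsCount` pair above works as a double buffer indexed by epoch parity, matching the `EpochTag` helper introduced earlier: switching epochs swaps which entry is "current" without moving any data. A standalone illustration of the index arithmetic (not pallet code):

```rust
/// Mirror of `EpochTag::tag`: counter slot used by epoch `epoch_index`.
fn tag(epoch_index: u64) -> usize {
    (epoch_index % 2) as usize
}

/// Mirror of `EpochTag::next_tag`: counter slot used by the following epoch.
fn next_tag(epoch_index: u64) -> usize {
    tag(epoch_index) ^ 1
}

fn main() {
    let mut tickets_count = [0u32; 2];

    // During epoch 7, tickets for epoch 8 accumulate in the "next" slot (0).
    tickets_count[next_tag(7)] += 3;
    assert_eq!(tickets_count, [3, 0]);

    // When epoch 8 starts, slot 0 becomes "current" with no data movement,
    // and slot 1 (epoch 7's old counter) is simply reset for epoch 9.
    assert_eq!(tag(8), 0);
    tickets_count[next_tag(8)] = 0;
    assert_eq!(tickets_count, [3, 0]);
}
```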
#[pallet::storage] - pub type TicketsMeta = StorageValue<_, TicketsMetadata, ValueQuery>; + #[pallet::getter(fn tickets_count)] + pub type TicketsCount = StorageValue<_, TicketsCounter, ValueQuery>; - /// Tickets identifiers map. + /// Tickets map. /// - /// The map holds tickets ids for the current and next epoch. + /// The map holds tickets identifiers for the current and next epoch. /// /// The key is a tuple composed by: - /// - `u8` equal to epoch's index modulo 2; - /// - `u32` equal to the ticket's index in a sorted list of epoch's tickets. + /// - `u8`: equal to epoch's index modulo 2; + /// - `u32` equal to the ticket's index in an abstract sorted sequence of epoch's tickets. /// - /// Epoch X first N-th ticket has key (X mod 2, N) + /// For example, the key for the `N`-th ticket for epoch `E` is `(E mod 2, N)` /// - /// Note that the ticket's index doesn't directly correspond to the slot index within the epoch. - /// The assignment is computed dynamically using an *outside-in* strategy. + /// Note that the ticket's index `N` doesn't correspond to the offset of the associated + /// slot within the epoch. The assignment is computed using an *outside-in* strategy + /// and correctly returned by the [`slot_ticket`] method. /// - /// Be aware that entries within this map are never removed, only overwritten. - /// Last element index should be fetched from the [`TicketsMeta`] value. + /// Be aware that entries within this map are never removed, but only overwritten. + /// The number of tickets available for epoch `E` is stored in the `E mod 2` entry + /// of [`TicketsCount`]. #[pallet::storage] - pub type TicketsIds = StorageMap<_, Identity, (u8, u32), TicketId>; - - /// Tickets to be used for current and next epoch. - #[pallet::storage] - pub type TicketsData = StorageMap<_, Identity, TicketId, TicketBody>; - - /// Next epoch tickets unsorted segments. - /// - /// Contains lists of tickets where each list represents a batch of tickets - /// received via the `submit_tickets` extrinsic. - /// - /// Each segment has max length [`SEGMENT_MAX_SIZE`]. - #[pallet::storage] - pub type UnsortedSegments = - StorageMap<_, Identity, u32, BoundedVec>, ValueQuery>; - - /// The most recently set of tickets which are candidates to become the next - /// epoch tickets. - #[pallet::storage] - pub type SortedCandidates = - StorageValue<_, BoundedVec>, ValueQuery>; + #[pallet::getter(fn tickets)] + pub type Tickets = StorageMap<_, Identity, (u8, u32), TicketBody>; /// Parameters used to construct the epoch's ring verifier. /// - /// In practice: Updatable Universal Reference String and the seed. + /// In practice, this is the SNARK "Universal Reference String" (powers of tau). #[pallet::storage] #[pallet::getter(fn ring_context)] pub type RingContext = StorageValue<_, vrf::RingContext>; /// Ring verifier data for the current epoch. #[pallet::storage] - pub type RingVerifierData = StorageValue<_, vrf::RingVerifierData>; + #[pallet::getter(fn ring_verifier_key)] + pub type RingVerifierKey = StorageValue<_, vrf::RingVerifierKey>; - /// Slot claim VRF pre-output used to generate per-slot randomness. - /// - /// The value is ephemeral and is cleared on block finalization. + /// Ephemeral data we retain until block finalization. #[pallet::storage] - pub(crate) type ClaimTemporaryData = StorageValue<_, vrf::VrfPreOutput>; + pub(crate) type TemporaryData = StorageValue<_, EphemeralData>; /// Genesis configuration for Sassafras protocol. 
#[pallet::genesis_config] @@ -286,8 +305,6 @@ pub mod pallet { pub struct GenesisConfig<T: Config> { /// Genesis authorities. pub authorities: Vec<AuthorityId>, - /// Genesis epoch configuration. - pub epoch_config: EpochConfiguration, /// Phantom config #[serde(skip)] pub _phantom: core::marker::PhantomData<T>, @@ -296,7 +313,6 @@ pub mod pallet { #[pallet::genesis_build] impl<T: Config> BuildGenesisConfig for GenesisConfig<T> { fn build(&self) { - EpochConfig::<T>::put(self.epoch_config); Pallet::<T>::genesis_authorities_initialize(&self.authorities); #[cfg(feature = "construct-dummy-ring-context")] @@ -314,65 +330,78 @@ pub mod pallet { fn on_initialize(block_num: BlockNumberFor<T>) -> Weight { debug_assert_eq!(block_num, frame_system::Pallet::<T>::block_number()); + // Since `on_initialize` can be called twice (e.g. if the `session` pallet is used + // as the session manager) we ensure that the initialization is performed only + // once per block. We rely on the pallet's only volatile storage value to check + // whether `on_initialize` has already been called. + #[cfg(feature = "session-pallet-support")] + if TemporaryData::<T>::exists() { + return Weight::zero() + } + let claim = <frame_system::Pallet<T>>::digest() .logs .iter() .find_map(|item| item.pre_runtime_try_to::<SlotClaim>(&SASSAFRAS_ENGINE_ID)) .expect("Valid block must have a slot claim. qed"); + let randomness_accumulator = Self::randomness_accumulator(); + let randomness_input = vrf::block_randomness_input(&randomness_accumulator, claim.slot); + + // Verification has already been done by the host + debug_assert!({ + use sp_core::crypto::{VrfPublic, Wraps}; + let authorities = Authorities::<T>::get(); + let public = authorities + .get(claim.authority_idx as usize) + .expect("Bad authority index in claim"); + let data = vrf::block_randomness_sign_data(&randomness_accumulator, claim.slot); + public.as_inner_ref().vrf_verify(&data, &claim.vrf_signature) + }); + + let block_randomness = claim.vrf_signature.pre_outputs[0] + .make_bytes::<RANDOMNESS_LENGTH>(RANDOMNESS_VRF_CONTEXT, &randomness_input); + + TemporaryData::<T>::put(EphemeralData { + prev_slot: CurrentSlot::<T>::get(), + block_randomness, + }); + CurrentSlot::<T>::put(claim.slot); if block_num == One::one() { - Self::post_genesis_initialize(claim.slot); + Self::post_genesis_initialize(); } - let randomness_pre_output = claim - .vrf_signature - .pre_outputs - .get(0) - .expect("Valid claim must have VRF signature; qed"); - ClaimTemporaryData::<T>::put(randomness_pre_output); - let trigger_weight = T::EpochChangeTrigger::trigger::<T>(block_num); T::WeightInfo::on_initialize() + trigger_weight } fn on_finalize(_: BlockNumberFor<T>) { - // At the end of the block, we can safely include the current slot randomness + // TODO @davxy: check if the validator has been disabled during execution. + + // At the end of the block, we can safely include the current block randomness // to the accumulator. If we've determined that this block was the first in // a new epoch, the changeover logic has already occurred at this point // (i.e. `enact_epoch_change` has already been called). - let randomness_input = vrf::slot_claim_input( - &Self::randomness(), - CurrentSlot::<T>::get(), - EpochIndex::<T>::get(), - ); - let randomness_pre_output = ClaimTemporaryData::<T>::take() - .expect("Unconditionally populated in `on_initialize`; `on_finalize` is always called after; qed"); - let randomness = randomness_pre_output - .make_bytes::<RANDOMNESS_LENGTH>(RANDOMNESS_VRF_CONTEXT, &randomness_input); - Self::deposit_slot_randomness(&randomness); - - // Check if we are in the epoch's second half. - // If so, start sorting the next epoch tickets.
- let epoch_length = T::EpochLength::get(); + let block_randomness = TemporaryData::::take() + .expect("Unconditionally populated in `on_initialize`; `on_finalize` is always called after; qed") + .block_randomness; + Self::deposit_randomness(block_randomness); + + // Check if tickets lottery is over, and if so, start sorting the next epoch tickets. + let epoch_duration = T::EpochDuration::get(); + let lottery_over_idx = T::LotteryDurationPercent::get() * epoch_duration; let current_slot_idx = Self::current_slot_index(); - if current_slot_idx >= epoch_length / 2 { - let mut metadata = TicketsMeta::::get(); - if metadata.unsorted_tickets_count != 0 { - let next_epoch_idx = EpochIndex::::get() + 1; - let next_epoch_tag = (next_epoch_idx & 1) as u8; - let slots_left = epoch_length.checked_sub(current_slot_idx).unwrap_or(1); - Self::sort_segments( - metadata - .unsorted_tickets_count - .div_ceil(SEGMENT_MAX_SIZE * slots_left as u32), - next_epoch_tag, - &mut metadata, - ); - TicketsMeta::::set(metadata); + let mut outstanding_count = TicketsAccumulator::::count() as usize; + if current_slot_idx >= lottery_over_idx && outstanding_count != 0 { + let slots_left = epoch_duration.checked_sub(current_slot_idx).unwrap_or(1); + if slots_left > 0 { + outstanding_count = outstanding_count.div_ceil(slots_left as usize); } + let next_epoch_tag = Self::current_epoch_index().next_tag(); + Self::consume_tickets_accumulator(outstanding_count, next_epoch_tag); } } } @@ -380,201 +409,137 @@ pub mod pallet { #[pallet::call] impl Pallet { /// Submit next epoch tickets candidates. - /// - /// The number of tickets allowed to be submitted in one call is equal to the epoch length. #[pallet::call_index(0)] - #[pallet::weight(T::WeightInfo::submit_tickets(tickets.len() as u32))] - pub fn submit_tickets( - origin: OriginFor, - tickets: BoundedVec>, - ) -> DispatchResultWithPostInfo { + #[pallet::weight(( + T::WeightInfo::submit_tickets(envelopes.len() as u32), + DispatchClass::Mandatory + ))] + pub fn submit_tickets(origin: OriginFor, envelopes: TicketsVec) -> DispatchResult { ensure_none(origin)?; - debug!(target: LOG_TARGET, "Received {} tickets", tickets.len()); + debug!(target: LOG_TARGET, "Received {} tickets", envelopes.len()); - let epoch_length = T::EpochLength::get(); + let epoch_duration = T::EpochDuration::get(); let current_slot_idx = Self::current_slot_index(); - if current_slot_idx > epoch_length / 2 { - warn!(target: LOG_TARGET, "Tickets shall be submitted in the first epoch half",); - return Err("Tickets shall be submitted in the first epoch half".into()) + let lottery_over_idx = T::LotteryDurationPercent::get() * epoch_duration; + + if current_slot_idx >= lottery_over_idx { + warn!(target: LOG_TARGET, "Lottery is over, tickets must be submitted before slot index {}", lottery_over_idx); + return Err(Error::::TicketUnexpected.into()) } - let Some(verifier) = RingVerifierData::::get().map(|v| v.into()) else { + let Some(verifier) = RingVerifierKey::::get().map(|v| v.into()) else { warn!(target: LOG_TARGET, "Ring verifier key not initialized"); - return Err("Ring verifier key not initialized".into()) + return Err(Error::::TicketVerifierNotInitialized.into()) }; - let next_authorities = Self::next_authorities(); + // Get next epoch parameters + let randomness = Self::next_randomness(); + let authorities = Self::next_authorities(); // Compute tickets threshold - let next_config = Self::next_config().unwrap_or_else(|| Self::config()); let ticket_threshold = sp_consensus_sassafras::ticket_id_threshold( - 
next_config.redundancy_factor, - epoch_length as u32, - next_config.attempts_number, - next_authorities.len() as u32, + epoch_duration as u32, + authorities.len() as u32, + T::AttemptsNumber::get(), + T::RedundancyFactor::get(), ); - // Get next epoch params - let randomness = NextRandomness::::get(); - let epoch_idx = EpochIndex::::get() + 1; + let attempts_num = T::AttemptsNumber::get(); - let mut valid_tickets = BoundedVec::with_bounded_capacity(tickets.len()); - - for ticket in tickets { - debug!(target: LOG_TARGET, "Checking ring proof"); + let mut candidates = Vec::new(); + for envelope in envelopes { + if envelope.attempt >= attempts_num { + debug!(target: LOG_TARGET, "Bad ticket attempt"); + return Err(Error::::TicketBadAttempt.into()) + } - let Some(ticket_id_pre_output) = ticket.signature.pre_outputs.get(0) else { + let Some(ticket_id_pre_output) = envelope.signature.pre_outputs.get(0) else { debug!(target: LOG_TARGET, "Missing ticket VRF pre-output from ring signature"); - continue + return Err(Error::::TicketBadVrfOutput.into()) }; - let ticket_id_input = - vrf::ticket_id_input(&randomness, ticket.body.attempt_idx, epoch_idx); + let ticket_id_input = vrf::ticket_id_input(&randomness, envelope.attempt); // Check threshold constraint let ticket_id = vrf::make_ticket_id(&ticket_id_input, &ticket_id_pre_output); - if ticket_id >= ticket_threshold { - debug!(target: LOG_TARGET, "Ignoring ticket over threshold ({:032x} >= {:032x})", ticket_id, ticket_threshold); - continue - } + trace!(target: LOG_TARGET, "Checking ticket {:?}", ticket_id); - // Check for duplicates - if TicketsData::::contains_key(ticket_id) { - debug!(target: LOG_TARGET, "Ignoring duplicate ticket ({:032x})", ticket_id); - continue + if ticket_id >= ticket_threshold { + debug!(target: LOG_TARGET, "Ticket over threshold ({:?} >= {:?})", ticket_id, ticket_threshold); + return Err(Error::::TicketOverThreshold.into()) } // Check ring signature - let sign_data = vrf::ticket_body_sign_data(&ticket.body, ticket_id_input); - if !ticket.signature.ring_vrf_verify(&sign_data, &verifier) { - debug!(target: LOG_TARGET, "Proof verification failure for ticket ({:032x})", ticket_id); - continue + let sign_data = vrf::ticket_id_sign_data(ticket_id_input, &envelope.extra); + if !envelope.signature.ring_vrf_verify(&sign_data, &verifier) { + debug!(target: LOG_TARGET, "Proof verification failure for ticket ({:?})", ticket_id); + return Err(Error::::TicketBadProof.into()) } - if let Ok(_) = valid_tickets.try_push(ticket_id).defensive_proof( - "Input segment has same length as bounded destination vector; qed", - ) { - TicketsData::::set(ticket_id, Some(ticket.body)); - } + candidates.push(TicketBody { + id: ticket_id, + attempt: envelope.attempt, + extra: envelope.extra, + }); } - if !valid_tickets.is_empty() { - Self::append_tickets(valid_tickets); - } + Self::deposit_tickets(candidates)?; - Ok(Pays::No.into()) + Ok(()) } - /// Plan an epoch configuration change. + /// Report authority equivocation. + /// + /// This method will verify the equivocation proof and validate the given key ownership + /// proof against the extracted offender. If both are valid, the offence will be reported. /// - /// The epoch configuration change is recorded and will be announced at the beginning - /// of the next epoch together with next epoch authorities information. - /// In other words, the configuration will be enacted one epoch later. 
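For intuition on the four parameters fed to `ticket_id_threshold` above: in Sassafras the acceptance threshold is chosen so that a single VRF attempt wins with probability roughly `redundancy * slots / (attempts * validators)`, yielding about `redundancy * slots` winning tickets per epoch. A back-of-envelope sketch under that assumption (illustrative only; the exact integer scaling into the ticket id space lives in `sp-consensus-sassafras` and may differ):

```rust
// Back-of-envelope model only: each of `v` validators makes `a` attempts per
// epoch, and we target about `r * s` winning tickets for `s` slots, so a
// single attempt must win with probability (r * s) / (a * v).
fn ticket_win_probability(redundancy: u8, slots: u32, attempts: u8, validators: u32) -> f64 {
    (redundancy as f64 * slots as f64) / (attempts as f64 * validators as f64)
}

fn main() {
    // E.g. redundancy 2, 600-slot epochs, 8 attempts, 1000 validators:
    // each attempt wins with probability 0.15, ~1200 winning tickets expected.
    let p = ticket_win_probability(2, 600, 8, 1000);
    assert!((p - 0.15).abs() < 1e-12);
}
```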
+ /// This extrinsic must be called unsigned and it is expected that only block authors will + /// call it (validated in `ValidateUnsigned`), as such if the block author is defined it + /// will be defined as the equivocation reporter. /// - /// Multiple calls to this method will replace any existing planned config change - /// that has not been enacted yet. + /// TODO @davxy #[pallet::call_index(1)] - #[pallet::weight(T::WeightInfo::plan_config_change())] - pub fn plan_config_change( + #[pallet::weight({0})] + pub fn report_equivocation_unsigned( origin: OriginFor, - config: EpochConfiguration, + _equivocation_proof: EquivocationProof>, + //key_owner_proof: T::KeyOwnerProof, ) -> DispatchResult { - ensure_root(origin)?; + ensure_none(origin)?; - ensure!( - config.redundancy_factor != 0 && config.attempts_number != 0, - Error::::InvalidConfiguration - ); - PendingEpochConfigChange::::put(config); + // Self::do_report_equivocation( + // T::HandleEquivocation::block_author(), + // *equivocation_proof, + // key_owner_proof, + // ) Ok(()) } } - #[pallet::validate_unsigned] - impl ValidateUnsigned for Pallet { + #[pallet::inherent] + impl ProvideInherent for Pallet { type Call = Call; + type Error = InherentError; + const INHERENT_IDENTIFIER: InherentIdentifier = INHERENT_IDENTIFIER; - fn validate_unsigned(source: TransactionSource, call: &Self::Call) -> TransactionValidity { - let Call::submit_tickets { tickets } = call else { - return InvalidTransaction::Call.into() - }; - - // Discard tickets not coming from the local node or that are not included in a block - if source == TransactionSource::External { - warn!( - target: LOG_TARGET, - "Rejecting unsigned `submit_tickets` transaction from external source", - ); - return InvalidTransaction::BadSigner.into() - } - - // Current slot should be less than half of epoch length. - let epoch_length = T::EpochLength::get(); - let current_slot_idx = Self::current_slot_index(); - if current_slot_idx > epoch_length / 2 { - warn!(target: LOG_TARGET, "Tickets shall be proposed in the first epoch half",); - return InvalidTransaction::Stale.into() - } + fn create_inherent(data: &InherentData) -> Option { + let envelopes = data + .get_data::(&INHERENT_IDENTIFIER) + .expect("Sassafras inherent data not correctly encoded") + .expect("Sassafras inherent data must be provided"); - // This should be set such that it is discarded after the first epoch half - let tickets_longevity = epoch_length / 2 - current_slot_idx; - let tickets_tag = tickets.using_encoded(|bytes| hashing::blake2_256(bytes)); + let envelopes = BoundedVec::truncate_from(envelopes); + Some(Call::submit_tickets { envelopes }) + } - ValidTransaction::with_tag_prefix("Sassafras") - .priority(TransactionPriority::max_value()) - .longevity(tickets_longevity as u64) - .and_provides(tickets_tag) - .propagate(true) - .build() + fn is_inherent(call: &Self::Call) -> bool { + matches!(call, Call::submit_tickets { .. }) } } } // Inherent methods impl Pallet { - /// Determine whether an epoch change should take place at this block. - /// - /// Assumes that initialization has already taken place. - pub(crate) fn should_end_epoch(block_num: BlockNumberFor) -> bool { - // The epoch has technically ended during the passage of time between this block and the - // last, but we have to "end" the epoch now, since there is no earlier possible block we - // could have done it. - // - // The exception is for block 1: the genesis has slot 0, so we treat epoch 0 as having - // started at the slot of block 1. 
We want to use the same randomness and validator set as - // signalled in the genesis, so we don't rotate the epoch. - block_num > One::one() && Self::current_slot_index() >= T::EpochLength::get() - } - - /// Current slot index relative to the current epoch. - fn current_slot_index() -> u32 { - Self::slot_index(CurrentSlot::::get()) - } - - /// Slot index relative to the current epoch. - fn slot_index(slot: Slot) -> u32 { - slot.checked_sub(*Self::current_epoch_start()) - .and_then(|v| v.try_into().ok()) - .unwrap_or(u32::MAX) - } - - /// Finds the start slot of the current epoch. - /// - /// Only guaranteed to give correct results after `initialize` of the first - /// block in the chain (as its result is based off of `GenesisSlot`). - fn current_epoch_start() -> Slot { - Self::epoch_start(EpochIndex::::get()) - } - - /// Get the epoch's first slot. - fn epoch_start(epoch_index: u64) -> Slot { - const PROOF: &str = "slot number is u64; it should relate in some way to wall clock time; \ - if u64 is not enough we should crash for safety; qed."; - - let epoch_start = epoch_index.checked_mul(T::EpochLength::get() as u64).expect(PROOF); - GenesisSlot::::get().checked_add(epoch_start).expect(PROOF).into() - } - pub(crate) fn update_ring_verifier(authorities: &[AuthorityId]) { debug!(target: LOG_TARGET, "Loading ring context"); let Some(ring_ctx) = RingContext::::get() else { @@ -585,11 +550,15 @@ impl Pallet { let pks: Vec<_> = authorities.iter().map(|auth| *auth.as_ref()).collect(); debug!(target: LOG_TARGET, "Building ring verifier (ring size: {})", pks.len()); - let verifier_data = ring_ctx - .verifier_data(&pks) - .expect("Failed to build ring verifier. This is a bug"); - - RingVerifierData::::put(verifier_data); + let maybe_verifier_key = ring_ctx.verifier_key(&pks); + if maybe_verifier_key.is_none() { + error!( + target: LOG_TARGET, + "Failed to build verifier key. This should never happen,\n + falling back to AURA for next epoch as last resort" + ); + } + RingVerifierKey::::set(maybe_verifier_key); } /// Enact an epoch change. @@ -597,12 +566,16 @@ impl Pallet { /// WARNING: Should be called on every block once and if and only if [`should_end_epoch`] /// has returned `true`. /// + /// WARNING: Here we trust the caller that `next_authorities == NextAuthorities` + /// /// If we detect one or more skipped epochs the policy is to use the authorities and values /// from the first skipped epoch. The tickets data is invalidated. pub(crate) fn enact_epoch_change( authorities: WeakBoundedVec, next_authorities: WeakBoundedVec, ) { + debug_assert_eq!(authorities, NextAuthorities::::get()); + if next_authorities != authorities { Self::update_ring_verifier(&next_authorities); } @@ -612,13 +585,22 @@ impl Pallet { NextAuthorities::::put(&next_authorities); // Update epoch index - let mut epoch_idx = EpochIndex::::get() + 1; + let expected_epoch_idx = TemporaryData::::get() + .map(|cache| Self::epoch_index(cache.prev_slot) + 1) + .expect("Unconditionally populated in `on_initialize`; `enact_epoch_change` is always called after; qed"); + let mut epoch_idx = Self::current_epoch_index(); + + if epoch_idx < expected_epoch_idx { + panic!( + "Unexpected epoch value, expected: {} - found: {}, aborting", + expected_epoch_idx, epoch_idx + ); + } - let slot_idx = CurrentSlot::::get().saturating_sub(Self::epoch_start(epoch_idx)); - if slot_idx >= T::EpochLength::get() { + if expected_epoch_idx != epoch_idx { // Detected one or more skipped epochs, clear tickets data and recompute epoch index. 
Self::reset_tickets_data(); - let skipped_epochs = *slot_idx / T::EpochLength::get() as u64; + let skipped_epochs = epoch_idx - expected_epoch_idx; epoch_idx += skipped_epochs; warn!( target: LOG_TARGET, @@ -628,95 +610,93 @@ impl<T: Config> Pallet<T> { ); } - let mut metadata = TicketsMeta::<T>::get(); - let mut metadata_dirty = false; - - EpochIndex::<T>::put(epoch_idx); - - let next_epoch_idx = epoch_idx + 1; - - // Updates current epoch randomness and computes the *next* epoch randomness. - let next_randomness = Self::update_epoch_randomness(next_epoch_idx); - - if let Some(config) = NextEpochConfig::<T>::take() { - EpochConfig::<T>::put(config); - } - - let next_config = PendingEpochConfigChange::<T>::take(); - if let Some(next_config) = next_config { - NextEpochConfig::<T>::put(next_config); - } - // After we update the current epoch, we signal the *next* epoch change // so that nodes can track changes. - let next_epoch = NextEpochDescriptor { - randomness: next_randomness, + let epoch_signal = NextEpochDescriptor { + randomness: Self::update_randomness_buffer(), authorities: next_authorities.into_inner(), - config: next_config, }; - Self::deposit_next_epoch_descriptor_digest(next_epoch); + Self::deposit_next_epoch_descriptor_digest(epoch_signal); - let epoch_tag = (epoch_idx & 1) as u8; + Self::consume_tickets_accumulator(usize::MAX, epoch_idx.tag()); - // Optionally finish sorting - if metadata.unsorted_tickets_count != 0 { - Self::sort_segments(u32::MAX, epoch_tag, &mut metadata); - metadata_dirty = true; - } + // Reset the next epoch counter as we start accumulating. + let mut tickets_count = TicketsCount::<T>::get(); + tickets_count[epoch_idx.next_tag() as usize] = 0; + TicketsCount::<T>::set(tickets_count); + } - // Clear the "prev ≡ next (mod 2)" epoch tickets counter and bodies. - // Ids are left since are just cyclically overwritten on-the-go. - let prev_epoch_tag = epoch_tag ^ 1; - let prev_epoch_tickets_count = &mut metadata.tickets_count[prev_epoch_tag as usize]; - if *prev_epoch_tickets_count != 0 { - for idx in 0..*prev_epoch_tickets_count { - if let Some(ticket_id) = TicketsIds::<T>::get((prev_epoch_tag, idx)) { - TicketsData::<T>::remove(ticket_id); + pub(crate) fn deposit_tickets(tickets: Vec<TicketBody>) -> Result<(), Error<T>> { + let prev_count = TicketsAccumulator::<T>::count(); + let mut prev_id = None; + for ticket in &tickets { + if prev_id.map(|prev| ticket.id <= prev).unwrap_or_default() { + return Err(Error::TicketBadOrder) + } + prev_id = Some(ticket.id); + TicketsAccumulator::<T>::insert(TicketKey::from(ticket.id), ticket); + } + let count = TicketsAccumulator::<T>::count(); + if count != prev_count + tickets.len() as u32 { + return Err(Error::TicketDuplicate) + } + let diff = count.saturating_sub(T::EpochDuration::get()); + if diff > 0 { + let dropped_entries: Vec<_> = + TicketsAccumulator::<T>::iter().take(diff as usize).collect(); + // Ensure that none of the newly submitted tickets has been dropped + for (key, ticket) in dropped_entries { + if tickets.binary_search_by_key(&ticket.id, |t| t.id).is_ok() { + return Err(Error::TicketDropped) + } + TicketsAccumulator::<T>::remove(key); + } } - *prev_epoch_tickets_count = 0; - metadata_dirty = true; } + Ok(()) + } - if metadata_dirty { - TicketsMeta::<T>::set(metadata); + // Consumes the tickets accumulator relative to `epoch_tag` by depositing at most + // `max_items` into the `Tickets` map. Ticket bodies are stored in the `Tickets` + // map from smallest to largest ticket identifier (as required by the protocol).
+ fn consume_tickets_accumulator(max_items: usize, epoch_tag: u8) { + let mut tickets_count = TicketsCount::::get(); + let mut accumulator_count = TicketsAccumulator::::count(); + let mut idx = accumulator_count; + for (_, ticket) in TicketsAccumulator::::drain().take(max_items) { + idx -= 1; + Tickets::::insert((epoch_tag, idx), ticket); } + tickets_count[epoch_tag as usize] += (accumulator_count - idx); + TicketsCount::::set(tickets_count); } // Call this function on epoch change to enact current epoch randomness. - // - // Returns the next epoch randomness. - fn update_epoch_randomness(next_epoch_index: u64) -> Randomness { - let curr_epoch_randomness = NextRandomness::::get(); - CurrentRandomness::::put(curr_epoch_randomness); - - let accumulator = RandomnessAccumulator::::get(); - - let mut buf = [0; RANDOMNESS_LENGTH + 8]; - buf[..RANDOMNESS_LENGTH].copy_from_slice(&accumulator[..]); - buf[RANDOMNESS_LENGTH..].copy_from_slice(&next_epoch_index.to_le_bytes()); - - let next_randomness = hashing::blake2_256(&buf); - NextRandomness::::put(&next_randomness); - - next_randomness + fn update_randomness_buffer() -> Randomness { + let mut randomness = RandomnessBuf::::get(); + randomness[3] = randomness[2]; + randomness[2] = randomness[1]; + randomness[1] = randomness[0]; + let announce = randomness[2]; + RandomnessBuf::::put(randomness); + announce } - // Deposit per-slot randomness. - fn deposit_slot_randomness(randomness: &Randomness) { - let accumulator = RandomnessAccumulator::::get(); - + // Deposit per-block randomness. + fn deposit_randomness(randomness: Randomness) { + let mut accumulator = RandomnessBuf::::get(); let mut buf = [0; 2 * RANDOMNESS_LENGTH]; - buf[..RANDOMNESS_LENGTH].copy_from_slice(&accumulator[..]); + buf[..RANDOMNESS_LENGTH].copy_from_slice(&accumulator[0][..]); buf[RANDOMNESS_LENGTH..].copy_from_slice(&randomness[..]); - - let accumulator = hashing::blake2_256(&buf); - RandomnessAccumulator::::put(accumulator); + accumulator[0] = hashing::blake2_256(&buf); + RandomnessBuf::::put(accumulator); } // Deposit next epoch descriptor in the block header digest. fn deposit_next_epoch_descriptor_digest(desc: NextEpochDescriptor) { - let item = ConsensusLog::NextEpochData(desc); + Self::deposit_consensus(ConsensusLog::NextEpochData(desc)); + } + + pub(crate) fn deposit_consensus(item: ConsensusLog) { let log = DigestItem::Consensus(SASSAFRAS_ENGINE_ID, item.encode()); >::deposit_log(log) } @@ -746,65 +726,34 @@ impl Pallet { } // Method to be called on first block `on_initialize` to properly populate some key parameters. - fn post_genesis_initialize(slot: Slot) { - // Keep track of the actual first slot used (may not be zero based). - GenesisSlot::::put(slot); - - // Properly initialize randomness using genesis hash and current slot. - // This is important to guarantee that a different set of tickets are produced for: - // - different chains which share the same ring parameters and - // - same chain started with a different slot base. + fn post_genesis_initialize() { + // Properly initialize randomness using genesis hash. + // This is important to guarantee that a different set of tickets are produced for + // different chains sharing the same ring parameters. 
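The two randomness primitives above compose as follows: every block folds its VRF output into entry 0 of the buffer, and every epoch change shifts the snapshots one position down. A self-contained model of that flow (assuming the `blake2` crate, v0.10, as a stand-in for `sp_io::hashing::blake2_256`; illustrative, not pallet code):

```rust
// [dependencies] blake2 = "0.10"
use blake2::{digest::consts::U32, Blake2b, Digest};

type Randomness = [u8; 32];

fn blake2_256(data: &[u8]) -> Randomness {
    let mut hasher = Blake2b::<U32>::new();
    hasher.update(data);
    hasher.finalize().into()
}

// Mirrors `deposit_randomness`: fold the block VRF output into entry 0.
fn deposit(buf: &mut [Randomness; 4], block_randomness: Randomness) {
    let mut concat = [0u8; 64];
    concat[..32].copy_from_slice(&buf[0]);
    concat[32..].copy_from_slice(&block_randomness);
    buf[0] = blake2_256(&concat);
}

// Mirrors `update_randomness_buffer`: on epoch change shift the snapshots
// (entry 0 keeps accumulating) and return the value announced for the next
// epoch's ticket lottery (entry 2 after the shift).
fn epoch_shift(buf: &mut [Randomness; 4]) -> Randomness {
    buf[3] = buf[2];
    buf[2] = buf[1];
    buf[1] = buf[0];
    buf[2]
}

fn main() {
    let mut buf = [[0u8; 32]; 4];
    deposit(&mut buf, [1u8; 32]);
    let announced = epoch_shift(&mut buf);
    // After one shift, the announced value is the pre-shift accumulator.
    assert_eq!(announced, buf[1]);
}
```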
let genesis_hash = frame_system::Pallet::::parent_hash(); - let mut buf = genesis_hash.as_ref().to_vec(); - buf.extend_from_slice(&slot.to_le_bytes()); - let randomness = hashing::blake2_256(buf.as_slice()); - RandomnessAccumulator::::put(randomness); - - let next_randomness = Self::update_epoch_randomness(1); + let mut accumulator = RandomnessBuffer::default(); + accumulator[0] = hashing::blake2_256(genesis_hash.as_ref()); + accumulator[1] = hashing::blake2_256(&accumulator[0]); + accumulator[2] = hashing::blake2_256(&accumulator[1]); + accumulator[3] = hashing::blake2_256(&accumulator[2]); + RandomnessBuf::::put(accumulator); // Deposit a log as this is the first block in first epoch. let next_epoch = NextEpochDescriptor { - randomness: next_randomness, + randomness: accumulator[2], authorities: Self::next_authorities().into_inner(), - config: None, }; Self::deposit_next_epoch_descriptor_digest(next_epoch); } - /// Current epoch information. - pub fn current_epoch() -> Epoch { - let index = EpochIndex::::get(); - Epoch { - index, - start: Self::epoch_start(index), - length: T::EpochLength::get(), - authorities: Self::authorities().into_inner(), - randomness: Self::randomness(), - config: Self::config(), - } - } - - /// Next epoch information. - pub fn next_epoch() -> Epoch { - let index = EpochIndex::::get() + 1; - Epoch { - index, - start: Self::epoch_start(index), - length: T::EpochLength::get(), - authorities: Self::next_authorities().into_inner(), - randomness: Self::next_randomness(), - config: Self::next_config().unwrap_or_else(|| Self::config()), - } - } - /// Fetch expected ticket-id for the given slot according to an "outside-in" sorting strategy. /// /// Given an ordered sequence of tickets [t0, t1, t2, ..., tk] to be assigned to n slots, /// with n >= k, then the tickets are assigned to the slots according to the following /// strategy: /// - /// slot-index : [ 0, 1, 2, ............ , n ] - /// tickets : [ t1, t3, t5, ... , t4, t2, t0 ]. + /// slot-index : [ 0, 1, 2, 3, .................. ,n ] + /// tickets : [ t1, tk, t2, t_{k-1} ..... ]. /// /// With slot-index computed as `epoch_start() - slot`. /// @@ -814,224 +763,177 @@ impl Pallet { /// If `slot` value falls within the next epoch then we fetch tickets from the next epoch /// tickets ids list. Note that in this case we may have not finished receiving all the tickets /// for that epoch yet. The next epoch tickets should be considered "stable" only after the - /// current epoch first half slots were elapsed (see `submit_tickets_unsigned_extrinsic`). + /// current epoch "submission period" is completed. /// /// Returns `None` if, according to the sorting strategy, there is no ticket associated to the - /// specified slot-index (happens if a ticket falls in the middle of an epoch and n > k), - /// or if the slot falls beyond the next epoch. + /// specified slot-index (may happen if n > k and we are requesting for a ticket for a slot with + /// relative index i > k) or if the slot falls beyond the next epoch. /// /// Before importing the first block this returns `None`. 
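The outside-in assignment documented above can be sanity-checked with a few lines of standalone Rust mirroring the index arithmetic used by the new `slot_ticket` that follows:

```rust
// Standalone sketch of the outside-in strategy: even slot indices take
// tickets from the front of the sorted sequence, odd ones from the back.
fn outside_in(slot_idx: u32, tickets_count: u32) -> Option<u32> {
    if tickets_count <= slot_idx {
        return None; // Slot not bound to a ticket.
    }
    let mut ticket_idx = slot_idx / 2;
    if slot_idx % 2 == 1 {
        ticket_idx = tickets_count - (ticket_idx + 1);
    }
    Some(ticket_idx)
}

fn main() {
    // With 5 tickets [t0..t4], slots 0..5 are served as [t0, t4, t1, t3, t2].
    let order: Vec<_> = (0..5).map(|i| outside_in(i, 5).unwrap()).collect();
    assert_eq!(order, vec![0, 4, 1, 3, 2]);
}
```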
- pub fn slot_ticket_id(slot: Slot) -> Option<TicketId> { + pub fn slot_ticket(slot: Slot) -> Option<TicketBody> { if frame_system::Pallet::<T>::block_number().is_zero() { return None } - let epoch_idx = EpochIndex::<T>::get(); - let epoch_len = T::EpochLength::get(); - let mut slot_idx = Self::slot_index(slot); - let mut metadata = TicketsMeta::<T>::get(); - - let get_ticket_idx = |slot_idx| { - let ticket_idx = if slot_idx < epoch_len / 2 { - 2 * slot_idx + 1 - } else { - 2 * (epoch_len - (slot_idx + 1)) - }; - debug!( - target: LOG_TARGET, - "slot-idx {} <-> ticket-idx {}", - slot_idx, - ticket_idx - ); - ticket_idx as u32 - }; - let mut epoch_tag = (epoch_idx & 1) as u8; + let curr_epoch_idx = Self::current_epoch_index(); + let slot_epoch_idx = Self::epoch_index(slot); + if slot_epoch_idx < curr_epoch_idx || slot_epoch_idx > curr_epoch_idx + 1 { + return None + } + + let mut epoch_tag = slot_epoch_idx.tag(); + let slot_idx = Self::slot_index(slot); - if epoch_len <= slot_idx && slot_idx < 2 * epoch_len { - // Try to get a ticket for the next epoch. Since its state values were not enacted yet, - // we may have to finish sorting the tickets. - epoch_tag ^= 1; - slot_idx -= epoch_len; - if metadata.unsorted_tickets_count != 0 { - Self::sort_segments(u32::MAX, epoch_tag, &mut metadata); - TicketsMeta::<T>::set(metadata); - } - } else if slot_idx >= 2 * epoch_len { + if slot_epoch_idx == curr_epoch_idx + 1 && TicketsAccumulator::<T>::count() != 0 { + // JIT enactment of next epoch tickets when the accumulator has not been + // fully consumed yet. Drain and enact the accumulator for next epoch. + Self::consume_tickets_accumulator(usize::MAX, epoch_tag); + } + + let tickets_count = TicketsCount::<T>::get()[epoch_tag as usize]; + if tickets_count <= slot_idx { + // Slot not bound to a ticket. return None } - let ticket_idx = get_ticket_idx(slot_idx); - if ticket_idx < metadata.tickets_count[epoch_tag as usize] { - TicketsIds::<T>::get((epoch_tag, ticket_idx)) - } else { - None + // Outside-in sort. + let mut ticket_idx = slot_idx / 2; + if slot_idx & 1 != 0 { + ticket_idx = tickets_count - (ticket_idx + 1); } + + debug!( + target: LOG_TARGET, + "slot-idx {} <-> ticket-idx {}", + slot_idx, + ticket_idx + ); + Tickets::<T>::get((epoch_tag, ticket_idx)) } - /// Returns ticket id and data associated with the given `slot`. + /// Reset tickets-related data. /// - /// Refer to the `slot_ticket_id` documentation for the slot-ticket association - /// criteria. - pub fn slot_ticket(slot: Slot) -> Option<(TicketId, TicketBody)> { - Self::slot_ticket_id(slot).and_then(|id| TicketsData::<T>::get(id).map(|body| (id, body))) + /// Optimization note: tickets are left in place, only the associated counters are reset. + #[inline(always)] + fn reset_tickets_data() { + TicketsCount::<T>::kill(); + let _ = TicketsAccumulator::<T>::clear(u32::MAX, None); + } + + /// Static protocol configuration. + #[inline(always)] + pub fn protocol_config() -> Configuration { + let epoch_duration = T::EpochDuration::get(); + let lottery_duration = T::LotteryDurationPercent::get() * epoch_duration; + Configuration { + epoch_duration, + lottery_duration, + max_authorities: T::MaxAuthorities::get(), + redundancy_factor: T::RedundancyFactor::get(), + attempts_number: T::AttemptsNumber::get(), + } } - // Sort and truncate candidate tickets, cleanup storage.
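The outside-in indexing at the heart of `slot_ticket` can be checked with a tiny stand-alone sketch (plain Rust, no pallet storage): even slot indices consume the sorted ticket list from the front, odd indices from the back, and slots with relative index >= k stay unbound.

fn outside_in_index(slot_idx: u32, tickets_count: u32) -> Option<u32> {
    // Slots with relative index >= k are not bound to any ticket.
    if slot_idx >= tickets_count {
        return None
    }
    let mut ticket_idx = slot_idx / 2;
    if slot_idx & 1 != 0 {
        ticket_idx = tickets_count - (ticket_idx + 1);
    }
    Some(ticket_idx)
}

fn main() {
    // With 4 tickets [t0, t1, t2, t3] and 6 slots the assignment is:
    // slot:   0   1   2   3   4   5
    // ticket: t0  t3  t1  t2  -   -
    let map: Vec<_> = (0..6).map(|s| outside_in_index(s, 4)).collect();
    assert_eq!(map, vec![Some(0), Some(3), Some(1), Some(2), None, None]);
}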
- fn sort_and_truncate(candidates: &mut Vec, max_tickets: usize) -> u128 { - candidates.sort_unstable(); - candidates.drain(max_tickets..).for_each(TicketsData::::remove); - candidates[max_tickets - 1] + /// Current epoch information. + #[inline(always)] + pub fn current_epoch() -> Epoch { + Epoch { + start: Self::current_epoch_start(), + authorities: Self::authorities().into_inner(), + randomness: Self::randomness_buf(), + } } - /// Sort the tickets which belong to the epoch with the specified `epoch_tag`. + /// Randomness buffer entries. /// - /// At most `max_segments` are taken from the `UnsortedSegments` structure. + /// Assuming we're executing a block during epoch with index `N`. /// - /// The tickets of the removed segments are merged with the tickets on the `SortedCandidates` - /// which is then sorted an truncated to contain at most `MaxTickets` entries. + /// Entries: + /// - 0 : randomness accumulator after execution of previous block. + /// - 1 : randomness accumulator snapshot after execution of epoch `N-1` last block. + /// - 2 : randomness accumulator snapshot after execution of epoch `N-2` last block. + /// - 3 : randomness accumulator snapshot after execution of epoch `N-3` last block. /// - /// If all the entries in `UnsortedSegments` are consumed, then `SortedCandidates` is elected - /// as the next epoch tickets, else it is saved to be used by next calls of this function. - pub(crate) fn sort_segments(max_segments: u32, epoch_tag: u8, metadata: &mut TicketsMetadata) { - let unsorted_segments_count = metadata.unsorted_tickets_count.div_ceil(SEGMENT_MAX_SIZE); - let max_segments = max_segments.min(unsorted_segments_count); - let max_tickets = Self::epoch_length() as usize; - - // Fetch the sorted candidates (if any). - let mut candidates = SortedCandidates::::take().into_inner(); - - // There is an upper bound to check only if we already sorted the max number - // of allowed tickets. - let mut upper_bound = *candidates.get(max_tickets - 1).unwrap_or(&TicketId::MAX); - - let mut require_sort = false; - - // Consume at most `max_segments` segments. - // During the process remove every stale ticket from `TicketsData` storage. - for segment_idx in (0..unsorted_segments_count).rev().take(max_segments as usize) { - let segment = UnsortedSegments::::take(segment_idx); - metadata.unsorted_tickets_count -= segment.len() as u32; - - // Push only ids with a value less than the current `upper_bound`. - let prev_len = candidates.len(); - for ticket_id in segment { - if ticket_id < upper_bound { - candidates.push(ticket_id); - } else { - TicketsData::::remove(ticket_id); - } - } - require_sort = candidates.len() != prev_len; - - // As we approach the tail of the segments buffer the `upper_bound` value is expected - // to decrease (fast). We thus expect the number of tickets pushed into the - // `candidates` vector to follow an exponential drop. - // - // Given this, sorting and truncating after processing each segment may be an overkill - // as we may find pushing few tickets more and more often. Is preferable to perform - // the sort and truncation operations only when we reach some bigger threshold - // (currently set as twice the capacity of `SortCandidate`). - // - // The more is the protocol's redundancy factor (i.e. the ratio between tickets allowed - // to be submitted and the epoch length) the more this check becomes relevant. 
- if candidates.len() > 2 * max_tickets { - upper_bound = Self::sort_and_truncate(&mut candidates, max_tickets); - require_sort = false; - } - } - - if candidates.len() > max_tickets { - Self::sort_and_truncate(&mut candidates, max_tickets); - } else if require_sort { - candidates.sort_unstable(); - } - - if metadata.unsorted_tickets_count == 0 { - // Sorting is over, write to next epoch map. - candidates.iter().enumerate().for_each(|(i, id)| { - TicketsIds::::insert((epoch_tag, i as u32), id); - }); - metadata.tickets_count[epoch_tag as usize] = candidates.len() as u32; - } else { - // Keep the partial result for the next calls. - SortedCandidates::::set(BoundedVec::truncate_from(candidates)); - } + /// The semantic of these entries is defined as: + /// - 3 : epoch `N` randomness + /// - 2 : epoch `N+1` randomness + /// - 1 : epoch `N+2` randomness + /// - 0 : accumulator for epoch `N+3` randomness + /// + /// If `index` is greater than 3 the `Default` is returned. + #[inline(always)] + fn randomness(index: usize) -> Randomness { + Self::randomness_buf().get(index).cloned().unwrap_or_default() } - /// Append a set of tickets to the segments map. - pub(crate) fn append_tickets(mut tickets: BoundedVec>) { - debug!(target: LOG_TARGET, "Appending batch with {} tickets", tickets.len()); - tickets.iter().for_each(|t| trace!(target: LOG_TARGET, " + {t:032x}")); - - let mut metadata = TicketsMeta::::get(); - let mut segment_idx = metadata.unsorted_tickets_count / SEGMENT_MAX_SIZE; - - while !tickets.is_empty() { - let rem = metadata.unsorted_tickets_count % SEGMENT_MAX_SIZE; - let to_be_added = tickets.len().min((SEGMENT_MAX_SIZE - rem) as usize); + /// Current epoch's randomness. + #[inline(always)] + fn current_randomness() -> Randomness { + Self::randomness(3) + } - let mut segment = UnsortedSegments::::get(segment_idx); - let _ = segment - .try_extend(tickets.drain(..to_be_added)) - .defensive_proof("We don't add more than `SEGMENT_MAX_SIZE` and this is the maximum bound for the vector."); - UnsortedSegments::::insert(segment_idx, segment); + /// Next epoch's randomness. + #[inline(always)] + fn next_randomness() -> Randomness { + Self::randomness(2) + } - metadata.unsorted_tickets_count += to_be_added as u32; - segment_idx += 1; - } + /// Randomness accumulator + #[inline(always)] + fn randomness_accumulator() -> Randomness { + Self::randomness(0) + } - TicketsMeta::::set(metadata); + /// Determine whether an epoch change should take place at this block. + #[inline(always)] + fn should_end_epoch(block_num: BlockNumberFor) -> bool { + Self::current_slot_index() == 0 && block_num != Zero::zero() } - /// Remove all tickets related data. - /// - /// May not be efficient as the calling places may repeat some of this operations - /// but is a very extraordinary operation (hopefully never happens in production) - /// and better safe than sorry. - fn reset_tickets_data() { - let metadata = TicketsMeta::::get(); + /// Current slot index relative to the current epoch. + #[inline(always)] + fn current_slot_index() -> u32 { + Self::slot_index(CurrentSlot::::get()) + } - // Remove even/odd-epoch data. - for epoch_tag in 0..=1 { - for idx in 0..metadata.tickets_count[epoch_tag] { - if let Some(id) = TicketsIds::::get((epoch_tag as u8, idx)) { - TicketsData::::remove(id); - } - } - } + /// Slot index relative to the current epoch. + #[inline(always)] + fn slot_index(slot: Slot) -> u32 { + (*slot % ::EpochDuration::get() as u64) as u32 + } - // Remove all unsorted tickets segments. 
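Putting the accessors together: a value mixed into the accumulator becomes `next_randomness` after two epoch changes and `current_randomness` after three. A quick property check, reusing `deposit_randomness` and `update_randomness_buffer` from the model sketched earlier:

fn main() {
    let mut buf = [[0u8; 32]; 4];
    deposit_randomness(&mut buf, &[1u8; 32]);
    let accumulated = buf[0];
    update_randomness_buffer(&mut buf); // end of epoch N: snapshot taken
    assert_eq!(buf[1], accumulated);
    update_randomness_buffer(&mut buf); // end of epoch N+1
    assert_eq!(buf[2], accumulated);    // next_randomness (entry 2)
    update_randomness_buffer(&mut buf); // end of epoch N+2
    assert_eq!(buf[3], accumulated);    // current_randomness (entry 3)
}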
- let segments_count = metadata.unsorted_tickets_count.div_ceil(SEGMENT_MAX_SIZE); - (0..segments_count).for_each(UnsortedSegments::::remove); + /// Current epoch index. + #[inline(always)] + fn current_epoch_index() -> u64 { + Self::epoch_index(Self::current_slot()) + } - // Reset sorted candidates - SortedCandidates::::kill(); + /// Epoch's index from slot. + #[inline(always)] + fn epoch_index(slot: Slot) -> u64 { + *slot / ::EpochDuration::get() as u64 + } - // Reset tickets metadata - TicketsMeta::::kill(); + /// Get current epoch first slot. + #[inline(always)] + fn current_epoch_start() -> Slot { + let curr_slot = *Self::current_slot(); + let epoch_start = curr_slot - curr_slot % ::EpochDuration::get() as u64; + Slot::from(epoch_start) } - /// Submit next epoch validator tickets via an unsigned extrinsic constructed with a call to - /// `submit_unsigned_transaction`. - /// - /// The submitted tickets are added to the next epoch outstanding tickets as long as the - /// extrinsic is called within the first half of the epoch. Tickets received during the - /// second half are dropped. - pub fn submit_tickets_unsigned_extrinsic(tickets: Vec) -> bool { - let tickets = BoundedVec::truncate_from(tickets); - let call = Call::submit_tickets { tickets }; - match SubmitTransaction::>::submit_unsigned_transaction(call.into()) { - Ok(_) => true, - Err(e) => { - error!(target: LOG_TARGET, "Error submitting tickets {:?}", e); - false - }, - } + /// Get the epoch's first slot. + #[inline(always)] + fn epoch_start(epoch_index: u64) -> Slot { + const PROOF: &str = "slot number is u64; it should relate in some way to wall clock time; \ + if u64 is not enough we should crash for safety; qed."; + epoch_index.checked_mul(T::EpochDuration::get() as u64).expect(PROOF).into() } - /// Epoch length - pub fn epoch_length() -> u32 { - T::EpochLength::get() + /// Epoch duration. 
+ #[inline(always)] + fn epoch_duration() -> u32 { + T::EpochDuration::get() } } @@ -1071,7 +973,7 @@ impl EpochChangeTrigger for EpochChangeInternalTrigger { let next_authorities = authorities.clone(); let len = next_authorities.len() as u32; Pallet::::enact_epoch_change(authorities, next_authorities); - T::WeightInfo::enact_epoch_change(len, T::EpochLength::get()) + T::WeightInfo::enact_epoch_change(len, T::EpochDuration::get()) } else { Weight::zero() } diff --git a/substrate/frame/sassafras/src/mock.rs b/substrate/frame/sassafras/src/mock.rs index f145bffa3a05..e260748ec6a1 100644 --- a/substrate/frame/sassafras/src/mock.rs +++ b/substrate/frame/sassafras/src/mock.rs @@ -20,17 +20,16 @@ use crate::{self as pallet_sassafras, EpochChangeInternalTrigger, *}; use frame_support::{ - derive_impl, - traits::{ConstU32, OnFinalize, OnInitialize}, + derive_impl, parameter_types, + traits::{ConstU32, ConstU8, OnFinalize, OnInitialize}, }; use sp_consensus_sassafras::{ digests::SlotClaim, vrf::{RingProver, VrfSignature}, - AuthorityIndex, AuthorityPair, EpochConfiguration, Slot, TicketBody, TicketEnvelope, TicketId, + AuthorityIndex, AuthorityPair, Slot, TicketBody, TicketEnvelope, TicketId, }; use sp_core::{ - crypto::{ByteArray, Pair, UncheckedFrom, VrfSecret, Wraps}, - ed25519::Public as EphemeralPublic, + crypto::{ByteArray, Pair, VrfSecret, Wraps}, H256, U256, }; use sp_runtime::{ @@ -40,8 +39,13 @@ use sp_runtime::{ const LOG_TARGET: &str = "sassafras::tests"; -const EPOCH_LENGTH: u32 = 10; +// Configuration constants +const EPOCH_DURATION: u32 = 10; +const LOTTERY_PERCENT: u8 = 85; const MAX_AUTHORITIES: u32 = 100; +const REDUNDANCY_FACTOR: u8 = 32; +const ATTEMPTS_NUMBER: u8 = 2; +const TICKETS_CHUNK_LENGTH: u32 = 16; #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] impl frame_system::Config for Test { @@ -56,9 +60,17 @@ where type Extrinsic = TestXt; } +parameter_types! { + pub const LotteryPercent: Percent = Percent::from_percent(LOTTERY_PERCENT); +} + impl pallet_sassafras::Config for Test { - type EpochLength = ConstU32; + type EpochDuration = ConstU32; type MaxAuthorities = ConstU32; + type RedundancyFactor = ConstU8; + type AttemptsNumber = ConstU8; + type TicketsChunkLength = ConstU32; + type LotteryDurationPercent = LotteryPercent; type EpochChangeTrigger = EpochChangeInternalTrigger; type WeightInfo = (); } @@ -70,14 +82,6 @@ frame_support::construct_runtime!( } ); -// Default used for most of the tests. -// -// The redundancy factor has been set to max value to accept all submitted -// tickets without worrying about the threshold. 
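The slot/epoch helpers above reduce to plain integer arithmetic. A self-contained check, assuming the mock's epoch duration of 10 (the same numbers the `slot_and_epoch_helpers_works` test asserts later):

const EPOCH_DURATION: u64 = 10;

fn slot_index(slot: u64) -> u32 {
    (slot % EPOCH_DURATION) as u32
}

fn epoch_index(slot: u64) -> u64 {
    slot / EPOCH_DURATION
}

fn epoch_start(epoch_index: u64) -> u64 {
    epoch_index.checked_mul(EPOCH_DURATION).expect("slot must fit u64")
}

fn main() {
    // Slot 109 is the last slot (index 9) of epoch 10, which starts at slot 100.
    assert_eq!(slot_index(109), 9);
    assert_eq!(epoch_index(109), 10);
    assert_eq!(epoch_start(10), 100);
}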
-pub const TEST_EPOCH_CONFIGURATION: EpochConfiguration = - EpochConfiguration { redundancy_factor: u32::MAX, attempts_number: 5 }; - -/// Build and returns test storage externalities pub fn new_test_ext(authorities_len: usize) -> sp_io::TestExternalities { new_test_ext_with_pairs(authorities_len, false).1 } @@ -98,7 +102,6 @@ pub fn new_test_ext_with_pairs( pallet_sassafras::GenesisConfig:: { authorities: authorities.clone(), - epoch_config: TEST_EPOCH_CONFIGURATION, _phantom: core::marker::PhantomData, } .assimilate_storage(&mut storage) @@ -118,183 +121,68 @@ pub fn new_test_ext_with_pairs( (pairs, ext) } -fn make_ticket_with_prover( - attempt: u32, - pair: &AuthorityPair, - prover: &RingProver, -) -> TicketEnvelope { - log::debug!("attempt: {}", attempt); - - // Values are referring to the next epoch - let epoch = Sassafras::epoch_index() + 1; - let randomness = Sassafras::next_randomness(); - - // Make a dummy ephemeral public that hopefully is unique within one test instance. - // In the tests, the values within the erased public are just used to compare - // ticket bodies, so it is not important to be a valid key. - let mut raw: [u8; 32] = [0; 32]; - raw.copy_from_slice(&pair.public().as_slice()[0..32]); - let erased_public = EphemeralPublic::unchecked_from(raw); - let revealed_public = erased_public; - - let ticket_id_input = vrf::ticket_id_input(&randomness, attempt, epoch); - - let body = TicketBody { attempt_idx: attempt, erased_public, revealed_public }; - let sign_data = vrf::ticket_body_sign_data(&body, ticket_id_input); - - let signature = pair.as_ref().ring_vrf_sign(&sign_data, &prover); - - // Ticket-id can be generated via vrf-preout. - // We don't care that much about its value here. - TicketEnvelope { body, signature } +fn slot_claim_vrf_signature(slot: Slot, pair: &AuthorityPair) -> VrfSignature { + let randomness = Sassafras::randomness_accumulator(); + let data = vrf::block_randomness_sign_data(&randomness, slot); + pair.as_ref().vrf_sign(&data) } -pub fn make_prover(pair: &AuthorityPair) -> RingProver { - let public = pair.public(); - let mut prover_idx = None; - - let ring_ctx = Sassafras::ring_context().unwrap(); - - let pks: Vec = Sassafras::authorities() - .iter() - .enumerate() - .map(|(idx, auth)| { - if public == *auth { - prover_idx = Some(idx); - } - *auth.as_ref() - }) - .collect(); - - log::debug!("Building prover. Ring size: {}", pks.len()); - let prover = ring_ctx.prover(&pks, prover_idx.unwrap()).unwrap(); - log::debug!("Done"); - - prover +/// Construct a `PreDigest` instance for the given parameters. +pub fn make_slot_claim( + authority_idx: AuthorityIndex, + slot: Slot, + pair: &AuthorityPair, +) -> SlotClaim { + let vrf_signature = slot_claim_vrf_signature(slot, pair); + SlotClaim { authority_idx, slot, vrf_signature } } -/// Construct `attempts` tickets envelopes for the next epoch. -/// -/// E.g. by passing an optional threshold -pub fn make_tickets(attempts: u32, pair: &AuthorityPair) -> Vec { - let prover = make_prover(pair); - (0..attempts) - .into_iter() - .map(|attempt| make_ticket_with_prover(attempt, pair, &prover)) - .collect() +/// Construct a `Digest` with a `SlotClaim` item. 
+pub fn make_digest(authority_idx: AuthorityIndex, slot: Slot, pair: &AuthorityPair) -> Digest { + let claim = make_slot_claim(authority_idx, slot, pair); + Digest { logs: vec![DigestItem::from(&claim)] } } -pub fn make_ticket_body(attempt_idx: u32, pair: &AuthorityPair) -> (TicketId, TicketBody) { - // Values are referring to the next epoch - let epoch = Sassafras::epoch_index() + 1; +/// Make a ticket which is claimable during the next epoch. +pub fn make_ticket_body(attempt: u8, pair: &AuthorityPair) -> TicketBody { let randomness = Sassafras::next_randomness(); - let ticket_id_input = vrf::ticket_id_input(&randomness, attempt_idx, epoch); + let ticket_id_input = vrf::ticket_id_input(&randomness, attempt); let ticket_id_pre_output = pair.as_inner_ref().vrf_pre_output(&ticket_id_input); let id = vrf::make_ticket_id(&ticket_id_input, &ticket_id_pre_output); - // Make a dummy ephemeral public that hopefully is unique within one test instance. - // In the tests, the values within the erased public are just used to compare - // ticket bodies, so it is not important to be a valid key. - let mut raw: [u8; 32] = [0; 32]; - raw[..16].copy_from_slice(&pair.public().as_slice()[0..16]); - raw[16..].copy_from_slice(&id.to_le_bytes()); - let erased_public = EphemeralPublic::unchecked_from(raw); - let revealed_public = erased_public; + // Make dummy extra data. + let mut extra = [pair.public().as_slice(), &id.0[..]].concat(); + let extra = BoundedVec::truncate_from(extra); - let body = TicketBody { attempt_idx, erased_public, revealed_public }; - - (id, body) + TicketBody { id, attempt, extra } } -pub fn make_dummy_ticket_body(attempt_idx: u32) -> (TicketId, TicketBody) { - let hash = sp_crypto_hashing::blake2_256(&attempt_idx.to_le_bytes()); - - let erased_public = EphemeralPublic::unchecked_from(hash); - let revealed_public = erased_public; - - let body = TicketBody { attempt_idx, erased_public, revealed_public }; - - let mut bytes = [0u8; 16]; - bytes.copy_from_slice(&hash[..16]); - let id = TicketId::from_le_bytes(bytes); - - (id, body) +pub fn make_dummy_ticket_body(attempt: u8) -> TicketBody { + let hash = sp_crypto_hashing::blake2_256(&[attempt]); + let id = TicketId(hash); + let hash = sp_crypto_hashing::blake2_256(&hash); + let extra = BoundedVec::truncate_from(hash.to_vec()); + TicketBody { id, attempt, extra } } pub fn make_ticket_bodies( - number: u32, + attempts: u8, pair: Option<&AuthorityPair>, -) -> Vec<(TicketId, TicketBody)> { - (0..number) + sort: bool, +) -> Vec { + let mut bodies: Vec<_> = (0..attempts) .into_iter() .map(|i| match pair { Some(pair) => make_ticket_body(i, pair), None => make_dummy_ticket_body(i), }) - .collect() -} - -/// Persist the given tickets in the unsorted segments buffer. -/// -/// This function skips all the checks performed by the `submit_tickets` extrinsic and -/// directly appends the tickets to the `UnsortedSegments` structure. -pub fn persist_next_epoch_tickets_as_segments(tickets: &[(TicketId, TicketBody)]) { - let mut ids = Vec::with_capacity(tickets.len()); - tickets.iter().for_each(|(id, body)| { - TicketsData::::set(id, Some(body.clone())); - ids.push(*id); - }); - let max_chunk_size = Sassafras::epoch_length() as usize; - ids.chunks(max_chunk_size).for_each(|chunk| { - Sassafras::append_tickets(BoundedVec::truncate_from(chunk.to_vec())); - }) -} - -/// Calls the [`persist_next_epoch_tickets_as_segments`] and then proceeds to the -/// sorting of the candidates. -/// -/// Only "winning" tickets are left. 
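For context, the path a claim takes into the header can be modeled with the standard `sp_runtime` digest types. This is a sketch under two assumptions: that the `DigestItem::from(&claim)` conversion used by `make_digest` produces a pre-runtime item carrying the SCALE-encoded claim, and that the engine id is `*b"SASS"`; the real constant lives in `sp-consensus-sassafras`.

use sp_runtime::{Digest, DigestItem};

// Assumed engine id; see `sp-consensus-sassafras` for the authoritative value.
const SASSAFRAS_ENGINE_ID: [u8; 4] = *b"SASS";

// Wrap an already SCALE-encoded claim into a one-item header digest.
fn claim_digest(encoded_claim: Vec<u8>) -> Digest {
    Digest { logs: vec![DigestItem::PreRuntime(SASSAFRAS_ENGINE_ID, encoded_claim)] }
}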
-pub fn persist_next_epoch_tickets(tickets: &[(TicketId, TicketBody)]) { - persist_next_epoch_tickets_as_segments(tickets); - // Force sorting of next epoch tickets (enactment) by explicitly querying the first of them. - let next_epoch = Sassafras::next_epoch(); - assert_eq!(TicketsMeta::::get().unsorted_tickets_count, tickets.len() as u32); - Sassafras::slot_ticket(next_epoch.start).unwrap(); - assert_eq!(TicketsMeta::::get().unsorted_tickets_count, 0); -} - -fn slot_claim_vrf_signature(slot: Slot, pair: &AuthorityPair) -> VrfSignature { - let mut epoch = Sassafras::epoch_index(); - let mut randomness = Sassafras::randomness(); - - // Check if epoch is going to change on initialization. - let epoch_start = Sassafras::current_epoch_start(); - let epoch_length = EPOCH_LENGTH.into(); - if epoch_start != 0_u64 && slot >= epoch_start + epoch_length { - epoch += slot.saturating_sub(epoch_start).saturating_div(epoch_length); - randomness = crate::NextRandomness::::get(); + .collect(); + if sort { + bodies.sort_unstable(); } - - let data = vrf::slot_claim_sign_data(&randomness, slot, epoch); - pair.as_ref().vrf_sign(&data) -} - -/// Construct a `PreDigest` instance for the given parameters. -pub fn make_slot_claim( - authority_idx: AuthorityIndex, - slot: Slot, - pair: &AuthorityPair, -) -> SlotClaim { - let vrf_signature = slot_claim_vrf_signature(slot, pair); - SlotClaim { authority_idx, slot, vrf_signature, ticket_claim: None } -} - -/// Construct a `Digest` with a `SlotClaim` item. -pub fn make_digest(authority_idx: AuthorityIndex, slot: Slot, pair: &AuthorityPair) -> Digest { - let claim = make_slot_claim(authority_idx, slot, pair); - Digest { logs: vec![DigestItem::from(&claim)] } + bodies } pub fn initialize_block( @@ -341,3 +229,59 @@ pub fn progress_to_block(number: u64, pair: &AuthorityPair) -> Option { } digest } + +fn make_ticket_with_prover( + attempt: u8, + pair: &AuthorityPair, + prover: &RingProver, +) -> (TicketId, TicketEnvelope) { + log::debug!("attempt: {}", attempt); + + // Values are referring to the next epoch + let randomness = Sassafras::next_randomness(); + + let ticket_id_input = vrf::ticket_id_input(&randomness, attempt); + let sign_data = vrf::ticket_id_sign_data(ticket_id_input.clone(), &[]); + let signature = pair.as_ref().ring_vrf_sign(&sign_data, &prover); + let pre_output = &signature.pre_outputs[0]; + + let ticket_id = vrf::make_ticket_id(&ticket_id_input, pre_output); + let envelope = TicketEnvelope { attempt, extra: Default::default(), signature }; + + (ticket_id, envelope) +} + +pub fn make_prover(pair: &AuthorityPair) -> RingProver { + let public = pair.public(); + let mut prover_idx = None; + + let ring_ctx = Sassafras::ring_context().unwrap(); + + let pks: Vec = Sassafras::authorities() + .iter() + .enumerate() + .map(|(idx, auth)| { + if public == *auth { + prover_idx = Some(idx); + } + *auth.as_ref() + }) + .collect(); + + log::debug!("Building prover. Ring size: {}", pks.len()); + let prover = ring_ctx.prover(&pks, prover_idx.unwrap()).unwrap(); + log::debug!("Done"); + + prover +} + +/// Construct `attempts` tickets envelopes for the next epoch. +/// +/// E.g. 
by passing an optional threshold +pub fn make_tickets(attempts: u8, pair: &AuthorityPair) -> Vec<(TicketId, TicketEnvelope)> { + let prover = make_prover(pair); + (0..attempts) + .into_iter() + .map(|attempt| make_ticket_with_prover(attempt, pair, &prover)) + .collect() +} diff --git a/substrate/frame/sassafras/src/session.rs b/substrate/frame/sassafras/src/session.rs new file mode 100644 index 000000000000..e36a0a4f7662 --- /dev/null +++ b/substrate/frame/sassafras/src/session.rs @@ -0,0 +1,119 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Implementation for traits required by Session pallet. + +use super::*; +use frame_support::traits::{EstimateNextSessionRotation, Hooks, OneSessionHandler}; +use pallet_session::ShouldEndSession; +use sp_runtime::{ + traits::{SaturatedConversion, Saturating}, + Permill, +}; + +impl ShouldEndSession> for Pallet { + fn should_end_session(now: BlockNumberFor) -> bool { + // It might be (and it is in current implementation) that session module is calling + // `should_end_session` from it's own `on_initialize` handler, in which case it's + // possible that Sassafras's own `on_initialize` has not run yet, so let's ensure that we + // have initialized the pallet and updated the current slot. + Self::on_initialize(now); + Self::should_end_epoch(now) + } +} + +impl OneSessionHandler for Pallet { + type Key = AuthorityId; + + fn on_genesis_session<'a, I: 'a>(validators: I) + where + I: Iterator, + { + let authorities: Vec<_> = validators.map(|(_, k)| k).collect(); + Self::genesis_authorities_initialize(&authorities); + } + + fn on_new_session<'a, I: 'a>(_changed: bool, validators: I, queued_validators: I) + where + I: Iterator, + { + let authorities = validators.map(|(_account, k)| k).collect(); + let bounded_authorities = WeakBoundedVec::<_, T::MaxAuthorities>::force_from( + authorities, + Some( + "Warning: The session has more validators than expected. \ + A runtime configuration adjustment may be needed.", + ), + ); + + let next_authorities = queued_validators.map(|(_account, k)| k).collect(); + let next_bounded_authorities = WeakBoundedVec::<_, T::MaxAuthorities>::force_from( + next_authorities, + Some( + "Warning: The session has more queued validators than expected. 
\ + A runtime configuration adjustment may be needed.", + ), + ); + + Self::enact_epoch_change(bounded_authorities, next_bounded_authorities) + } + + fn on_disabled(i: u32) { + Self::deposit_consensus(ConsensusLog::OnDisabled(i)) + } +} + +impl<T: Config> EstimateNextSessionRotation<BlockNumberFor<T>> for Pallet<T> { + fn average_session_length() -> BlockNumberFor<T> { + T::EpochDuration::get().saturated_into() + } + + fn estimate_current_session_progress(_now: BlockNumberFor<T>) -> (Option<Permill>, Weight) { + let elapsed = CurrentSlot::<T>::get().saturating_sub(Self::current_epoch_start()) + 1; + let progress = Permill::from_rational(*elapsed, T::EpochDuration::get() as u64); + + // TODO @davxy: Read: Current Slot, Epoch Index, Genesis Slot + (Some(progress), T::DbWeight::get().reads(3)) + } + + /// Return the _best guess_ block number at which the next epoch change is predicted to happen. + /// + /// Returns None if the prediction is in the past; this implies an internal error and should + /// not happen under normal circumstances. + /// + /// In other words, this is only accurate if no slots are missed. Given missed slots, the slot + /// number will grow while the block number will not. Hence, the result can be interpreted as an + /// upper bound. + // + // ## IMPORTANT NOTE + // + // This implementation is linked to how [`should_session_change`] is working. This might need + // to be updated accordingly, if the underlying mechanics of slot and epochs change. + fn estimate_next_session_rotation( + now: BlockNumberFor<T>, + ) -> (Option<BlockNumberFor<T>>, Weight) { + let next_slot = Self::current_epoch_start().saturating_add(T::EpochDuration::get()); + let upper_bound = next_slot.checked_sub(*CurrentSlot::<T>::get()).map(|slots_remaining| { + // This is a best effort guess. Drifts in the slot/block ratio will cause errors here. + let blocks_remaining: BlockNumberFor<T> = slots_remaining.saturated_into(); + now.saturating_add(blocks_remaining) + }); + + // TODO @davxy: Read: Current Slot, Epoch Index, Genesis Slot + (upper_bound, T::DbWeight::get().reads(3)) + } +} diff --git a/substrate/frame/sassafras/src/tests.rs b/substrate/frame/sassafras/src/tests.rs index ec3425cce7bf..68c981768414 100644 --- a/substrate/frame/sassafras/src/tests.rs +++ b/substrate/frame/sassafras/src/tests.rs @@ -21,20 +21,123 @@ use crate::*; use mock::*; use sp_consensus_sassafras::Slot; +use sp_runtime::DispatchError; -fn h2b<const N: usize>(hex: &str) -> [u8; N] { - array_bytes::hex2array_unchecked(hex) +const TICKETS_FILE: &str = "src/data/tickets.bin"; + +const GENESIS_SLOT: u64 = 100; + +fn h2b(hex: &str) -> Vec<u8> { + array_bytes::hex2bytes_unchecked(hex) +} + +fn b2h(bytes: &[u8]) -> String { + array_bytes::bytes2hex("", bytes) } -fn b2h<const N: usize>(bytes: [u8; N]) -> String { - array_bytes::bytes2hex("", &bytes) +macro_rules! prefix_eq { + ($a:expr, $b:expr) => {{ + let len = $a.len().min($b.len()); + if &$a[..len] != &$b[..len] { + panic!("left: {}, right: {}", b2h(&$a[..len]), b2h(&$b[..len])); + } + }}; } #[test] -fn genesis_values_assumptions_check() { +fn assumptions_check() { + let mut tickets = make_ticket_bodies(100, None, false); + + // Check that the returned tickets are not sorted to start with.
+ assert!(tickets.windows(2).any(|w| w[0] > w[1])); + new_test_ext(3).execute_with(|| { assert_eq!(Sassafras::authorities().len(), 3); - assert_eq!(Sassafras::config(), TEST_EPOCH_CONFIGURATION); + + // Check that entries are stored sorted (bigger first) + tickets + .iter() + .for_each(|t| TicketsAccumulator::::insert(TicketKey::from(t.id), t)); + assert_eq!(TicketsAccumulator::::count(), 100); + tickets.sort_unstable_by_key(|t| TicketKey::from(t.id)); + let accumulator: Vec<_> = TicketsAccumulator::::iter_values().collect(); + assert_eq!(tickets, accumulator); + + // Check accumulator clear + let _ = TicketsAccumulator::::clear(u32::MAX, None); + assert_eq!(TicketsAccumulator::::count(), 0); + }); +} + +#[test] +fn deposit_tickets_works() { + let mut tickets = make_ticket_bodies(15, None, false); + + new_test_ext(1).execute_with(|| { + // Try to append an unsorted chunk + let mut candidates = tickets[..5].to_vec(); + let err = Sassafras::deposit_tickets(candidates).unwrap_err(); + assert!(matches!(err, Error::TicketBadOrder)); + let _ = TicketsAccumulator::::clear(u32::MAX, None); + + // Correctly append the first sorted chunk + let mut candidates = tickets[..5].to_vec(); + candidates.sort_unstable(); + Sassafras::deposit_tickets(candidates).unwrap(); + assert_eq!(TicketsAccumulator::::count(), 5); + // Note: internally the tickets are stored in reverse order (bigger first) + let stored: Vec<_> = TicketsAccumulator::::iter_values().collect(); + let mut expected = tickets[..5].to_vec(); + expected.sort_unstable_by_key(|t| TicketKey::from(t.id)); + assert_eq!(expected, stored); + + // Try to append a chunk with a ticket already pushed + let mut candidates = tickets[4..10].to_vec(); + candidates.sort_unstable(); + let err = Sassafras::deposit_tickets(candidates).unwrap_err(); + assert!(matches!(err, Error::TicketDuplicate)); + // Restore last correct state + let _ = TicketsAccumulator::::clear(u32::MAX, None); + let mut candidates = tickets[..5].to_vec(); + candidates.sort_unstable(); + Sassafras::deposit_tickets(candidates).unwrap(); + + // Correctly push the second sorted chunk + let mut candidates = tickets[5..10].to_vec(); + candidates.sort_unstable(); + Sassafras::deposit_tickets(candidates).unwrap(); + assert_eq!(TicketsAccumulator::::count(), 10); + // Note: internally the tickets are stored in reverse order (bigger first) + let mut stored: Vec<_> = TicketsAccumulator::::iter_values().collect(); + let mut expected = tickets[..10].to_vec(); + expected.sort_unstable_by_key(|t| TicketKey::from(t.id)); + assert_eq!(expected, stored); + + // Now the buffer is full, pick only the tickets that will eventually fit. 
+ let mut candidates = tickets[10..].to_vec(); + candidates.sort_unstable(); + let mut eligible = Vec::new(); + for candidate in candidates { + if stored.is_empty() { + break + } + let bigger = stored.remove(0); + if bigger.id <= candidate.id { + break + } + eligible.push(candidate); + } + candidates = eligible; + + // Correctly push the last candidates chunk + Sassafras::deposit_tickets(candidates).unwrap(); + + assert_eq!(TicketsAccumulator::::count(), 10); + // Note: internally the tickets are stored in reverse order (bigger first) + let mut stored: Vec<_> = TicketsAccumulator::::iter_values().collect(); + tickets.sort_unstable_by_key(|t| TicketKey::from(t.id)); + + assert_eq!(tickets[5..], stored); }); } @@ -42,244 +145,80 @@ fn genesis_values_assumptions_check() { fn post_genesis_randomness_initialization() { let (pairs, mut ext) = new_test_ext_with_pairs(1, false); let pair = &pairs[0]; + let first_slot = (GENESIS_SLOT + 1).into(); ext.execute_with(|| { - assert_eq!(Sassafras::randomness(), [0; 32]); - assert_eq!(Sassafras::next_randomness(), [0; 32]); - assert_eq!(Sassafras::randomness_accumulator(), [0; 32]); + let genesis_randomness = Sassafras::randomness_buf(); + assert_eq!(genesis_randomness, RandomnessBuffer::default()); // Test the values with a zero genesis block hash - let _ = initialize_block(1, 123.into(), [0x00; 32].into(), pair); - assert_eq!(Sassafras::randomness(), [0; 32]); - println!("[DEBUG] {}", b2h(Sassafras::next_randomness())); - assert_eq!( - Sassafras::next_randomness(), - h2b("b9497550deeeb4adc134555930de61968a0558f8947041eb515b2f5fa68ffaf7") - ); - println!("[DEBUG] {}", b2h(Sassafras::randomness_accumulator())); - assert_eq!( - Sassafras::randomness_accumulator(), - h2b("febcc7fe9539fe17ed29f525831394edfb30b301755dc9bd91584a1f065faf87") - ); - let (id1, _) = make_ticket_bodies(1, Some(pair))[0]; + let _ = initialize_block(1, first_slot, [0x00; 32].into(), pair); - // Reset what is relevant - NextRandomness::::set([0; 32]); - RandomnessAccumulator::::set([0; 32]); + let randomness = Sassafras::randomness_buf(); + prefix_eq!(randomness[0], h2b("89eb0d6a")); + prefix_eq!(randomness[1], h2b("4e8c71d2")); + prefix_eq!(randomness[2], h2b("3a4c0005")); + prefix_eq!(randomness[3], h2b("0dd43c54")); - // Test the values with a non-zero genesis block hash - let _ = initialize_block(1, 123.into(), [0xff; 32].into(), pair); - - assert_eq!(Sassafras::randomness(), [0; 32]); - println!("[DEBUG] {}", b2h(Sassafras::next_randomness())); - assert_eq!( - Sassafras::next_randomness(), - h2b("51c1e3b3a73d2043b3cabae98ff27bdd4aad8967c21ecda7b9465afaa0e70f37") - ); - println!("[DEBUG] {}", b2h(Sassafras::randomness_accumulator())); - assert_eq!( - Sassafras::randomness_accumulator(), - h2b("466bf3007f2e17bffee0b3c42c90f33d654f5ff61eff28b0cc650825960abd52") - ); - let (id2, _) = make_ticket_bodies(1, Some(pair))[0]; - - // Ticket ids should be different when next epoch randomness is different - assert_ne!(id1, id2); + let ticket1 = make_ticket_body(0, pair); // Reset what is relevant - NextRandomness::::set([0; 32]); - RandomnessAccumulator::::set([0; 32]); + RandomnessBuf::::set(genesis_randomness); // Test the values with a non-zero genesis block hash - let _ = initialize_block(1, 321.into(), [0x00; 32].into(), pair); - - println!("[DEBUG] {}", b2h(Sassafras::next_randomness())); - assert_eq!( - Sassafras::next_randomness(), - h2b("d85d84a54f79453000eb62e8a17b30149bd728d3232bc2787a89d51dc9a36008") - ); - println!("[DEBUG] {}", b2h(Sassafras::randomness_accumulator())); - 
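The behavior `deposit_tickets_works` exercises, a bounded accumulator that keeps only the smallest ticket ids and evicts the current maximum on overflow, can be modeled with a `BTreeSet` (a sketch; the pallet works against a counted storage map, and the tests above bound it at 10 entries, the epoch duration):

use std::collections::BTreeSet;

fn deposit(acc: &mut BTreeSet<u32>, id: u32, cap: usize) {
    acc.insert(id);
    if acc.len() > cap {
        // Evict the biggest entry; the just-inserted id may be the one dropped.
        let max = *acc.iter().next_back().expect("non-empty after insert");
        acc.remove(&max);
    }
}

fn main() {
    let mut acc = BTreeSet::new();
    for id in [5, 1, 9, 3, 7] {
        deposit(&mut acc, id, 3);
    }
    // Only the three smallest ids survive.
    assert_eq!(acc.into_iter().collect::<Vec<_>>(), vec![1, 3, 5]);
}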
assert_eq!( - Sassafras::randomness_accumulator(), - h2b("8a035eed02b5b8642b1515ed19752df8df156627aea45c4ef6e3efa88be9a74d") - ); - let (id2, _) = make_ticket_bodies(1, Some(pair))[0]; - - // Ticket ids should be different when next epoch randomness is different - assert_ne!(id1, id2); - }); -} - -// Tests if the sorted tickets are assigned to each slot outside-in. -#[test] -fn slot_ticket_id_outside_in_fetch() { - let genesis_slot = Slot::from(100); - let tickets_count = 6; - - // Current epoch tickets - let curr_tickets: Vec = (0..tickets_count).map(|i| i as TicketId).collect(); - - // Next epoch tickets - let next_tickets: Vec = - (0..tickets_count - 1).map(|i| (i + tickets_count) as TicketId).collect(); - new_test_ext(0).execute_with(|| { - // Some corner cases - TicketsIds::::insert((0, 0_u32), 1_u128); - - // Cleanup - (0..3).for_each(|i| TicketsIds::::remove((0, i as u32))); - - curr_tickets - .iter() - .enumerate() - .for_each(|(i, id)| TicketsIds::::insert((0, i as u32), id)); - - next_tickets - .iter() - .enumerate() - .for_each(|(i, id)| TicketsIds::::insert((1, i as u32), id)); - - TicketsMeta::::set(TicketsMetadata { - tickets_count: [curr_tickets.len() as u32, next_tickets.len() as u32], - unsorted_tickets_count: 0, - }); + let _ = initialize_block(1, first_slot, [0xff; 32].into(), pair); - // Before importing the first block the pallet always return `None` - // This is a kind of special hardcoded case that should never happen in practice - // as the first thing the pallet does is to initialize the genesis slot. + let randomness = Sassafras::randomness_buf(); + prefix_eq!(randomness[0], h2b("e2021160")); + prefix_eq!(randomness[1], h2b("3b0c0905")); + prefix_eq!(randomness[2], h2b("632ac0d9")); + prefix_eq!(randomness[3], h2b("575088c3")); - assert_eq!(Sassafras::slot_ticket_id(0.into()), None); - assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 0), None); - assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 1), None); - assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 100), None); - - // Initialize genesis slot.. - GenesisSlot::::set(genesis_slot); - frame_system::Pallet::::set_block_number(One::one()); - - // Try to fetch a ticket for a slot before current epoch. - assert_eq!(Sassafras::slot_ticket_id(0.into()), None); - - // Current epoch tickets. 
- assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 0), Some(curr_tickets[1])); - assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 1), Some(curr_tickets[3])); - assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 2), Some(curr_tickets[5])); - assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 3), None); - assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 4), None); - assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 5), None); - assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 6), None); - assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 7), Some(curr_tickets[4])); - assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 8), Some(curr_tickets[2])); - assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 9), Some(curr_tickets[0])); - - // Next epoch tickets (note that only 5 tickets are available) - assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 10), Some(next_tickets[1])); - assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 11), Some(next_tickets[3])); - assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 12), None); - assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 13), None); - assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 14), None); - assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 15), None); - assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 16), None); - assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 17), Some(next_tickets[4])); - assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 18), Some(next_tickets[2])); - assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 19), Some(next_tickets[0])); - - // Try to fetch the tickets for slots beyond the next epoch. - assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 20), None); - assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 42), None); - }); -} - -// Different test for outside-in test with more focus on corner case correctness. 
-#[test] -fn slot_ticket_id_outside_in_fetch_corner_cases() { - new_test_ext(0).execute_with(|| { - frame_system::Pallet::::set_block_number(One::one()); + let ticket2 = make_ticket_body(0, pair); - let mut meta = TicketsMetadata { tickets_count: [0, 0], unsorted_tickets_count: 0 }; - let curr_epoch_idx = EpochIndex::::get(); - - let mut epoch_test = |epoch_idx| { - let tag = (epoch_idx & 1) as u8; - let epoch_start = Sassafras::epoch_start(epoch_idx); - - // cleanup - meta.tickets_count = [0, 0]; - TicketsMeta::::set(meta); - assert!((0..10).all(|i| Sassafras::slot_ticket_id((epoch_start + i).into()).is_none())); - - meta.tickets_count[tag as usize] += 1; - TicketsMeta::::set(meta); - TicketsIds::::insert((tag, 0_u32), 1_u128); - assert_eq!(Sassafras::slot_ticket_id((epoch_start + 9).into()), Some(1_u128)); - assert!((0..9).all(|i| Sassafras::slot_ticket_id((epoch_start + i).into()).is_none())); - - meta.tickets_count[tag as usize] += 1; - TicketsMeta::::set(meta); - TicketsIds::::insert((tag, 1_u32), 2_u128); - assert_eq!(Sassafras::slot_ticket_id((epoch_start + 0).into()), Some(2_u128)); - assert!((1..9).all(|i| Sassafras::slot_ticket_id((epoch_start + i).into()).is_none())); - - meta.tickets_count[tag as usize] += 2; - TicketsMeta::::set(meta); - TicketsIds::::insert((tag, 2_u32), 3_u128); - assert_eq!(Sassafras::slot_ticket_id((epoch_start + 8).into()), Some(3_u128)); - assert!((1..8).all(|i| Sassafras::slot_ticket_id((epoch_start + i).into()).is_none())); - }; - - // Even epoch - epoch_test(curr_epoch_idx); - epoch_test(curr_epoch_idx + 1); + // Ticket ids should be different when next epoch randomness is different + assert_ne!(ticket1.id, ticket2.id); }); } #[test] -fn on_first_block_after_genesis() { +fn on_first_block() { let (pairs, mut ext) = new_test_ext_with_pairs(4, false); + let start_slot = (GENESIS_SLOT + 1).into(); + let start_block = 1; ext.execute_with(|| { - let start_slot = Slot::from(100); - let start_block = 1; - - let digest = initialize_block(start_block, start_slot, Default::default(), &pairs[0]); - - let common_assertions = || { - assert_eq!(Sassafras::genesis_slot(), start_slot); + let common_assertions = |initialized| { assert_eq!(Sassafras::current_slot(), start_slot); - assert_eq!(Sassafras::epoch_index(), 0); - assert_eq!(Sassafras::current_epoch_start(), start_slot); - assert_eq!(Sassafras::current_slot_index(), 0); - assert_eq!(Sassafras::randomness(), [0; 32]); - println!("[DEBUG] {}", b2h(Sassafras::next_randomness())); - assert_eq!( - Sassafras::next_randomness(), - h2b("a49592ef190b96f3eb87bde4c8355e33df28c75006156e8c81998158de2ed49e") - ); + assert_eq!(Sassafras::current_slot_index(), 1); + assert_eq!(TemporaryData::::exists(), initialized); }; // Post-initialization status - assert!(ClaimTemporaryData::::exists()); - common_assertions(); - println!("[DEBUG] {}", b2h(Sassafras::randomness_accumulator())); - assert_eq!( - Sassafras::randomness_accumulator(), - h2b("f0d42f6b7c0d157ecbd788be44847b80a96c290c04b5dfa5d1d40c98aa0c04ed") - ); + assert_eq!(Sassafras::randomness_buf(), RandomnessBuffer::default()); - let header = finalize_block(start_block); + let digest = initialize_block(start_block, start_slot, Default::default(), &pairs[0]); - // Post-finalization status + common_assertions(true); + let post_init_randomness = Sassafras::randomness_buf(); + prefix_eq!(post_init_randomness[0], h2b("89eb0d6a")); + prefix_eq!(post_init_randomness[1], h2b("4e8c71d2")); + prefix_eq!(post_init_randomness[2], h2b("3a4c0005")); + prefix_eq!(post_init_randomness[3], 
h2b("0dd43c54")); - assert!(!ClaimTemporaryData::::exists()); - common_assertions(); - println!("[DEBUG] {}", b2h(Sassafras::randomness_accumulator())); - assert_eq!( - Sassafras::randomness_accumulator(), - h2b("9f2b9fd19a772c34d437dcd8b84a927e73a5cb43d3d1cd00093223d60d2b4843"), - ); + // // Post-finalization status + + let header = finalize_block(start_block); + + common_assertions(false); + let post_fini_randomness = Sassafras::randomness_buf(); + prefix_eq!(post_fini_randomness[0], h2b("334d1a4c")); + prefix_eq!(post_fini_randomness[1], post_init_randomness[1]); + prefix_eq!(post_fini_randomness[2], post_init_randomness[2]); + prefix_eq!(post_fini_randomness[3], post_init_randomness[3]); // Header data check @@ -289,9 +228,8 @@ fn on_first_block_after_genesis() { // Genesis epoch start deposits consensus let consensus_log = sp_consensus_sassafras::digests::ConsensusLog::NextEpochData( sp_consensus_sassafras::digests::NextEpochDescriptor { - authorities: Sassafras::next_authorities().into_inner(), randomness: Sassafras::next_randomness(), - config: None, + authorities: Sassafras::next_authorities().into_inner(), }, ); let consensus_digest = DigestItem::Consensus(SASSAFRAS_ENGINE_ID, consensus_log.encode()); @@ -302,7 +240,7 @@ fn on_first_block_after_genesis() { #[test] fn on_normal_block() { let (pairs, mut ext) = new_test_ext_with_pairs(4, false); - let start_slot = Slot::from(100); + let start_slot = (GENESIS_SLOT + 1).into(); let start_block = 1; let end_block = start_block + 1; @@ -310,46 +248,37 @@ fn on_normal_block() { initialize_block(start_block, start_slot, Default::default(), &pairs[0]); // We don't want to trigger an epoch change in this test. - let epoch_length = Sassafras::epoch_length() as u64; - assert!(epoch_length > end_block); + let epoch_duration = Sassafras::epoch_duration() as u64; + assert!(epoch_duration > end_block); // Progress to block 2 let digest = progress_to_block(end_block, &pairs[0]).unwrap(); - let common_assertions = || { - assert_eq!(Sassafras::genesis_slot(), start_slot); + let common_assertions = |initialized| { assert_eq!(Sassafras::current_slot(), start_slot + 1); - assert_eq!(Sassafras::epoch_index(), 0); - assert_eq!(Sassafras::current_epoch_start(), start_slot); - assert_eq!(Sassafras::current_slot_index(), 1); - assert_eq!(Sassafras::randomness(), [0; 32]); - println!("[DEBUG] {}", b2h(Sassafras::next_randomness())); - assert_eq!( - Sassafras::next_randomness(), - h2b("a49592ef190b96f3eb87bde4c8355e33df28c75006156e8c81998158de2ed49e") - ); + assert_eq!(Sassafras::current_slot_index(), 2); + assert_eq!(TemporaryData::::exists(), initialized); }; // Post-initialization status - assert!(ClaimTemporaryData::::exists()); - common_assertions(); - println!("[DEBUG] {}", b2h(Sassafras::randomness_accumulator())); - assert_eq!( - Sassafras::randomness_accumulator(), - h2b("9f2b9fd19a772c34d437dcd8b84a927e73a5cb43d3d1cd00093223d60d2b4843"), - ); + common_assertions(true); + let post_init_randomness = Sassafras::randomness_buf(); + prefix_eq!(post_init_randomness[0], h2b("334d1a4c")); + prefix_eq!(post_init_randomness[1], h2b("4e8c71d2")); + prefix_eq!(post_init_randomness[2], h2b("3a4c0005")); + prefix_eq!(post_init_randomness[3], h2b("0dd43c54")); let header = finalize_block(end_block); // Post-finalization status - assert!(!ClaimTemporaryData::::exists()); - common_assertions(); - assert_eq!( - Sassafras::randomness_accumulator(), - h2b("be9261adb9686dfd3f23f8a276b7acc7f4beb3137070beb64c282ac22d84cbf0"), - ); + common_assertions(false); + let 
post_fini_randomness = Sassafras::randomness_buf(); + prefix_eq!(post_fini_randomness[0], h2b("277138ab")); + prefix_eq!(post_fini_randomness[1], post_init_randomness[1]); + prefix_eq!(post_fini_randomness[2], post_init_randomness[2]); + prefix_eq!(post_fini_randomness[3], post_init_randomness[3]); // Header data check @@ -359,65 +288,35 @@ fn on_normal_block() { } #[test] -fn produce_epoch_change_digest_no_config() { +fn produce_epoch_change_digest() { let (pairs, mut ext) = new_test_ext_with_pairs(4, false); + let start_slot = (GENESIS_SLOT + 1).into(); + let start_block = 1; ext.execute_with(|| { - let start_slot = Slot::from(100); - let start_block = 1; - initialize_block(start_block, start_slot, Default::default(), &pairs[0]); // We want to trigger an epoch change in this test. - let epoch_length = Sassafras::epoch_length() as u64; - let end_block = start_block + epoch_length; - - let digest = progress_to_block(end_block, &pairs[0]).unwrap(); + let epoch_duration = Sassafras::epoch_duration() as u64; + let end_block = start_block + epoch_duration - 1; - let common_assertions = || { - assert_eq!(Sassafras::genesis_slot(), start_slot); - assert_eq!(Sassafras::current_slot(), start_slot + epoch_length); - assert_eq!(Sassafras::epoch_index(), 1); - assert_eq!(Sassafras::current_epoch_start(), start_slot + epoch_length); + let common_assertions = |initialized| { + assert_eq!(Sassafras::current_slot(), GENESIS_SLOT + epoch_duration); assert_eq!(Sassafras::current_slot_index(), 0); - println!("[DEBUG] {}", b2h(Sassafras::randomness())); - assert_eq!( - Sassafras::randomness(), - h2b("a49592ef190b96f3eb87bde4c8355e33df28c75006156e8c81998158de2ed49e") - ); + assert_eq!(TemporaryData::::exists(), initialized); }; + let digest = progress_to_block(end_block, &pairs[0]).unwrap(); + // Post-initialization status - assert!(ClaimTemporaryData::::exists()); - common_assertions(); - println!("[DEBUG] {}", b2h(Sassafras::next_randomness())); - assert_eq!( - Sassafras::next_randomness(), - h2b("d3a18b857af6ecc7b52f047107e684fff0058b5722d540a296d727e37eaa55b3"), - ); - println!("[DEBUG] {}", b2h(Sassafras::randomness_accumulator())); - assert_eq!( - Sassafras::randomness_accumulator(), - h2b("bf0f1228f4ff953c8c1bda2cceb668bf86ea05d7ae93e26d021c9690995d5279"), - ); + common_assertions(true); let header = finalize_block(end_block); // Post-finalization status - assert!(!ClaimTemporaryData::::exists()); - common_assertions(); - println!("[DEBUG] {}", b2h(Sassafras::next_randomness())); - assert_eq!( - Sassafras::next_randomness(), - h2b("d3a18b857af6ecc7b52f047107e684fff0058b5722d540a296d727e37eaa55b3"), - ); - println!("[DEBUG] {}", b2h(Sassafras::randomness_accumulator())); - assert_eq!( - Sassafras::randomness_accumulator(), - h2b("8a1ceb346036c386d021264b10912c8b656799668004c4a487222462b394cd89"), - ); + common_assertions(false); // Header data check @@ -428,7 +327,6 @@ fn produce_epoch_change_digest_no_config() { sp_consensus_sassafras::digests::NextEpochDescriptor { authorities: Sassafras::next_authorities().into_inner(), randomness: Sassafras::next_randomness(), - config: None, }, ); let consensus_digest = DigestItem::Consensus(SASSAFRAS_ENGINE_ID, consensus_log.encode()); @@ -436,366 +334,307 @@ fn produce_epoch_change_digest_no_config() { }) } -#[test] -fn produce_epoch_change_digest_with_config() { - let (pairs, mut ext) = new_test_ext_with_pairs(4, false); - - ext.execute_with(|| { - let start_slot = Slot::from(100); - let start_block = 1; - - initialize_block(start_block, start_slot, 
Default::default(), &pairs[0]); +// Tests if the sorted tickets are assigned to each slot outside-in. +fn slot_ticket_id_outside_in_fetch(jit_accumulator_drain: bool) { + let genesis_slot = Slot::from(GENESIS_SLOT); + let curr_count = 8; + let next_count = 6; + let tickets = make_ticket_bodies(curr_count + next_count, None, false); + + // Current epoch tickets (incrementally sorted as expected by the protocol) + let mut curr_tickets = tickets[..curr_count as usize].to_vec(); + curr_tickets.sort_unstable(); + // Next epoch tickets (incrementally sorted as expected by the protocol) + let mut next_tickets = tickets[curr_count as usize..].to_vec(); + next_tickets.sort_unstable(); - let config = EpochConfiguration { redundancy_factor: 1, attempts_number: 123 }; - Sassafras::plan_config_change(RuntimeOrigin::root(), config).unwrap(); + new_test_ext(0).execute_with(|| { + // Store current epoch tickets in place. + curr_tickets + .iter() + .enumerate() + .for_each(|(i, t)| Tickets::::insert((0, i as u32), t)); + + if jit_accumulator_drain { + // Store next epoch tickets in the accumulator (to test the JIT sorting logic as well) + next_tickets + .iter() + .for_each(|t| TicketsAccumulator::::insert(TicketKey::from(t.id), t)); + TicketsCount::::set([curr_count as u32, 0]); + } else { + // Directly store in the tickets buffer + next_tickets + .iter() + .enumerate() + .for_each(|(i, t)| Tickets::::insert((1, i as u32), t)); + TicketsCount::::set([curr_count as u32, next_count as u32]); + } + + CurrentSlot::::set(genesis_slot); + + // Before importing the first block (on frame System pallet) `slot_ticket` always + // returns `None`. This is a kind of special hardcoded case that should never happen + // in practice as the first thing the pallet does is to initialize the genesis slot. + + assert_eq!(Sassafras::slot_ticket(0.into()), None); + assert_eq!(Sassafras::slot_ticket(genesis_slot + 0), None); + assert_eq!(Sassafras::slot_ticket(genesis_slot + 1), None); + assert_eq!(Sassafras::slot_ticket(genesis_slot + 100), None); + + // Manually set block number to simulate that frame system initialize has been + // called for the first block. + frame_system::Pallet::::set_block_number(One::one()); - // We want to trigger an epoch change in this test. - let epoch_length = Sassafras::epoch_length() as u64; - let end_block = start_block + epoch_length; + // Try to fetch a ticket for a slot before current epoch. + assert_eq!(Sassafras::slot_ticket(0.into()), None); - let digest = progress_to_block(end_block, &pairs[0]).unwrap(); + // Current epoch tickets. + assert_eq!(Sassafras::slot_ticket(genesis_slot + 0).unwrap(), curr_tickets[0]); + assert_eq!(Sassafras::slot_ticket(genesis_slot + 1).unwrap(), curr_tickets[7]); + assert_eq!(Sassafras::slot_ticket(genesis_slot + 2).unwrap(), curr_tickets[1]); + assert_eq!(Sassafras::slot_ticket(genesis_slot + 3).unwrap(), curr_tickets[6]); + assert_eq!(Sassafras::slot_ticket(genesis_slot + 4).unwrap(), curr_tickets[2]); + assert_eq!(Sassafras::slot_ticket(genesis_slot + 5).unwrap(), curr_tickets[5]); + assert_eq!(Sassafras::slot_ticket(genesis_slot + 6).unwrap(), curr_tickets[3]); + assert_eq!(Sassafras::slot_ticket(genesis_slot + 7).unwrap(), curr_tickets[4]); + assert!(Sassafras::slot_ticket(genesis_slot + 8).is_none()); + assert!(Sassafras::slot_ticket(genesis_slot + 9).is_none()); + + // Next epoch tickets. 
+ assert_eq!(Sassafras::slot_ticket(genesis_slot + 10).unwrap(), next_tickets[0]); + if jit_accumulator_drain { + // After first fetch tickets are moved to the buffer + assert_eq!(TicketsCount::::get()[1], 6); + } + assert_eq!(Sassafras::slot_ticket(genesis_slot + 11).unwrap(), next_tickets[5]); + assert_eq!(Sassafras::slot_ticket(genesis_slot + 12).unwrap(), next_tickets[1]); + assert_eq!(Sassafras::slot_ticket(genesis_slot + 13).unwrap(), next_tickets[4]); + assert_eq!(Sassafras::slot_ticket(genesis_slot + 14).unwrap(), next_tickets[2]); + assert_eq!(Sassafras::slot_ticket(genesis_slot + 15).unwrap(), next_tickets[3]); + assert!(Sassafras::slot_ticket(genesis_slot + 16).is_none()); + assert!(Sassafras::slot_ticket(genesis_slot + 17).is_none()); + assert!(Sassafras::slot_ticket(genesis_slot + 18).is_none()); + assert!(Sassafras::slot_ticket(genesis_slot + 19).is_none()); - let header = finalize_block(end_block); + // Try to fetch the tickets for slots beyond the next epoch. + assert!(Sassafras::slot_ticket(genesis_slot + 20).is_none()); + assert!(Sassafras::slot_ticket(genesis_slot + 42).is_none()); + }); +} - // Header data check. - // Skip pallet status checks that were already performed by other tests. +#[test] +fn slot_ticket_id_outside_in_fetch_jit_accumulator_drain() { + slot_ticket_id_outside_in_fetch(true); +} - assert_eq!(header.digest.logs.len(), 2); - assert_eq!(header.digest.logs[0], digest.logs[0]); - // Deposits consensus log on epoch change - let consensus_log = sp_consensus_sassafras::digests::ConsensusLog::NextEpochData( - sp_consensus_sassafras::digests::NextEpochDescriptor { - authorities: Sassafras::next_authorities().into_inner(), - randomness: Sassafras::next_randomness(), - config: Some(config), - }, - ); - let consensus_digest = DigestItem::Consensus(SASSAFRAS_ENGINE_ID, consensus_log.encode()); - assert_eq!(header.digest.logs[1], consensus_digest) - }) +#[test] +fn slot_ticket_id_outside_in_fetch_no_jit_accumulator_drain() { + slot_ticket_id_outside_in_fetch(false); } #[test] -fn segments_incremental_sort_works() { - let (pairs, mut ext) = new_test_ext_with_pairs(1, false); - let pair = &pairs[0]; - let segments_count = 14; - let start_slot = Slot::from(100); +fn slot_and_epoch_helpers_works() { let start_block = 1; + let start_slot = (GENESIS_SLOT + 1).into(); + + let (pairs, mut ext) = new_test_ext_with_pairs(1, false); ext.execute_with(|| { - let epoch_length = Sassafras::epoch_length() as u64; - // -3 just to have the last segment not full... 
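The JIT drain exercised above relies on `consume_tickets_accumulator` writing entries back-to-front: the accumulator iterates from the biggest id down (its keys invert the natural id order), so filling the target buffer from the tail leaves it sorted ascending. A stand-alone model using reversed `BTreeMap` keys in place of `TicketKey`:

use std::{cmp::Reverse, collections::BTreeMap};

fn consume_accumulator(acc: &mut BTreeMap<Reverse<u32>, u32>) -> Vec<u32> {
    let mut out = vec![0; acc.len()];
    let mut idx = acc.len();
    // `pop_first` yields the biggest ticket id first because keys are reversed.
    while let Some((_key, ticket)) = acc.pop_first() {
        idx -= 1;
        out[idx] = ticket;
    }
    out
}

fn main() {
    let mut acc = BTreeMap::new();
    for id in [7, 1, 5] {
        acc.insert(Reverse(id), id);
    }
    // Drained bigger-first, written back-to-front: ascending result.
    assert_eq!(consume_accumulator(&mut acc), vec![1, 5, 7]);
}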
- let submitted_tickets_count = segments_count * SEGMENT_MAX_SIZE - 3; + let epoch_duration = Sassafras::epoch_duration() as u64; + assert_eq!(epoch_duration, 10); + + let check = |slot, slot_idx, epoch_slot, epoch_idx| { + assert_eq!(Sassafras::current_slot(), Slot::from(slot)); + assert_eq!(Sassafras::current_slot_index(), slot_idx); + assert_eq!(Sassafras::current_epoch_start(), Slot::from(epoch_slot)); + assert_eq!(Sassafras::current_epoch_index(), epoch_idx); + }; - initialize_block(start_block, start_slot, Default::default(), pair); + // Post-genesis state (before the first initialization of epoch N) + check(0, 0, 0, 0); - // Manually populate the segments to skip the threshold check - let mut tickets = make_ticket_bodies(submitted_tickets_count, None); - persist_next_epoch_tickets_as_segments(&tickets); - - // Proceed to half of the epoch (sortition should not have been started yet) - let half_epoch_block = start_block + epoch_length / 2; - progress_to_block(half_epoch_block, pair); - - let mut unsorted_tickets_count = submitted_tickets_count; - - // Check that next epoch tickets sortition is not started yet - let meta = TicketsMeta::<Test>::get(); - assert_eq!(meta.unsorted_tickets_count, unsorted_tickets_count); - assert_eq!(meta.tickets_count, [0, 0]); - - // Follow the incremental sortition block by block - - progress_to_block(half_epoch_block + 1, pair); - unsorted_tickets_count -= 3 * SEGMENT_MAX_SIZE - 3; - let meta = TicketsMeta::<Test>::get(); - assert_eq!(meta.unsorted_tickets_count, unsorted_tickets_count,); - assert_eq!(meta.tickets_count, [0, 0]); - - progress_to_block(half_epoch_block + 2, pair); - unsorted_tickets_count -= 3 * SEGMENT_MAX_SIZE; - let meta = TicketsMeta::<Test>::get(); - assert_eq!(meta.unsorted_tickets_count, unsorted_tickets_count); - assert_eq!(meta.tickets_count, [0, 0]); - - progress_to_block(half_epoch_block + 3, pair); - unsorted_tickets_count -= 3 * SEGMENT_MAX_SIZE; - let meta = TicketsMeta::<Test>::get(); - assert_eq!(meta.unsorted_tickets_count, unsorted_tickets_count); - assert_eq!(meta.tickets_count, [0, 0]); - - progress_to_block(half_epoch_block + 4, pair); - unsorted_tickets_count -= 3 * SEGMENT_MAX_SIZE; - let meta = TicketsMeta::<Test>::get(); - assert_eq!(meta.unsorted_tickets_count, unsorted_tickets_count); - assert_eq!(meta.tickets_count, [0, 0]); - - let header = finalize_block(half_epoch_block + 4); - - // Sort should be finished now. - // Check that the next epoch tickets count has the correct value. - // Bigger ticket ids were discarded during sortition.
- unsorted_tickets_count -= 2 * SEGMENT_MAX_SIZE; - assert_eq!(unsorted_tickets_count, 0); - let meta = TicketsMeta::<Test>::get(); - assert_eq!(meta.unsorted_tickets_count, unsorted_tickets_count); - assert_eq!(meta.tickets_count, [0, epoch_length as u32]); - // Epoch change log should have been pushed as well - assert_eq!(header.digest.logs.len(), 1); - // No tickets for the current epoch - assert_eq!(TicketsIds::<Test>::get((0, 0)), None); - - // Check persistence of "winning" tickets - tickets.sort_by_key(|t| t.0); - (0..epoch_length as usize).into_iter().for_each(|i| { - let id = TicketsIds::<Test>::get((1, i as u32)).unwrap(); - let body = TicketsData::<Test>::get(id).unwrap(); - assert_eq!((id, body), tickets[i]); - }); - // Check removal of "losing" tickets - (epoch_length as usize..tickets.len()).into_iter().for_each(|i| { - assert!(TicketsIds::<Test>::get((1, i as u32)).is_none()); - assert!(TicketsData::<Test>::get(tickets[i].0).is_none()); - }); + // Epoch N first block + initialize_block(start_block, start_slot, Default::default(), &pairs[0]); + check(101, 1, 100, 10); - // The next block will be the first produced on the new epoch. - // At this point the tickets are found already sorted and ready to be used. - let slot = Sassafras::current_slot() + 1; - let number = System::block_number() + 1; - initialize_block(number, slot, header.hash(), pair); - let header = finalize_block(number); - // Epoch change digest is also produced - assert_eq!(header.digest.logs.len(), 2); - }); -} + // Progress to epoch N last block + let end_block = start_block + epoch_duration - 2; + progress_to_block(end_block, &pairs[0]).unwrap(); + check(109, 9, 100, 10); -#[test] -fn tickets_fetch_works_after_epoch_change() { - let (pairs, mut ext) = new_test_ext_with_pairs(4, false); - let pair = &pairs[0]; - let start_slot = Slot::from(100); - let start_block = 1; - let submitted_tickets = 300; + // Progress to epoch N+1 first block + progress_to_block(end_block + 1, &pairs[0]).unwrap(); + check(110, 0, 110, 11); - ext.execute_with(|| { - initialize_block(start_block, start_slot, Default::default(), pair); + // Progress to epoch N+1 last block + let end_block = end_block + epoch_duration; + progress_to_block(end_block, &pairs[0]).unwrap(); + check(119, 9, 110, 11); - // We don't want to trigger an epoch change in this test. - let epoch_length = Sassafras::epoch_length() as u64; - assert!(epoch_length > 2); - progress_to_block(2, &pairs[0]).unwrap(); - - // Persist tickets as three different segments. - let tickets = make_ticket_bodies(submitted_tickets, None); - persist_next_epoch_tickets_as_segments(&tickets); - - let meta = TicketsMeta::<Test>::get(); - assert_eq!(meta.unsorted_tickets_count, submitted_tickets); - assert_eq!(meta.tickets_count, [0, 0]); - - // Progress up to the last epoch slot (do not enact epoch change) - progress_to_block(epoch_length, &pairs[0]).unwrap(); - - // At this point next epoch tickets should have been sorted and ready to be used - let meta = TicketsMeta::<Test>::get(); - assert_eq!(meta.unsorted_tickets_count, 0); - assert_eq!(meta.tickets_count, [0, epoch_length as u32]); - - // Compute and sort the tickets ids (aka tickets scores) - let mut expected_ids: Vec<_> = tickets.into_iter().map(|(id, _)| id).collect(); - expected_ids.sort(); - expected_ids.truncate(epoch_length as usize); - - // Check if we can fetch next epoch tickets ids (outside-in).
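The `check(slot, slot_idx, epoch_slot, epoch_idx)` expectations in `slot_and_epoch_helpers_works` above reduce to plain integer arithmetic. A hedged sketch, assuming (as in this mock, where `GENESIS_SLOT = 100` and `epoch_duration = 10`) that epochs are aligned to multiples of the epoch duration:

```rust
// Sketch of the relations asserted via `check` above.
fn epoch_math(slot: u64, epoch_duration: u64) -> (u64, u64, u64) {
    let epoch_idx = slot / epoch_duration; // e.g. 101 / 10 = 10
    let epoch_start = epoch_idx * epoch_duration; // e.g. 10 * 10 = 100
    let slot_idx = slot - epoch_start; // e.g. 101 - 100 = 1
    (epoch_idx, epoch_start, slot_idx)
}

#[test]
fn epoch_math_matches_check_calls() {
    assert_eq!(epoch_math(101, 10), (10, 100, 1)); // epoch N first block
    assert_eq!(epoch_math(109, 10), (10, 100, 9)); // epoch N last block
    assert_eq!(epoch_math(110, 10), (11, 110, 0)); // epoch N+1 first block
}
```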
- let slot = Sassafras::current_slot(); - assert_eq!(Sassafras::slot_ticket_id(slot + 1).unwrap(), expected_ids[1]); - assert_eq!(Sassafras::slot_ticket_id(slot + 2).unwrap(), expected_ids[3]); - assert_eq!(Sassafras::slot_ticket_id(slot + 3).unwrap(), expected_ids[5]); - assert_eq!(Sassafras::slot_ticket_id(slot + 4).unwrap(), expected_ids[7]); - assert_eq!(Sassafras::slot_ticket_id(slot + 7).unwrap(), expected_ids[6]); - assert_eq!(Sassafras::slot_ticket_id(slot + 8).unwrap(), expected_ids[4]); - assert_eq!(Sassafras::slot_ticket_id(slot + 9).unwrap(), expected_ids[2]); - assert_eq!(Sassafras::slot_ticket_id(slot + 10).unwrap(), expected_ids[0]); - assert!(Sassafras::slot_ticket_id(slot + 11).is_none()); - - // Enact epoch change by progressing one more block - - progress_to_block(epoch_length + 1, &pairs[0]).unwrap(); - - let meta = TicketsMeta::<Test>::get(); - assert_eq!(meta.unsorted_tickets_count, 0); - assert_eq!(meta.tickets_count, [0, 10]); - - // Check if we can fetch current epoch tickets ids (outside-in). - let slot = Sassafras::current_slot(); - assert_eq!(Sassafras::slot_ticket_id(slot).unwrap(), expected_ids[1]); - assert_eq!(Sassafras::slot_ticket_id(slot + 1).unwrap(), expected_ids[3]); - assert_eq!(Sassafras::slot_ticket_id(slot + 2).unwrap(), expected_ids[5]); - assert_eq!(Sassafras::slot_ticket_id(slot + 3).unwrap(), expected_ids[7]); - assert_eq!(Sassafras::slot_ticket_id(slot + 6).unwrap(), expected_ids[6]); - assert_eq!(Sassafras::slot_ticket_id(slot + 7).unwrap(), expected_ids[4]); - assert_eq!(Sassafras::slot_ticket_id(slot + 8).unwrap(), expected_ids[2]); - assert_eq!(Sassafras::slot_ticket_id(slot + 9).unwrap(), expected_ids[0]); - assert!(Sassafras::slot_ticket_id(slot + 10).is_none()); - - // Enact another epoch change, for which we don't have any ticket - progress_to_block(2 * epoch_length + 1, &pairs[0]).unwrap(); - let meta = TicketsMeta::<Test>::get(); - assert_eq!(meta.unsorted_tickets_count, 0); - assert_eq!(meta.tickets_count, [0, 0]); - }); + // Progress to epoch N+2 first block + progress_to_block(end_block + 1, &pairs[0]).unwrap(); + check(120, 0, 120, 12); + }) } #[test] -fn block_allowed_to_skip_epochs() { - let (pairs, mut ext) = new_test_ext_with_pairs(4, false); - let pair = &pairs[0]; - let start_slot = Slot::from(100); +fn tickets_accumulator_works() { let start_block = 1; + let start_slot = (GENESIS_SLOT + 1).into(); + let e1_count = 6; + let e2_count = 10; + let tickets = make_ticket_bodies(e1_count + e2_count, None, false); + let e1_tickets = tickets[..e1_count as usize].to_vec(); + let e2_tickets = tickets[e1_count as usize..].to_vec(); + + let (pairs, mut ext) = new_test_ext_with_pairs(1, false); ext.execute_with(|| { - let epoch_length = Sassafras::epoch_length() as u64; + let epoch_duration = Sassafras::epoch_duration() as u64; - initialize_block(start_block, start_slot, Default::default(), pair); + let epoch_idx = Sassafras::current_epoch_index(); + let epoch_tag = (epoch_idx % 2) as u8; + let next_epoch_tag = epoch_tag ^ 1; - let tickets = make_ticket_bodies(3, Some(pair)); - persist_next_epoch_tickets(&tickets); + initialize_block(start_block, start_slot, Default::default(), &pairs[0]); - let next_random = Sassafras::next_randomness(); + // Append some tickets to the accumulator + e1_tickets + .iter() + .for_each(|t| TicketsAccumulator::<Test>::insert(TicketKey::from(t.id), t)); - // We want to skip 3 epochs in this test.
- let offset = 4 * epoch_length; - go_to_block(start_block + offset, start_slot + offset, &pairs[0]); + // Progress to epoch's last block + let end_block = start_block + epoch_duration - 2; + progress_to_block(end_block, &pairs[0]).unwrap(); - // Post-initialization status + let tickets_count = TicketsCount::<Test>::get(); + assert_eq!(tickets_count[epoch_tag as usize], 0); + assert!( + 0 < tickets_count[next_epoch_tag as usize] && + tickets_count[next_epoch_tag as usize] < e1_count as u32 + ); - assert!(ClaimTemporaryData::<Test>::exists()); - assert_eq!(Sassafras::genesis_slot(), start_slot); - assert_eq!(Sassafras::current_slot(), start_slot + offset); - assert_eq!(Sassafras::epoch_index(), 4); - assert_eq!(Sassafras::current_epoch_start(), start_slot + offset); - assert_eq!(Sassafras::current_slot_index(), 0); + finalize_block(end_block); - // Tickets data has been discarded - assert_eq!(TicketsMeta::<Test>::get(), TicketsMetadata::default()); - assert!(tickets.iter().all(|(id, _)| TicketsData::<Test>::get(id).is_none())); - assert_eq!(SortedCandidates::<Test>::get().len(), 0); + let tickets_count = TicketsCount::<Test>::get(); + assert_eq!(tickets_count[epoch_tag as usize], 0); + assert_eq!(tickets_count[next_epoch_tag as usize], e1_count as u32); - // We used the last known next epoch randomness as a fallback - assert_eq!(next_random, Sassafras::randomness()); - }); -} + // Start new epoch -#[test] -fn obsolete_tickets_are_removed_on_epoch_change() { - let (pairs, mut ext) = new_test_ext_with_pairs(4, false); - let pair = &pairs[0]; - let start_slot = Slot::from(100); - let start_block = 1; + initialize_block( + end_block + 1, + Sassafras::current_slot() + 1, + Default::default(), + &pairs[0], + ); - ext.execute_with(|| { - let epoch_length = Sassafras::epoch_length() as u64; + let next_epoch_tag = epoch_tag; + let epoch_tag = epoch_tag ^ 1; + let tickets_count = TicketsCount::<Test>::get(); + assert_eq!(tickets_count[epoch_tag as usize], e1_count as u32); + assert_eq!(tickets_count[next_epoch_tag as usize], 0); - initialize_block(start_block, start_slot, Default::default(), pair); + // Append some tickets to the accumulator + e2_tickets + .iter() + .for_each(|t| TicketsAccumulator::<Test>::insert(TicketKey::from(t.id), t)); - let tickets = make_ticket_bodies(10, Some(pair)); - let mut epoch1_tickets = tickets[..4].to_vec(); - let mut epoch2_tickets = tickets[4..].to_vec(); - - // Persist some tickets for next epoch (N) - persist_next_epoch_tickets(&epoch1_tickets); - assert_eq!(TicketsMeta::<Test>::get().tickets_count, [0, 4]); - // Check next epoch tickets presence - epoch1_tickets.sort_by_key(|t| t.0); - (0..epoch1_tickets.len()).into_iter().for_each(|i| { - let id = TicketsIds::<Test>::get((1, i as u32)).unwrap(); - let body = TicketsData::<Test>::get(id).unwrap(); - assert_eq!((id, body), epoch1_tickets[i]); - }); + // Progress to epoch's last block + let end_block = end_block + epoch_duration; + progress_to_block(end_block, &pairs[0]).unwrap(); - // Advance one epoch to enact the tickets - go_to_block(start_block + epoch_length, start_slot + epoch_length, pair); - assert_eq!(TicketsMeta::<Test>::get().tickets_count, [0, 4]); - - // Persist some tickets for next epoch (N+1) - persist_next_epoch_tickets(&epoch2_tickets); - assert_eq!(TicketsMeta::<Test>::get().tickets_count, [6, 4]); - epoch2_tickets.sort_by_key(|t| t.0); - // Check for this epoch and next epoch tickets presence - (0..epoch1_tickets.len()).into_iter().for_each(|i| { - let id = TicketsIds::<Test>::get((1, i as u32)).unwrap(); - let body = TicketsData::<Test>::get(id).unwrap(); - assert_eq!((id, body),
epoch1_tickets[i]); - }); - (0..epoch2_tickets.len()).into_iter().for_each(|i| { - let id = TicketsIds::<Test>::get((0, i as u32)).unwrap(); - let body = TicketsData::<Test>::get(id).unwrap(); - assert_eq!((id, body), epoch2_tickets[i]); - }); + let tickets_count = TicketsCount::<Test>::get(); + assert_eq!(tickets_count[epoch_tag as usize], e1_count as u32); + assert!( + 0 < tickets_count[next_epoch_tag as usize] && + tickets_count[next_epoch_tag as usize] < e2_count as u32 + ); - // Advance to epoch 2 and check for cleanup + finalize_block(end_block); - go_to_block(start_block + 2 * epoch_length, start_slot + 2 * epoch_length, pair); - assert_eq!(TicketsMeta::<Test>::get().tickets_count, [6, 0]); + let tickets_count = TicketsCount::<Test>::get(); + assert_eq!(tickets_count[epoch_tag as usize], e1_count as u32); + assert_eq!(tickets_count[next_epoch_tag as usize], e2_count as u32); - (0..epoch1_tickets.len()).into_iter().for_each(|i| { - let id = TicketsIds::<Test>::get((1, i as u32)).unwrap(); - assert!(TicketsData::<Test>::get(id).is_none()); - }); - (0..epoch2_tickets.len()).into_iter().for_each(|i| { - let id = TicketsIds::<Test>::get((0, i as u32)).unwrap(); - let body = TicketsData::<Test>::get(id).unwrap(); - assert_eq!((id, body), epoch2_tickets[i]); - }); - }) + // Start new epoch + initialize_block( + end_block + 1, + Sassafras::current_slot() + 1, + Default::default(), + &pairs[0], + ); + + let next_epoch_tag = epoch_tag; + let epoch_tag = epoch_tag ^ 1; + let tickets_count = TicketsCount::<Test>::get(); + assert_eq!(tickets_count[epoch_tag as usize], e2_count as u32); + assert_eq!(tickets_count[next_epoch_tag as usize], 0); + }); } -const TICKETS_FILE: &str = "src/data/25_tickets_100_auths.bin"; +#[test] +fn incremental_accumulator_drain() { + let tickets = make_ticket_bodies(10, None, false); -fn data_read<T: Decode>(filename: &str) -> T { - use std::{fs::File, io::Read}; - let mut file = File::open(filename).unwrap(); - let mut buf = Vec::new(); - file.read_to_end(&mut buf).unwrap(); - T::decode(&mut &buf[..]).unwrap() -} + new_test_ext(0).execute_with(|| { + tickets + .iter() + .for_each(|t| TicketsAccumulator::<Test>::insert(TicketKey::from(t.id), t)); -fn data_write<T: Encode>(filename: &str, data: T) { - use std::{fs::File, io::Write}; - let mut file = File::create(filename).unwrap(); - let buf = data.encode(); - file.write_all(&buf).unwrap(); -} + let accumulator: Vec<_> = TicketsAccumulator::<Test>::iter_values().collect(); + // Assert the accumulator's expected order (bigger id first) + assert!(accumulator.windows(2).all(|chunk| chunk[0].id > chunk[1].id)); + + let mut onchain_expected = accumulator.clone(); + onchain_expected.sort_unstable(); + + Sassafras::consume_tickets_accumulator(5, 0); + let tickets_count = TicketsCount::<Test>::get(); + assert_eq!(tickets_count[0], 5); + assert_eq!(tickets_count[1], 0); -// We don't want to implement anything secure here. -// Just a trivial shuffle for the tests.
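`tickets_accumulator_works` above leans on the two-epoch ring indexing of the tickets buffer: epoch `N` is stored under tag `N % 2` and the next epoch under the complementary tag, so an epoch change flips the roles without moving any data. A minimal sketch (helper name hypothetical):

```rust
// Tickets buffer tags for a given epoch index: (current, next).
fn epoch_tags(epoch_idx: u64) -> (u8, u8) {
    let epoch_tag = (epoch_idx % 2) as u8;
    (epoch_tag, epoch_tag ^ 1)
}

#[test]
fn tags_flip_on_epoch_change() {
    assert_eq!(epoch_tags(10), (0, 1));
    assert_eq!(epoch_tags(11), (1, 0)); // roles swapped, no data moved
}
```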
-fn trivial_fisher_yates_shuffle<T>(vector: &mut Vec<T>, random_seed: u64) { - let mut rng = random_seed as usize; - for i in (1..vector.len()).rev() { - let j = rng % (i + 1); - vector.swap(i, j); - rng = (rng.wrapping_mul(6364793005) + 1) as usize; // Some random number generation - } + accumulator.iter().rev().enumerate().skip(5).for_each(|(i, t)| { + let t2 = Tickets::<Test>::get((0, i as u32)).unwrap(); + assert_eq!(t.id, t2.id); + }); + + Sassafras::consume_tickets_accumulator(3, 0); + let tickets_count = TicketsCount::<Test>::get(); + assert_eq!(tickets_count[0], 8); + assert_eq!(tickets_count[1], 0); + accumulator.iter().rev().enumerate().skip(2).for_each(|(i, t)| { + let t2 = Tickets::<Test>::get((0, i as u32)).unwrap(); + assert_eq!(t.id, t2.id); + }); + + Sassafras::consume_tickets_accumulator(5, 0); + let tickets_count = TicketsCount::<Test>::get(); + assert_eq!(tickets_count[0], 10); + assert_eq!(tickets_count[1], 0); + accumulator.iter().rev().enumerate().for_each(|(i, t)| { + let t2 = Tickets::<Test>::get((0, i as u32)).unwrap(); + assert_eq!(t.id, t2.id); + }); + }); } -// For this test we use a set of pre-constructed tickets from a file. -// Creating a large set of tickets on the fly takes time, and may be annoying -// for test execution. -// -// A valid ring-context is required for this test since we are passing through the -// `submit_ticket` call which tests for ticket validity. #[test] -fn submit_tickets_with_ring_proof_check_works() { +fn submit_tickets_works() { use sp_core::Pair as _; - // env_logger::init(); + let _ = env_logger::try_init(); + let start_block = 1; + let start_slot = (GENESIS_SLOT + 1).into(); + + let (randomness, authorities, mut candidates): ( + Randomness, + Vec<AuthorityId>, + Vec<TicketEnvelope>, + ) = data_read(TICKETS_FILE); - let (authorities, mut tickets): (Vec<AuthorityId>, Vec<TicketEnvelope>) = - data_read(TICKETS_FILE); + let config = Sassafras::protocol_config(); // Also checks that duplicates are discarded - tickets.extend(tickets.clone()); - trivial_fisher_yates_shuffle(&mut tickets, 321); let (pairs, mut ext) = new_test_ext_with_pairs(authorities.len(), true); let pair = &pairs[0]; @@ -803,72 +642,123 @@ fn submit_tickets_with_ring_proof_check_works() { assert!(authorities.iter().zip(pairs.iter()).all(|(auth, pair)| auth == &pair.public())); ext.execute_with(|| { - let start_slot = Slot::from(0); - let start_block = 1; - - // Tweak the config to discard ~half of the tickets.
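`incremental_accumulator_drain` above pins down the drain order: each `consume_tickets_accumulator` call pops the largest ticket ids first and writes them toward the tail of the per-epoch buffer, so once the accumulator is empty the buffer holds all tickets in ascending id order. A hedged model using plain vectors instead of the storage maps:

```rust
// `acc` is sorted biggest id first (the order `TicketsAccumulator` iterates in);
// each call moves up to `n` tickets into the tail of the fixed-size `buf`.
fn consume(acc: &mut Vec<u64>, buf: &mut Vec<Option<u64>>, n: usize) {
    let take = n.min(acc.len());
    for id in acc.drain(..take) {
        // The first (largest) drained id lands on the highest free index.
        let pos = buf.iter().rposition(|slot| slot.is_none()).expect("buffer has room");
        buf[pos] = Some(id);
    }
}
```

After calls with `n = 5, 3, 5` on ten tickets the counts go 5, 8, 10 and the buffer ends up ascending, which is exactly what the `TicketsCount` and `Tickets` assertions in the test verify.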
- let mut config = EpochConfig::<Test>::get(); - config.redundancy_factor = 25; - EpochConfig::<Test>::set(config); - initialize_block(start_block, start_slot, Default::default(), pair); - NextRandomness::<Test>::set([0; 32]); - // Check state before tickets submission - assert_eq!( - TicketsMeta::<Test>::get(), - TicketsMetadata { unsorted_tickets_count: 0, tickets_count: [0, 0] }, - ); + // Use the same values as the pre-built tickets + Sassafras::update_ring_verifier(&authorities); + let mut randomness_buf = RandomnessBuf::<Test>::get(); + randomness_buf[2] = randomness; + RandomnessBuf::<Test>::set(randomness_buf); + NextAuthorities::<Test>::set(WeakBoundedVec::force_from(authorities, None)); // Submit the tickets - let max_tickets_per_call = Sassafras::epoch_length() as usize; - tickets.chunks(max_tickets_per_call).for_each(|chunk| { - let chunk = BoundedVec::truncate_from(chunk.to_vec()); - Sassafras::submit_tickets(RuntimeOrigin::none(), chunk).unwrap(); - }); + let candidates_per_call = 4; + let mut chunks: Vec<_> = candidates + .chunks(candidates_per_call) + .map(|chunk| BoundedVec::truncate_from(chunk.to_vec())) + .collect(); + assert_eq!(chunks.len(), 5); + + // Try to submit a candidate with an invalid signature. + let mut chunk = chunks[2].clone(); + chunk[0].signature.signature[0] ^= 1; + let e = Sassafras::submit_tickets(RuntimeOrigin::none(), chunk).unwrap_err(); + assert_eq!(e, DispatchError::from(Error::<Test>::TicketBadProof)); + assert_eq!(TicketsAccumulator::<Test>::count(), 0); + + // Try to submit with an invalid attempt number. + let mut chunk = chunks[2].clone(); + chunk[0].attempt = u8::MAX; + let e = Sassafras::submit_tickets(RuntimeOrigin::none(), chunk).unwrap_err(); + assert_eq!(e, DispatchError::from(Error::<Test>::TicketBadAttempt)); + assert_eq!(TicketsAccumulator::<Test>::count(), 0); + + // Start submitting from the mid-valued chunks. + Sassafras::submit_tickets(RuntimeOrigin::none(), chunks[2].clone()).unwrap(); + assert_eq!(TicketsAccumulator::<Test>::count(), 4); + + // Submit something bigger, but we have space for all the candidates. + Sassafras::submit_tickets(RuntimeOrigin::none(), chunks[3].clone()).unwrap(); + assert_eq!(TicketsAccumulator::<Test>::count(), 8); + + // Try to submit a duplicate ticket. + let e = Sassafras::submit_tickets(RuntimeOrigin::none(), chunks[2].clone()).unwrap_err(); + assert_eq!(e, DispatchError::from(Error::<Test>::TicketDuplicate)); + assert_eq!(TicketsAccumulator::<Test>::count(), 8); + + // Submit something smaller. This is accepted (2 old tickets removed). + Sassafras::submit_tickets(RuntimeOrigin::none(), chunks[1].clone()).unwrap(); + assert_eq!(TicketsAccumulator::<Test>::count(), 10); + + // Try to submit a chunk with bigger tickets. This is discarded. + let e = Sassafras::submit_tickets(RuntimeOrigin::none(), chunks[4].clone()).unwrap_err(); + assert_eq!(e, DispatchError::from(Error::<Test>::TicketDropped)); + assert_eq!(TicketsAccumulator::<Test>::count(), 10); + + // Submit the smallest candidates chunk. This is accepted (4 old tickets removed). + Sassafras::submit_tickets(RuntimeOrigin::none(), chunks[0].clone()).unwrap(); + assert_eq!(TicketsAccumulator::<Test>::count(), 10); + + // Try to submit a chunk after the contest is over.
+ progress_to_block(start_block + (config.epoch_duration as u64 - 2), &pairs[0]).unwrap(); + let e = Sassafras::submit_tickets(RuntimeOrigin::none(), chunks[0].clone()).unwrap_err(); + assert_eq!(e, DispatchError::from(Error::<Test>::TicketUnexpected)); + }) +} - // Check state after submission - assert_eq!( - TicketsMeta::<Test>::get(), - TicketsMetadata { unsorted_tickets_count: 16, tickets_count: [0, 0] }, - ); - assert_eq!(UnsortedSegments::<Test>::get(0).len(), 16); - assert_eq!(UnsortedSegments::<Test>::get(1).len(), 0); +fn data_read<T: Decode>(filename: &str) -> T { + use std::{fs::File, io::Read}; + let mut file = File::open(filename).unwrap(); + let mut buf = Vec::new(); + file.read_to_end(&mut buf).unwrap(); + T::decode(&mut &buf[..]).unwrap() +} - finalize_block(start_block); - }) +fn data_write<T: Encode>(filename: &str, data: T) { + use std::{fs::File, io::Write}; + let mut file = File::create(filename).unwrap(); + let buf = data.encode(); + file.write_all(&buf).unwrap(); } #[test] -#[ignore = "test tickets data generator"] -fn make_tickets_data() { +#[ignore = "test tickets generator"] +fn generate_test_tickets() { use super::*; use sp_core::crypto::Pair; - // Number of authorities who produce tickets (for the sake of this test) - let tickets_authors_count = 5; + let start_block = 1; + let start_slot = (GENESIS_SLOT + 1).into(); + // Total number of authorities (the ring) - let authorities_count = 100; + let authorities_count = 10; let (pairs, mut ext) = new_test_ext_with_pairs(authorities_count, true); let authorities: Vec<_> = pairs.iter().map(|sk| sk.public()).collect(); + let mut tickets = Vec::new(); ext.execute_with(|| { - let config = EpochConfig::<Test>::get(); + let config = Sassafras::protocol_config(); + assert!(authorities_count < config.max_authorities as usize); - let tickets_count = tickets_authors_count * config.attempts_number as usize; - let mut tickets = Vec::with_capacity(tickets_count); + let tickets_count = authorities_count * config.attempts_number as usize; - // Construct pre-built tickets with a well known `NextRandomness` value. - NextRandomness::<Test>::set([0; 32]); + initialize_block(start_block, start_slot, Default::default(), &pairs[0]); println!("Constructing {} tickets", tickets_count); + + pairs.iter().take(authorities_count).enumerate().for_each(|(i, pair)| { + let t = make_tickets(config.attempts_number, pair); + tickets.extend(t); - println!("{:.2}%", 100f32 * ((i + 1) as f32 / tickets_authors_count as f32)); + println!("{:.2}%", 100f32 * ((i + 1) as f32 / authorities_count as f32)); + }); - data_write(TICKETS_FILE, (authorities, tickets)); + tickets.sort_unstable_by_key(|t| t.0); + let envelopes: Vec<_> = tickets.into_iter().map(|t| t.1).collect(); + + // Tickets were generated using `next_randomness` + let randomness = Sassafras::next_randomness(); + + data_write(TICKETS_FILE, (randomness, authorities, envelopes)); }); } diff --git a/substrate/frame/sassafras/src/weights.rs b/substrate/frame/sassafras/src/weights.rs index 32ea2d29a180..41a005b26cc5 100644 --- a/substrate/frame/sassafras/src/weights.rs +++ b/substrate/frame/sassafras/src/weights.rs @@ -17,14 +17,14 @@ //! Autogenerated weights for `pallet_sassafras` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-11-16, STEPS: `20`, REPEAT: `3`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! 
DATE: 2024-07-24, STEPS: `20`, REPEAT: `3`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` //! HOSTNAME: `behemoth`, CPU: `AMD Ryzen Threadripper 3970X 32-Core Processor` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/release/node-template +// ./target/release/solochain-template-node // benchmark // pallet // --chain @@ -55,157 +55,115 @@ pub trait WeightInfo { fn on_initialize() -> Weight; fn enact_epoch_change(x: u32, y: u32, ) -> Weight; fn submit_tickets(x: u32, ) -> Weight; - fn plan_config_change() -> Weight; fn update_ring_verifier(x: u32, ) -> Weight; fn load_ring_context() -> Weight; - fn sort_segments(x: u32, ) -> Weight; } /// Weights for `pallet_sassafras` using the Substrate node and recommended hardware. pub struct SubstrateWeight<T>(PhantomData<T>); impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> { - /// Storage: `System::Digest` (r:1 w:1) + /// Storage: `System::Digest` (r:1 w:0) /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `Sassafras::NextRandomness` (r:1 w:0) - /// Proof: `Sassafras::NextRandomness` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::NextAuthorities` (r:1 w:0) - /// Proof: `Sassafras::NextAuthorities` (`max_values`: Some(1), `max_size`: Some(3302), added: 3797, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::CurrentRandomness` (r:1 w:0) - /// Proof: `Sassafras::CurrentRandomness` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::EpochIndex` (r:1 w:0) - /// Proof: `Sassafras::EpochIndex` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::RandomnessAccumulator` (r:1 w:1) - /// Proof: `Sassafras::RandomnessAccumulator` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::CurrentSlot` (r:0 w:1) + /// Storage: `Sassafras::RandomnessBuf` (r:1 w:1) + /// Proof: `Sassafras::RandomnessBuf` (`max_values`: Some(1), `max_size`: Some(128), added: 623, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::CurrentSlot` (r:1 w:1) /// Proof: `Sassafras::CurrentSlot` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::ClaimTemporaryData` (r:0 w:1) - /// Proof: `Sassafras::ClaimTemporaryData` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::GenesisSlot` (r:0 w:1) - /// Proof: `Sassafras::GenesisSlot` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::CounterForTicketsAccumulator` (r:1 w:0) + /// Proof: `Sassafras::CounterForTicketsAccumulator` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::TemporaryData` (r:0 w:1) + /// Proof: `Sassafras::TemporaryData` (`max_values`: Some(1), `max_size`: Some(40), added: 535, mode: `MaxEncodedLen`) fn on_initialize() -> Weight { // Proof Size summary in bytes: - // Measured: `302` - // Estimated: `4787` - // Minimum execution time: 438_039_000 picoseconds. - Weight::from_parts(439_302_000, 4787) - .saturating_add(T::DbWeight::get().reads(6_u64)) - .saturating_add(T::DbWeight::get().writes(5_u64)) + // Measured: `270` + // Estimated: `1755` + // Minimum execution time: 382_223_000 picoseconds.
+ Weight::from_parts(383_656_000, 1755) + .saturating_add(T::DbWeight::get().reads(4_u64)) + .saturating_add(T::DbWeight::get().writes(3_u64)) } /// Storage: `Sassafras::CurrentSlot` (r:1 w:0) /// Proof: `Sassafras::CurrentSlot` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::EpochIndex` (r:1 w:1) - /// Proof: `Sassafras::EpochIndex` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::GenesisSlot` (r:1 w:0) - /// Proof: `Sassafras::GenesisSlot` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) /// Storage: `Sassafras::NextAuthorities` (r:1 w:1) /// Proof: `Sassafras::NextAuthorities` (`max_values`: Some(1), `max_size`: Some(3302), added: 3797, mode: `MaxEncodedLen`) /// Storage: `Sassafras::RingContext` (r:1 w:0) /// Proof: `Sassafras::RingContext` (`max_values`: Some(1), `max_size`: Some(590324), added: 590819, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::TicketsMeta` (r:1 w:1) - /// Proof: `Sassafras::TicketsMeta` (`max_values`: Some(1), `max_size`: Some(12), added: 507, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::NextRandomness` (r:1 w:1) - /// Proof: `Sassafras::NextRandomness` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::RandomnessAccumulator` (r:1 w:0) - /// Proof: `Sassafras::RandomnessAccumulator` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::NextEpochConfig` (r:1 w:1) - /// Proof: `Sassafras::NextEpochConfig` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::PendingEpochConfigChange` (r:1 w:1) - /// Proof: `Sassafras::PendingEpochConfigChange` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::TemporaryData` (r:1 w:0) + /// Proof: `Sassafras::TemporaryData` (`max_values`: Some(1), `max_size`: Some(40), added: 535, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::RandomnessBuf` (r:1 w:1) + /// Proof: `Sassafras::RandomnessBuf` (`max_values`: Some(1), `max_size`: Some(128), added: 623, mode: `MaxEncodedLen`) /// Storage: `System::Digest` (r:1 w:1) /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `Sassafras::SortedCandidates` (r:1 w:0) - /// Proof: `Sassafras::SortedCandidates` (`max_values`: Some(1), `max_size`: Some(3202), added: 3697, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::UnsortedSegments` (r:79 w:79) - /// Proof: `Sassafras::UnsortedSegments` (`max_values`: None, `max_size`: Some(2054), added: 4529, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::TicketsIds` (r:5000 w:200) - /// Proof: `Sassafras::TicketsIds` (`max_values`: None, `max_size`: Some(21), added: 2496, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::TicketsCount` (r:1 w:1) + /// Proof: `Sassafras::TicketsCount` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::CounterForTicketsAccumulator` (r:1 w:1) + /// Proof: `Sassafras::CounterForTicketsAccumulator` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::TicketsAccumulator` (r:1001 w:1000) + /// Proof: `Sassafras::TicketsAccumulator` (`max_values`: None, `max_size`: Some(195), added: 2670, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::Tickets` (r:0 w:1000) + /// Proof: `Sassafras::Tickets` 
(`max_values`: None, `max_size`: Some(168), added: 2643, mode: `MaxEncodedLen`) /// Storage: `Sassafras::Authorities` (r:0 w:1) /// Proof: `Sassafras::Authorities` (`max_values`: Some(1), `max_size`: Some(3302), added: 3797, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::TicketsData` (r:0 w:9896) - /// Proof: `Sassafras::TicketsData` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::RingVerifierData` (r:0 w:1) - /// Proof: `Sassafras::RingVerifierData` (`max_values`: Some(1), `max_size`: Some(388), added: 883, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::EpochConfig` (r:0 w:1) - /// Proof: `Sassafras::EpochConfig` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::CurrentRandomness` (r:0 w:1) - /// Proof: `Sassafras::CurrentRandomness` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::RingVerifierKey` (r:0 w:1) + /// Proof: `Sassafras::RingVerifierKey` (`max_values`: Some(1), `max_size`: Some(388), added: 883, mode: `MaxEncodedLen`) /// The range of component `x` is `[1, 100]`. - /// The range of component `y` is `[1000, 5000]`. + /// The range of component `y` is `[100, 1000]`. fn enact_epoch_change(x: u32, y: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `594909 + x * (33 ±0) + y * (53 ±0)` - // Estimated: `593350 + x * (24 ±1) + y * (2496 ±0)` - // Minimum execution time: 121_279_846_000 picoseconds. - Weight::from_parts(94_454_851_972, 593350) - // Standard Error: 24_177_301 - .saturating_add(Weight::from_parts(8_086_191, 0).saturating_mul(x.into())) - // Standard Error: 601_053 - .saturating_add(Weight::from_parts(15_578_413, 0).saturating_mul(y.into())) - .saturating_add(T::DbWeight::get().reads(13_u64)) + // Measured: `590613 + x * (33 ±0) + y * (68 ±0)` + // Estimated: `592099 + x * (33 ±0) + y * (2670 ±0)` + // Minimum execution time: 142_623_107_000 picoseconds. 
+ Weight::from_parts(135_944_664_003, 592099) + // Standard Error: 3_660_095 + .saturating_add(Weight::from_parts(174_904_510, 0).saturating_mul(x.into())) + // Standard Error: 404_219 + .saturating_add(Weight::from_parts(7_440_688, 0).saturating_mul(y.into())) + .saturating_add(T::DbWeight::get().reads(9_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(y.into()))) - .saturating_add(T::DbWeight::get().writes(112_u64)) + .saturating_add(T::DbWeight::get().writes(7_u64)) .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(y.into()))) - .saturating_add(Weight::from_parts(0, 24).saturating_mul(x.into())) - .saturating_add(Weight::from_parts(0, 2496).saturating_mul(y.into())) + .saturating_add(Weight::from_parts(0, 33).saturating_mul(x.into())) + .saturating_add(Weight::from_parts(0, 2670).saturating_mul(y.into())) } /// Storage: `Sassafras::CurrentSlot` (r:1 w:0) /// Proof: `Sassafras::CurrentSlot` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::EpochIndex` (r:1 w:0) - /// Proof: `Sassafras::EpochIndex` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::GenesisSlot` (r:1 w:0) - /// Proof: `Sassafras::GenesisSlot` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::RingVerifierData` (r:1 w:0) - /// Proof: `Sassafras::RingVerifierData` (`max_values`: Some(1), `max_size`: Some(388), added: 883, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::RingVerifierKey` (r:1 w:0) + /// Proof: `Sassafras::RingVerifierKey` (`max_values`: Some(1), `max_size`: Some(388), added: 883, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::RandomnessBuf` (r:1 w:0) + /// Proof: `Sassafras::RandomnessBuf` (`max_values`: Some(1), `max_size`: Some(128), added: 623, mode: `MaxEncodedLen`) /// Storage: `Sassafras::NextAuthorities` (r:1 w:0) /// Proof: `Sassafras::NextAuthorities` (`max_values`: Some(1), `max_size`: Some(3302), added: 3797, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::NextEpochConfig` (r:1 w:0) - /// Proof: `Sassafras::NextEpochConfig` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::NextRandomness` (r:1 w:0) - /// Proof: `Sassafras::NextRandomness` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::TicketsData` (r:25 w:25) - /// Proof: `Sassafras::TicketsData` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::TicketsMeta` (r:1 w:1) - /// Proof: `Sassafras::TicketsMeta` (`max_values`: Some(1), `max_size`: Some(12), added: 507, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::UnsortedSegments` (r:1 w:1) - /// Proof: `Sassafras::UnsortedSegments` (`max_values`: None, `max_size`: Some(2054), added: 4529, mode: `MaxEncodedLen`) - /// The range of component `x` is `[1, 25]`. + /// Storage: `Sassafras::CounterForTicketsAccumulator` (r:1 w:1) + /// Proof: `Sassafras::CounterForTicketsAccumulator` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::TicketsAccumulator` (r:16 w:16) + /// Proof: `Sassafras::TicketsAccumulator` (`max_values`: None, `max_size`: Some(195), added: 2670, mode: `MaxEncodedLen`) + /// The range of component `x` is `[1, 16]`. 
fn submit_tickets(x: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `3869` - // Estimated: `5519 + x * (2559 ±0)` - // Minimum execution time: 36_904_934_000 picoseconds. - Weight::from_parts(25_822_957_295, 5519) - // Standard Error: 11_047_832 - .saturating_add(Weight::from_parts(11_338_353_299, 0).saturating_mul(x.into())) - .saturating_add(T::DbWeight::get().reads(9_u64)) + // Measured: `1029` + // Estimated: `4787 + x * (2670 ±0)` + // Minimum execution time: 52_363_693_000 picoseconds. + Weight::from_parts(38_029_460_770, 4787) + // Standard Error: 15_839_361 + .saturating_add(Weight::from_parts(14_567_084_979, 0).saturating_mul(x.into())) + .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(x.into()))) - .saturating_add(T::DbWeight::get().writes(2_u64)) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(x.into()))) - .saturating_add(Weight::from_parts(0, 2559).saturating_mul(x.into())) - } - /// Storage: `Sassafras::PendingEpochConfigChange` (r:0 w:1) - /// Proof: `Sassafras::PendingEpochConfigChange` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) - fn plan_config_change() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 4_038_000 picoseconds. - Weight::from_parts(4_499_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(x.into()))) + .saturating_add(Weight::from_parts(0, 2670).saturating_mul(x.into())) } /// Storage: `Sassafras::RingContext` (r:1 w:0) /// Proof: `Sassafras::RingContext` (`max_values`: Some(1), `max_size`: Some(590324), added: 590819, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::RingVerifierData` (r:0 w:1) - /// Proof: `Sassafras::RingVerifierData` (`max_values`: Some(1), `max_size`: Some(388), added: 883, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::RingVerifierKey` (r:0 w:1) + /// Proof: `Sassafras::RingVerifierKey` (`max_values`: Some(1), `max_size`: Some(388), added: 883, mode: `MaxEncodedLen`) /// The range of component `x` is `[1, 100]`. fn update_ring_verifier(x: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `590485` + // Measured: `590458` // Estimated: `591809` - // Minimum execution time: 105_121_424_000 picoseconds. - Weight::from_parts(105_527_334_385, 591809) - // Standard Error: 2_933_910 - .saturating_add(Weight::from_parts(96_136_261, 0).saturating_mul(x.into())) + // Minimum execution time: 135_738_430_000 picoseconds. + Weight::from_parts(135_840_809_672, 591809) + // Standard Error: 3_319_979 + .saturating_add(Weight::from_parts(173_092_727, 0).saturating_mul(x.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -213,180 +171,118 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Sassafras::RingContext` (`max_values`: Some(1), `max_size`: Some(590324), added: 590819, mode: `MaxEncodedLen`) fn load_ring_context() -> Weight { // Proof Size summary in bytes: - // Measured: `590485` + // Measured: `590458` // Estimated: `591809` - // Minimum execution time: 44_005_681_000 picoseconds. - Weight::from_parts(44_312_079_000, 591809) + // Minimum execution time: 55_326_215_000 picoseconds. 
+ Weight::from_parts(55_332_809_000, 591809) .saturating_add(T::DbWeight::get().reads(1_u64)) } - /// Storage: `Sassafras::SortedCandidates` (r:1 w:0) - /// Proof: `Sassafras::SortedCandidates` (`max_values`: Some(1), `max_size`: Some(3202), added: 3697, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::UnsortedSegments` (r:100 w:100) - /// Proof: `Sassafras::UnsortedSegments` (`max_values`: None, `max_size`: Some(2054), added: 4529, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::TicketsIds` (r:0 w:200) - /// Proof: `Sassafras::TicketsIds` (`max_values`: None, `max_size`: Some(21), added: 2496, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::TicketsData` (r:0 w:12600) - /// Proof: `Sassafras::TicketsData` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// The range of component `x` is `[1, 100]`. - fn sort_segments(x: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `222 + x * (2060 ±0)` - // Estimated: `4687 + x * (4529 ±0)` - // Minimum execution time: 183_501_000 picoseconds. - Weight::from_parts(183_501_000, 4687) - // Standard Error: 1_426_363 - .saturating_add(Weight::from_parts(169_156_241, 0).saturating_mul(x.into())) - .saturating_add(T::DbWeight::get().reads(1_u64)) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(x.into()))) - .saturating_add(T::DbWeight::get().writes((129_u64).saturating_mul(x.into()))) - .saturating_add(Weight::from_parts(0, 4529).saturating_mul(x.into())) - } } // For backwards compatibility and tests. impl WeightInfo for () { - /// Storage: `System::Digest` (r:1 w:1) + /// Storage: `System::Digest` (r:1 w:0) /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `Sassafras::NextRandomness` (r:1 w:0) - /// Proof: `Sassafras::NextRandomness` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::NextAuthorities` (r:1 w:0) - /// Proof: `Sassafras::NextAuthorities` (`max_values`: Some(1), `max_size`: Some(3302), added: 3797, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::CurrentRandomness` (r:1 w:0) - /// Proof: `Sassafras::CurrentRandomness` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::EpochIndex` (r:1 w:0) - /// Proof: `Sassafras::EpochIndex` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::RandomnessAccumulator` (r:1 w:1) - /// Proof: `Sassafras::RandomnessAccumulator` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::CurrentSlot` (r:0 w:1) + /// Storage: `Sassafras::RandomnessBuf` (r:1 w:1) + /// Proof: `Sassafras::RandomnessBuf` (`max_values`: Some(1), `max_size`: Some(128), added: 623, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::CurrentSlot` (r:1 w:1) /// Proof: `Sassafras::CurrentSlot` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::ClaimTemporaryData` (r:0 w:1) - /// Proof: `Sassafras::ClaimTemporaryData` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::GenesisSlot` (r:0 w:1) - /// Proof: `Sassafras::GenesisSlot` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::CounterForTicketsAccumulator` (r:1 w:0) + /// Proof: `Sassafras::CounterForTicketsAccumulator` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: 
`MaxEncodedLen`) + /// Storage: `Sassafras::TemporaryData` (r:0 w:1) + /// Proof: `Sassafras::TemporaryData` (`max_values`: Some(1), `max_size`: Some(40), added: 535, mode: `MaxEncodedLen`) fn on_initialize() -> Weight { // Proof Size summary in bytes: - // Measured: `302` - // Estimated: `4787` - // Minimum execution time: 438_039_000 picoseconds. - Weight::from_parts(439_302_000, 4787) - .saturating_add(RocksDbWeight::get().reads(6_u64)) - .saturating_add(RocksDbWeight::get().writes(5_u64)) + // Measured: `270` + // Estimated: `1755` + // Minimum execution time: 382_223_000 picoseconds. + Weight::from_parts(383_656_000, 1755) + .saturating_add(RocksDbWeight::get().reads(4_u64)) + .saturating_add(RocksDbWeight::get().writes(3_u64)) } /// Storage: `Sassafras::CurrentSlot` (r:1 w:0) /// Proof: `Sassafras::CurrentSlot` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::EpochIndex` (r:1 w:1) - /// Proof: `Sassafras::EpochIndex` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::GenesisSlot` (r:1 w:0) - /// Proof: `Sassafras::GenesisSlot` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) /// Storage: `Sassafras::NextAuthorities` (r:1 w:1) /// Proof: `Sassafras::NextAuthorities` (`max_values`: Some(1), `max_size`: Some(3302), added: 3797, mode: `MaxEncodedLen`) /// Storage: `Sassafras::RingContext` (r:1 w:0) /// Proof: `Sassafras::RingContext` (`max_values`: Some(1), `max_size`: Some(590324), added: 590819, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::TicketsMeta` (r:1 w:1) - /// Proof: `Sassafras::TicketsMeta` (`max_values`: Some(1), `max_size`: Some(12), added: 507, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::NextRandomness` (r:1 w:1) - /// Proof: `Sassafras::NextRandomness` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::RandomnessAccumulator` (r:1 w:0) - /// Proof: `Sassafras::RandomnessAccumulator` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::NextEpochConfig` (r:1 w:1) - /// Proof: `Sassafras::NextEpochConfig` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::PendingEpochConfigChange` (r:1 w:1) - /// Proof: `Sassafras::PendingEpochConfigChange` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::TemporaryData` (r:1 w:0) + /// Proof: `Sassafras::TemporaryData` (`max_values`: Some(1), `max_size`: Some(40), added: 535, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::RandomnessBuf` (r:1 w:1) + /// Proof: `Sassafras::RandomnessBuf` (`max_values`: Some(1), `max_size`: Some(128), added: 623, mode: `MaxEncodedLen`) /// Storage: `System::Digest` (r:1 w:1) /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `Sassafras::SortedCandidates` (r:1 w:0) - /// Proof: `Sassafras::SortedCandidates` (`max_values`: Some(1), `max_size`: Some(3202), added: 3697, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::UnsortedSegments` (r:79 w:79) - /// Proof: `Sassafras::UnsortedSegments` (`max_values`: None, `max_size`: Some(2054), added: 4529, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::TicketsIds` (r:5000 w:200) - /// Proof: `Sassafras::TicketsIds` (`max_values`: None, `max_size`: Some(21), added: 2496, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::TicketsCount` (r:1 w:1) + 
/// Proof: `Sassafras::TicketsCount` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::CounterForTicketsAccumulator` (r:1 w:1) + /// Proof: `Sassafras::CounterForTicketsAccumulator` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::TicketsAccumulator` (r:1001 w:1000) + /// Proof: `Sassafras::TicketsAccumulator` (`max_values`: None, `max_size`: Some(195), added: 2670, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::Tickets` (r:0 w:1000) + /// Proof: `Sassafras::Tickets` (`max_values`: None, `max_size`: Some(168), added: 2643, mode: `MaxEncodedLen`) /// Storage: `Sassafras::Authorities` (r:0 w:1) /// Proof: `Sassafras::Authorities` (`max_values`: Some(1), `max_size`: Some(3302), added: 3797, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::TicketsData` (r:0 w:9896) - /// Proof: `Sassafras::TicketsData` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::RingVerifierData` (r:0 w:1) - /// Proof: `Sassafras::RingVerifierData` (`max_values`: Some(1), `max_size`: Some(388), added: 883, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::EpochConfig` (r:0 w:1) - /// Proof: `Sassafras::EpochConfig` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::CurrentRandomness` (r:0 w:1) - /// Proof: `Sassafras::CurrentRandomness` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::RingVerifierKey` (r:0 w:1) + /// Proof: `Sassafras::RingVerifierKey` (`max_values`: Some(1), `max_size`: Some(388), added: 883, mode: `MaxEncodedLen`) /// The range of component `x` is `[1, 100]`. - /// The range of component `y` is `[1000, 5000]`. + /// The range of component `y` is `[100, 1000]`. fn enact_epoch_change(x: u32, y: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `594909 + x * (33 ±0) + y * (53 ±0)` - // Estimated: `593350 + x * (24 ±1) + y * (2496 ±0)` - // Minimum execution time: 121_279_846_000 picoseconds. - Weight::from_parts(94_454_851_972, 593350) - // Standard Error: 24_177_301 - .saturating_add(Weight::from_parts(8_086_191, 0).saturating_mul(x.into())) - // Standard Error: 601_053 - .saturating_add(Weight::from_parts(15_578_413, 0).saturating_mul(y.into())) - .saturating_add(RocksDbWeight::get().reads(13_u64)) + // Measured: `590613 + x * (33 ±0) + y * (68 ±0)` + // Estimated: `592099 + x * (33 ±0) + y * (2670 ±0)` + // Minimum execution time: 142_623_107_000 picoseconds. 
+ Weight::from_parts(135_944_664_003, 592099) + // Standard Error: 3_660_095 + .saturating_add(Weight::from_parts(174_904_510, 0).saturating_mul(x.into())) + // Standard Error: 404_219 + .saturating_add(Weight::from_parts(7_440_688, 0).saturating_mul(y.into())) + .saturating_add(RocksDbWeight::get().reads(9_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(y.into()))) - .saturating_add(RocksDbWeight::get().writes(112_u64)) + .saturating_add(RocksDbWeight::get().writes(7_u64)) .saturating_add(RocksDbWeight::get().writes((2_u64).saturating_mul(y.into()))) - .saturating_add(Weight::from_parts(0, 24).saturating_mul(x.into())) - .saturating_add(Weight::from_parts(0, 2496).saturating_mul(y.into())) + .saturating_add(Weight::from_parts(0, 33).saturating_mul(x.into())) + .saturating_add(Weight::from_parts(0, 2670).saturating_mul(y.into())) } /// Storage: `Sassafras::CurrentSlot` (r:1 w:0) /// Proof: `Sassafras::CurrentSlot` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::EpochIndex` (r:1 w:0) - /// Proof: `Sassafras::EpochIndex` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::GenesisSlot` (r:1 w:0) - /// Proof: `Sassafras::GenesisSlot` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::RingVerifierData` (r:1 w:0) - /// Proof: `Sassafras::RingVerifierData` (`max_values`: Some(1), `max_size`: Some(388), added: 883, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::RingVerifierKey` (r:1 w:0) + /// Proof: `Sassafras::RingVerifierKey` (`max_values`: Some(1), `max_size`: Some(388), added: 883, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::RandomnessBuf` (r:1 w:0) + /// Proof: `Sassafras::RandomnessBuf` (`max_values`: Some(1), `max_size`: Some(128), added: 623, mode: `MaxEncodedLen`) /// Storage: `Sassafras::NextAuthorities` (r:1 w:0) /// Proof: `Sassafras::NextAuthorities` (`max_values`: Some(1), `max_size`: Some(3302), added: 3797, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::NextEpochConfig` (r:1 w:0) - /// Proof: `Sassafras::NextEpochConfig` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::NextRandomness` (r:1 w:0) - /// Proof: `Sassafras::NextRandomness` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::TicketsData` (r:25 w:25) - /// Proof: `Sassafras::TicketsData` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::TicketsMeta` (r:1 w:1) - /// Proof: `Sassafras::TicketsMeta` (`max_values`: Some(1), `max_size`: Some(12), added: 507, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::UnsortedSegments` (r:1 w:1) - /// Proof: `Sassafras::UnsortedSegments` (`max_values`: None, `max_size`: Some(2054), added: 4529, mode: `MaxEncodedLen`) - /// The range of component `x` is `[1, 25]`. + /// Storage: `Sassafras::CounterForTicketsAccumulator` (r:1 w:1) + /// Proof: `Sassafras::CounterForTicketsAccumulator` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::TicketsAccumulator` (r:16 w:16) + /// Proof: `Sassafras::TicketsAccumulator` (`max_values`: None, `max_size`: Some(195), added: 2670, mode: `MaxEncodedLen`) + /// The range of component `x` is `[1, 16]`. 
fn submit_tickets(x: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `3869` - // Estimated: `5519 + x * (2559 ±0)` - // Minimum execution time: 36_904_934_000 picoseconds. - Weight::from_parts(25_822_957_295, 5519) - // Standard Error: 11_047_832 - .saturating_add(Weight::from_parts(11_338_353_299, 0).saturating_mul(x.into())) - .saturating_add(RocksDbWeight::get().reads(9_u64)) + // Measured: `1029` + // Estimated: `4787 + x * (2670 ±0)` + // Minimum execution time: 52_363_693_000 picoseconds. + Weight::from_parts(38_029_460_770, 4787) + // Standard Error: 15_839_361 + .saturating_add(Weight::from_parts(14_567_084_979, 0).saturating_mul(x.into())) + .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(x.into()))) - .saturating_add(RocksDbWeight::get().writes(2_u64)) - .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(x.into()))) - .saturating_add(Weight::from_parts(0, 2559).saturating_mul(x.into())) - } - /// Storage: `Sassafras::PendingEpochConfigChange` (r:0 w:1) - /// Proof: `Sassafras::PendingEpochConfigChange` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) - fn plan_config_change() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 4_038_000 picoseconds. - Weight::from_parts(4_499_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) + .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(x.into()))) + .saturating_add(Weight::from_parts(0, 2670).saturating_mul(x.into())) } /// Storage: `Sassafras::RingContext` (r:1 w:0) /// Proof: `Sassafras::RingContext` (`max_values`: Some(1), `max_size`: Some(590324), added: 590819, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::RingVerifierData` (r:0 w:1) - /// Proof: `Sassafras::RingVerifierData` (`max_values`: Some(1), `max_size`: Some(388), added: 883, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::RingVerifierKey` (r:0 w:1) + /// Proof: `Sassafras::RingVerifierKey` (`max_values`: Some(1), `max_size`: Some(388), added: 883, mode: `MaxEncodedLen`) /// The range of component `x` is `[1, 100]`. fn update_ring_verifier(x: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `590485` + // Measured: `590458` // Estimated: `591809` - // Minimum execution time: 105_121_424_000 picoseconds. - Weight::from_parts(105_527_334_385, 591809) - // Standard Error: 2_933_910 - .saturating_add(Weight::from_parts(96_136_261, 0).saturating_mul(x.into())) + // Minimum execution time: 135_738_430_000 picoseconds. + Weight::from_parts(135_840_809_672, 591809) + // Standard Error: 3_319_979 + .saturating_add(Weight::from_parts(173_092_727, 0).saturating_mul(x.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -394,32 +290,10 @@ impl WeightInfo for () { /// Proof: `Sassafras::RingContext` (`max_values`: Some(1), `max_size`: Some(590324), added: 590819, mode: `MaxEncodedLen`) fn load_ring_context() -> Weight { // Proof Size summary in bytes: - // Measured: `590485` + // Measured: `590458` // Estimated: `591809` - // Minimum execution time: 44_005_681_000 picoseconds. - Weight::from_parts(44_312_079_000, 591809) + // Minimum execution time: 55_326_215_000 picoseconds. 
+ Weight::from_parts(55_332_809_000, 591809) .saturating_add(RocksDbWeight::get().reads(1_u64)) } - /// Storage: `Sassafras::SortedCandidates` (r:1 w:0) - /// Proof: `Sassafras::SortedCandidates` (`max_values`: Some(1), `max_size`: Some(3202), added: 3697, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::UnsortedSegments` (r:100 w:100) - /// Proof: `Sassafras::UnsortedSegments` (`max_values`: None, `max_size`: Some(2054), added: 4529, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::TicketsIds` (r:0 w:200) - /// Proof: `Sassafras::TicketsIds` (`max_values`: None, `max_size`: Some(21), added: 2496, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::TicketsData` (r:0 w:12600) - /// Proof: `Sassafras::TicketsData` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// The range of component `x` is `[1, 100]`. - fn sort_segments(x: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `222 + x * (2060 ±0)` - // Estimated: `4687 + x * (4529 ±0)` - // Minimum execution time: 183_501_000 picoseconds. - Weight::from_parts(183_501_000, 4687) - // Standard Error: 1_426_363 - .saturating_add(Weight::from_parts(169_156_241, 0).saturating_mul(x.into())) - .saturating_add(RocksDbWeight::get().reads(1_u64)) - .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(x.into()))) - .saturating_add(RocksDbWeight::get().writes((129_u64).saturating_mul(x.into()))) - .saturating_add(Weight::from_parts(0, 4529).saturating_mul(x.into())) - } } diff --git a/substrate/primitives/consensus/sassafras/Cargo.toml b/substrate/primitives/consensus/sassafras/Cargo.toml index eb9dfd34c595..8ed668bf432e 100644 --- a/substrate/primitives/consensus/sassafras/Cargo.toml +++ b/substrate/primitives/consensus/sassafras/Cargo.toml @@ -2,11 +2,11 @@ name = "sp-consensus-sassafras" version = "0.3.4-dev" authors.workspace = true -description = "Primitives for Sassafras consensus" edition.workspace = true license = "Apache-2.0" homepage.workspace = true -repository = "/~https://github.com/paritytech/polkadot-sdk/" +repository.workspace = true +description = "Primitives for Sassafras consensus" documentation = "https://docs.rs/sp-consensus-sassafras" readme = "README.md" publish = false @@ -26,6 +26,7 @@ sp-application-crypto = { features = ["bandersnatch-experimental"], workspace = sp-consensus-slots = { workspace = true } sp-core = { features = ["bandersnatch-experimental"], workspace = true } sp-runtime = { workspace = true } +sp-inherents = { workspace = true } [features] default = ["std"] @@ -37,6 +38,7 @@ std = [ "sp-application-crypto/std", "sp-consensus-slots/std", "sp-core/std", + "sp-inherents/std", "sp-runtime/std", ] diff --git a/substrate/primitives/consensus/sassafras/src/digests.rs b/substrate/primitives/consensus/sassafras/src/digests.rs index bac31f57f2da..08889201179b 100644 --- a/substrate/primitives/consensus/sassafras/src/digests.rs +++ b/substrate/primitives/consensus/sassafras/src/digests.rs @@ -18,8 +18,8 @@ //! Sassafras digests structures and helpers. use crate::{ - ticket::TicketClaim, vrf::VrfSignature, AuthorityId, AuthorityIndex, AuthoritySignature, - EpochConfiguration, Randomness, Slot, SASSAFRAS_ENGINE_ID, + vrf::VrfSignature, AuthorityId, AuthorityIndex, AuthoritySignature, Randomness, Slot, + SASSAFRAS_ENGINE_ID, }; use codec::{Decode, Encode, MaxEncodedLen}; @@ -34,14 +34,12 @@ use sp_runtime::{DigestItem, RuntimeDebug}; /// This is mandatory for each block. 
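// Illustrative sketch (editorial, assuming the usual Substrate pre-runtime digest
// convention, not shown in this patch): a `SlotClaim` travels in the block header as
//
//   use codec::Encode;
//   let item = DigestItem::PreRuntime(SASSAFRAS_ENGINE_ID, claim.encode());
//
// and is recovered on import by decoding the `PreRuntime` entry with this engine id.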
#[derive(Clone, RuntimeDebug, Encode, Decode, MaxEncodedLen, TypeInfo)] pub struct SlotClaim { - /// Authority index that claimed the slot. - pub authority_idx: AuthorityIndex, /// Corresponding slot number. pub slot: Slot, + /// Authority index that claimed the slot. + pub authority_idx: AuthorityIndex, /// Slot claim VRF signature. pub vrf_signature: VrfSignature, - /// Ticket auxiliary information for claim check. - pub ticket_claim: Option<TicketClaim>, } /// Information about the next epoch. @@ -53,10 +51,6 @@ pub struct NextEpochDescriptor { pub randomness: Randomness, /// Authorities list. pub authorities: Vec<AuthorityId>, - /// Epoch configuration. - /// - /// If not present previous epoch parameters are used. - pub config: Option<EpochConfiguration>, } /// Runtime digest entries. diff --git a/substrate/primitives/consensus/sassafras/src/lib.rs b/substrate/primitives/consensus/sassafras/src/lib.rs index d7880c4de9e8..5254064f104f 100644 --- a/substrate/primitives/consensus/sassafras/src/lib.rs +++ b/substrate/primitives/consensus/sassafras/src/lib.rs @@ -27,6 +27,7 @@ use alloc::vec::Vec; use codec::{Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; use sp_core::crypto::KeyTypeId; +use sp_inherents::{InherentIdentifier, MakeFatalError}; use sp_runtime::{ConsensusEngineId, RuntimeDebug}; pub use sp_consensus_slots::{Slot, SlotDuration}; @@ -39,8 +40,7 @@ pub mod ticket; pub mod vrf; pub use ticket::{ - ticket_id_threshold, EphemeralPublic, EphemeralSignature, TicketBody, TicketClaim, - TicketEnvelope, TicketId, + ticket_id_threshold, EphemeralPublic, EphemeralSignature, TicketBody, TicketEnvelope, TicketId, }; mod app { @@ -48,6 +48,15 @@ app_crypto!(bandersnatch, SASSAFRAS); } +/// Errors that can occur while checking the inherent. +pub type InherentError = MakeFatalError<()>; + +/// The type of the inherent. +pub type InherentType = Vec<TicketEnvelope>; + +/// The identifier for the protocol inherent. +pub const INHERENT_IDENTIFIER: InherentIdentifier = *b"SASSAFRA"; + /// Key type identifier. pub const KEY_TYPE: KeyTypeId = sp_application_crypto::key_types::SASSAFRAS; @@ -82,45 +91,45 @@ pub type EquivocationProof<H> = sp_consensus_slots::EquivocationProof<H, AuthorityId>; - /// Epoch configuration. - pub config: EpochConfiguration, } /// An opaque type used to represent the key ownership proof at the runtime API boundary. diff --git a/substrate/primitives/consensus/sassafras/src/ticket.rs b/substrate/primitives/consensus/sassafras/src/ticket.rs index fd025f1d53ea..334d8553da54 100644 --- a/substrate/primitives/consensus/sassafras/src/ticket.rs +++ b/substrate/primitives/consensus/sassafras/src/ticket.rs @@ -20,8 +20,12 @@ use crate::vrf::RingVrfSignature; use codec::{Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; +use sp_core::{bounded::BoundedVec, ConstU32}; pub use sp_core::ed25519::{Public as EphemeralPublic, Signature as EphemeralSignature}; +use sp_core::U256; + +const TICKET_EXTRA_MAX_LEN: u32 = 128; /// Ticket identifier. /// @@ -30,38 +34,66 @@ pub use sp_core::ed25519::{Public as EphemeralPublic, Signature as EphemeralSign /// Because of this, it is also used as the ticket score to compare against /// the epoch ticket's threshold to decide if the ticket is worth being considered /// for slot assignment (refer to [`ticket_id_threshold`]).
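// Illustrative sketch (editorial, not part of the patch): with the `U256` conversions
// introduced below, the score/threshold comparison can be done numerically or directly
// on the big-endian bytes, since the derived `Ord` agrees with the numeric order:
//
//   let below = U256::from(ticket_id) < U256::from(threshold);
//   let below = ticket_id < threshold; // equivalent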
-pub type TicketId = u128; +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Encode, Decode, MaxEncodedLen, TypeInfo)] +pub struct TicketId(pub [u8; 32]); + +impl core::fmt::Debug for TicketId { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + write!(f, "{}", sp_core::hexdisplay::HexDisplay::from(&self.0)) + } +} + +impl From<U256> for TicketId { + fn from(value: U256) -> Self { + let mut inner = [0; 32]; + value.to_big_endian(&mut inner); + Self(inner) + } +} + +impl From<TicketId> for U256 { + fn from(ticket: TicketId) -> U256 { + U256::from_big_endian(&ticket.0[..]) + } +} /// Ticket data persisted on-chain. #[derive(Debug, Clone, PartialEq, Eq, Encode, Decode, MaxEncodedLen, TypeInfo)] pub struct TicketBody { + /// Ticket identifier. + pub id: TicketId, /// Attempt index. - pub attempt_idx: u32, - /// Ephemeral public key which gets erased when the ticket is claimed. - pub erased_public: EphemeralPublic, - /// Ephemeral public key which gets exposed when the ticket is claimed. - pub revealed_public: EphemeralPublic, + pub attempt: u8, + /// User opaque extra data. + pub extra: BoundedVec<u8, ConstU32<TICKET_EXTRA_MAX_LEN>>, +} + +impl Ord for TicketBody { + fn cmp(&self, other: &Self) -> core::cmp::Ordering { + self.id.cmp(&other.id) + } +} + +impl PartialOrd for TicketBody { + fn partial_cmp(&self, other: &Self) -> Option<core::cmp::Ordering> { + Some(self.cmp(other)) + } } /// Ticket ring vrf signature. pub type TicketSignature = RingVrfSignature; -/// Ticket envelope used on during submission. +/// Ticket envelope used during submission. #[derive(Debug, Clone, PartialEq, Eq, Encode, Decode, MaxEncodedLen, TypeInfo)] pub struct TicketEnvelope { - /// Ticket body. - pub body: TicketBody, + /// Attempt index. + pub attempt: u8, + /// User opaque extra data. + pub extra: BoundedVec<u8, ConstU32<TICKET_EXTRA_MAX_LEN>>, /// Ring signature. pub signature: TicketSignature, } -/// Ticket claim information filled by the block author. -#[derive(Debug, Clone, PartialEq, Eq, Encode, Decode, MaxEncodedLen, TypeInfo)] -pub struct TicketClaim { - /// Signature verified via `TicketBody::erased_public`. - pub erased_signature: EphemeralSignature, -} - /// Computes a boundary for [`TicketId`] maximum allowed value for a given epoch. /// /// Only ticket identifiers below this threshold should be considered as candidates @@ -80,45 +112,52 @@ pub struct TicketClaim { /// For details about the formula and implications refer to /// [*probabilities and parameters*](https://research.web3.foundation/Polkadot/protocols/block-production/SASSAFRAS#probabilities-and-parameters) /// paragraph of the w3f introduction to the protocol.
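// Worked example (editorial, with hypothetical parameters): each of the
// `attempts * validators` candidate tickets falls below the threshold with
// probability ~ redundancy * slots / (attempts * validators), so on average
// ~ redundancy * slots tickets survive per epoch. E.g. with the function below:
//
//   // ≈ U256::MAX * (2 * 600) / (30 * 1023)
//   let threshold = ticket_id_threshold(600, 1023, 30, 2);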
-// TODO: replace with [RFC-26](/~https://github.com/polkadot-fellows/RFCs/pull/26) -// "Tickets Threshold" paragraph once is merged -pub fn ticket_id_threshold( - redundancy: u32, - slots: u32, - attempts: u32, - validators: u32, -) -> TicketId { - let num = redundancy as u64 * slots as u64; +pub fn ticket_id_threshold(slots: u32, validators: u32, attempts: u8, redundancy: u8) -> TicketId { let den = attempts as u64 * validators as u64; - TicketId::max_value() + let num = redundancy as u64 * slots as u64; + U256::MAX .checked_div(den.into()) .unwrap_or_default() .saturating_mul(num.into()) + .into() } #[cfg(test)] mod tests { use super::*; + fn normalize_u256(bytes: [u8; 32]) -> f64 { + let max_u128 = u128::MAX as f64; + let base = max_u128 + 1.0; + let max = max_u128 * (base + 1.0); + + // Extract two u128 segments from the byte array + let h = u128::from_be_bytes(bytes[..16].try_into().unwrap()) as f64; + let l = u128::from_be_bytes(bytes[16..].try_into().unwrap()) as f64; + (h * base + l) / max + } + // This is a trivial example/check which just better explains the rationale // behind the threshold. // // After reading this, the formula should become obvious. #[test] fn ticket_id_threshold_trivial_check() { - // For an epoch with `s` slots we want to accept a number of tickets equal to ~s·r + // For an epoch with `s` slots, with a redundancy factor `r`, we want to accept + // a number of tickets equal to ~s·r. let redundancy = 2; let slots = 1000; let attempts = 100; let validators = 500; - let threshold = ticket_id_threshold(redundancy, slots, attempts, validators); - let threshold = threshold as f64 / TicketId::MAX as f64; + let threshold = ticket_id_threshold(slots, validators, attempts, redundancy); + println!("{:?}", threshold); + let threshold = normalize_u256(threshold.0); + println!("{}", threshold); - // We expect that the total number of tickets allowed to be submitted - // is slots*redundancy - let avt = ((attempts * validators) as f64 * threshold) as u32; - assert_eq!(avt, slots * redundancy); + // We expect that the total number of tickets allowed to be submitted is slots*redundancy + let avt = ((attempts as u32 * validators) as f64 * threshold) as u32; + assert_eq!(avt, slots * redundancy as u32); println!("threshold: {}", threshold); println!("avt = {}", avt); diff --git a/substrate/primitives/consensus/sassafras/src/vrf.rs b/substrate/primitives/consensus/sassafras/src/vrf.rs index f8def1b5f189..afc001318080 100644 --- a/substrate/primitives/consensus/sassafras/src/vrf.rs +++ b/substrate/primitives/consensus/sassafras/src/vrf.rs @@ -17,77 +17,54 @@ //! Utilities related to VRF input, pre-output and signatures. -use crate::{Randomness, TicketBody, TicketId}; -#[cfg(not(feature = "std"))] -use alloc::vec::Vec; -use codec::Encode; +use crate::{Randomness, TicketId}; use sp_consensus_slots::Slot; pub use sp_core::bandersnatch::{ - ring_vrf::{RingProver, RingVerifier, RingVerifierData, RingVrfSignature}, + ring_vrf::{RingProver, RingVerifier, RingVerifierKey, RingVrfSignature}, vrf::{VrfInput, VrfPreOutput, VrfSignData, VrfSignature}, }; /// Ring VRF domain size for Sassafras consensus. pub const RING_VRF_DOMAIN_SIZE: u32 = 2048; +const TICKET_SEAL_CONTEXT: &[u8] = b"sassafras_ticket_seal"; +// const FALLBACK_SEAL_CONTEXT: &[u8] = b"sassafras_fallback_seal"; +const BLOCK_ENTROPY_CONTEXT: &[u8] = b"sassafras_entropy"; + /// Bandersnatch VRF [`RingContext`] specialization for Sassafras using [`RING_VRF_DOMAIN_SIZE`].
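// Illustrative sketch (editorial, not part of the patch): the intended ticket
// creation flow with the helpers below, assuming the current epoch `randomness`,
// an `attempt` index, an opaque `extra` payload and a bandersnatch ring signer:
//
//   let input = ticket_id_input(&randomness, attempt);
//   let sign_data = ticket_id_sign_data(input.clone(), &extra);
//   // ... produce the ring-VRF signature over `sign_data`; with its pre-output:
//   let id = make_ticket_id(&input, &pre_output);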
pub type RingContext = sp_core::bandersnatch::ring_vrf::RingContext<RING_VRF_DOMAIN_SIZE>; -fn vrf_input_from_data( - domain: &[u8], - data: impl IntoIterator<Item = impl AsRef<[u8]>>, -) -> VrfInput { - let buf = data.into_iter().fold(Vec::new(), |mut buf, item| { - let bytes = item.as_ref(); - buf.extend_from_slice(bytes); - let len = u8::try_from(bytes.len()).expect("private function with well known inputs; qed"); - buf.push(len); - buf - }); - VrfInput::new(domain, buf) -} - -/// VRF input to claim slot ownership during block production. -pub fn slot_claim_input(randomness: &Randomness, slot: Slot, epoch: u64) -> VrfInput { - vrf_input_from_data( - b"sassafras-claim-v1.0", - [randomness.as_slice(), &slot.to_le_bytes(), &epoch.to_le_bytes()], - ) +/// VRF input to generate the ticket id. +pub fn ticket_id_input(randomness: &Randomness, attempt: u8) -> VrfInput { + VrfInput::new(b"sassafras", [TICKET_SEAL_CONTEXT, randomness.as_slice(), &[attempt]].concat()) } -/// Signing-data to claim slot ownership during block production. -pub fn slot_claim_sign_data(randomness: &Randomness, slot: Slot, epoch: u64) -> VrfSignData { - let input = slot_claim_input(randomness, slot, epoch); +/// Data to be signed via ring-vrf. +pub fn ticket_id_sign_data(ticket_id_input: VrfInput, extra_data: &[u8]) -> VrfSignData { VrfSignData::new_unchecked( - b"sassafras-slot-claim-transcript-v1.0", - Option::<&[u8]>::None, - Some(input), - ) -} - -/// VRF input to generate the ticket id. -pub fn ticket_id_input(randomness: &Randomness, attempt: u32, epoch: u64) -> VrfInput { - vrf_input_from_data( - b"sassafras-ticket-v1.0", - [randomness.as_slice(), &attempt.to_le_bytes(), &epoch.to_le_bytes()], + b"sassafras-ticket-body-transcript", + Some(extra_data), + Some(ticket_id_input), ) } -/// VRF input to generate the revealed key. -pub fn revealed_key_input(randomness: &Randomness, attempt: u32, epoch: u64) -> VrfInput { - vrf_input_from_data( - b"sassafras-revealed-v1.0", - [randomness.as_slice(), &attempt.to_le_bytes(), &epoch.to_le_bytes()], +/// VRF input to produce randomness. +pub fn block_randomness_input(randomness: &Randomness, slot: Slot) -> VrfInput { + // TODO: @davxy: implement as JAM + VrfInput::new( + b"sassafras", + [BLOCK_ENTROPY_CONTEXT, randomness.as_slice(), &slot.to_le_bytes()].concat(), ) } -/// Data to be signed via ring-vrf. -pub fn ticket_body_sign_data(ticket_body: &TicketBody, ticket_id_input: VrfInput) -> VrfSignData { +/// Signing-data to claim slot ownership during block production. +pub fn block_randomness_sign_data(randomness: &Randomness, slot: Slot) -> VrfSignData { + let input = block_randomness_input(randomness, slot); VrfSignData::new_unchecked( - b"sassafras-ticket-body-transcript-v1.0", - Some(ticket_body.encode().as_slice()), - Some(ticket_id_input), + b"sassafras-randomness-transcript", + Option::<&[u8]>::None, + Some(input), ) } @@ -97,15 +74,5 @@ pub fn ticket_body_sign_data(ticket_body: &TicketBody, ticket_id_input: VrfInput /// Pre-output should have been obtained from the input directly using the vrf /// secret key or from the vrf signature pre-outputs. pub fn make_ticket_id(input: &VrfInput, pre_output: &VrfPreOutput) -> TicketId { - let bytes = pre_output.make_bytes::<16>(b"ticket-id", input); - u128::from_le_bytes(bytes) -} - -/// Make revealed key seed from a given VRF input and pre-output. -/// -/// Input should have been obtained via [`revealed_key_input`]. -/// Pre-output should have been obtained from the input directly using the vrf -/// secret key or from the vrf signature pre-outputs.
-pub fn make_revealed_key_seed(input: &VrfInput, pre_output: &VrfPreOutput) -> [u8; 32] { - pre_output.make_bytes::<32>(b"revealed-seed", input) + TicketId(pre_output.make_bytes::<32>(b"ticket-id", input)) } diff --git a/substrate/primitives/core/src/bandersnatch.rs b/substrate/primitives/core/src/bandersnatch.rs index 25bf4657030f..3001a1a55541 100644 --- a/substrate/primitives/core/src/bandersnatch.rs +++ b/substrate/primitives/core/src/bandersnatch.rs @@ -204,7 +204,7 @@ pub mod vrf { /// This object is used to produce an arbitrary number of verifiable pseudo random /// bytes and is often called pre-output to emphasize that this is not the actual /// output of the VRF but an object capable of generating the output. - #[derive(Clone, Debug, PartialEq, Eq)] + #[derive(Copy, Clone, Debug, PartialEq, Eq)] pub struct VrfPreOutput(pub(super) bandersnatch_vrfs::VrfPreOut); impl Encode for VrfPreOutput { @@ -227,8 +227,6 @@ } } - impl EncodeLike for VrfPreOutput {} - impl MaxEncodedLen for VrfPreOutput { fn max_encoded_len() -> usize { <[u8; PREOUT_SERIALIZED_SIZE]>::max_encoded_len() @@ -503,20 +501,20 @@ pub mod ring_vrf { pub(crate) const RING_SIGNATURE_SERIALIZED_SIZE: usize = 755; /// remove as soon as serialization is implemented by the backend - pub struct RingVerifierData { + pub struct RingVerifierKey { /// Domain size. pub domain_size: u32, /// Verifier key. pub verifier_key: VerifierKey, } - impl From for RingVerifier { - fn from(vd: RingVerifierData) -> RingVerifier { + impl From for RingVerifier { + fn from(vd: RingVerifierKey) -> RingVerifier { bandersnatch_vrfs::ring::make_ring_verifier(vd.verifier_key, vd.domain_size as usize) } } - impl Encode for RingVerifierData { + impl Encode for RingVerifierKey { fn encode(&self) -> Vec<u8> { const ERR_STR: &str = "serialization length is constant and checked by test; qed"; let mut buf = [0; RING_VERIFIER_DATA_SERIALIZED_SIZE]; @@ -526,7 +524,7 @@ } } - impl Decode for RingVerifierData { + impl Decode for RingVerifierKey { fn decode<R: codec::Input>(i: &mut R) -> Result<Self, codec::Error> { const ERR_STR: &str = "serialization length is constant and checked by test; qed"; let buf = <[u8; RING_VERIFIER_DATA_SERIALIZED_SIZE]>::decode(i)?; @@ -535,19 +533,19 @@ .expect(ERR_STR); let verifier_key = <VerifierKey>::deserialize_compressed_unchecked(&mut &buf[4..]).expect(ERR_STR); - Ok(RingVerifierData { domain_size, verifier_key }) + Ok(RingVerifierKey { domain_size, verifier_key }) } } - impl EncodeLike for RingVerifierData {} + impl EncodeLike for RingVerifierKey {} - impl MaxEncodedLen for RingVerifierData { + impl MaxEncodedLen for RingVerifierKey { fn max_encoded_len() -> usize { <[u8; RING_VERIFIER_DATA_SERIALIZED_SIZE]>::max_encoded_len() } } - impl TypeInfo for RingVerifierData { + impl TypeInfo for RingVerifierKey { type Identity = [u8; RING_VERIFIER_DATA_SERIALIZED_SIZE]; fn type_info() -> scale_info::Type { @@ -601,13 +599,13 @@ } /// Information required for a lazy construction of a ring verifier.
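// Illustrative sketch (editorial, not part of the patch): the rename makes the
// lazy-verification flow read naturally. The compact key is what gets encoded and
// stored; the expanded verifier is rebuilt only when actually verifying:
//
//   let vk = ring_ctx.verifier_key(&public_keys)?;
//   let verifier: RingVerifier = vk.into();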
- pub fn verifier_data(&self, public_keys: &[Public]) -> Option<RingVerifierData> { + pub fn verifier_key(&self, public_keys: &[Public]) -> Option<RingVerifierKey> { let mut pks = Vec::with_capacity(public_keys.len()); for public_key in public_keys { let pk = PublicKey::deserialize_compressed_unchecked(public_key.as_slice()).ok()?; pks.push(pk.0.into()); } - Some(RingVerifierData { + Some(RingVerifierKey { verifier_key: self.0.verifier_key(pks), domain_size: self.0.domain_size, }) @@ -1070,19 +1068,19 @@ mod tests { } #[test] - fn encode_decode_verifier_data() { + fn encode_decode_verifier_key() { let ring_ctx = TestRingContext::new_testing(); let pks: Vec<_> = (0..16).map(|i| Pair::from_seed(&[i as u8; 32]).public()).collect(); assert!(pks.len() <= ring_ctx.max_keyset_size()); - let verifier_data = ring_ctx.verifier_data(&pks).unwrap(); - let enc1 = verifier_data.encode(); + let verifier_key = ring_ctx.verifier_key(&pks).unwrap(); + let enc1 = verifier_key.encode(); assert_eq!(enc1.len(), RING_VERIFIER_DATA_SERIALIZED_SIZE); - assert_eq!(RingVerifierData::max_encoded_len(), RING_VERIFIER_DATA_SERIALIZED_SIZE); + assert_eq!(RingVerifierKey::max_encoded_len(), RING_VERIFIER_DATA_SERIALIZED_SIZE); - let vd2 = RingVerifierData::decode(&mut enc1.as_slice()).unwrap(); + let vd2 = RingVerifierKey::decode(&mut enc1.as_slice()).unwrap(); let enc2 = vd2.encode(); assert_eq!(enc1, enc2); diff --git a/substrate/primitives/inherents/src/client_side.rs b/substrate/primitives/inherents/src/client_side.rs index 3c299dfa4eea..45884223f9a0 100644 --- a/substrate/primitives/inherents/src/client_side.rs +++ b/substrate/primitives/inherents/src/client_side.rs @@ -99,9 +99,11 @@ pub trait InherentDataProvider: Send + Sync { /// If the given error could not be decoded, `None` should be returned. async fn try_handle_error( &self, - identifier: &InherentIdentifier, - error: &[u8], - ) -> Option<Result<(), Error>>; + _identifier: &InherentIdentifier, + _error: &[u8], + ) -> Option<Result<(), Error>> { + None + } } #[impl_trait_for_tuples::impl_for_tuples(30)] diff --git a/substrate/primitives/keyring/src/sr25519.rs b/substrate/primitives/keyring/src/sr25519.rs index a3a506152d7d..8e13c39af005 100644 --- a/substrate/primitives/keyring/src/sr25519.rs +++ b/substrate/primitives/keyring/src/sr25519.rs @@ -159,14 +159,14 @@ impl FromStr for Keyring { fn from_str(s: &str) -> Result<Self, Self::Err> { match s { - "alice" => Ok(Keyring::Alice), - "bob" => Ok(Keyring::Bob), - "charlie" => Ok(Keyring::Charlie), - "dave" => Ok(Keyring::Dave), - "eve" => Ok(Keyring::Eve), - "ferdie" => Ok(Keyring::Ferdie), - "one" => Ok(Keyring::One), - "two" => Ok(Keyring::Two), + "Alice" => Ok(Keyring::Alice), + "Bob" => Ok(Keyring::Bob), + "Charlie" => Ok(Keyring::Charlie), + "Dave" => Ok(Keyring::Dave), + "Eve" => Ok(Keyring::Eve), + "Ferdie" => Ok(Keyring::Ferdie), + "One" => Ok(Keyring::One), + "Two" => Ok(Keyring::Two), _ => Err(ParseKeyringError), } } diff --git a/substrate/test-utils/runtime/Cargo.toml b/substrate/test-utils/runtime/Cargo.toml index 1c82c73072bc..229054c058b8 100644 --- a/substrate/test-utils/runtime/Cargo.toml +++ b/substrate/test-utils/runtime/Cargo.toml @@ -19,6 +19,7 @@ targets = ["x86_64-unknown-linux-gnu"] sp-application-crypto = { features = ["serde"], workspace = true } sp-consensus-aura = { features = ["serde"], workspace = true } sp-consensus-babe = { features = ["serde"], workspace = true } +sp-consensus-sassafras = { path = "../../primitives/consensus/sassafras", default-features = false, features = ["serde"] } sp-genesis-builder = { workspace = true } sp-block-builder = { workspace =
true } codec = { features = ["derive"], workspace = true } @@ -35,6 +36,7 @@ sp-session = { workspace = true } sp-api = { workspace = true } sp-runtime = { features = ["serde"], workspace = true } pallet-babe = { workspace = true } +pallet-sassafras = { path = "../../frame/sassafras", default-features = false } pallet-balances = { workspace = true } frame-executive = { workspace = true } frame-metadata-hash-extension = { workspace = true } @@ -84,6 +86,7 @@ std = [ "log/std", "pallet-babe/std", "pallet-balances/std", + "pallet-sassafras/std", "pallet-timestamp/std", "sc-executor/std", "sc-service", @@ -96,6 +99,7 @@ std = [ "sp-consensus-aura/std", "sp-consensus-babe/std", "sp-consensus-grandpa/std", + "sp-consensus-sassafras/std", "sp-core/std", "sp-crypto-hashing/std", "sp-externalities/std", diff --git a/substrate/test-utils/runtime/client/Cargo.toml b/substrate/test-utils/runtime/client/Cargo.toml index 5dd3c304f4a8..a57c360ad0b1 100644 --- a/substrate/test-utils/runtime/client/Cargo.toml +++ b/substrate/test-utils/runtime/client/Cargo.toml @@ -26,3 +26,4 @@ sp-core = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } substrate-test-client = { workspace = true } substrate-test-runtime = { workspace = true } +sp-crypto-ec-utils = { workspace = true, features = ["bls12-381", "ed-on-bls12-381-bandersnatch"] } diff --git a/substrate/test-utils/runtime/client/src/lib.rs b/substrate/test-utils/runtime/client/src/lib.rs index 435f3f5ebacb..84a7708089b9 100644 --- a/substrate/test-utils/runtime/client/src/lib.rs +++ b/substrate/test-utils/runtime/client/src/lib.rs @@ -48,6 +48,25 @@ pub mod prelude { pub use super::{AccountKeyring, Sr25519Keyring}; } +/// A unit struct which implements `NativeExecutionDispatch` feeding in the +/// hard-coded runtime. +pub struct LocalExecutorDispatch; + +impl sc_executor::NativeExecutionDispatch for LocalExecutorDispatch { + type ExtendHostFunctions = ( + sp_crypto_ec_utils::bls12_381::host_calls::HostFunctions, + sp_crypto_ec_utils::ed_on_bls12_381_bandersnatch::host_calls::HostFunctions, + ); + + fn dispatch(method: &str, data: &[u8]) -> Option<Vec<u8>> { + substrate_test_runtime::api::dispatch(method, data) + } + + fn native_version() -> sc_executor::NativeVersion { + substrate_test_runtime::native_version() + } +} + /// Test client database backend.
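// Illustrative note (editorial assumption, not from the patch): the extra host
// functions wired into `LocalExecutorDispatch` above expose the bls12-381 and
// bandersnatch elliptic-curve accelerators to the test runtime, e.g. when the
// dispatcher is plugged into an executor as:
//
//   type Executor = sc_executor::NativeElseWasmExecutor<LocalExecutorDispatch>;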
pub type Backend = substrate_test_client::Backend<substrate_test_runtime::Block>; diff --git a/substrate/test-utils/runtime/res/default_genesis_config.json b/substrate/test-utils/runtime/res/default_genesis_config.json index 95c7799a033d..ee32ff6968b0 100644 --- a/substrate/test-utils/runtime/res/default_genesis_config.json +++ b/substrate/test-utils/runtime/res/default_genesis_config.json @@ -23,6 +23,17 @@ "allowed_slots": "PrimaryAndSecondaryPlainSlots" } }, + "sassafras": { + "authorities": [ + "KmTJSgAeSqH6VFSbfLuAGPtNfkXD5NQr2mqgomtyckpvfbRpn", + "KYW9snBs4hEMC2MFbXTWHjHVRt2Mov91h7mEsAEkQfMy9PD61", + "KXMr3GG4GkpFoQoDXUPEcPbSWePNzuPNtEPycdJ3yveZnm56G" + ], + "epochConfig": { + "redundancy_factor": 1, + "attempts_number": 32 + } + }, "substrateTest": { "authorities": [ "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY", diff --git a/substrate/test-utils/runtime/res/default_genesis_config_incomplete.json b/substrate/test-utils/runtime/res/default_genesis_config_incomplete.json index 510ed87c93c5..7fb2b2dc07c9 100644 --- a/substrate/test-utils/runtime/res/default_genesis_config_incomplete.json +++ b/substrate/test-utils/runtime/res/default_genesis_config_incomplete.json @@ -16,6 +16,17 @@ "5FLSigC9HGRKVhB9FiEo4Y3koPsNmBmLJbpXg2mp1hXcS59Y" ] }, + "sassafras": { + "authorities": [ + "KmTJSgAeSqH6VFSbfLuAGPtNfkXD5NQr2mqgomtyckpvfbRpn", + "KYW9snBs4hEMC2MFbXTWHjHVRt2Mov91h7mEsAEkQfMy9PD61", + "KXMr3GG4GkpFoQoDXUPEcPbSWePNzuPNtEPycdJ3yveZnm56G" + ], + "epochConfig": { + "redundancy_factor": 1, + "attempts_number": 32 + } + }, "balances": { "balances": [ [ diff --git a/substrate/test-utils/runtime/res/default_genesis_config_invalid.json b/substrate/test-utils/runtime/res/default_genesis_config_invalid.json index f8e06f91d665..d117f2c5e5d2 100644 --- a/substrate/test-utils/runtime/res/default_genesis_config_invalid.json +++ b/substrate/test-utils/runtime/res/default_genesis_config_invalid.json @@ -23,6 +23,17 @@ "allowed_slots": "PrimaryAndSecondaryPlainSlots" } }, + "sassafras": { + "authorities": [ + "KmTJSgAeSqH6VFSbfLuAGPtNfkXD5NQr2mqgomtyckpvfbRpn", + "KYW9snBs4hEMC2MFbXTWHjHVRt2Mov91h7mEsAEkQfMy9PD61", + "KXMr3GG4GkpFoQoDXUPEcPbSWePNzuPNtEPycdJ3yveZnm56G" + ], + "epochConfig": { + "redundancy_factor": 1, + "attempts_number": 32 + } + }, "substrateTest": { "authorities": [ "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY", diff --git a/substrate/test-utils/runtime/src/genesismap.rs b/substrate/test-utils/runtime/src/genesismap.rs index 9e972886b377..b06731e1d63b 100644 --- a/substrate/test-utils/runtime/src/genesismap.rs +++ b/substrate/test-utils/runtime/src/genesismap.rs @@ -23,11 +23,11 @@ use super::{ use codec::Encode; use sc_service::construct_genesis_block; use sp_core::{ - sr25519, + bandersnatch, sr25519, storage::{well_known_keys, StateVersion, Storage}, Pair, }; -use sp_keyring::{AccountKeyring, Sr25519Keyring}; +use sp_keyring::AccountKeyring; use sp_runtime::{ traits::{Block as BlockT, Hash as HashT, Header as HeaderT}, BuildStorage, @@ -54,9 +54,9 @@ impl Default for GenesisStorageBuilder { fn default() -> Self { Self::new( vec![ - Sr25519Keyring::Alice.into(), - Sr25519Keyring::Bob.into(), - Sr25519Keyring::Charlie.into(), + AccountKeyring::Alice.into(), + AccountKeyring::Bob.into(), + AccountKeyring::Charlie.into(), ], (0..16_usize) .into_iter() @@ -109,11 +109,23 @@ impl GenesisStorageBuilder { /// A `RuntimeGenesisConfig` from internal configuration pub fn genesis_config(&self) -> RuntimeGenesisConfig { - let authorities_sr25519: Vec<_> = self + let authorities_sr25519: Vec<sr25519::Public> = self .authorities -
.clone() - .into_iter() - .map(|id| sr25519::Public::from(id)) + .iter() + .map(|id| { + use std::str::FromStr; + let seed: &'static str = AccountKeyring::from_public(id).unwrap().into(); + sp_keyring::Sr25519Keyring::from_str(&seed).unwrap().into() + }) + .collect(); + let authorities_bandersnatch: Vec<bandersnatch::Public> = self + .authorities + .iter() + .map(|id| { + use std::str::FromStr; + let seed: &'static str = AccountKeyring::from_public(id).unwrap().into(); + sp_keyring::BandersnatchKeyring::from_str(&seed).unwrap().into() + }) .collect(); RuntimeGenesisConfig { @@ -126,6 +138,14 @@ impl GenesisStorageBuilder { .collect(), ..Default::default() }, + sassafras: pallet_sassafras::GenesisConfig { + authorities: authorities_bandersnatch.into_iter().map(|x| x.into()).collect(), + epoch_config: sp_consensus_sassafras::EpochConfiguration { + redundancy_factor: 1, + attempts_number: 32, + }, + ..Default::default() + }, substrate_test: substrate_test_pallet::GenesisConfig { authorities: authorities_sr25519.clone(), ..Default::default() diff --git a/substrate/test-utils/runtime/src/lib.rs b/substrate/test-utils/runtime/src/lib.rs index d1a3eaa2daa9..d086a8b16360 100644 --- a/substrate/test-utils/runtime/src/lib.rs +++ b/substrate/test-utils/runtime/src/lib.rs @@ -76,7 +76,10 @@ pub use sp_consensus_babe::{AllowedSlots, BabeEpochConfiguration, Slot}; pub use pallet_balances::Call as BalancesCall; +// Ensure Babe, Sassafras and Aura use the same crypto to simplify things a bit. pub type AuraId = sp_consensus_aura::sr25519::AuthorityId; +pub type SassafrasId = sp_consensus_sassafras::AuthorityId; + #[cfg(feature = "std")] pub use extrinsic::{ExtrinsicBuilder, Transfer}; @@ -310,6 +313,7 @@ construct_runtime!( { System: frame_system, Babe: pallet_babe, + Sassafras: pallet_sassafras, SubstrateTest: substrate_test_pallet::pallet, Balances: pallet_balances, } @@ -403,22 +407,36 @@ impl pallet_timestamp::Config for Runtime { type WeightInfo = pallet_timestamp::weights::SubstrateWeight<Runtime>; } -parameter_types! { - pub const EpochDuration: u64 = 6; -} +const EPOCH_LENGTH: u32 = 6; +const MAX_AUTHORITIES: u32 = 10; impl pallet_babe::Config for Runtime { - type EpochDuration = EpochDuration; + type EpochDuration = ConstU64<{ EPOCH_LENGTH as u64 }>; type ExpectedBlockTime = ConstU64<10_000>; type EpochChangeTrigger = pallet_babe::SameAuthoritiesForever; type DisabledValidators = (); type KeyOwnerProof = sp_core::Void; type EquivocationReportSystem = (); type WeightInfo = (); - type MaxAuthorities = ConstU32<10>; + type MaxAuthorities = ConstU32<MAX_AUTHORITIES>; type MaxNominators = ConstU32<100>; } +impl pallet_sassafras::Config for Runtime { + type EpochLength = ConstU32<EPOCH_LENGTH>; + type EpochChangeTrigger = pallet_sassafras::EpochChangeInternalTrigger; + type MaxAuthorities = ConstU32<MAX_AUTHORITIES>; + type WeightInfo = (); +} + +impl<C> frame_system::offchain::SendTransactionTypes<C> for Runtime +where + RuntimeCall: From<C>, +{ + type Extrinsic = Extrinsic; + type OverarchingCall = RuntimeCall; +} + /// Adds one to the given input and returns the final result. #[inline(never)] fn benchmark_add_one(i: u64) -> u64 { @@ -630,7 +648,7 @@ impl_runtime_apis! { let epoch_config = Babe::epoch_config().unwrap_or(TEST_RUNTIME_BABE_EPOCH_CONFIGURATION); sp_consensus_babe::BabeConfiguration { slot_duration: Babe::slot_duration(), - epoch_length: EpochDuration::get(), + epoch_length: EPOCH_LENGTH as u64, c: epoch_config.c, authorities: Babe::authorities().to_vec(), randomness: Babe::randomness(), @@ -667,6 +685,51 @@ impl_runtime_apis!
{ } } + impl sp_consensus_sassafras::SassafrasApi<Block> for Runtime { + fn ring_context() -> Option<sp_consensus_sassafras::vrf::RingContext> { + Sassafras::ring_context() + } + + fn submit_tickets_unsigned_extrinsic( + tickets: Vec<sp_consensus_sassafras::TicketEnvelope> + ) -> bool { + Sassafras::submit_tickets_unsigned_extrinsic(tickets) + } + + fn current_epoch() -> sp_consensus_sassafras::Epoch { + Sassafras::current_epoch() + } + + fn next_epoch() -> sp_consensus_sassafras::Epoch { + Sassafras::next_epoch() + } + + fn slot_ticket_id(slot: sp_consensus_sassafras::Slot) -> Option<sp_consensus_sassafras::TicketId> { + Sassafras::slot_ticket_id(slot) + } + + fn slot_ticket( + slot: sp_consensus_sassafras::Slot + ) -> Option<(sp_consensus_sassafras::TicketId, sp_consensus_sassafras::TicketBody)> { + Sassafras::slot_ticket(slot) + } + + fn generate_key_ownership_proof( + _authority_id: sp_consensus_sassafras::AuthorityId, + ) -> Option<sp_consensus_sassafras::OpaqueKeyOwnershipProof> { + // TODO @davxy + None + } + + fn submit_report_equivocation_unsigned_extrinsic( + _equivocation_proof: sp_consensus_sassafras::EquivocationProof<<Block as BlockT>::Header>, + _key_owner_proof: sp_consensus_sassafras::OpaqueKeyOwnershipProof, + ) -> bool { + // TODO @davxy + false + } + } + impl sp_offchain::OffchainWorkerApi<Block> for Runtime { fn offchain_worker(header: &<Block as BlockT>::Header) { let ext = Extrinsic::new_unsigned( @@ -892,6 +955,10 @@ pub mod storage_key_generator { vec![b"Babe", b"EpochConfig"], vec![b"Babe", b"NextAuthorities"], vec![b"Babe", b"SegmentIndex"], + vec![b"Sassafras", b":__STORAGE_VERSION__:"], + vec![b"Sassafras", b"EpochConfig"], + vec![b"Sassafras", b"Authorities"], + vec![b"Sassafras", b"NextAuthorities"], vec![b"Balances", b":__STORAGE_VERSION__:"], vec![b"Balances", b"TotalIssuance"], vec![b"SubstrateTest", b":__STORAGE_VERSION__:"], @@ -950,29 +1017,28 @@ pub mod storage_key_generator { let mut res = vec![ //SubstrateTest|:__STORAGE_VERSION__: "00771836bebdd29870ff246d305c578c4e7b9012096b41c4eb3aaf947f6ea429", - //SubstrateTest|Authorities + // SubstrateTest|Authorities "00771836bebdd29870ff246d305c578c5e0621c4869aa60c02be9adcc98a0d1d", - //Babe|:__STORAGE_VERSION__: + // Babe|:__STORAGE_VERSION__: "1cb6f36e027abb2091cfb5110ab5087f4e7b9012096b41c4eb3aaf947f6ea429", - //Babe|Authorities + // Babe|Authorities "1cb6f36e027abb2091cfb5110ab5087f5e0621c4869aa60c02be9adcc98a0d1d", - //Babe|SegmentIndex + // Babe|SegmentIndex "1cb6f36e027abb2091cfb5110ab5087f66e8f035c8adbe7f1547b43c51e6f8a4", - //Babe|NextAuthorities + // Babe|NextAuthorities "1cb6f36e027abb2091cfb5110ab5087faacf00b9b41fda7a9268821c2a2b3e4c", - //Babe|EpochConfig + // Babe|EpochConfig "1cb6f36e027abb2091cfb5110ab5087fdc6b171b77304263c292cc3ea5ed31ef", - //System|:__STORAGE_VERSION__: + // System|:__STORAGE_VERSION__: "26aa394eea5630e07c48ae0c9558cef74e7b9012096b41c4eb3aaf947f6ea429", - //System|UpgradedToU32RefCount + // System|UpgradedToU32RefCount "26aa394eea5630e07c48ae0c9558cef75684a022a34dd8bfa2baaf44f172b710", - //System|ParentHash + // System|ParentHash "26aa394eea5630e07c48ae0c9558cef78a42f33323cb5ced3b44dd825fda9fcc", - //System::BlockHash|0 + // System::BlockHash|0 "26aa394eea5630e07c48ae0c9558cef7a44704b568d21667356a5a050c118746bb1bdbcacd6ac9340000000000000000", - //System|UpgradedToTripleRefCount + // System|UpgradedToTripleRefCount "26aa394eea5630e07c48ae0c9558cef7a7fd6c28836b9a28522dc924110cf439", - // System|Account|blake2_128Concat("//11") "26aa394eea5630e07c48ae0c9558cef7b99d880ec681799c0cf30e8886371da901cae4e3edfbb32c91ed3f01ab964f4eeeab50338d8e5176d3141802d7b010a55dadcd5f23cf8aaafa724627e967e90e", // System|Account|blake2_128Concat("//4") @@ -1017,6 +1083,14 @@ pub mod storage_key_generator {
"3a636f6465", // :extrinsic_index "3a65787472696e7369635f696e646578", + // Sassafras|__STORAGE_VERSION__: + "be5e1f844c68e483aa815e45bbd9d3184e7b9012096b41c4eb3aaf947f6ea429", + // Sassafras|Authorities + "be5e1f844c68e483aa815e45bbd9d3185e0621c4869aa60c02be9adcc98a0d1d", + // Sassafras|NextAuthorities + "be5e1f844c68e483aa815e45bbd9d318aacf00b9b41fda7a9268821c2a2b3e4c", + // Sassafras|EpochConfig + "be5e1f844c68e483aa815e45bbd9d318dc6b171b77304263c292cc3ea5ed31ef", // Balances|:__STORAGE_VERSION__: "c2261276cc9d1f8598ea4b6a74b15c2f4e7b9012096b41c4eb3aaf947f6ea429", // Balances|TotalIssuance @@ -1261,7 +1335,7 @@ mod tests { #[test] fn build_minimal_genesis_config_works() { sp_tracing::try_init_simple(); - let default_minimal_json = r#"{"system":{},"babe":{"authorities":[],"epochConfig":{"c": [ 3, 10 ],"allowed_slots":"PrimaryAndSecondaryPlainSlots"}},"substrateTest":{"authorities":[]},"balances":{"balances":[]}}"#; + let default_minimal_json = r#"{"system":{},"babe":{"authorities":[],"epochConfig":{"c": [ 3, 10 ],"allowed_slots":"PrimaryAndSecondaryPlainSlots"}},"sassafras":{"authorities":[],"epochConfig":{"redundancy_factor": 1,"attempts_number": 32}},"substrateTest":{"authorities":[]},"balances":{"balances":[]}}"#; let mut t = BasicExternalities::new_empty(); executor_call(&mut t, "GenesisBuilder_build_state", &default_minimal_json.encode()) @@ -1302,6 +1376,15 @@ mod tests { "1cb6f36e027abb2091cfb5110ab5087f4e7b9012096b41c4eb3aaf947f6ea429", //SubstrateTest|:__STORAGE_VERSION__: "00771836bebdd29870ff246d305c578c4e7b9012096b41c4eb3aaf947f6ea429", + + // Sassafras|__STORAGE_VERSION__: + "be5e1f844c68e483aa815e45bbd9d3184e7b9012096b41c4eb3aaf947f6ea429", + // Sassafras|Authorities + "be5e1f844c68e483aa815e45bbd9d3185e0621c4869aa60c02be9adcc98a0d1d", + // Sassafras|NextAuthorities + "be5e1f844c68e483aa815e45bbd9d318aacf00b9b41fda7a9268821c2a2b3e4c", + // Sassafras|EpochConfig + "be5e1f844c68e483aa815e45bbd9d318dc6b171b77304263c292cc3ea5ed31ef", ].into_iter().map(String::from).collect::>(); expected.sort(); @@ -1319,7 +1402,8 @@ mod tests { .expect("default config is there"); let json = String::from_utf8(r.into()).expect("returned value is json. qed."); - let expected = r#"{"system":{},"babe":{"authorities":[],"epochConfig":{"c":[1,4],"allowed_slots":"PrimaryAndSecondaryVRFSlots"}},"substrateTest":{"authorities":[]},"balances":{"balances":[]}}"#; + let expected = r#"{"system":{},"babe":{"authorities":[],"epochConfig":{"c":[1,4],"allowed_slots":"PrimaryAndSecondaryVRFSlots"}},"sassafras":{"authorities":[],"epochConfig":{"redundancy_factor":0,"attempts_number":0}},"substrateTest":{"authorities":[]},"balances":{"balances":[]}}"#; + assert_eq!(expected.to_string(), json); }