From 6f5f7cecd89e59e056d8485d8a530fa272ab7547 Mon Sep 17 00:00:00 2001
From: Egor_P
Date: Wed, 4 Dec 2024 10:58:01 +0100
Subject: [PATCH 1/4] [stable2412] Backport #6734 (#6750)

Backport #6734 into `stable2412`
---
 .github/scripts/common/lib.sh                 | 45 +++-
 .github/scripts/release/release_lib.sh        | 22 ++
 ...le.yml => release-10_branchoff-stable.yml} |  0
 ...ation.yml => release-11_rc-automation.yml} |  0
 ...e-build-rc.yml => release-20_build-rc.yml} | 96 +++++++-
 .../release-30_publish_release_draft.yml      | 206 +++++++++++-------
 .../workflows/release-50_publish-docker.yml   | 97 +++------
 .../workflows/release-reusable-rc-buid.yml    | 53 ++++-
 .github/workflows/release-srtool.yml          | 18 +-
 9 files changed, 373 insertions(+), 164 deletions(-)
 rename .github/workflows/{release-branchoff-stable.yml => release-10_branchoff-stable.yml} (100%)
 rename .github/workflows/{release-10_rc-automation.yml => release-11_rc-automation.yml} (100%)
 rename .github/workflows/{release-build-rc.yml => release-20_build-rc.yml} (62%)

diff --git a/.github/scripts/common/lib.sh b/.github/scripts/common/lib.sh
index 6b8f70a26d7eb..41dc0ba06dd2c 100755
--- a/.github/scripts/common/lib.sh
+++ b/.github/scripts/common/lib.sh
@@ -270,20 +270,19 @@ fetch_debian_package_from_s3() {
 }
 
 # Fetch the release artifacts like binary and signatures from S3. Assumes the ENV are set:
-# - RELEASE_ID
-# - GITHUB_TOKEN
-# - REPO in the form paritytech/polkadot
+# inputs: binary (polkadot), target (aarch64-apple-darwin)
 fetch_release_artifacts_from_s3() {
   BINARY=$1
-  OUTPUT_DIR=${OUTPUT_DIR:-"./release-artifacts/${BINARY}"}
+  TARGET=$2
+  OUTPUT_DIR=${OUTPUT_DIR:-"./release-artifacts/${TARGET}/${BINARY}"}
   echo "OUTPUT_DIR : $OUTPUT_DIR"
   URL_BASE=$(get_s3_url_base $BINARY)
   echo "URL_BASE=$URL_BASE"
-  URL_BINARY=$URL_BASE/$VERSION/$BINARY
-  URL_SHA=$URL_BASE/$VERSION/$BINARY.sha256
-  URL_ASC=$URL_BASE/$VERSION/$BINARY.asc
+  URL_BINARY=$URL_BASE/$VERSION/$TARGET/$BINARY
+  URL_SHA=$URL_BASE/$VERSION/$TARGET/$BINARY.sha256
+  URL_ASC=$URL_BASE/$VERSION/$TARGET/$BINARY.asc
 
   # Fetch artifacts
   mkdir -p "$OUTPUT_DIR"
@@ -306,15 +305,26 @@ fetch_release_artifacts_from_s3() {
 function get_s3_url_base() {
   name=$1
   case $name in
-    polkadot | polkadot-execute-worker | polkadot-prepare-worker | staking-miner)
+    polkadot | polkadot-execute-worker | polkadot-prepare-worker )
       printf "https://releases.parity.io/polkadot"
       ;;
-    polkadot-parachain)
-      printf "https://releases.parity.io/cumulus"
+    polkadot-parachain)
+      printf "https://releases.parity.io/polkadot-parachain"
+      ;;
+
+    polkadot-omni-node)
+      printf "https://releases.parity.io/polkadot-omni-node"
+      ;;
+
+    chain-spec-builder)
+      printf "https://releases.parity.io/chain-spec-builder"
       ;;
-    *)
+    frame-omni-bencher)
+      printf "https://releases.parity.io/frame-omni-bencher"
+      ;;
+    *)
       printf "UNSUPPORTED BINARY $name"
       exit 1
       ;;
   esac
@@ -497,3 +507,16 @@ validate_stable_tag() {
     exit 1
   fi
 }
+
+# Prepare docker stable tag from the polkadot stable tag
+# input: tag (polkadot-stableYYMM(-X) or polkadot-stableYYMM(-X)-rcX)
+# output: stableYYMM(-X) or stableYYMM(-X)-rcX
+prepare_docker_stable_tag() {
+  tag="$1"
+  if [[ "$tag" =~ stable[0-9]{4}(-[0-9]+)?(-rc[0-9]+)? ]]; then
+    echo "${BASH_REMATCH[0]}"
+  else
+    echo "Tag is invalid: $tag"
+    exit 1
+  fi
+}
diff --git a/.github/scripts/release/release_lib.sh b/.github/scripts/release/release_lib.sh
index 8b9254ec3f290..43227180cb7c7 100644
--- a/.github/scripts/release/release_lib.sh
+++ b/.github/scripts/release/release_lib.sh
@@ -139,3 +139,25 @@ upload_s3_release() {
   aws s3 ls "s3://releases.parity.io/${product}/${version}/${target}" --recursive --human-readable --summarize
   echo "✅ The release should be at https://releases.parity.io/${product}/${version}/${target}"
 }
+
+# Upload runtimes artifacts to s3 release bucket
+#
+# input: version (stable release tag, e.g. polkadot-stable2412 or polkadot-stable2412-rc1)
+# output: none
+upload_s3_runtimes_release_artifacts() {
+  alias aws='podman run --rm -it docker.io/paritytech/awscli -e AWS_ACCESS_KEY_ID -e AWS_SECRET_ACCESS_KEY -e AWS_BUCKET aws'
+
+  version=$1
+
+  echo "Working on version: $version "
+
+  echo "Current content, should be empty on new uploads:"
+  aws s3 ls "s3://releases.parity.io/polkadot/runtimes/${version}/" --recursive --human-readable --summarize || true
+  echo "Content to be uploaded:"
+  artifacts="artifacts/runtimes/"
+  ls "$artifacts"
+  aws s3 sync --acl public-read "$artifacts" "s3://releases.parity.io/polkadot/runtimes/${version}/"
+  echo "Uploaded files:"
+  aws s3 ls "s3://releases.parity.io/polkadot/runtimes/${version}/" --recursive --human-readable --summarize
+  echo "✅ The release should be at https://releases.parity.io/polkadot/runtimes/${version}"
+}
diff --git a/.github/workflows/release-branchoff-stable.yml b/.github/workflows/release-10_branchoff-stable.yml
similarity index 100%
rename from .github/workflows/release-branchoff-stable.yml
rename to .github/workflows/release-10_branchoff-stable.yml
diff --git a/.github/workflows/release-10_rc-automation.yml b/.github/workflows/release-11_rc-automation.yml
similarity index 100%
rename from .github/workflows/release-10_rc-automation.yml
rename to .github/workflows/release-11_rc-automation.yml
diff --git a/.github/workflows/release-build-rc.yml b/.github/workflows/release-20_build-rc.yml
similarity index 62%
rename from .github/workflows/release-build-rc.yml
rename to .github/workflows/release-20_build-rc.yml
index a43c2b282a8d3..d4c7055c37c58 100644
--- a/.github/workflows/release-build-rc.yml
+++ b/.github/workflows/release-20_build-rc.yml
@@ -11,10 +11,12 @@ on:
           - polkadot
           - polkadot-parachain
           - polkadot-omni-node
+          - frame-omni-bencher
+          - chain-spec-builder
           - all
 
       release_tag:
-        description: Tag matching the actual release candidate with the format stableYYMM-rcX or stableYYMM
+        description: Tag matching the actual release candidate with the format polkadot-stableYYMM(-X)-rcX or polkadot-stableYYMM(-X)
         type: string
 
 jobs:
@@ -106,6 +108,50 @@ jobs:
       attestations: write
       contents: read
 
+  build-frame-omni-bencher-binary:
+    needs: [validate-inputs]
+    if: ${{ inputs.binary == 'frame-omni-bencher' || inputs.binary == 'all' }}
+    uses: "./.github/workflows/release-reusable-rc-buid.yml"
+    with:
+      binary: '["frame-omni-bencher"]'
+      package: "frame-omni-bencher"
+      release_tag: ${{ needs.validate-inputs.outputs.release_tag }}
+      target: x86_64-unknown-linux-gnu
+    secrets:
+      PGP_KMS_KEY: ${{ secrets.PGP_KMS_KEY }}
+      PGP_KMS_HASH: ${{ secrets.PGP_KMS_HASH }}
+      AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
+      AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+      AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }}
+      AWS_RELEASE_ACCESS_KEY_ID: ${{
secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} + permissions: + id-token: write + attestations: write + contents: read + + build-chain-spec-builder-binary: + needs: [validate-inputs] + if: ${{ inputs.binary == 'chain-spec-builder' || inputs.binary == 'all' }} + uses: "./.github/workflows/release-reusable-rc-buid.yml" + with: + binary: '["chain-spec-builder"]' + package: staging-chain-spec-builder + release_tag: ${{ needs.validate-inputs.outputs.release_tag }} + target: x86_64-unknown-linux-gnu + secrets: + PGP_KMS_KEY: ${{ secrets.PGP_KMS_KEY }} + PGP_KMS_HASH: ${{ secrets.PGP_KMS_HASH }} + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} + permissions: + id-token: write + attestations: write + contents: read + build-polkadot-macos-binary: needs: [validate-inputs] if: ${{ inputs.binary == 'polkadot' || inputs.binary == 'all' }} @@ -134,7 +180,7 @@ jobs: uses: "./.github/workflows/release-reusable-rc-buid.yml" with: binary: '["polkadot-parachain"]' - package: "polkadot-parachain-bin" + package: polkadot-parachain-bin release_tag: ${{ needs.validate-inputs.outputs.release_tag }} target: aarch64-apple-darwin secrets: @@ -156,7 +202,51 @@ jobs: uses: "./.github/workflows/release-reusable-rc-buid.yml" with: binary: '["polkadot-omni-node"]' - package: "polkadot-omni-node" + package: polkadot-omni-node + release_tag: ${{ needs.validate-inputs.outputs.release_tag }} + target: aarch64-apple-darwin + secrets: + PGP_KMS_KEY: ${{ secrets.PGP_KMS_KEY }} + PGP_KMS_HASH: ${{ secrets.PGP_KMS_HASH }} + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} + permissions: + id-token: write + attestations: write + contents: read + + build-frame-omni-bencher-macos-binary: + needs: [validate-inputs] + if: ${{ inputs.binary == 'frame-omni-bencher' || inputs.binary == 'all' }} + uses: "./.github/workflows/release-reusable-rc-buid.yml" + with: + binary: '["frame-omni-bencher"]' + package: frame-omni-bencher + release_tag: ${{ needs.validate-inputs.outputs.release_tag }} + target: aarch64-apple-darwin + secrets: + PGP_KMS_KEY: ${{ secrets.PGP_KMS_KEY }} + PGP_KMS_HASH: ${{ secrets.PGP_KMS_HASH }} + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} + permissions: + id-token: write + attestations: write + contents: read + + build-chain-spec-builder-macos-binary: + needs: [validate-inputs] + if: ${{ inputs.binary == 'chain-spec-builder' || inputs.binary == 'all' }} + uses: "./.github/workflows/release-reusable-rc-buid.yml" + with: + binary: '["chain-spec-builder"]' + package: staging-chain-spec-builder release_tag: ${{ needs.validate-inputs.outputs.release_tag }} target: aarch64-apple-darwin secrets: diff --git 
a/.github/workflows/release-30_publish_release_draft.yml b/.github/workflows/release-30_publish_release_draft.yml index d1087177a9aa8..61ce19dcfd9dc 100644 --- a/.github/workflows/release-30_publish_release_draft.yml +++ b/.github/workflows/release-30_publish_release_draft.yml @@ -1,19 +1,46 @@ name: Release - Publish draft -on: - push: - tags: - # Catches v1.2.3 and v1.2.3-rc1 - - v[0-9]+.[0-9]+.[0-9]+* - # - polkadot-stable[0-9]+* Activate when the release process from release org is setteled +# This workflow runs in paritytech-release and creates full release draft with: +# - release notes +# - info about the runtimes +# - attached artifacts: +# - runtimes +# - binaries +# - signatures +on: workflow_dispatch: inputs: - version: - description: Current release/rc version + release_tag: + description: Tag matching the actual release candidate with the format polkadot-stableYYMM(-X)-rcX or polkadot-stableYYMM(-X) + required: true + type: string jobs: + check-synchronization: + uses: paritytech-release/sync-workflows/.github/workflows/check-syncronization.yml@main + + validate-inputs: + needs: [ check-synchronization ] + if: ${{ needs.check-synchronization.outputs.checks_passed }} == 'true' + runs-on: ubuntu-latest + outputs: + release_tag: ${{ steps.validate_inputs.outputs.release_tag }} + + steps: + - name: Checkout sources + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + + - name: Validate inputs + id: validate_inputs + run: | + . ./.github/scripts/common/lib.sh + + RELEASE_TAG=$(validate_stable_tag ${{ inputs.release_tag }}) + echo "release_tag=${RELEASE_TAG}" >> $GITHUB_OUTPUT + get-rust-versions: + needs: [ validate-inputs ] runs-on: ubuntu-latest outputs: rustc-stable: ${{ steps.get-rust-versions.outputs.stable }} @@ -24,47 +51,28 @@ jobs: echo "stable=$RUST_STABLE_VERSION" >> $GITHUB_OUTPUT build-runtimes: + needs: [ validate-inputs ] uses: "./.github/workflows/release-srtool.yml" with: excluded_runtimes: "asset-hub-rococo bridge-hub-rococo contracts-rococo coretime-rococo people-rococo rococo rococo-parachain substrate-test bp cumulus-test kitchensink minimal-template parachain-template penpal polkadot-test seedling shell frame-try sp solochain-template polkadot-sdk-docs-first" build_opts: "--features on-chain-release-build" - - build-binaries: - runs-on: ubuntu-latest - strategy: - matrix: - # Tuples of [package, binary-name] - binary: [ [frame-omni-bencher, frame-omni-bencher], [staging-chain-spec-builder, chain-spec-builder] ] - steps: - - name: Checkout sources - uses: actions/checkout@6d193bf28034eafb982f37bd894289fe649468fc # v4.0.0 - - - name: Install protobuf-compiler - run: | - sudo apt update - sudo apt install -y protobuf-compiler - - - name: Build ${{ matrix.binary[1] }} binary - run: | - cargo build --locked --profile=production -p ${{ matrix.binary[0] }} --bin ${{ matrix.binary[1] }} - target/production/${{ matrix.binary[1] }} --version - - - name: Upload ${{ matrix.binary[1] }} binary - uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1 - with: - name: ${{ matrix.binary[1] }} - path: target/production/${{ matrix.binary[1] }} - + profile: production + permissions: + id-token: write + attestations: write + contents: read publish-release-draft: runs-on: ubuntu-latest - needs: [ get-rust-versions, build-runtimes ] + environment: release + needs: [ validate-inputs, get-rust-versions, build-runtimes ] outputs: release_url: ${{ steps.create-release.outputs.html_url }} asset_upload_url: ${{ 
steps.create-release.outputs.upload_url }} + steps: - name: Checkout - uses: actions/checkout@6d193bf28034eafb982f37bd894289fe649468fc # v4.0.0 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Download artifacts uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 @@ -87,20 +95,21 @@ jobs: GLUTTON_WESTEND_DIGEST: ${{ github.workspace}}/glutton-westend-runtime/glutton-westend-srtool-digest.json PEOPLE_WESTEND_DIGEST: ${{ github.workspace}}/people-westend-runtime/people-westend-srtool-digest.json WESTEND_DIGEST: ${{ github.workspace}}/westend-runtime/westend-srtool-digest.json + RELEASE_TAG: ${{ needs.validate-inputs.outputs.release_tag }} shell: bash run: | . ./.github/scripts/common/lib.sh export REF1=$(get_latest_release_tag) - if [[ -z "${{ inputs.version }}" ]]; then + if [[ -z "$RELEASE_TAG" ]]; then export REF2="${{ github.ref_name }}" echo "REF2: ${REF2}" else - export REF2="${{ inputs.version }}" + export REF2="$RELEASE_TAG" echo "REF2: ${REF2}" fi echo "REL_TAG=$REF2" >> $GITHUB_ENV - export VERSION=$(echo "$REF2" | sed -E 's/.*(stable[0-9]+).*$/\1/') + export VERSION=$(echo "$REF2" | sed -E 's/.*(stable[0-9]{4}(-[0-9]+)?).*$/\1/') ./scripts/release/build-changelogs.sh @@ -112,19 +121,29 @@ jobs: scripts/release/context.json **/*-srtool-digest.json + - name: Generate content write token for the release automation + id: generate_write_token + uses: actions/create-github-app-token@v1 + with: + app-id: ${{ vars.POLKADOT_SDK_RELEASE_RW_APP_ID }} + private-key: ${{ secrets.POLKADOT_SDK_RELEASE_RW_APP_KEY }} + owner: paritytech + repositories: polkadot-sdk + - name: Create draft release id: create-release - uses: actions/create-release@0cb9c9b65d5d1901c1f53e5e66eaf4afd303e70e # v1.1.4 env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - with: - tag_name: ${{ env.REL_TAG }} - release_name: Polkadot ${{ env.REL_TAG }} - body_path: ${{ github.workspace}}/scripts/release/RELEASE_DRAFT.md - draft: true + GITHUB_TOKEN: ${{ steps.generate_write_token.outputs.token }} + run: | + gh release create ${{ env.REL_TAG }} \ + --repo paritytech/polkadot-sdk \ + --draft \ + --title "Polkadot ${{ env.REL_TAG }}" \ + --notes-file ${{ github.workspace}}/scripts/release/RELEASE_DRAFT.md publish-runtimes: - needs: [ build-runtimes, publish-release-draft ] + needs: [ validate-inputs, build-runtimes, publish-release-draft ] + environment: release continue-on-error: true runs-on: ubuntu-latest strategy: @@ -132,7 +151,7 @@ jobs: steps: - name: Checkout sources - uses: actions/checkout@6d193bf28034eafb982f37bd894289fe649468fc # v4.0.0 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Download artifacts uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 @@ -144,44 +163,83 @@ jobs: >>$GITHUB_ENV echo ASSET=$(find ${{ matrix.chain }}-runtime -name '*.compact.compressed.wasm') >>$GITHUB_ENV echo SPEC=$(<${JSON} jq -r .runtimes.compact.subwasm.core_version.specVersion) + - name: Generate content write token for the release automation + id: generate_write_token + uses: actions/create-github-app-token@v1 + with: + app-id: ${{ vars.POLKADOT_SDK_RELEASE_RW_APP_ID }} + private-key: ${{ secrets.POLKADOT_SDK_RELEASE_RW_APP_KEY }} + owner: paritytech + repositories: polkadot-sdk + - name: Upload compressed ${{ matrix.chain }} v${{ env.SPEC }} wasm - if: ${{ matrix.chain != 'rococo-parachain' }} - uses: actions/upload-release-asset@e8f9f06c4b078e705bd2ea027f0926603fc9b4d5 #v1.0.2 env: - GITHUB_TOKEN: ${{ 
secrets.GITHUB_TOKEN }} - with: - upload_url: ${{ needs.publish-release-draft.outputs.asset_upload_url }} - asset_path: ${{ env.ASSET }} - asset_name: ${{ matrix.chain }}_runtime-v${{ env.SPEC }}.compact.compressed.wasm - asset_content_type: application/wasm + GITHUB_TOKEN: ${{ steps.generate_write_token.outputs.token }} + run: | + gh release upload ${{ needs.validate-inputs.outputs.release_tag }} \ + --repo paritytech/polkadot-sdk \ + '${{ env.ASSET }}#${{ matrix.chain }}_runtime-v${{ env.SPEC }}.compact.compressed.wasm' - publish-binaries: - needs: [ publish-release-draft, build-binaries ] + publish-release-artifacts: + needs: [ validate-inputs, publish-release-draft ] + environment: release continue-on-error: true runs-on: ubuntu-latest strategy: matrix: - binary: [frame-omni-bencher, chain-spec-builder] + binary: [ polkadot, polkadot-execute-worker, polkadot-prepare-worker, polkadot-parachain, polkadot-omni-node, frame-omni-bencher, chain-spec-builder ] + target: [ x86_64-unknown-linux-gnu, aarch64-apple-darwin ] steps: - - name: Download artifacts - uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 + - name: Checkout sources + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + + - name: Fetch binaries from s3 based on version + run: | + . ./.github/scripts/common/lib.sh + + VERSION="${{ needs.validate-inputs.outputs.release_tag }}" + fetch_release_artifacts_from_s3 ${{ matrix.binary }} ${{ matrix.target }} + + - name: Rename aarch64-apple-darwin binaries + if: ${{ matrix.target == 'aarch64-apple-darwin' }} + working-directory: ${{ github.workspace}}/release-artifacts/${{ matrix.target }}/${{ matrix.binary }} + run: | + mv ${{ matrix.binary }} ${{ matrix.binary }}-aarch64-apple-darwin + mv ${{ matrix.binary }}.asc ${{ matrix.binary }}-aarch64-apple-darwin.asc + mv ${{ matrix.binary }}.sha256 ${{ matrix.binary }}-aarch64-apple-darwin.sha256 + + - name: Generate content write token for the release automation + id: generate_write_token + uses: actions/create-github-app-token@v1 with: - name: ${{ matrix.binary }} + app-id: ${{ vars.POLKADOT_SDK_RELEASE_RW_APP_ID }} + private-key: ${{ secrets.POLKADOT_SDK_RELEASE_RW_APP_KEY }} + owner: paritytech + repositories: polkadot-sdk - - name: Upload ${{ matrix.binary }} binary - uses: actions/upload-release-asset@e8f9f06c4b078e705bd2ea027f0926603fc9b4d5 #v1.0.2 + - name: Upload ${{ matrix.binary }} binary to release draft env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - with: - upload_url: ${{ needs.publish-release-draft.outputs.asset_upload_url }} - asset_path: ${{ github.workspace}}/${{ matrix.binary }} - asset_name: ${{ matrix.binary }} - asset_content_type: application/octet-stream + GITHUB_TOKEN: ${{ steps.generate_write_token.outputs.token }} + working-directory: ${{ github.workspace}}/release-artifacts/${{ matrix.target }}/${{ matrix.binary }} + run: | + if [[ ${{ matrix.target }} == "aarch64-apple-darwin" ]]; then + gh release upload ${{ needs.validate-inputs.outputs.release_tag }} \ + --repo paritytech/polkadot-sdk \ + ${{ matrix.binary }}-aarch64-apple-darwin \ + ${{ matrix.binary }}-aarch64-apple-darwin.asc \ + ${{ matrix.binary }}-aarch64-apple-darwin.sha256 + else + gh release upload ${{ needs.validate-inputs.outputs.release_tag }} \ + --repo paritytech/polkadot-sdk \ + ${{ matrix.binary }} \ + ${{ matrix.binary }}.asc \ + ${{ matrix.binary }}.sha256 + fi post_to_matrix: runs-on: ubuntu-latest - needs: publish-release-draft + needs: [ validate-inputs, publish-release-draft ] 
environment: release strategy: matrix: @@ -197,5 +255,5 @@ jobs: access_token: ${{ secrets.RELEASENOTES_MATRIX_V2_ACCESS_TOKEN }} server: m.parity.io message: | - **New version of polkadot tagged**: ${{ github.ref_name }}
- Draft release created: ${{ needs.publish-release-draft.outputs.release_url }} + **New version of polkadot tagged**: ${{ needs.validate-inputs.outputs.release_tag }}
+ And release draft is release created in [polkadot-sdk repo](/~https://github.com/paritytech/polkadot-sdk/releases) diff --git a/.github/workflows/release-50_publish-docker.yml b/.github/workflows/release-50_publish-docker.yml index 627e53bacd88a..5c3c3a6e854d9 100644 --- a/.github/workflows/release-50_publish-docker.yml +++ b/.github/workflows/release-50_publish-docker.yml @@ -4,10 +4,6 @@ name: Release - Publish Docker Image # It builds and published releases and rc candidates. on: - #TODO: activate automated run later - # release: - # types: - # - published workflow_dispatch: inputs: image_type: @@ -30,16 +26,6 @@ on: - polkadot-parachain - chain-spec-builder - release_id: - description: | - Release ID. - You can find it using the command: - curl -s \ - -H "Authorization: Bearer ${GITHUB_TOKEN}" https://api.github.com/repos/$OWNER/$REPO/releases | \ - jq '.[] | { name: .name, id: .id }' - required: true - type: number - registry: description: Container registry required: true @@ -55,7 +41,7 @@ on: default: parity version: - description: version to build/release + description: Version of the polkadot node release in format v1.16.0 or v1.16.0-rc1 default: v0.9.18 required: true @@ -78,11 +64,15 @@ env: IMAGE_TYPE: ${{ inputs.image_type }} jobs: + check-synchronization: + uses: paritytech-release/sync-workflows/.github/workflows/check-syncronization.yml@main + validate-inputs: + needs: [check-synchronization] + if: ${{ needs.check-synchronization.outputs.checks_passed }} == 'true' runs-on: ubuntu-latest outputs: version: ${{ steps.validate_inputs.outputs.VERSION }} - release_id: ${{ steps.validate_inputs.outputs.RELEASE_ID }} stable_tag: ${{ steps.validate_inputs.outputs.stable_tag }} steps: @@ -97,11 +87,6 @@ jobs: VERSION=$(filter_version_from_input "${{ inputs.version }}") echo "VERSION=${VERSION}" >> $GITHUB_OUTPUT - RELEASE_ID=$(check_release_id "${{ inputs.release_id }}") - echo "RELEASE_ID=${RELEASE_ID}" >> $GITHUB_OUTPUT - - echo "Release ID: $RELEASE_ID" - STABLE_TAG=$(validate_stable_tag ${{ inputs.stable_tag }}) echo "stable_tag=${STABLE_TAG}" >> $GITHUB_OUTPUT @@ -114,50 +99,26 @@ jobs: - name: Checkout sources uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - #TODO: this step will be needed when automated triggering will work - #this step runs only if the workflow is triggered automatically when new release is published - # if: ${{ env.EVENT_NAME == 'release' && env.EVENT_ACTION != '' && env.EVENT_ACTION == 'published' }} - # run: | - # mkdir -p release-artifacts && cd release-artifacts - - # for f in $BINARY $BINARY.asc $BINARY.sha256; do - # URL="/~https://github.com/${{ github.event.repository.full_name }}/releases/download/${{ github.event.release.tag_name }}/$f" - # echo " - Fetching $f from $URL" - # wget "$URL" -O "$f" - # done - # chmod a+x $BINARY - # ls -al - - name: Fetch rc artifacts or release artifacts from s3 based on version - #this step runs only if the workflow is triggered manually - if: ${{ env.EVENT_NAME == 'workflow_dispatch' && inputs.binary != 'polkadot-omni-node' && inputs.binary != 'chain-spec-builder'}} + # if: ${{ env.EVENT_NAME == 'workflow_dispatch' && inputs.binary != 'polkadot-omni-node' && inputs.binary != 'chain-spec-builder'}} run: | . 
./.github/scripts/common/lib.sh - VERSION="${{ needs.validate-inputs.outputs.VERSION }}" + VERSION="${{ needs.validate-inputs.outputs.stable_tag }}" if [[ ${{ inputs.binary }} == 'polkadot' ]]; then bins=(polkadot polkadot-prepare-worker polkadot-execute-worker) for bin in "${bins[@]}"; do - fetch_release_artifacts_from_s3 $bin + fetch_release_artifacts_from_s3 $bin x86_64-unknown-linux-gnu done else - fetch_release_artifacts_from_s3 $BINARY + fetch_release_artifacts_from_s3 $BINARY x86_64-unknown-linux-gnu fi - - name: Fetch polkadot-omni-node/chain-spec-builder rc artifacts or release artifacts based on release id - #this step runs only if the workflow is triggered manually and only for chain-spec-builder - if: ${{ env.EVENT_NAME == 'workflow_dispatch' && (inputs.binary == 'polkadot-omni-node' || inputs.binary == 'chain-spec-builder') }} - run: | - . ./.github/scripts/common/lib.sh - - RELEASE_ID="${{ needs.validate-inputs.outputs.RELEASE_ID }}" - fetch_release_artifacts - - name: Upload artifacts uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1 with: name: release-artifacts - path: release-artifacts/${{ env.BINARY }}/**/* + path: release-artifacts/x86_64-unknown-linux-gnu/${{ env.BINARY }}/**/* build-container: # this job will be triggered for the polkadot-parachain rc and release or polkadot rc image build if: ${{ inputs.binary == 'polkadot-omni-node' || inputs.binary == 'polkadot-parachain' || inputs.binary == 'chain-spec-builder' || inputs.image_type == 'rc' }} @@ -173,7 +134,7 @@ jobs: uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 - name: Check sha256 ${{ env.BINARY }} - if: ${{ inputs.binary == 'polkadot-parachain' || inputs.binary == 'polkadot' }} + # if: ${{ inputs.binary == 'polkadot-parachain' || inputs.binary == 'polkadot' }} working-directory: release-artifacts run: | . ../.github/scripts/common/lib.sh @@ -182,7 +143,7 @@ jobs: check_sha256 $BINARY && echo "OK" || echo "ERR" - name: Check GPG ${{ env.BINARY }} - if: ${{ inputs.binary == 'polkadot-parachain' || inputs.binary == 'polkadot' }} + # if: ${{ inputs.binary == 'polkadot-parachain' || inputs.binary == 'polkadot' }} working-directory: release-artifacts run: | . ../.github/scripts/common/lib.sh @@ -190,35 +151,29 @@ jobs: check_gpg $BINARY - name: Fetch rc commit and tag + working-directory: release-artifacts if: ${{ env.IMAGE_TYPE == 'rc' }} id: fetch_rc_refs + shell: bash run: | - . ./.github/scripts/common/lib.sh - - echo "release=${{ needs.validate-inputs.outputs.stable_tag }}" >> $GITHUB_OUTPUT + . ../.github/scripts/common/lib.sh commit=$(git rev-parse --short HEAD) && \ echo "commit=${commit}" >> $GITHUB_OUTPUT - - echo "tag=${{ needs.validate-inputs.outputs.version }}" >> $GITHUB_OUTPUT + echo "release=$(echo ${{ needs.validate-inputs.outputs.version }})" >> $GITHUB_OUTPUT + echo "tag=$(prepare_docker_stable_tag ${{ needs.validate-inputs.outputs.stable_tag }})" >> $GITHUB_OUTPUT - name: Fetch release tags working-directory: release-artifacts if: ${{ env.IMAGE_TYPE == 'release'}} id: fetch_release_refs + shell: bash run: | - chmod a+rx $BINARY - - if [[ $BINARY != 'chain-spec-builder' ]]; then - VERSION=$(./$BINARY --version | awk '{ print $2 }' ) - release=$( echo $VERSION | cut -f1 -d- ) - else - release=$(echo ${{ needs.validate-inputs.outputs.VERSION }} | sed 's/^v//') - fi + . 
../.github/scripts/common/lib.sh echo "tag=latest" >> $GITHUB_OUTPUT - echo "release=${release}" >> $GITHUB_OUTPUT - echo "stable=${{ needs.validate-inputs.outputs.stable_tag }}" >> $GITHUB_OUTPUT + echo "release=$(echo ${{ needs.validate-inputs.outputs.version }})" >> $GITHUB_OUTPUT + echo "stable=$(prepare_docker_stable_tag ${{ needs.validate-inputs.outputs.stable_tag }})" >> $GITHUB_OUTPUT - name: Build Injected Container image for polkadot rc if: ${{ env.BINARY == 'polkadot' }} @@ -342,8 +297,10 @@ jobs: - name: Fetch values id: fetch-data run: | + . ./.github/scripts/common/lib.sh date=$(date -u '+%Y-%m-%dT%H:%M:%SZ') echo "date=$date" >> $GITHUB_OUTPUT + echo "stable=$(prepare_docker_stable_tag ${{ needs.validate-inputs.outputs.stable_tag }})" >> $GITHUB_OUTPUT - name: Build and push id: docker_build @@ -354,9 +311,9 @@ jobs: # TODO: The owner should be used below but buildx does not resolve the VARs # TODO: It would be good to get rid of this GHA that we don't really need. tags: | - parity/polkadot:${{ needs.validate-inputs.outputs.stable_tag }} - parity/polkadot:latest - parity/polkadot:${{ needs.fetch-latest-debian-package-version.outputs.polkadot_container_tag }} + egorpop/polkadot:${{ steps.fetch-data.outputs.stable }} + egorpop/polkadot:latest + egorpop/polkadot:${{ needs.fetch-latest-debian-package-version.outputs.polkadot_container_tag }} build-args: | VCS_REF=${{ github.ref }} POLKADOT_VERSION=${{ needs.fetch-latest-debian-package-version.outputs.polkadot_apt_version }} diff --git a/.github/workflows/release-reusable-rc-buid.yml b/.github/workflows/release-reusable-rc-buid.yml index 7e31a4744b594..901c1302aa092 100644 --- a/.github/workflows/release-reusable-rc-buid.yml +++ b/.github/workflows/release-reusable-rc-buid.yml @@ -302,7 +302,6 @@ jobs: AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} - upload-polkadot-parachain-artifacts-to-s3: if: ${{ inputs.package == 'polkadot-parachain-bin' && inputs.target == 'x86_64-unknown-linux-gnu' }} needs: [build-rc] @@ -329,6 +328,32 @@ jobs: AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} + upload-frame-omni-bencher-artifacts-to-s3: + if: ${{ inputs.package == 'frame-omni-bencher' && inputs.target == 'x86_64-unknown-linux-gnu' }} + needs: [build-rc] + uses: ./.github/workflows/release-reusable-s3-upload.yml + with: + package: ${{ inputs.package }} + release_tag: ${{ inputs.release_tag }} + target: ${{ inputs.target }} + secrets: + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} + + upload-chain-spec-builder-artifacts-to-s3: + if: ${{ inputs.package == 'staging-chain-spec-builder' && inputs.target == 'x86_64-unknown-linux-gnu' }} + needs: [build-rc] + uses: ./.github/workflows/release-reusable-s3-upload.yml + with: + package: chain-spec-builder + release_tag: ${{ inputs.release_tag }} + target: ${{ inputs.target }} + secrets: + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} + upload-polkadot-macos-artifacts-to-s3: if: ${{ inputs.package == 'polkadot' && inputs.target == 'aarch64-apple-darwin' }} # TODO: add and use a 
`build-polkadot-homebrew-package` which packs all `polkadot` binaries: @@ -395,3 +420,29 @@ jobs: AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} + + upload-frame-omni-bencher-macos-artifacts-to-s3: + if: ${{ inputs.package == 'frame-omni-bencher' && inputs.target == 'aarch64-apple-darwin' }} + needs: [build-macos-rc] + uses: ./.github/workflows/release-reusable-s3-upload.yml + with: + package: ${{ inputs.package }} + release_tag: ${{ inputs.release_tag }} + target: ${{ inputs.target }} + secrets: + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} + + upload-chain-spec-builder-macos-artifacts-to-s3: + if: ${{ inputs.package == 'staging-chain-spec-builder' && inputs.target == 'aarch64-apple-darwin' }} + needs: [build-macos-rc] + uses: ./.github/workflows/release-reusable-s3-upload.yml + with: + package: chain-spec-builder + release_tag: ${{ inputs.release_tag }} + target: ${{ inputs.target }} + secrets: + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} diff --git a/.github/workflows/release-srtool.yml b/.github/workflows/release-srtool.yml index 9a29b46d2fc32..fc10496d481b9 100644 --- a/.github/workflows/release-srtool.yml +++ b/.github/workflows/release-srtool.yml @@ -1,7 +1,7 @@ name: Srtool build env: - SUBWASM_VERSION: 0.20.0 + SUBWASM_VERSION: 0.21.0 TOML_CLI_VERSION: 0.2.4 on: @@ -11,14 +11,16 @@ on: type: string build_opts: type: string + profile: + type: string outputs: published_runtimes: value: ${{ jobs.find-runtimes.outputs.runtime }} - schedule: - - cron: "00 02 * * 1" # 2AM weekly on monday - - workflow_dispatch: +permissions: + id-token: write + attestations: write + contents: read jobs: find-runtimes: @@ -75,6 +77,7 @@ jobs: with: chain: ${{ matrix.chain }} runtime_dir: ${{ matrix.runtime_dir }} + profile: ${{ inputs.profile }} - name: Summary run: | @@ -83,6 +86,11 @@ jobs: echo "Compact Runtime: ${{ steps.srtool_build.outputs.wasm }}" echo "Compressed Runtime: ${{ steps.srtool_build.outputs.wasm_compressed }}" + - name: Generate artifact attestation + uses: actions/attest-build-provenance@1c608d11d69870c2092266b3f9a6f3abbf17002c # v1.4.3 + with: + subject-path: ${{ steps.srtool_build.outputs.wasm }} + # We now get extra information thanks to subwasm - name: Install subwasm run: | From adf73801acce5922e07fc5b61d3c64a87f2b4399 Mon Sep 17 00:00:00 2001 From: Egor_P Date: Wed, 4 Dec 2024 13:46:09 +0100 Subject: [PATCH 2/4] [stable2412] Backport backport #6748 (#6753) Backport #6748 into `stable2412` --- .github/scripts/common/lib.sh | 2 +- .github/scripts/release/release_lib.sh | 44 +++++- .../release-31_promote-rc-to-final.yml | 125 ++++++++++++++++++ .../release-reusable-promote-to-final.yml | 83 ++++++++++++ .../workflows/release-reusable-rc-buid.yml | 4 +- .../workflows/release-reusable-s3-upload.yml | 14 +- 6 files changed, 253 insertions(+), 19 deletions(-) create mode 100644 .github/workflows/release-31_promote-rc-to-final.yml create mode 100644 .github/workflows/release-reusable-promote-to-final.yml diff --git a/.github/scripts/common/lib.sh b/.github/scripts/common/lib.sh index 41dc0ba06dd2c..00f8c089831ec 100755 --- 
a/.github/scripts/common/lib.sh
+++ b/.github/scripts/common/lib.sh
@@ -297,7 +297,7 @@ fetch_release_artifacts_from_s3() {
   pwd
   ls -al --color
   popd > /dev/null
-
+  unset OUTPUT_DIR
 }
 
 # Pass the name of the binary as input, it will
diff --git a/.github/scripts/release/release_lib.sh b/.github/scripts/release/release_lib.sh
index 43227180cb7c7..984709f2ea031 100644
--- a/.github/scripts/release/release_lib.sh
+++ b/.github/scripts/release/release_lib.sh
@@ -129,15 +129,17 @@ upload_s3_release() {
   echo "Working on version: $version "
   echo "Working on platform: $target "
 
+  URL_BASE=$(get_s3_url_base $product)
+
   echo "Current content, should be empty on new uploads:"
-  aws s3 ls "s3://releases.parity.io/${product}/${version}/${target}" --recursive --human-readable --summarize || true
+  aws s3 ls "s3://${URL_BASE}/${version}/${target}" --recursive --human-readable --summarize || true
   echo "Content to be uploaded:"
-  artifacts="artifacts/$product/"
+  artifacts="release-artifacts/$target/$product/"
   ls "$artifacts"
-  aws s3 sync --acl public-read "$artifacts" "s3://releases.parity.io/${product}/${version}/${target}"
+  aws s3 sync --acl public-read "$artifacts" "s3://${URL_BASE}/${version}/${target}"
   echo "Uploaded files:"
-  aws s3 ls "s3://releases.parity.io/${product}/${version}/${target}" --recursive --human-readable --summarize
-  echo "✅ The release should be at https://releases.parity.io/${product}/${version}/${target}"
+  aws s3 ls "s3://${URL_BASE}/${version}/${target}" --recursive --human-readable --summarize
+  echo "✅ The release should be at https://${URL_BASE}/${version}/${target}"
 }
 
 # Upload runtimes artifacts to s3 release bucket
@@ -161,3 +163,35 @@ upload_s3_runtimes_release_artifacts() {
   aws s3 ls "s3://releases.parity.io/polkadot/runtimes/${version}/" --recursive --human-readable --summarize
   echo "✅ The release should be at https://releases.parity.io/polkadot/runtimes/${version}"
 }
+
+
+# Pass the name of the binary as input, it will
+# return the s3 base url
+function get_s3_url_base() {
+  name=$1
+  case $name in
+    polkadot | polkadot-execute-worker | polkadot-prepare-worker )
+      printf "releases.parity.io/polkadot"
+      ;;
+
+    polkadot-parachain)
+      printf "releases.parity.io/polkadot-parachain"
+      ;;
+
+    polkadot-omni-node)
+      printf "releases.parity.io/polkadot-omni-node"
+      ;;
+
+    chain-spec-builder)
+      printf "releases.parity.io/chain-spec-builder"
+      ;;
+
+    frame-omni-bencher)
+      printf "releases.parity.io/frame-omni-bencher"
+      ;;
+    *)
+      printf "UNSUPPORTED BINARY $name"
+      exit 1
+      ;;
+  esac
+}
diff --git a/.github/workflows/release-31_promote-rc-to-final.yml b/.github/workflows/release-31_promote-rc-to-final.yml
new file mode 100644
index 0000000000000..6aa9d4bddd1d5
--- /dev/null
+++ b/.github/workflows/release-31_promote-rc-to-final.yml
@@ -0,0 +1,125 @@
+name: Release - Promote RC to final candidate on S3
+
+on:
+  workflow_dispatch:
+    inputs:
+      binary:
+        description: Binary to be built for the release
+        default: all
+        type: choice
+        options:
+          - polkadot
+          - polkadot-parachain
+          - polkadot-omni-node
+          - frame-omni-bencher
+          - chain-spec-builder
+          - all
+      release_tag:
+        description: Tag matching the actual release candidate with the format polkadot-stableYYMM(-X)-rcX
+        type: string
+
+
+jobs:
+
+  check-synchronization:
+    uses: paritytech-release/sync-workflows/.github/workflows/check-syncronization.yml@main
+
+  validate-inputs:
+    needs: [ check-synchronization ]
+    if: ${{ needs.check-synchronization.outputs.checks_passed }} == 'true'
+    runs-on: ubuntu-latest
+    outputs:
+      release_tag:
${{ steps.validate_inputs.outputs.release_tag }} + final_tag: ${{ steps.validate_inputs.outputs.final_tag }} + + steps: + - name: Checkout sources + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + + - name: Validate inputs + id: validate_inputs + run: | + . ./.github/scripts/common/lib.sh + + RELEASE_TAG=$(validate_stable_tag ${{ inputs.release_tag }}) + echo "release_tag=${RELEASE_TAG}" >> $GITHUB_OUTPUT + + promote-polkadot-rc-to-final: + if: ${{ inputs.binary == 'polkadot' || inputs.binary == 'all' }} + needs: [ validate-inputs ] + uses: ./.github/workflows/release-reusable-promote-to-final.yml + strategy: + matrix: + target: [ x86_64-unknown-linux-gnu, aarch64-apple-darwin ] + with: + package: polkadot + release_tag: ${{ needs.validate-inputs.outputs.release_tag }} + target: ${{ matrix.target }} + secrets: + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} + + promote-polkadot-parachain-rc-to-final: + if: ${{ inputs.binary == 'polkadot-parachain' || inputs.binary == 'all' }} + needs: [ validate-inputs ] + uses: ./.github/workflows/release-reusable-promote-to-final.yml + strategy: + matrix: + target: [ x86_64-unknown-linux-gnu, aarch64-apple-darwin ] + with: + package: polkadot-parachain + release_tag: ${{ needs.validate-inputs.outputs.release_tag }} + target: ${{ matrix.target }} + secrets: + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} + + promote-polkadot-omni-node-rc-to-final: + if: ${{ inputs.binary == 'polkadot-omni-node' || inputs.binary == 'all' }} + needs: [ validate-inputs ] + uses: ./.github/workflows/release-reusable-promote-to-final.yml + strategy: + matrix: + target: [ x86_64-unknown-linux-gnu, aarch64-apple-darwin ] + with: + package: polkadot-omni-node + release_tag: ${{ needs.validate-inputs.outputs.release_tag }} + target: ${{ matrix.target }} + secrets: + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} + + promote-frame-omni-bencher-rc-to-final: + if: ${{ inputs.binary == 'frame-omni-bencher' || inputs.binary == 'all' }} + needs: [ validate-inputs ] + uses: ./.github/workflows/release-reusable-promote-to-final.yml + strategy: + matrix: + target: [ x86_64-unknown-linux-gnu, aarch64-apple-darwin ] + with: + package: frame-omni-bencher + release_tag: ${{ needs.validate-inputs.outputs.release_tag }} + target: ${{ matrix.target }} + secrets: + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} + + promote-chain-spec-builder-rc-to-final: + if: ${{ inputs.binary == 'chain-spec-builder' || inputs.binary == 'all' }} + needs: [ validate-inputs ] + uses: ./.github/workflows/release-reusable-promote-to-final.yml + strategy: + matrix: + target: [ x86_64-unknown-linux-gnu, aarch64-apple-darwin ] + with: + package: chain-spec-builder + release_tag: ${{ needs.validate-inputs.outputs.release_tag }} + target: ${{ matrix.target }} + secrets: + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_RELEASE_ACCESS_KEY_ID: ${{ 
secrets.AWS_RELEASE_ACCESS_KEY_ID }}
+      AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }}
diff --git a/.github/workflows/release-reusable-promote-to-final.yml b/.github/workflows/release-reusable-promote-to-final.yml
new file mode 100644
index 0000000000000..ed4a80a01e826
--- /dev/null
+++ b/.github/workflows/release-reusable-promote-to-final.yml
@@ -0,0 +1,83 @@
+name: Promote rc to final
+
+on:
+  workflow_call:
+    inputs:
+      package:
+        description: Package to be promoted
+        required: true
+        type: string
+
+      release_tag:
+        description: Tag matching the actual release candidate with the format polkadot-stableYYMM(-X)-rcX that will be changed to final in the form of polkadot-stableYYMM(-X)
+        required: true
+        type: string
+
+      target:
+        description: Target triple for which the artifacts are being uploaded (e.g. aarch64-apple-darwin)
+        required: true
+        type: string
+
+    secrets:
+      AWS_DEFAULT_REGION:
+        required: true
+      AWS_RELEASE_ACCESS_KEY_ID:
+        required: true
+      AWS_RELEASE_SECRET_ACCESS_KEY:
+        required: true
+
+jobs:
+
+  promote-release-artifacts:
+    environment: release
+    runs-on: ubuntu-latest
+    env:
+      AWS_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }}
+      AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }}
+      AWS_REGION: ${{ secrets.AWS_DEFAULT_REGION }}
+
+    steps:
+      - name: Checkout sources
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+
+      - name: Prepare final tag
+        id: prepare_final_tag
+        shell: bash
+        run: |
+          tag="$(echo ${{ inputs.release_tag }} | sed 's/-rc[0-9]*$//')"
+          echo $tag
+          echo "FINAL_TAG=${tag}" >> $GITHUB_OUTPUT
+
+      - name: Fetch binaries from s3 based on version
+        run: |
+          . ./.github/scripts/common/lib.sh
+
+          VERSION="${{ inputs.release_tag }}"
+          if [[ ${{ inputs.package }} == 'polkadot' ]]; then
+            packages=(polkadot polkadot-prepare-worker polkadot-execute-worker)
+            for package in "${packages[@]}"; do
+              fetch_release_artifacts_from_s3 $package ${{ inputs.target }}
+            done
+          else
+            fetch_release_artifacts_from_s3 ${{ inputs.package }} ${{ inputs.target }}
+          fi
+
+      - name: Configure AWS Credentials
+        uses: aws-actions/configure-aws-credentials@e3dd6a429d7300a6a4c196c26e071d42e0343502 # v4.0.2
+        with:
+          aws-access-key-id: ${{ env.AWS_ACCESS_KEY_ID }}
+          aws-secret-access-key: ${{ env.AWS_SECRET_ACCESS_KEY }}
+          aws-region: ${{ env.AWS_REGION }}
+
+      - name: Upload ${{ inputs.package }} ${{ inputs.target }} artifacts to s3
+        run: |
+          .
./.github/scripts/release/release_lib.sh + + if [[ ${{ inputs.package }} == 'polkadot' ]]; then + packages=(polkadot polkadot-prepare-worker polkadot-execute-worker) + for package in "${packages[@]}"; do + upload_s3_release $package ${{ steps.prepare_final_tag.outputs.final_tag }} ${{ inputs.target }} + done + else + upload_s3_release ${{ inputs.package }} ${{ steps.prepare_final_tag.outputs.final_tag }} ${{ inputs.target }} + fi diff --git a/.github/workflows/release-reusable-rc-buid.yml b/.github/workflows/release-reusable-rc-buid.yml index 901c1302aa092..877f2fb8af3ab 100644 --- a/.github/workflows/release-reusable-rc-buid.yml +++ b/.github/workflows/release-reusable-rc-buid.yml @@ -133,7 +133,7 @@ jobs: - name: Upload ${{ matrix.binaries }} artifacts uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1 with: - name: ${{ matrix.binaries }} + name: ${{ matrix.binaries }}_${{ inputs.target }} path: /artifacts/${{ matrix.binaries }} build-macos-rc: @@ -285,7 +285,7 @@ jobs: - name: Upload ${{inputs.package }} artifacts uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1 with: - name: ${{ inputs.package }} + name: ${{ inputs.package }}_${{ inputs.target }} path: target/production overwrite: true diff --git a/.github/workflows/release-reusable-s3-upload.yml b/.github/workflows/release-reusable-s3-upload.yml index f85466bc8c072..48c7e53c6c8f9 100644 --- a/.github/workflows/release-reusable-s3-upload.yml +++ b/.github/workflows/release-reusable-s3-upload.yml @@ -9,7 +9,7 @@ on: type: string release_tag: - description: Tag matching the actual release candidate with the format stableYYMM-rcX or stableYYMM-rcX + description: Tag matching the actual release candidate with the format polkadot-stableYYMM(-X)-rcX or polkadot-stableYYMM-rcX required: true type: string @@ -40,18 +40,10 @@ jobs: uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - name: Download amd64 artifacts - if: ${{ inputs.target == 'x86_64-unknown-linux-gnu' }} uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 with: - name: ${{ inputs.package }} - path: artifacts/${{ inputs.package }} - - - name: Download arm artifacts - if: ${{ inputs.target == 'aarch64-apple-darwin' }} - uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 - with: - name: ${{ inputs.package }}_aarch64-apple-darwin - path: artifacts/${{ inputs.package }} + name: ${{ inputs.package }}_${{ inputs.target }} + path: release-artifacts/${{ inputs.target }}/${{ inputs.package }} - name: Configure AWS Credentials uses: aws-actions/configure-aws-credentials@e3dd6a429d7300a6a4c196c26e071d42e0343502 # v4.0.2 From 8ac3ba85dc8a8cb27aa5cd0e37ef5762be845bc3 Mon Sep 17 00:00:00 2001 From: Alexandru Vasile <60601340+lexnv@users.noreply.github.com> Date: Wed, 4 Dec 2024 15:01:42 +0100 Subject: [PATCH 3/4] backport/rpc-v2: Refactor `archive_storage` method into subscription (#6483) (#6710) Backports: /~https://github.com/paritytech/polkadot-sdk/pull/6483 Builds on top of the backport: /~https://github.com/paritytech/polkadot-sdk/pull/6709 cc @paritytech/subxt-team Signed-off-by: Alexandru Vasile Co-authored-by: James Wilson --- .../client/rpc-spec-v2/src/archive/api.rs | 13 +- .../client/rpc-spec-v2/src/archive/archive.rs | 202 +++---- .../src/archive/archive_storage.rs | 105 +--- .../client/rpc-spec-v2/src/archive/mod.rs | 2 +- .../client/rpc-spec-v2/src/archive/tests.rs | 500 +++++++----------- .../rpc-spec-v2/src/chain_head/event.rs | 3 +- 
.../client/rpc-spec-v2/src/common/events.rs | 59 ++- .../client/rpc-spec-v2/src/common/storage.rs | 151 ++++-- substrate/client/service/src/builder.rs | 2 - 9 files changed, 458 insertions(+), 579 deletions(-) diff --git a/substrate/client/rpc-spec-v2/src/archive/api.rs b/substrate/client/rpc-spec-v2/src/archive/api.rs index dcfeaecb147bb..a205d0502c936 100644 --- a/substrate/client/rpc-spec-v2/src/archive/api.rs +++ b/substrate/client/rpc-spec-v2/src/archive/api.rs @@ -20,8 +20,7 @@ use crate::{ common::events::{ - ArchiveStorageDiffEvent, ArchiveStorageDiffItem, ArchiveStorageResult, - PaginatedStorageQuery, + ArchiveStorageDiffEvent, ArchiveStorageDiffItem, ArchiveStorageEvent, StorageQuery, }, MethodResult, }; @@ -100,13 +99,17 @@ pub trait ArchiveApi { /// # Unstable /// /// This method is unstable and subject to change in the future. - #[method(name = "archive_unstable_storage", blocking)] + #[subscription( + name = "archive_unstable_storage" => "archive_unstable_storageEvent", + unsubscribe = "archive_unstable_stopStorage", + item = ArchiveStorageEvent, + )] fn archive_unstable_storage( &self, hash: Hash, - items: Vec>, + items: Vec>, child_trie: Option, - ) -> RpcResult; + ); /// Returns the storage difference between two blocks. /// diff --git a/substrate/client/rpc-spec-v2/src/archive/archive.rs b/substrate/client/rpc-spec-v2/src/archive/archive.rs index 55054d91d85d1..62e44a0162419 100644 --- a/substrate/client/rpc-spec-v2/src/archive/archive.rs +++ b/substrate/client/rpc-spec-v2/src/archive/archive.rs @@ -20,13 +20,13 @@ use crate::{ archive::{ - archive_storage::{ArchiveStorage, ArchiveStorageDiff}, - error::Error as ArchiveError, - ArchiveApiServer, + archive_storage::ArchiveStorageDiff, error::Error as ArchiveError, ArchiveApiServer, }, - common::events::{ - ArchiveStorageDiffEvent, ArchiveStorageDiffItem, ArchiveStorageResult, - PaginatedStorageQuery, + common::{ + events::{ + ArchiveStorageDiffEvent, ArchiveStorageDiffItem, ArchiveStorageEvent, StorageQuery, + }, + storage::{QueryResult, StorageSubscriptionClient}, }, hex_string, MethodResult, SubscriptionTaskExecutor, }; @@ -57,42 +57,12 @@ use tokio::sync::mpsc; pub(crate) const LOG_TARGET: &str = "rpc-spec-v2::archive"; -/// The configuration of [`Archive`]. -pub struct ArchiveConfig { - /// The maximum number of items the `archive_storage` can return for a descendant query before - /// pagination is required. - pub max_descendant_responses: usize, - /// The maximum number of queried items allowed for the `archive_storage` at a time. - pub max_queried_items: usize, -} - -/// The maximum number of items the `archive_storage` can return for a descendant query before -/// pagination is required. -/// -/// Note: this is identical to the `chainHead` value. -const MAX_DESCENDANT_RESPONSES: usize = 5; - -/// The maximum number of queried items allowed for the `archive_storage` at a time. -/// -/// Note: A queried item can also be a descendant query which can return up to -/// `MAX_DESCENDANT_RESPONSES`. -const MAX_QUERIED_ITEMS: usize = 8; - /// The buffer capacity for each storage query. /// /// This is small because the underlying JSON-RPC server has /// its down buffer capacity per connection as well. const STORAGE_QUERY_BUF: usize = 16; -impl Default for ArchiveConfig { - fn default() -> Self { - Self { - max_descendant_responses: MAX_DESCENDANT_RESPONSES, - max_queried_items: MAX_QUERIED_ITEMS, - } - } -} - /// An API for archive RPC calls. pub struct Archive, Block: BlockT, Client> { /// Substrate client. 
@@ -103,11 +73,6 @@ pub struct Archive, Block: BlockT, Client> { executor: SubscriptionTaskExecutor, /// The hexadecimal encoded hash of the genesis block. genesis_hash: String, - /// The maximum number of items the `archive_storage` can return for a descendant query before - /// pagination is required. - storage_max_descendant_responses: usize, - /// The maximum number of queried items allowed for the `archive_storage` at a time. - storage_max_queried_items: usize, /// Phantom member to pin the block type. _phantom: PhantomData, } @@ -119,18 +84,9 @@ impl, Block: BlockT, Client> Archive { backend: Arc, genesis_hash: GenesisHash, executor: SubscriptionTaskExecutor, - config: ArchiveConfig, ) -> Self { let genesis_hash = hex_string(&genesis_hash.as_ref()); - Self { - client, - backend, - executor, - genesis_hash, - storage_max_descendant_responses: config.max_descendant_responses, - storage_max_queried_items: config.max_queried_items, - _phantom: PhantomData, - } + Self { client, backend, executor, genesis_hash, _phantom: PhantomData } } } @@ -260,47 +216,53 @@ where fn archive_unstable_storage( &self, + pending: PendingSubscriptionSink, hash: Block::Hash, - items: Vec>, + items: Vec>, child_trie: Option, - ) -> RpcResult { - let items = items - .into_iter() - .map(|query| { - let key = StorageKey(parse_hex_param(query.key)?); - let pagination_start_key = query - .pagination_start_key - .map(|key| parse_hex_param(key).map(|key| StorageKey(key))) - .transpose()?; - - // Paginated start key is only supported - if pagination_start_key.is_some() && !query.query_type.is_descendant_query() { - return Err(ArchiveError::InvalidParam( - "Pagination start key is only supported for descendants queries" - .to_string(), - )) - } + ) { + let mut storage_client = + StorageSubscriptionClient::::new(self.client.clone()); + + let fut = async move { + let Ok(mut sink) = pending.accept().await.map(Subscription::from) else { return }; - Ok(PaginatedStorageQuery { - key, - query_type: query.query_type, - pagination_start_key, + let items = match items + .into_iter() + .map(|query| { + let key = StorageKey(parse_hex_param(query.key)?); + Ok(StorageQuery { key, query_type: query.query_type }) }) - }) - .collect::, ArchiveError>>()?; + .collect::, ArchiveError>>() + { + Ok(items) => items, + Err(error) => { + let _ = sink.send(&ArchiveStorageEvent::err(error.to_string())); + return + }, + }; - let child_trie = child_trie - .map(|child_trie| parse_hex_param(child_trie)) - .transpose()? - .map(ChildInfo::new_default_from_vec); + let child_trie = child_trie.map(|child_trie| parse_hex_param(child_trie)).transpose(); + let child_trie = match child_trie { + Ok(child_trie) => child_trie.map(ChildInfo::new_default_from_vec), + Err(error) => { + let _ = sink.send(&ArchiveStorageEvent::err(error.to_string())); + return + }, + }; - let storage_client = ArchiveStorage::new( - self.client.clone(), - self.storage_max_descendant_responses, - self.storage_max_queried_items, - ); + let (tx, mut rx) = tokio::sync::mpsc::channel(STORAGE_QUERY_BUF); + let storage_fut = storage_client.generate_events(hash, items, child_trie, tx); - Ok(storage_client.handle_query(hash, items, child_trie)) + // We don't care about the return value of this join: + // - process_events might encounter an error (if the client disconnected) + // - storage_fut might encounter an error while processing a trie queries and + // the error is propagated via the sink. 
+ let _ = futures::future::join(storage_fut, process_storage_events(&mut rx, &mut sink)) + .await; + }; + + self.executor.spawn("substrate-rpc-subscription", Some("rpc"), fut.boxed()); } fn archive_unstable_storage_diff( @@ -337,24 +299,74 @@ where // - process_events might encounter an error (if the client disconnected) // - storage_fut might encounter an error while processing a trie queries and // the error is propagated via the sink. - let _ = futures::future::join(storage_fut, process_events(&mut rx, &mut sink)).await; + let _ = + futures::future::join(storage_fut, process_storage_diff_events(&mut rx, &mut sink)) + .await; }; self.executor.spawn("substrate-rpc-subscription", Some("rpc"), fut.boxed()); } } -/// Sends all the events to the sink. -async fn process_events(rx: &mut mpsc::Receiver, sink: &mut Subscription) { - while let Some(event) = rx.recv().await { - if event.is_done() { - log::debug!(target: LOG_TARGET, "Finished processing partial trie query"); - } else if event.is_err() { - log::debug!(target: LOG_TARGET, "Error encountered while processing partial trie query"); +/// Sends all the events of the storage_diff method to the sink. +async fn process_storage_diff_events( + rx: &mut mpsc::Receiver, + sink: &mut Subscription, +) { + loop { + tokio::select! { + _ = sink.closed() => { + return + }, + + maybe_event = rx.recv() => { + let Some(event) = maybe_event else { + break; + }; + + if event.is_done() { + log::debug!(target: LOG_TARGET, "Finished processing partial trie query"); + } else if event.is_err() { + log::debug!(target: LOG_TARGET, "Error encountered while processing partial trie query"); + } + + if sink.send(&event).await.is_err() { + return + } + } } + } +} + +/// Sends all the events of the storage method to the sink. +async fn process_storage_events(rx: &mut mpsc::Receiver, sink: &mut Subscription) { + loop { + tokio::select! { + _ = sink.closed() => { + break + } + + maybe_storage = rx.recv() => { + let Some(event) = maybe_storage else { + break; + }; + + match event { + Ok(None) => continue, + + Ok(Some(event)) => + if sink.send(&ArchiveStorageEvent::result(event)).await.is_err() { + return + }, - if sink.send(&event).await.is_err() { - return + Err(error) => { + let _ = sink.send(&ArchiveStorageEvent::err(error)).await; + return + } + } + } } } + + let _ = sink.send(&ArchiveStorageEvent::StorageDone).await; } diff --git a/substrate/client/rpc-spec-v2/src/archive/archive_storage.rs b/substrate/client/rpc-spec-v2/src/archive/archive_storage.rs index 5a3920882f00e..390db765a48f6 100644 --- a/substrate/client/rpc-spec-v2/src/archive/archive_storage.rs +++ b/substrate/client/rpc-spec-v2/src/archive/archive_storage.rs @@ -33,114 +33,13 @@ use crate::{ common::{ events::{ ArchiveStorageDiffEvent, ArchiveStorageDiffItem, ArchiveStorageDiffOperationType, - ArchiveStorageDiffResult, ArchiveStorageDiffType, ArchiveStorageResult, - PaginatedStorageQuery, StorageQueryType, StorageResult, + ArchiveStorageDiffResult, ArchiveStorageDiffType, StorageResult, }, - storage::{IterQueryType, QueryIter, Storage}, + storage::Storage, }, }; use tokio::sync::mpsc; -/// Generates the events of the `archive_storage` method. -pub struct ArchiveStorage { - /// Storage client. - client: Storage, - /// The maximum number of responses the API can return for a descendant query at a time. - storage_max_descendant_responses: usize, - /// The maximum number of queried items allowed for the `archive_storage` at a time. 
-	storage_max_queried_items: usize,
-}
-
-impl<Client, Block, BE> ArchiveStorage<Client, Block, BE> {
-	/// Constructs a new [`ArchiveStorage`].
-	pub fn new(
-		client: Arc<Client>,
-		storage_max_descendant_responses: usize,
-		storage_max_queried_items: usize,
-	) -> Self {
-		Self {
-			client: Storage::new(client),
-			storage_max_descendant_responses,
-			storage_max_queried_items,
-		}
-	}
-}
-
-impl<Client, Block, BE> ArchiveStorage<Client, Block, BE>
-where
-	Block: BlockT + 'static,
-	BE: Backend<Block> + 'static,
-	Client: StorageProvider<Block, BE> + 'static,
-{
-	/// Generate the response of the `archive_storage` method.
-	pub fn handle_query(
-		&self,
-		hash: Block::Hash,
-		mut items: Vec<PaginatedStorageQuery<StorageKey>>,
-		child_key: Option<ChildInfo>,
-	) -> ArchiveStorageResult {
-		let discarded_items = items.len().saturating_sub(self.storage_max_queried_items);
-		items.truncate(self.storage_max_queried_items);
-
-		let mut storage_results = Vec::with_capacity(items.len());
-		for item in items {
-			match item.query_type {
-				StorageQueryType::Value => {
-					match self.client.query_value(hash, &item.key, child_key.as_ref()) {
-						Ok(Some(value)) => storage_results.push(value),
-						Ok(None) => continue,
-						Err(error) => return ArchiveStorageResult::err(error),
-					}
-				},
-				StorageQueryType::Hash =>
-					match self.client.query_hash(hash, &item.key, child_key.as_ref()) {
-						Ok(Some(value)) => storage_results.push(value),
-						Ok(None) => continue,
-						Err(error) => return ArchiveStorageResult::err(error),
-					},
-				StorageQueryType::ClosestDescendantMerkleValue =>
-					match self.client.query_merkle_value(hash, &item.key, child_key.as_ref()) {
-						Ok(Some(value)) => storage_results.push(value),
-						Ok(None) => continue,
-						Err(error) => return ArchiveStorageResult::err(error),
-					},
-				StorageQueryType::DescendantsValues => {
-					match self.client.query_iter_pagination(
-						QueryIter {
-							query_key: item.key,
-							ty: IterQueryType::Value,
-							pagination_start_key: item.pagination_start_key,
-						},
-						hash,
-						child_key.as_ref(),
-						self.storage_max_descendant_responses,
-					) {
-						Ok((results, _)) => storage_results.extend(results),
-						Err(error) => return ArchiveStorageResult::err(error),
-					}
-				},
-				StorageQueryType::DescendantsHashes => {
-					match self.client.query_iter_pagination(
-						QueryIter {
-							query_key: item.key,
-							ty: IterQueryType::Hash,
-							pagination_start_key: item.pagination_start_key,
-						},
-						hash,
-						child_key.as_ref(),
-						self.storage_max_descendant_responses,
-					) {
-						Ok((results, _)) => storage_results.extend(results),
-						Err(error) => return ArchiveStorageResult::err(error),
-					}
-				},
-			};
-		}
-
-		ArchiveStorageResult::ok(storage_results, discarded_items)
-	}
-}
-
 /// Parse hex-encoded string parameter as raw bytes.
 ///
 /// If the parsing fails, returns an error propagated to the RPC method.
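Note: with `ArchiveStorage::handle_query` and its pagination limits gone, `archive_unstable_storage` is now consumed as a subscription that is simply drained until a terminal event. Below is a minimal sketch of driving the new API, reusing the test helpers that appear later in this patch (`setup_api`, `get_next_event`) and jsonrpsee's `subscribe_unbounded`; it is illustrative only and not part of the diff:

    // Subscribe once and drain events; no pagination_start_key bookkeeping needed.
    let (client, api) = setup_api();
    let block_hash = format!("{:?}", client.chain_info().genesis_hash);
    let items = vec![StorageQuery {
        key: hex_string(b":m"),
        query_type: StorageQueryType::DescendantsValues,
    }];
    let mut sub = api
        .subscribe_unbounded("archive_unstable_storage", rpc_params![&block_hash, items])
        .await
        .unwrap();
    loop {
        match get_next_event::<ArchiveStorageEvent>(&mut sub).await {
            // One event per matching key/value pair.
            ArchiveStorageEvent::Storage(res) => println!("{}: {:?}", res.key, res.result),
            ArchiveStorageEvent::StorageError(err) => panic!("query failed: {err:?}"),
            // Terminal event: the server has sent everything.
            ArchiveStorageEvent::StorageDone => break,
        }
    }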
diff --git a/substrate/client/rpc-spec-v2/src/archive/mod.rs b/substrate/client/rpc-spec-v2/src/archive/mod.rs
index 5f020c203eab4..14fa104c113a4 100644
--- a/substrate/client/rpc-spec-v2/src/archive/mod.rs
+++ b/substrate/client/rpc-spec-v2/src/archive/mod.rs
@@ -32,4 +32,4 @@ pub mod archive;
 pub mod error;
 
 pub use api::ArchiveApiServer;
-pub use archive::{Archive, ArchiveConfig};
+pub use archive::Archive;
diff --git a/substrate/client/rpc-spec-v2/src/archive/tests.rs b/substrate/client/rpc-spec-v2/src/archive/tests.rs
index 994c5d28bd617..cddaafde66594 100644
--- a/substrate/client/rpc-spec-v2/src/archive/tests.rs
+++ b/substrate/client/rpc-spec-v2/src/archive/tests.rs
@@ -19,16 +19,13 @@
 use crate::{
 	common::events::{
 		ArchiveStorageDiffEvent, ArchiveStorageDiffItem, ArchiveStorageDiffOperationType,
-		ArchiveStorageDiffResult, ArchiveStorageDiffType, ArchiveStorageMethodOk,
-		ArchiveStorageResult, PaginatedStorageQuery, StorageQueryType, StorageResultType,
+		ArchiveStorageDiffResult, ArchiveStorageDiffType, ArchiveStorageEvent, StorageQuery,
+		StorageQueryType, StorageResult, StorageResultType,
 	},
 	hex_string, MethodResult,
 };
 
-use super::{
-	archive::{Archive, ArchiveConfig},
-	*,
-};
+use super::{archive::Archive, *};
 
 use assert_matches::assert_matches;
 use codec::{Decode, Encode};
@@ -55,8 +52,6 @@ use substrate_test_runtime_client::{
 
 const CHAIN_GENESIS: [u8; 32] = [0; 32];
 const INVALID_HASH: [u8; 32] = [1; 32];
-const MAX_PAGINATION_LIMIT: usize = 5;
-const MAX_QUERIED_LIMIT: usize = 5;
 const KEY: &[u8] = b":mock";
 const VALUE: &[u8] = b"hello world";
 const CHILD_STORAGE_KEY: &[u8] = b"child";
@@ -65,10 +60,7 @@ const CHILD_VALUE: &[u8] = b"child value";
 type Header = substrate_test_runtime_client::runtime::Header;
 type Block = substrate_test_runtime_client::runtime::Block;
 
-fn setup_api(
-	max_descendant_responses: usize,
-	max_queried_items: usize,
-) -> (Arc<Client<Backend>>, RpcModule<Archive<Backend, Block, Client<Backend>>>) {
+fn setup_api() -> (Arc<Client<Backend>>, RpcModule<Archive<Backend, Block, Client<Backend>>>) {
 	let child_info = ChildInfo::new_default(CHILD_STORAGE_KEY);
 	let builder = TestClientBuilder::new().add_extra_child_storage(
 		&child_info,
@@ -83,7 +75,6 @@ fn setup_api(
 		backend,
 		CHAIN_GENESIS,
 		Arc::new(TokioTestExecutor::default()),
-		ArchiveConfig { max_descendant_responses, max_queried_items },
 	)
 	.into_rpc();
 
@@ -101,7 +92,7 @@ async fn get_next_event<T: serde::de::DeserializeOwned>(sub: &mut RpcSubscriptio
 
 #[tokio::test]
 async fn archive_genesis() {
-	let (_client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT);
+	let (_client, api) = setup_api();
 
 	let genesis: String =
 		api.call("archive_unstable_genesisHash", EmptyParams::new()).await.unwrap();
@@ -110,7 +101,7 @@ async fn archive_genesis() {
 
 #[tokio::test]
 async fn archive_body() {
-	let (client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT);
+	let (client, api) = setup_api();
 
 	// Invalid block hash.
 	let invalid_hash = hex_string(&INVALID_HASH);
@@ -144,7 +135,7 @@ async fn archive_body() {
 
 #[tokio::test]
 async fn archive_header() {
-	let (client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT);
+	let (client, api) = setup_api();
 
 	// Invalid block hash.
 	let invalid_hash = hex_string(&INVALID_HASH);
@@ -178,7 +169,7 @@ async fn archive_header() {
 
 #[tokio::test]
 async fn archive_finalized_height() {
-	let (client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT);
+	let (client, api) = setup_api();
 
 	let client_height: u32 = client.info().finalized_number.saturated_into();
 
@@ -190,7 +181,7 @@ async fn archive_finalized_height() {
 
 #[tokio::test]
 async fn archive_hash_by_height() {
-	let (client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT);
+	let (client, api) = setup_api();
 
 	// Genesis height.
 	let hashes: Vec<String> = api.call("archive_unstable_hashByHeight", [0]).await.unwrap();
@@ -296,7 +287,7 @@ async fn archive_hash_by_height() {
 
 #[tokio::test]
 async fn archive_call() {
-	let (client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT);
+	let (client, api) = setup_api();
 	let invalid_hash = hex_string(&INVALID_HASH);
 
 	// Invalid parameter (non-hex).
@@ -355,7 +346,7 @@ async fn archive_call() {
 
 #[tokio::test]
 async fn archive_storage_hashes_values() {
-	let (client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT);
+	let (client, api) = setup_api();
 
 	let block = BlockBuilderBuilder::new(&*client)
 		.on_parent_block(client.chain_info().genesis_hash)
@@ -369,42 +360,23 @@ async fn archive_storage_hashes_values() {
 	let block_hash = format!("{:?}", block.header.hash());
 	let key = hex_string(&KEY);
 
-	let items: Vec<PaginatedStorageQuery<String>> = vec![
-		PaginatedStorageQuery {
-			key: key.clone(),
-			query_type: StorageQueryType::DescendantsHashes,
-			pagination_start_key: None,
-		},
-		PaginatedStorageQuery {
-			key: key.clone(),
-			query_type: StorageQueryType::DescendantsValues,
-			pagination_start_key: None,
-		},
-		PaginatedStorageQuery {
-			key: key.clone(),
-			query_type: StorageQueryType::Hash,
-			pagination_start_key: None,
-		},
-		PaginatedStorageQuery {
-			key: key.clone(),
-			query_type: StorageQueryType::Value,
-			pagination_start_key: None,
-		},
+	let items: Vec<StorageQuery<String>> = vec![
+		StorageQuery { key: key.clone(), query_type: StorageQueryType::DescendantsHashes },
+		StorageQuery { key: key.clone(), query_type: StorageQueryType::DescendantsValues },
+		StorageQuery { key: key.clone(), query_type: StorageQueryType::Hash },
+		StorageQuery { key: key.clone(), query_type: StorageQueryType::Value },
 	];
 
-	let result: ArchiveStorageResult = api
-		.call("archive_unstable_storage", rpc_params![&block_hash, items.clone()])
+	let mut sub = api
+		.subscribe_unbounded("archive_unstable_storage", rpc_params![&block_hash, items.clone()])
 		.await
 		.unwrap();
 
-	match result {
-		ArchiveStorageResult::Ok(ArchiveStorageMethodOk { result, discarded_items }) => {
-			// Key has not been imported yet.
-			assert_eq!(result.len(), 0);
-			assert_eq!(discarded_items, 0);
-		},
-		_ => panic!("Unexpected result"),
-	};
+	// Key has not been imported yet.
+	assert_eq!(
+		get_next_event::<ArchiveStorageEvent>(&mut sub).await,
+		ArchiveStorageEvent::StorageDone,
+	);
 
 	// Import a block with the given key value pair.
 	let mut builder = BlockBuilderBuilder::new(&*client)
@@ -420,32 +392,103 @@ async fn archive_storage_hashes_values() {
 	let expected_hash = format!("{:?}", Blake2Hasher::hash(&VALUE));
 	let expected_value = hex_string(&VALUE);
 
-	let result: ArchiveStorageResult = api
-		.call("archive_unstable_storage", rpc_params![&block_hash, items])
+	let mut sub = api
+		.subscribe_unbounded("archive_unstable_storage", rpc_params![&block_hash, items])
 		.await
 		.unwrap();
 
-	match result {
-		ArchiveStorageResult::Ok(ArchiveStorageMethodOk { result, discarded_items }) => {
-			assert_eq!(result.len(), 4);
-			assert_eq!(discarded_items, 0);
-
-			assert_eq!(result[0].key, key);
-			assert_eq!(result[0].result, StorageResultType::Hash(expected_hash.clone()));
-			assert_eq!(result[1].key, key);
-			assert_eq!(result[1].result, StorageResultType::Value(expected_value.clone()));
-			assert_eq!(result[2].key, key);
-			assert_eq!(result[2].result, StorageResultType::Hash(expected_hash));
-			assert_eq!(result[3].key, key);
-			assert_eq!(result[3].result, StorageResultType::Value(expected_value));
-		},
-		_ => panic!("Unexpected result"),
-	};
+	assert_eq!(
+		get_next_event::<ArchiveStorageEvent>(&mut sub).await,
+		ArchiveStorageEvent::Storage(StorageResult {
+			key: key.clone(),
+			result: StorageResultType::Hash(expected_hash.clone()),
+			child_trie_key: None,
+		}),
+	);
+
+	assert_eq!(
+		get_next_event::<ArchiveStorageEvent>(&mut sub).await,
+		ArchiveStorageEvent::Storage(StorageResult {
+			key: key.clone(),
+			result: StorageResultType::Value(expected_value.clone()),
+			child_trie_key: None,
+		}),
+	);
+
+	assert_eq!(
+		get_next_event::<ArchiveStorageEvent>(&mut sub).await,
+		ArchiveStorageEvent::Storage(StorageResult {
+			key: key.clone(),
+			result: StorageResultType::Hash(expected_hash),
+			child_trie_key: None,
+		}),
+	);
+
+	assert_eq!(
+		get_next_event::<ArchiveStorageEvent>(&mut sub).await,
+		ArchiveStorageEvent::Storage(StorageResult {
+			key: key.clone(),
+			result: StorageResultType::Value(expected_value),
+			child_trie_key: None,
+		}),
+	);
+
+	assert_matches!(
+		get_next_event::<ArchiveStorageEvent>(&mut sub).await,
+		ArchiveStorageEvent::StorageDone
+	);
+}
+
+#[tokio::test]
+async fn archive_storage_hashes_values_child_trie() {
+	let (client, api) = setup_api();
+
+	// Get child storage values set in `setup_api`.
+	let child_info = hex_string(&CHILD_STORAGE_KEY);
+	let key = hex_string(&KEY);
+	let genesis_hash = format!("{:?}", client.genesis_hash());
+	let expected_hash = format!("{:?}", Blake2Hasher::hash(&CHILD_VALUE));
+	let expected_value = hex_string(&CHILD_VALUE);
+
+	let items: Vec<StorageQuery<String>> = vec![
+		StorageQuery { key: key.clone(), query_type: StorageQueryType::DescendantsHashes },
+		StorageQuery { key: key.clone(), query_type: StorageQueryType::DescendantsValues },
+	];
+
+	let mut sub = api
+		.subscribe_unbounded(
+			"archive_unstable_storage",
+			rpc_params![&genesis_hash, items, &child_info],
+		)
+		.await
+		.unwrap();
+
+	assert_eq!(
+		get_next_event::<ArchiveStorageEvent>(&mut sub).await,
+		ArchiveStorageEvent::Storage(StorageResult {
+			key: key.clone(),
+			result: StorageResultType::Hash(expected_hash.clone()),
+			child_trie_key: Some(child_info.clone()),
+		})
+	);
+
+	assert_eq!(
+		get_next_event::<ArchiveStorageEvent>(&mut sub).await,
+		ArchiveStorageEvent::Storage(StorageResult {
+			key: key.clone(),
+			result: StorageResultType::Value(expected_value.clone()),
+			child_trie_key: Some(child_info.clone()),
+		})
+	);
+
+	assert_eq!(
+		get_next_event::<ArchiveStorageEvent>(&mut sub).await,
+		ArchiveStorageEvent::StorageDone,
+	);
 }
 
 #[tokio::test]
 async fn archive_storage_closest_merkle_value() {
-	let (client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT);
+	let (client, api) = setup_api();
 
 	/// The core of this test.
 	///
@@ -457,55 +500,47 @@ async fn archive_storage_closest_merkle_value() {
 		api: &RpcModule<Archive<Backend, Block, Client<Backend>>>,
 		block_hash: String,
 	) -> HashMap<String, String> {
-		let result: ArchiveStorageResult = api
-			.call(
+		let mut sub = api
+			.subscribe_unbounded(
 				"archive_unstable_storage",
 				rpc_params![
 					&block_hash,
 					vec![
-						PaginatedStorageQuery {
+						StorageQuery {
 							key: hex_string(b":AAAA"),
 							query_type: StorageQueryType::ClosestDescendantMerkleValue,
-							pagination_start_key: None,
 						},
-						PaginatedStorageQuery {
+						StorageQuery {
 							key: hex_string(b":AAAB"),
 							query_type: StorageQueryType::ClosestDescendantMerkleValue,
-							pagination_start_key: None,
 						},
 						// Key with descendant.
-						PaginatedStorageQuery {
+						StorageQuery {
 							key: hex_string(b":A"),
 							query_type: StorageQueryType::ClosestDescendantMerkleValue,
-							pagination_start_key: None,
 						},
-						PaginatedStorageQuery {
+						StorageQuery {
 							key: hex_string(b":AA"),
 							query_type: StorageQueryType::ClosestDescendantMerkleValue,
-							pagination_start_key: None,
 						},
 						// Keys below this comment do not produce a result.
 						// Keys that exceed the keyspace of the trie.
-						PaginatedStorageQuery {
+						StorageQuery {
 							key: hex_string(b":AAAAX"),
 							query_type: StorageQueryType::ClosestDescendantMerkleValue,
-							pagination_start_key: None,
 						},
-						PaginatedStorageQuery {
+						StorageQuery {
 							key: hex_string(b":AAABX"),
 							query_type: StorageQueryType::ClosestDescendantMerkleValue,
-							pagination_start_key: None,
 						},
 						// Keys that are not part of the trie.
-						PaginatedStorageQuery {
+						StorageQuery {
 							key: hex_string(b":AAX"),
 							query_type: StorageQueryType::ClosestDescendantMerkleValue,
-							pagination_start_key: None,
 						},
-						PaginatedStorageQuery {
+						StorageQuery {
 							key: hex_string(b":AAAX"),
 							query_type: StorageQueryType::ClosestDescendantMerkleValue,
-							pagination_start_key: None,
 						},
 					]
 				],
 			)
 			.await
 			.unwrap();
 
-		let merkle_values: HashMap<_, _> = match result {
-			ArchiveStorageResult::Ok(ArchiveStorageMethodOk { result, .. }) => result
-				.into_iter()
-				.map(|res| {
-					let value = match res.result {
+		let mut merkle_values = HashMap::new();
+		loop {
+			let event = get_next_event::<ArchiveStorageEvent>(&mut sub).await;
+			match event {
+				ArchiveStorageEvent::Storage(result) => {
+					let str_result = match result.result {
 						StorageResultType::ClosestDescendantMerkleValue(value) => value,
-						_ => panic!("Unexpected StorageResultType"),
+						_ => panic!("Unexpected result type"),
 					};
-					(res.key, value)
-				})
-				.collect(),
-			_ => panic!("Unexpected result"),
-		};
+					merkle_values.insert(result.key, str_result);
+				},
+				ArchiveStorageEvent::StorageError(err) => panic!("Unexpected error {err:?}"),
+				ArchiveStorageEvent::StorageDone => break,
+			}
+		}
 
 		// Response for AAAA, AAAB, A and AA.
 		assert_eq!(merkle_values.len(), 4);
@@ -604,9 +641,9 @@ async fn archive_storage_closest_merkle_value() {
 }
 
 #[tokio::test]
-async fn archive_storage_paginate_iterations() {
+async fn archive_storage_iterations() {
 	// 1 iteration allowed before pagination kicks in.
-	let (client, api) = setup_api(1, MAX_QUERIED_LIMIT);
+	let (client, api) = setup_api();
 
 	// Import a new block with storage changes.
 	let mut builder = BlockBuilderBuilder::new(&*client)
@@ -625,237 +662,94 @@ async fn archive_storage_iterations() {
 
 	// Calling with an invalid hash.
 	let invalid_hash = hex_string(&INVALID_HASH);
-	let result: ArchiveStorageResult = api
-		.call(
+	let mut sub = api
+		.subscribe_unbounded(
 			"archive_unstable_storage",
 			rpc_params![
 				&invalid_hash,
-				vec![PaginatedStorageQuery {
-					key: hex_string(b":m"),
-					query_type: StorageQueryType::DescendantsValues,
-					pagination_start_key: None,
-				}]
-			],
-		)
-		.await
-		.unwrap();
-	match result {
-		ArchiveStorageResult::Err(_) => (),
-		_ => panic!("Unexpected result"),
-	};
-
-	// Valid call with storage at the key.
-	let result: ArchiveStorageResult = api
-		.call(
-			"archive_unstable_storage",
-			rpc_params![
-				&block_hash,
-				vec![PaginatedStorageQuery {
-					key: hex_string(b":m"),
-					query_type: StorageQueryType::DescendantsValues,
-					pagination_start_key: None,
-				}]
-			],
-		)
-		.await
-		.unwrap();
-	match result {
-		ArchiveStorageResult::Ok(ArchiveStorageMethodOk { result, discarded_items }) => {
-			assert_eq!(result.len(), 1);
-			assert_eq!(discarded_items, 0);
-
-			assert_eq!(result[0].key, hex_string(b":m"));
-			assert_eq!(result[0].result, StorageResultType::Value(hex_string(b"a")));
-		},
-		_ => panic!("Unexpected result"),
-	};
-
-	// Continue with pagination.
-	let result: ArchiveStorageResult = api
-		.call(
-			"archive_unstable_storage",
-			rpc_params![
-				&block_hash,
-				vec![PaginatedStorageQuery {
-					key: hex_string(b":m"),
-					query_type: StorageQueryType::DescendantsValues,
-					pagination_start_key: Some(hex_string(b":mo")),
-				}]
-			],
-		)
-		.await
-		.unwrap();
-	match result {
-		ArchiveStorageResult::Ok(ArchiveStorageMethodOk { result, discarded_items }) => {
-			assert_eq!(result.len(), 1);
-			assert_eq!(discarded_items, 0);
-
-			assert_eq!(result[0].key, hex_string(b":moD"));
-			assert_eq!(result[0].result, StorageResultType::Value(hex_string(b"abcmoD")));
-		},
-		_ => panic!("Unexpected result"),
-	};
-
-	// Continue with pagination.
-	let result: ArchiveStorageResult = api
-		.call(
-			"archive_unstable_storage",
-			rpc_params![
-				&block_hash,
-				vec![PaginatedStorageQuery {
-					key: hex_string(b":m"),
-					query_type: StorageQueryType::DescendantsValues,
-					pagination_start_key: Some(hex_string(b":moD")),
-				}]
-			],
-		)
-		.await
-		.unwrap();
-	match result {
-		ArchiveStorageResult::Ok(ArchiveStorageMethodOk { result, discarded_items }) => {
-			assert_eq!(result.len(), 1);
-			assert_eq!(discarded_items, 0);
-
-			assert_eq!(result[0].key, hex_string(b":moc"));
-			assert_eq!(result[0].result, StorageResultType::Value(hex_string(b"abc")));
-		},
-		_ => panic!("Unexpected result"),
-	};
+	assert_matches!(
+		get_next_event::<ArchiveStorageEvent>(&mut sub).await,
+		ArchiveStorageEvent::StorageError(_)
+	);
 
-	// Continue with pagination.
-	let result: ArchiveStorageResult = api
-		.call(
+	// Valid call with storage at the key.
+	let mut sub = api
+		.subscribe_unbounded(
 			"archive_unstable_storage",
 			rpc_params![
 				&block_hash,
-				vec![PaginatedStorageQuery {
+				vec![StorageQuery {
 					key: hex_string(b":m"),
 					query_type: StorageQueryType::DescendantsValues,
-					pagination_start_key: Some(hex_string(b":moc")),
 				}]
 			],
 		)
 		.await
 		.unwrap();
-	match result {
-		ArchiveStorageResult::Ok(ArchiveStorageMethodOk { result, discarded_items }) => {
-			assert_eq!(result.len(), 1);
-			assert_eq!(discarded_items, 0);
-			assert_eq!(result[0].key, hex_string(b":mock"));
-			assert_eq!(result[0].result, StorageResultType::Value(hex_string(b"abcd")));
-		},
-		_ => panic!("Unexpected result"),
-	};
+	assert_eq!(
+		get_next_event::<ArchiveStorageEvent>(&mut sub).await,
+		ArchiveStorageEvent::Storage(StorageResult {
+			key: hex_string(b":m"),
+			result: StorageResultType::Value(hex_string(b"a")),
+			child_trie_key: None,
+		})
+	);
 
-	// Continue with pagination until no keys are returned.
-	let result: ArchiveStorageResult = api
-		.call(
-			"archive_unstable_storage",
-			rpc_params![
-				&block_hash,
-				vec![PaginatedStorageQuery {
-					key: hex_string(b":m"),
-					query_type: StorageQueryType::DescendantsValues,
-					pagination_start_key: Some(hex_string(b":mock")),
-				}]
-			],
-		)
-		.await
-		.unwrap();
-	match result {
-		ArchiveStorageResult::Ok(ArchiveStorageMethodOk { result, discarded_items }) => {
-			assert_eq!(result.len(), 0);
-			assert_eq!(discarded_items, 0);
-		},
-		_ => panic!("Unexpected result"),
-	};
-}
+	assert_eq!(
+		get_next_event::<ArchiveStorageEvent>(&mut sub).await,
+		ArchiveStorageEvent::Storage(StorageResult {
+			key: hex_string(b":mo"),
+			result: StorageResultType::Value(hex_string(b"ab")),
+			child_trie_key: None,
+		})
+	);
 
-#[tokio::test]
-async fn archive_storage_discarded_items() {
-	// One query at a time
-	let (client, api) = setup_api(MAX_PAGINATION_LIMIT, 1);
+	assert_eq!(
+		get_next_event::<ArchiveStorageEvent>(&mut sub).await,
+		ArchiveStorageEvent::Storage(StorageResult {
+			key: hex_string(b":moD"),
+			result: StorageResultType::Value(hex_string(b"abcmoD")),
+			child_trie_key: None,
+		})
+	);
 
-	// Import a new block with storage changes.
-	let mut builder = BlockBuilderBuilder::new(&*client)
-		.on_parent_block(client.chain_info().genesis_hash)
-		.with_parent_block_number(0)
-		.build()
-		.unwrap();
-	builder.push_storage_change(b":m".to_vec(), Some(b"a".to_vec())).unwrap();
-	let block = builder.build().unwrap().block;
-	let block_hash = format!("{:?}", block.header.hash());
-	client.import(BlockOrigin::Own, block.clone()).await.unwrap();
+	assert_eq!(
+		get_next_event::<ArchiveStorageEvent>(&mut sub).await,
+		ArchiveStorageEvent::Storage(StorageResult {
+			key: hex_string(b":moc"),
+			result: StorageResultType::Value(hex_string(b"abc")),
+			child_trie_key: None,
+		})
+	);
 
-	// Valid call with storage at the key.
-	let result: ArchiveStorageResult = api
-		.call(
-			"archive_unstable_storage",
-			rpc_params![
-				&block_hash,
-				vec![
-					PaginatedStorageQuery {
-						key: hex_string(b":m"),
-						query_type: StorageQueryType::Value,
-						pagination_start_key: None,
-					},
-					PaginatedStorageQuery {
-						key: hex_string(b":m"),
-						query_type: StorageQueryType::Hash,
-						pagination_start_key: None,
-					},
-					PaginatedStorageQuery {
-						key: hex_string(b":m"),
-						query_type: StorageQueryType::Hash,
-						pagination_start_key: None,
-					}
-				]
-			],
-		)
-		.await
-		.unwrap();
-	match result {
-		ArchiveStorageResult::Ok(ArchiveStorageMethodOk { result, discarded_items }) => {
-			assert_eq!(result.len(), 1);
-			assert_eq!(discarded_items, 2);
+	assert_eq!(
+		get_next_event::<ArchiveStorageEvent>(&mut sub).await,
+		ArchiveStorageEvent::Storage(StorageResult {
+			key: hex_string(b":mock"),
+			result: StorageResultType::Value(hex_string(b"abcd")),
+			child_trie_key: None,
+		})
+	);
 
-			assert_eq!(result[0].key, hex_string(b":m"));
-			assert_eq!(result[0].result, StorageResultType::Value(hex_string(b"a")));
-		},
-		_ => panic!("Unexpected result"),
-	};
+	assert_matches!(
+		get_next_event::<ArchiveStorageEvent>(&mut sub).await,
+		ArchiveStorageEvent::StorageDone
+	);
 }
 
 #[tokio::test]
 async fn archive_storage_diff_main_trie() {
-	let (client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT);
+	let (client, api) = setup_api();
 
 	let mut builder = BlockBuilderBuilder::new(&*client)
 		.on_parent_block(client.chain_info().genesis_hash)
@@ -965,7 +859,7 @@ async fn archive_storage_diff_main_trie() {
 
 #[tokio::test]
 async fn archive_storage_diff_no_changes() {
-	let (client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT);
+	let (client, api) = setup_api();
 
 	// Build 2 identical blocks.
 	let mut builder = BlockBuilderBuilder::new(&*client)
@@ -1012,7 +906,7 @@ async fn archive_storage_diff_no_changes() {
 
 #[tokio::test]
 async fn archive_storage_diff_deleted_changes() {
-	let (client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT);
+	let (client, api) = setup_api();
 
 	// Blocks are imported as forks.
 	let mut builder = BlockBuilderBuilder::new(&*client)
@@ -1079,7 +973,7 @@ async fn archive_storage_diff_deleted_changes() {
 
 #[tokio::test]
 async fn archive_storage_diff_invalid_params() {
 	let invalid_hash = hex_string(&INVALID_HASH);
-	let (_, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT);
+	let (_, api) = setup_api();
 
 	// Invalid shape for parameters.
 	let items: Vec<ArchiveStorageDiffItem<String>> = Vec::new();
diff --git a/substrate/client/rpc-spec-v2/src/chain_head/event.rs b/substrate/client/rpc-spec-v2/src/chain_head/event.rs
index bd98630609105..de74145a3f088 100644
--- a/substrate/client/rpc-spec-v2/src/chain_head/event.rs
+++ b/substrate/client/rpc-spec-v2/src/chain_head/event.rs
@@ -235,7 +235,7 @@ pub struct OperationCallDone {
 	pub output: String,
 }
 
-/// The response of the `chainHead_call` method.
+/// The response of the `chainHead_storage` method.
 #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
 #[serde(rename_all = "camelCase")]
 pub struct OperationStorageItems {
@@ -536,6 +536,7 @@ mod tests {
 			items: vec![StorageResult {
 				key: "0x1".into(),
 				result: StorageResultType::Value("0x123".to_string()),
+				child_trie_key: None,
 			}],
 		});
 
diff --git a/substrate/client/rpc-spec-v2/src/common/events.rs b/substrate/client/rpc-spec-v2/src/common/events.rs
index 198a60bf4cac5..44f722c0c61ba 100644
--- a/substrate/client/rpc-spec-v2/src/common/events.rs
+++ b/substrate/client/rpc-spec-v2/src/common/events.rs
@@ -78,6 +78,10 @@ pub struct StorageResult {
 	/// The result of the query.
 	#[serde(flatten)]
 	pub result: StorageResultType,
+	/// The child trie key if provided.
+	#[serde(skip_serializing_if = "Option::is_none")]
+	#[serde(default)]
+	pub child_trie_key: Option<String>,
 }
 
 /// The type of the storage query.
@@ -105,23 +109,41 @@ pub struct StorageResultErr {
 
 /// The result of a storage call.
 #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
-#[serde(untagged)]
-pub enum ArchiveStorageResult {
+#[serde(rename_all = "camelCase")]
+#[serde(tag = "event")]
+pub enum ArchiveStorageEvent {
 	/// Query generated a result.
-	Ok(ArchiveStorageMethodOk),
+	Storage(StorageResult),
 	/// Query encountered an error.
-	Err(ArchiveStorageMethodErr),
+	StorageError(ArchiveStorageMethodErr),
+	/// Operation storage is done.
+	StorageDone,
 }
 
-impl ArchiveStorageResult {
-	/// Create a new `ArchiveStorageResult::Ok` result.
-	pub fn ok(result: Vec<StorageResult>, discarded_items: usize) -> Self {
-		Self::Ok(ArchiveStorageMethodOk { result, discarded_items })
+impl ArchiveStorageEvent {
+	/// Create a new `ArchiveStorageEvent::StorageError` event.
+	pub fn err(error: String) -> Self {
+		Self::StorageError(ArchiveStorageMethodErr { error })
 	}
 
-	/// Create a new `ArchiveStorageResult::Err` result.
-	pub fn err(error: String) -> Self {
-		Self::Err(ArchiveStorageMethodErr { error })
+	/// Create a new `ArchiveStorageEvent::Storage` event.
+	pub fn result(result: StorageResult) -> Self {
+		Self::Storage(result)
+	}
+
+	/// Checks if the event is a `StorageDone` event.
+	pub fn is_done(&self) -> bool {
+		matches!(self, Self::StorageDone)
+	}
+
+	/// Checks if the event is a `StorageError` event.
+	pub fn is_err(&self) -> bool {
+		matches!(self, Self::StorageError(_))
+	}
+
+	/// Checks if the event is a `Storage` event.
+	pub fn is_result(&self) -> bool {
+		matches!(self, Self::Storage(_))
 	}
 }
 
@@ -354,8 +376,11 @@ mod tests {
 	#[test]
 	fn storage_result() {
 		// Item with Value.
-		let item =
-			StorageResult { key: "0x1".into(), result: StorageResultType::Value("res".into()) };
+		let item = StorageResult {
+			key: "0x1".into(),
+			result: StorageResultType::Value("res".into()),
+			child_trie_key: None,
+		};
 		// Encode
 		let ser = serde_json::to_string(&item).unwrap();
 		let exp = r#"{"key":"0x1","value":"res"}"#;
@@ -365,8 +390,11 @@ mod tests {
 		assert_eq!(dec, item);
 
 		// Item with Hash.
-		let item =
-			StorageResult { key: "0x1".into(), result: StorageResultType::Hash("res".into()) };
+		let item = StorageResult {
+			key: "0x1".into(),
+			result: StorageResultType::Hash("res".into()),
+			child_trie_key: None,
+		};
 		// Encode
 		let ser = serde_json::to_string(&item).unwrap();
 		let exp = r#"{"key":"0x1","hash":"res"}"#;
@@ -379,6 +407,7 @@ mod tests {
 		let item = StorageResult {
 			key: "0x1".into(),
 			result: StorageResultType::ClosestDescendantMerkleValue("res".into()),
+			child_trie_key: None,
 		};
 		// Encode
 		let ser = serde_json::to_string(&item).unwrap();
diff --git a/substrate/client/rpc-spec-v2/src/common/storage.rs b/substrate/client/rpc-spec-v2/src/common/storage.rs
index 673e20b2bc780..a1e34d51530ec 100644
--- a/substrate/client/rpc-spec-v2/src/common/storage.rs
+++ b/substrate/client/rpc-spec-v2/src/common/storage.rs
@@ -24,7 +24,7 @@ use sc_client_api::{Backend, ChildInfo, StorageKey, StorageProvider};
 use sp_runtime::traits::Block as BlockT;
 use tokio::sync::mpsc;
 
-use super::events::{StorageResult, StorageResultType};
+use super::events::{StorageQuery, StorageQueryType, StorageResult, StorageResultType};
 use crate::hex_string;
 
 /// Call into the storage of blocks.
@@ -70,9 +70,6 @@ pub enum IterQueryType {
 /// The result of making a query call.
 pub type QueryResult = Result<Option<StorageResult>, String>;
 
-/// The result of iterating over keys.
-pub type QueryIterResult = Result<(Vec<StorageResult>, Option<QueryIter>), String>;
-
 impl<Client, Block, BE> Storage<Client, Block, BE>
 where
 	Block: BlockT + 'static,
@@ -97,6 +94,7 @@ where
 				QueryResult::Ok(opt.map(|storage_data| StorageResult {
 					key: hex_string(&key.0),
 					result: StorageResultType::Value(hex_string(&storage_data.0)),
+					child_trie_key: child_key.map(|c| hex_string(&c.storage_key())),
 				}))
 			})
 			.unwrap_or_else(|error| QueryResult::Err(error.to_string()))
@@ -120,6 +118,7 @@ where
 				QueryResult::Ok(opt.map(|storage_data| StorageResult {
 					key: hex_string(&key.0),
 					result: StorageResultType::Hash(hex_string(&storage_data.as_ref())),
+					child_trie_key: child_key.map(|c| hex_string(&c.storage_key())),
 				}))
 			})
 			.unwrap_or_else(|error| QueryResult::Err(error.to_string()))
@@ -149,6 +148,7 @@ where
 					StorageResult {
 						key: hex_string(&key.0),
 						result: StorageResultType::ClosestDescendantMerkleValue(result),
+						child_trie_key: child_key.map(|c| hex_string(&c.storage_key())),
 					}
 				}))
 			})
@@ -199,56 +199,6 @@ where
 		}
 	}
 
-	/// Iterate over at most the provided number of keys.
-	///
-	/// Returns the storage result with a potential next key to resume iteration.
-	pub fn query_iter_pagination(
-		&self,
-		query: QueryIter,
-		hash: Block::Hash,
-		child_key: Option<&ChildInfo>,
-		count: usize,
-	) -> QueryIterResult {
-		let QueryIter { ty, query_key, pagination_start_key } = query;
-
-		let mut keys_iter = if let Some(child_key) = child_key {
-			self.client.child_storage_keys(
-				hash,
-				child_key.to_owned(),
-				Some(&query_key),
-				pagination_start_key.as_ref(),
-			)
-		} else {
-			self.client.storage_keys(hash, Some(&query_key), pagination_start_key.as_ref())
-		}
-		.map_err(|err| err.to_string())?;
-
-		let mut ret = Vec::with_capacity(count);
-		let mut next_pagination_key = None;
-		for _ in 0..count {
-			let Some(key) = keys_iter.next() else { break };
-
-			next_pagination_key = Some(key.clone());
-
-			let result = match ty {
-				IterQueryType::Value => self.query_value(hash, &key, child_key),
-				IterQueryType::Hash => self.query_hash(hash, &key, child_key),
-			}?;
-
-			if let Some(value) = result {
-				ret.push(value);
-			}
-		}
-
-		// Save the next key if any to continue the iteration.
-		let maybe_next_query = keys_iter.next().map(|_| QueryIter {
-			ty,
-			query_key,
-			pagination_start_key: next_pagination_key,
-		});
-		Ok((ret, maybe_next_query))
-	}
-
 	/// Raw iterator over the keys.
 	pub fn raw_keys_iter(
 		&self,
@@ -264,3 +214,96 @@ where
 		keys_iter.map_err(|err| err.to_string())
 	}
 }
+
+/// Generates storage events for `chainHead_storage` and `archive_storage` subscriptions.
+pub struct StorageSubscriptionClient<Client, Block, BE> {
+	/// Storage client.
+	client: Storage<Client, Block, BE>,
+	_phantom: PhantomData<(BE, Block)>,
+}
+
+impl<Client, Block, BE> Clone for StorageSubscriptionClient<Client, Block, BE> {
+	fn clone(&self) -> Self {
+		Self { client: self.client.clone(), _phantom: PhantomData }
+	}
+}
+
+impl<Client, Block, BE> StorageSubscriptionClient<Client, Block, BE> {
+	/// Constructs a new [`StorageSubscriptionClient`].
+	pub fn new(client: Arc<Client>) -> Self {
+		Self { client: Storage::new(client), _phantom: PhantomData }
+	}
+}
+
+impl<Client, Block, BE> StorageSubscriptionClient<Client, Block, BE>
+where
+	Block: BlockT + 'static,
+	BE: Backend<Block> + 'static,
+	Client: StorageProvider<Block, BE> + Send + Sync + 'static,
+{
+	/// Generates storage events and sends them to the provided sender.
+	pub async fn generate_events(
+		&mut self,
+		hash: Block::Hash,
+		items: Vec<StorageQuery<StorageKey>>,
+		child_key: Option<ChildInfo>,
+		tx: mpsc::Sender<QueryResult>,
+	) -> Result<(), tokio::task::JoinError> {
+		let this = self.clone();
+
+		tokio::task::spawn_blocking(move || {
+			for item in items {
+				match item.query_type {
+					StorageQueryType::Value => {
+						let rp = this.client.query_value(hash, &item.key, child_key.as_ref());
+						if tx.blocking_send(rp).is_err() {
+							break;
+						}
+					},
+					StorageQueryType::Hash => {
+						let rp = this.client.query_hash(hash, &item.key, child_key.as_ref());
+						if tx.blocking_send(rp).is_err() {
+							break;
+						}
+					},
+					StorageQueryType::ClosestDescendantMerkleValue => {
+						let rp =
+							this.client.query_merkle_value(hash, &item.key, child_key.as_ref());
+						if tx.blocking_send(rp).is_err() {
+							break;
+						}
+					},
+					StorageQueryType::DescendantsValues => {
+						let query = QueryIter {
+							query_key: item.key,
+							ty: IterQueryType::Value,
+							pagination_start_key: None,
+						};
+						this.client.query_iter_pagination_with_producer(
+							query,
+							hash,
+							child_key.as_ref(),
+							&tx,
+						)
+					},
+					StorageQueryType::DescendantsHashes => {
+						let query = QueryIter {
+							query_key: item.key,
+							ty: IterQueryType::Hash,
+							pagination_start_key: None,
+						};
+						this.client.query_iter_pagination_with_producer(
+							query,
+							hash,
+							child_key.as_ref(),
+							&tx,
+						)
+					},
+				}
+			}
+		})
+		.await?;
+
+		Ok(())
+	}
+}
diff --git a/substrate/client/service/src/builder.rs b/substrate/client/service/src/builder.rs
index 2ea1b2fabdafc..c3b2084a7c9c2 100644
--- a/substrate/client/service/src/builder.rs
+++ b/substrate/client/service/src/builder.rs
@@ -732,8 +732,6 @@ where
 			backend.clone(),
 			genesis_hash,
 			task_executor.clone(),
-			// Defaults to sensible limits for the `Archive`.
-			sc_rpc_spec_v2::archive::ArchiveConfig::default(),
 		)
 		.into_rpc();
 		rpc_api.merge(archive_v2).map_err(|e| Error::Application(e.into()))?;

From 9163777aa625da7d34b37219b1015922ec345c02 Mon Sep 17 00:00:00 2001
From: Alexandru Vasile <60601340+lexnv@users.noreply.github.com>
Date: Wed, 4 Dec 2024 17:51:48 +0100
Subject: [PATCH 4/4] backport/2412: Update litep2p to v0.8.3 (#6742) (#6756)

Backport for: /~https://github.com/paritytech/polkadot-sdk/pull/6742

cc @paritytech/sdk-node

Signed-off-by: Alexandru Vasile
---
 Cargo.lock                                  | 4 ++--
 Cargo.toml                                  | 2 +-
 prdoc/pr_6742.prdoc                         | 11 +++++++++++
 substrate/client/network/src/litep2p/mod.rs | 2 +-
 4 files changed, 15 insertions(+), 4 deletions(-)
 create mode 100644 prdoc/pr_6742.prdoc

diff --git a/Cargo.lock b/Cargo.lock
index aedd6c622e6d0..f92d703a8f075 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -9414,9 +9414,9 @@ dependencies = [
 
 [[package]]
 name = "litep2p"
-version = "0.8.2"
+version = "0.8.3"
 source = "registry+/~https://github.com/rust-lang/crates.io-index"
-checksum = "569e7dbec8a0d4b08d30f4942cd579cfe8db5d3f83f8604abe61697c38d17e73"
+checksum = "14e490b5a6d486711fd0284bd30e607a287343f2935a59a9192bd7109e85f443"
 dependencies = [
  "async-trait",
  "bs58",
diff --git a/Cargo.toml b/Cargo.toml
index 8f69d59a0971e..d6b5a2b8389e7 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -848,7 +848,7 @@ linked-hash-map = { version = "0.5.4" }
 linked_hash_set = { version = "0.1.4" }
 linregress = { version = "0.5.1" }
 lite-json = { version = "0.2.0", default-features = false }
-litep2p = { version = "0.8.2", features = ["websocket"] }
+litep2p = { version = "0.8.3", features = ["websocket"] }
 log = { version = "0.4.22", default-features = false }
 macro_magic = { version = "0.5.1" }
 maplit = { version = "1.0.2" }
diff --git a/prdoc/pr_6742.prdoc b/prdoc/pr_6742.prdoc
new file mode 100644
index 0000000000000..92c3755a3c28a
--- /dev/null
+++ b/prdoc/pr_6742.prdoc
@@ -0,0 +1,11 @@
+title: Update litep2p backend to v0.8.3
+doc:
+- audience: Node Dev
+  description: |-
+    This release includes two fixes for small memory leaks on edge cases in the notification and request-response protocols.
+    While at it, a log message from litep2p has been downgraded.
+
+crates:
+- name: sc-network
+  bump: patch
+
diff --git a/substrate/client/network/src/litep2p/mod.rs b/substrate/client/network/src/litep2p/mod.rs
index 0dc6b962cff1e..8eff3a640b8a0 100644
--- a/substrate/client/network/src/litep2p/mod.rs
+++ b/substrate/client/network/src/litep2p/mod.rs
@@ -753,7 +753,7 @@ impl<B: BlockT + 'static, H: ExHashT> NetworkBackend<B, H> for Litep2pNetworkBac
 		}
 
 		if self.litep2p.add_known_address(peer.into(), iter::once(address.clone())) == 0usize {
-			log::warn!(
+			log::debug!(
 				target: LOG_TARGET,
 				"couldn't add known address ({address}) for {peer:?}, unsupported transport"
 			);
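Note: because the new `ArchiveStorageEvent` in `common/events.rs` above is internally tagged (`#[serde(tag = "event")]` with camelCase renaming), every `archive_unstable_storage` notification carries an `event` discriminant, and the `StorageResult` fields are flattened alongside it. The payloads below are hand-written illustrations derived from those serde attributes and the encoding tests in this patch, not output copied from a running node:

    {"event":"storage","key":"0x1","value":"0x123"}
    {"event":"storage","key":"0x1","hash":"0xabc"}
    {"event":"storageError","error":"Invalid block hash"}
    {"event":"storageDone"}

This is what lets a client drain the subscription with a single loop, as sketched earlier: dispatch on `event` until `storageDone` arrives.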