diff --git a/.cargo/config.toml b/.cargo/config.toml
new file mode 100644
index 000000000000..4796a2c26965
--- /dev/null
+++ b/.cargo/config.toml
@@ -0,0 +1,33 @@
+#
+# An auto defined `clippy` feature was introduced,
+# but it was found to clash with user defined features,
+# so was renamed to `cargo-clippy`.
+#
+# If you want standard clippy run:
+# RUSTFLAGS= cargo clippy
+[target.'cfg(feature = "cargo-clippy")']
+rustflags = [
+    "-Aclippy::all",
+    "-Dclippy::correctness",
+    "-Aclippy::if-same-then-else",
+    "-Aclippy::clone-double-ref",
+    "-Dclippy::complexity",
+    "-Aclippy::zero-prefixed-literal", # 00_1000_000
+    "-Aclippy::type_complexity", # raison d'etre
+    "-Aclippy::nonminimal-bool", # maybe
+    "-Aclippy::borrowed-box", # Reasonable to fix this one
+    "-Aclippy::too-many-arguments", # (Turning this on would lead to)
+    "-Aclippy::unnecessary_cast", # Types may change
+    "-Aclippy::identity-op", # One case where we do 0 +
+    "-Aclippy::useless_conversion", # Types may change
+    "-Aclippy::unit_arg", # stylistic.
+    "-Aclippy::option-map-unit-fn", # stylistic
+    "-Aclippy::bind_instead_of_map", # stylistic
+    "-Aclippy::erasing_op", # E.g. 0 * DOLLARS
+    "-Aclippy::eq_op", # In tests we test equality.
+    "-Aclippy::while_immutable_condition", # false positives
+    "-Aclippy::needless_option_as_deref", # false positives
+    "-Aclippy::derivable_impls", # false positives
+    "-Aclippy::stable_sort_primitive", # prefer stable sort
+    "-Aclippy::extra-unused-type-parameters", # stylistic
+]
diff --git a/.config/nextest.toml b/.config/nextest.toml
new file mode 100644
index 000000000000..1e18f8b5589c
--- /dev/null
+++ b/.config/nextest.toml
@@ -0,0 +1,126 @@
+# This is the default config used by nextest. It is embedded in the binary at
+# build time. It may be used as a template for .config/nextest.toml.
+
+[store]
+# The directory under the workspace root at which nextest-related files are
+# written. Profile-specific storage is currently written to dir/<profile-name>.
+dir = "target/nextest"
+
+# This section defines the default nextest profile. Custom profiles are layered
+# on top of the default profile.
+[profile.default]
+# "retries" defines the number of times a test should be retried. If set to a
+# non-zero value, tests that succeed on a subsequent attempt will be marked as
+# flaky. Can be overridden through the `--retries` option.
+# Examples
+# * retries = 3
+# * retries = { backoff = "fixed", count = 2, delay = "1s" }
+# * retries = { backoff = "exponential", count = 10, delay = "1s", jitter = true, max-delay = "10s" }
+retries = 5
+
+# The number of threads to run tests with. Supported values are either an integer or
+# the string "num-cpus". Can be overridden through the `--test-threads` option.
+# test-threads = "num-cpus"
+
+test-threads = 20
+
+# The number of threads required for each test. This is generally used in overrides to
+# mark certain tests as heavier than others. However, it can also be set as a global parameter.
+threads-required = 1
+
+# Show these test statuses in the output.
+#
+# The possible values this can take are:
+# * none: no output
+# * fail: show failed (including exec-failed) tests
+# * retry: show flaky and retried tests
+# * slow: show slow tests
+# * pass: show passed tests
+# * skip: show skipped tests (most useful for CI)
+# * all: all of the above
+#
+# Each value includes all the values above it; for example, "slow" includes
+# failed and retried tests.
+#
+# Can be overridden through the `--status-level` flag.
+status-level = "pass"
+
+# Similar to status-level, show these test statuses at the end of the run.
+final-status-level = "flaky"
+
+# "failure-output" defines when standard output and standard error for failing tests are produced.
+# Accepted values are
+# * "immediate": output failures as soon as they happen
+# * "final": output failures at the end of the test run
+# * "immediate-final": output failures as soon as they happen and at the end of
+#   the test run; combination of "immediate" and "final"
+# * "never": don't output failures at all
+#
+# For large test suites and CI it is generally useful to use "immediate-final".
+#
+# Can be overridden through the `--failure-output` option.
+failure-output = "immediate"
+
+# "success-output" controls production of standard output and standard error on success. This should
+# generally be set to "never".
+success-output = "never"
+
+# Cancel the test run on the first failure. For CI runs, consider setting this
+# to false.
+fail-fast = true
+
+# Treat a test that takes longer than the configured 'period' as slow, and print a message.
+# See the nextest documentation on slow tests for more information.
+#
+# Optional: specify the parameter 'terminate-after' with a non-zero integer,
+# which will cause slow tests to be terminated after the specified number of
+# periods have passed.
+# Example: slow-timeout = { period = "60s", terminate-after = 2 }
+slow-timeout = { period = "60s" }
+
+# Treat a test as leaky if, after the process is shut down, standard output and standard error
+# aren't closed within this duration.
+#
+# This usually happens when a test creates a child process and lets it inherit those
+# handles, but doesn't clean the child process up (especially when it fails).
+#
+# See the nextest documentation on leaky tests for more information.
+leak-timeout = "100ms"
+
+[profile.default.junit]
+# Output a JUnit report into the given file inside 'store.dir/<profile-name>'.
+# If unspecified, JUnit is not written out.
+
+path = "junit.xml"
+
+# The name of the top-level "report" element in the JUnit report. If aggregating
+# reports across different test runs, it may be useful to provide separate names
+# for each report.
+report-name = "substrate"
+
+# Whether standard output and standard error for passing tests should be stored in the JUnit report.
+# Output is stored in the <system-out> and <system-err> elements of the <testcase> element.
+store-success-output = false
+
+# Whether standard output and standard error for failing tests should be stored in the JUnit report.
+# Output is stored in the <system-out> and <system-err> elements of the <testcase> element.
+#
+# Note that if a description can be extracted from the output, it is always stored in the
+# JUnit report.
+store-failure-output = true
+
+# This profile is activated if MIRI_SYSROOT is set.
+[profile.default-miri]
+# Miri tests take up a lot of memory, so only run 1 test at a time by default.
+test-threads = 1
+
+# Mutual exclusion of tests with `cargo build` invocation as a lock to avoid multiple
+# simultaneous invocations clobbering each other.
+[test-groups] +serial-integration = { max-threads = 1 } + +# Running UI tests sequentially +# More info can be found here: https://github.com/paritytech/ci_cd/issues/754 +[[profile.default.overrides]] +filter = 'test(/(^ui$|_ui|ui_)/)' +test-group = 'serial-integration' diff --git a/.github/pr-custom-review.yml b/.github/pr-custom-review.yml new file mode 100644 index 000000000000..fc1836866208 --- /dev/null +++ b/.github/pr-custom-review.yml @@ -0,0 +1,79 @@ +# 🔒 PROTECTED: Changes to locks-review-team should be approved by the current locks-review-team +locks-review-team: locks-review +team-leads-team: polkadot-review +action-review-team: ci + +rules: + - name: CI files + check_type: changed_files + condition: + include: ^\.gitlab-ci\.yml|^docker/.*|^\.github/.*|^\.gitlab/.*|^\.config/nextest.toml|^\.cargo/.* + exclude: ^./gitlab/pipeline/zombienet.yml$ + min_approvals: 2 + teams: + - ci + - release-engineering + + - name: Audit rules + check_type: changed_files + condition: + include: ^polkadot/runtime\/(kusama|polkadot|common)\/.*|^polkadot/primitives/src\/.+\.rs$|^substrate/primitives/.*|^substrate/frame/.* + exclude: ^polkadot/runtime\/(kusama|polkadot)\/src\/weights\/.+\.rs$|^substrate\/frame\/.+\.md$ + all_distinct: + - min_approvals: 1 + teams: + - locks-review + - min_approvals: 1 + teams: + - polkadot-review + - min_approvals: 2 + teams: + - srlabs + + - name: Core developers + check_type: changed_files + condition: + include: .* + # excluding files from 'Runtime files' and 'CI files' rules + exclude: ^polkadot/runtime/(kusama|polkadot)/src/[^/]+\.rs$|^cumulus/parachains/runtimes/assets/(asset-hub-kusama|asset-hub-polkadot)/src/[^/]+\.rs$|^cumulus/parachains/runtimes/bridge-hubs/(bridge-hub-kusama|bridge-hub-polkadot)/src/[^/]+\.rs$|^cumulus/parachains/runtimes/collectives/collectives-polkadot/src/[^/]+\.rs$|^cumulus/parachains/common/src/[^/]+\.rs$|^substrate/frame/(?!.*(nfts/.*|uniques/.*|babe/.*|grandpa/.*|beefy|merkle-mountain-range/.*|contracts/.*|election|nomination-pools/.*|staking/.*|aura/.*))|^polkadot/runtime/(kusama|polkadot)/src/[^/]+\.rs$|^\.gitlab-ci\.yml|^(?!.*\.dic$|.*spellcheck\.toml$)scripts/ci/.*|^\.github/.* + min_approvals: 2 + teams: + - core-devs + + # cumulus + - name: Runtime files cumulus + check_type: changed_files + condition: ^cumulus/parachains/runtimes/assets/(asset-hub-kusama|asset-hub-polkadot)/src/[^/]+\.rs$|^cumulus/parachains/runtimes/bridge-hubs/(bridge-hub-kusama|bridge-hub-polkadot)/src/[^/]+\.rs$|^cumulus/parachains/runtimes/collectives/collectives-polkadot/src/[^/]+\.rs$|^cumulus/parachains/common/src/[^/]+\.rs$ + all_distinct: + - min_approvals: 1 + teams: + - locks-review + - min_approvals: 1 + teams: + - polkadot-review + + # if there are any changes in the bridges subtree (in case of backport changes back to bridges repo) + - name: Bridges subtree files + check_type: changed_files + condition: ^cumulus/bridges/.* + min_approvals: 1 + teams: + - bridges-core + + # substrate + + - name: FRAME coders substrate + check_type: changed_files + condition: + include: ^substrate/frame/(?!.*(nfts/.*|uniques/.*|babe/.*|grandpa/.*|beefy|merkle-mountain-range/.*|contracts/.*|election|nomination-pools/.*|staking/.*|aura/.*)) + all: + - min_approvals: 2 + teams: + - core-devs + - min_approvals: 1 + teams: + - frame-coders + +prevent-review-request: + teams: + - core-devs diff --git a/.github/workflows/check-D-labels.yml b/.github/workflows/check-D-labels.yml new file mode 100644 index 000000000000..94a818fcc2a3 --- /dev/null +++ 
b/.github/workflows/check-D-labels.yml @@ -0,0 +1,54 @@ +# name: Check D labels +# disabled in favor of pr-custom-review + +# on: +# pull_request: +# types: [labeled, opened, synchronize, unlabeled] +# paths: +# - cumulus/primitives/** +# - polkadot/runtime/polkadot/** +# - polkadot/runtime/kusama/** +# - polkadot/runtime/common/** +# - polkadot/primitives/src/** +# - substrate/frame/** +# - substrate/primitives/** + +# jobs: +# check-labels: +# runs-on: ubuntu-latest +# steps: +# - name: Pull image +# env: +# IMAGE: paritytech/ruled_labels:0.4.0 +# run: docker pull $IMAGE + +# - name: Check labels +# env: +# IMAGE: paritytech/ruled_labels:0.4.0 +# MOUNT: /work +# GITHUB_PR: ${{ github.event.pull_request.number }} +# GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} +# API_BASE: https://api.github.com/repos +# REPO: ${{ github.repository }} +# RULES_PATH: labels/ruled_labels +# CHECK_SPECS: specs_cumulus.yaml +# run: | +# echo "REPO: ${REPO}" +# echo "GITHUB_PR: ${GITHUB_PR}" +# # Clone repo with labels specs +# git clone https://github.com/paritytech/labels +# # Fetch the labels for the PR under test +# labels=$( curl -H "Authorization: token ${GITHUB_TOKEN}" -s "$API_BASE/${REPO}/pulls/${GITHUB_PR}" | jq '.labels | .[] | .name' | tr "\n" ",") + +# if [ -z "${labels}" ]; then +# docker run --rm -i -v $PWD/${RULES_PATH}/:$MOUNT $IMAGE check $MOUNT/$CHECK_SPECS --tags audit --no-label +# fi + +# labels_args=${labels: :-1} +# printf "Checking labels: %s\n" "${labels_args}" + +# # Prevent the shell from splitting labels with spaces +# IFS="," + +# # --dev is more useful to debug mode to debug +# docker run --rm -i -v $PWD/${RULES_PATH}/:$MOUNT $IMAGE check $MOUNT/$CHECK_SPECS --labels ${labels_args} --dev --tags audit diff --git a/.github/workflows/check-labels.yml b/.github/workflows/check-labels.yml new file mode 100644 index 000000000000..ea16bb9f3e4a --- /dev/null +++ b/.github/workflows/check-labels.yml @@ -0,0 +1,57 @@ +name: Check labels + +on: + pull_request: + types: [labeled, opened, synchronize, unlabeled] + +jobs: + check-labels: + runs-on: ubuntu-latest + steps: + - name: Pull image + env: + IMAGE: paritytech/ruled_labels:0.4.0 + run: docker pull $IMAGE + + - name: Check labels + env: + IMAGE: paritytech/ruled_labels:0.4.0 + MOUNT: /work + GITHUB_PR: ${{ github.event.pull_request.number }} + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + API_BASE: https://api.github.com/repos + REPO: ${{ github.repository }} + RULES_PATH: labels/ruled_labels + CHECK_SPECS: "[DRAFT]specs_monorepo.yaml" + run: | + echo "REPO: ${REPO}" + echo "GITHUB_PR: ${GITHUB_PR}" + + # Clone repo with labels specs + echo "Cloning repo with labels specs" + + # Temporary, before https://github.com/paritytech/labels/pull/29 is not merged + git clone https://github.com/paritytech/labels + cd labels + git fetch origin the-right-joyce-monorepo-labels + git checkout the-right-joyce-monorepo-labels + cd .. 
+ + # Fetch the labels for the PR under test + echo "Fetch the labels for $API_BASE/${REPO}/pulls/${GITHUB_PR}" + labels=$( curl -H "Authorization: token ${GITHUB_TOKEN}" -s "$API_BASE/${REPO}/pulls/${GITHUB_PR}" | jq '.labels | .[] | .name' | tr "\n" ",") + echo "Labels: ${labels}" + + if [ -z "${labels}" ]; then + echo "No labels found, checking without them" + docker run --rm -i -v $PWD/${RULES_PATH}/:$MOUNT $IMAGE check $MOUNT/$CHECK_SPECS --no-label + fi + + labels_args=${labels: :-1} + printf "Checking labels: %s\n" "${labels_args}" + + # Prevent the shell from splitting labels with spaces + IFS="," + + # --dev is more useful to debug mode to debug + docker run --rm -i -v $PWD/${RULES_PATH}/:$MOUNT $IMAGE check $MOUNT/$CHECK_SPECS --labels ${labels_args} --dev --tags PR diff --git a/.github/workflows/fmt-check.yml b/.github/workflows/fmt-check.yml new file mode 100644 index 000000000000..fd4b72061b92 --- /dev/null +++ b/.github/workflows/fmt-check.yml @@ -0,0 +1,22 @@ +name: Rustfmt check + +on: + push: + branches: + - master + pull_request: + types: [opened, synchronize, reopened, ready_for_review] + +jobs: + quick_check: + strategy: + matrix: + os: ["ubuntu-latest"] + runs-on: ${{ matrix.os }} + container: + image: paritytech/ci-unified:bullseye-1.70.0-2023-05-23-v20230706 + steps: + - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + + - name: Cargo fmt + run: cargo +nightly fmt --all -- --check diff --git a/.github/workflows/pr-custom-review.yml b/.github/workflows/pr-custom-review.yml new file mode 100644 index 000000000000..345f1f859d7c --- /dev/null +++ b/.github/workflows/pr-custom-review.yml @@ -0,0 +1,42 @@ +name: Assign reviewers + +on: + pull_request: + branches: + - master + - main + types: + - opened + - reopened + - synchronize + - review_requested + - review_request_removed + - ready_for_review + - converted_to_draft + pull_request_review: + +jobs: + pr-custom-review: + runs-on: ubuntu-latest + steps: + - name: Skip if pull request is in Draft + # `if: github.event.pull_request.draft == true` should be kept here, at + # the step level, rather than at the job level. The latter is not + # recommended because when the PR is moved from "Draft" to "Ready to + # review" the workflow will immediately be passing (since it was skipped), + # even though it hasn't actually ran, since it takes a few seconds for + # the workflow to start. This is also disclosed in: + # https://github.community/t/dont-run-actions-on-draft-pull-requests/16817/17 + # That scenario would open an opportunity for the check to be bypassed: + # 1. Get your PR approved + # 2. Move it to Draft + # 3. Push whatever commits you want + # 4. 
Move it to "Ready for review"; now the workflow is passing (it was + # skipped) and "Check reviews" is also passing (it won't be updated + # until the workflow is finished) + if: github.event.pull_request.draft == true + run: exit 1 + - name: pr-custom-review + uses: paritytech/pr-custom-review@master + with: + checks-reviews-api: http://pcr.parity-stg.parity.io/api/v1/check_reviews diff --git a/.gitignore b/.gitignore index e69de29bb2d1..b71c270d7368 100644 --- a/.gitignore +++ b/.gitignore @@ -0,0 +1,13 @@ +target/ +**/target/ +.idea +.vscode +.DS_Store +/.cargo/config +polkadot_argument_parsing +**/node_modules +**/chains/ +*.iml +.env +**/._* + diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml new file mode 100644 index 000000000000..13a11b9bb4f3 --- /dev/null +++ b/.gitlab-ci.yml @@ -0,0 +1,238 @@ +# .gitlab-ci.yml +# +# substrate +# +# pipelines can be triggered manually in the web +# +# Currently the file is divided into subfiles. Each stage has a different file which +# can be found here: gitlab/pipeline/.yml +# +# Instead of YAML anchors "extends" is used. +# Useful links: +# https://docs.gitlab.com/ee/ci/yaml/index.html#extends +# https://docs.gitlab.com/ee/ci/yaml/yaml_optimization.html#reference-tags +# +# SAMPLE JOB TEMPLATE - This is not a complete example but is enough to build a +# simple CI job. For full documentation, visit https://docs.gitlab.com/ee/ci/yaml/ +# +# my-example-job: +# stage: test # One of the stages listed below this job (required) +# image: paritytech/tools:latest # Any docker image (required) +# allow_failure: true # Allow the pipeline to continue if this job fails (default: false) +# needs: +# - job: test-linux # Any jobs that are required to run before this job (optional) +# variables: +# MY_ENVIRONMENT_VARIABLE: "some useful value" # Environment variables passed to the job (optional) +# script: +# - echo "List of shell commands to run in your job" +# - echo "You can also just specify a script here, like so:" +# - ./gitlab/my_amazing_script.sh + +stages: + - check + - test + - build + - publish + - short-benchmarks + - zombienet + - deploy + - notify + +workflow: + rules: + - if: $CI_COMMIT_TAG + - if: $CI_COMMIT_BRANCH + +variables: + CI_IMAGE: "paritytech/ci-unified:bullseye-1.70.0-2023-05-23-v20230706" + BUILDAH_IMAGE: "quay.io/buildah/stable:v1.29" + BUILDAH_COMMAND: "buildah --storage-driver overlay2" + RELENG_SCRIPTS_BRANCH: "master" + RUSTY_CACHIER_SINGLE_BRANCH: master + RUSTY_CACHIER_DONT_OPERATE_ON_MAIN_BRANCH: "true" + RUSTY_CACHIER_COMPRESSION_METHOD: zstd + NEXTEST_FAILURE_OUTPUT: immediate-final + NEXTEST_SUCCESS_OUTPUT: final + ZOMBIENET_IMAGE: "docker.io/paritytech/zombienet:v1.3.59" + DOCKER_IMAGES_VERSION: "${CI_COMMIT_REF_NAME}-${CI_COMMIT_SHORT_SHA}" + +default: + retry: + max: 2 + when: + - runner_system_failure + - unknown_failure + - api_failure + cache: {} + interruptible: true + +.collect-artifacts: + artifacts: + name: "${CI_JOB_NAME}_${CI_COMMIT_REF_NAME}" + when: on_success + expire_in: 1 days + paths: + - artifacts/ + +.collect-artifacts-short: + artifacts: + name: "${CI_JOB_NAME}_${CI_COMMIT_REF_NAME}" + when: on_failure + expire_in: 3 hours + paths: + - artifacts/ + +.prepare-env: + before_script: + # TODO: remove unset invocation when we'll be free from 'ENV RUSTC_WRAPPER=sccache' & sccache + # itself in all images + - unset RUSTC_WRAPPER + # $WASM_BUILD_WORKSPACE_HINT enables wasm-builder to find the Cargo.lock from within generated + # packages + - export WASM_BUILD_WORKSPACE_HINT="$PWD" + # ensure that RUSTFLAGS are set 
correctly + - echo $RUSTFLAGS + +.job-switcher: + before_script: + - if echo "$CI_DISABLED_JOBS" | grep -xF "$CI_JOB_NAME"; then echo "The job has been cancelled in CI settings"; exit 0; fi + +.kubernetes-env: + image: "${CI_IMAGE}" + before_script: + # - !reference [.job-switcher, before_script] + - !reference [.prepare-env, before_script] + tags: + - kubernetes-parity-build + +.rust-info-script: + script: + - rustup show + - cargo --version + - rustup +nightly show + - cargo +nightly --version + +.pipeline-stopper-vars: + script: + - !reference [.job-switcher, before_script] + - echo "Collecting env variables for the cancel-pipeline job" + - echo "FAILED_JOB_URL=${CI_JOB_URL}" > pipeline-stopper.env + - echo "FAILED_JOB_NAME=${CI_JOB_NAME}" >> pipeline-stopper.env + - echo "PR_NUM=${CI_COMMIT_REF_NAME}" >> pipeline-stopper.env + +.pipeline-stopper-artifacts: + artifacts: + reports: + dotenv: pipeline-stopper.env + +.docker-env: + image: "${CI_IMAGE}" + before_script: + - !reference [.job-switcher, before_script] + - !reference [.prepare-env, before_script] + - !reference [.rust-info-script, script] + - !reference [.rusty-cachier, before_script] + - !reference [.pipeline-stopper-vars, script] + after_script: + - !reference [.rusty-cachier, after_script] + tags: + - linux-docker-vm-c2 + +# rusty-cachier's hidden job. Parts of this job are used to instrument the pipeline's other real jobs with rusty-cachier +# Description of the commands is available here - https://gitlab.parity.io/parity/infrastructure/ci_cd/rusty-cachier/client#description +.rusty-cachier: + before_script: + # - curl -s https://gitlab-ci-token:${CI_JOB_TOKEN}@gitlab.parity.io/parity/infrastructure/ci_cd/rusty-cachier/client/-/raw/release/util/install.sh | bash + # - rusty-cachier environment check --gracefully + # - $(rusty-cachier environment inject) + # - rusty-cachier project mtime + - echo tbd + after_script: + - echo tbd + # - env RUSTY_CACHIER_SUPRESS_OUTPUT=true rusty-cachier snapshot destroy + +.common-refs: + rules: + - if: $CI_PIPELINE_SOURCE == "web" + - if: $CI_PIPELINE_SOURCE == "schedule" + - if: $CI_COMMIT_REF_NAME == "master" + - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs + - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. v1.0, v2.1rc1 + +.test-pr-refs: + rules: + - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs + +# handle the specific case where benches could store incorrect bench data because of the downstream staging runs +# exclude cargo-check-benches from such runs +.test-refs-check-benches: + rules: + - if: $CI_COMMIT_REF_NAME == "master" && $CI_PIPELINE_SOURCE == "pipeline" && $CI_IMAGE =~ /staging$/ + when: never + - if: $CI_PIPELINE_SOURCE == "web" + - if: $CI_PIPELINE_SOURCE == "schedule" + - if: $CI_COMMIT_REF_NAME == "master" + - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs + - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. v1.0, v2.1rc1 + +.test-refs-no-trigger: + rules: + - if: $CI_PIPELINE_SOURCE == "pipeline" + when: never + - if: $CI_PIPELINE_SOURCE == "web" + - if: $CI_PIPELINE_SOURCE == "schedule" + - if: $CI_COMMIT_REF_NAME == "master" + - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs + - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. 
v1.0, v2.1rc1 + - if: $CI_COMMIT_REF_NAME =~ /^ci-release-.*$/ + +.test-refs-no-trigger-prs-only: + rules: + - if: $CI_PIPELINE_SOURCE == "pipeline" + when: never + - if: $CI_PIPELINE_SOURCE == "web" + - if: $CI_PIPELINE_SOURCE == "schedule" + - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs + +.publish-refs: + rules: + - if: $CI_PIPELINE_SOURCE == "pipeline" + when: never + - if: $CI_PIPELINE_SOURCE == "web" + - if: $CI_PIPELINE_SOURCE == "schedule" + - if: $CI_COMMIT_REF_NAME == "master" + - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. v1.0, v2.1rc1 + +.build-refs: + # publish-refs + PRs + rules: + - if: $CI_PIPELINE_SOURCE == "pipeline" + when: never + - if: $CI_PIPELINE_SOURCE == "web" + - if: $CI_PIPELINE_SOURCE == "schedule" + - if: $CI_COMMIT_REF_NAME == "master" + - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. v1.0, v2.1rc1 + - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs + +.zombienet-refs: + extends: .build-refs + +include: + # weights jobs + # - gitlab/pipeline/weights.yml + # check jobs + - .gitlab/pipeline/check.yml + # test jobs + - .gitlab/pipeline/test.yml + # build jobs + - .gitlab/pipeline/build.yml + # short-benchmarks jobs + - .gitlab/pipeline/short-benchmarks.yml + # publish jobs + - .gitlab/pipeline/publish.yml + # zombienet jobs + - .gitlab/pipeline/zombienet.yml + # # timestamp handler + # - project: parity/infrastructure/ci_cd/shared + # ref: v0.2 + # file: /common/timestamp.yml diff --git a/.gitlab/check-each-crate.py b/.gitlab/check-each-crate.py new file mode 100755 index 000000000000..adad4f5bd583 --- /dev/null +++ b/.gitlab/check-each-crate.py @@ -0,0 +1,57 @@ +#!/usr/bin/env python3 + +# A script that checks each workspace crate individually. +# It's relevant to check workspace crates individually because otherwise their compilation problems +# due to feature misconfigurations won't be caught, as exemplified by +# https://github.com/paritytech/substrate/issues/12705 +# +# `check-each-crate.py target_group groups_total` +# +# - `target_group`: Integer starting from 1, the group this script should execute. +# - `groups_total`: Integer starting from 1, total number of groups. + +import subprocess, sys + +# Get all crates +output = subprocess.check_output(["cargo", "tree", "--locked", "--workspace", "--depth", "0", "--prefix", "none"]) + +# Convert the output into a proper list +crates = [] +for line in output.splitlines(): + if line != b"": + crates.append(line.decode('utf8').split(" ")[0]) + +# Make the list unique and sorted +crates = list(set(crates)) +crates.sort() + +target_group = int(sys.argv[1]) - 1 +groups_total = int(sys.argv[2]) + +if len(crates) == 0: + print("No crates detected!", file=sys.stderr) + sys.exit(1) + +print(f"Total crates: {len(crates)}", file=sys.stderr) + +crates_per_group = len(crates) // groups_total + +# If this is the last runner, we need to take care of crates +# after the group that we lost because of the integer division. 
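+#
+# A worked example of this split (illustrative numbers only, not taken from the CI setup):
+# with 11 crates and groups_total = 3, crates_per_group = 11 // 3 = 3, so
+# group 1 checks crates[0..3], group 2 checks crates[3..6], and the last group
+# additionally absorbs the 11 % 3 = 2 leftover crates, i.e. crates[6..11].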
+if target_group + 1 == groups_total: + overflow_crates = len(crates) % groups_total +else: + overflow_crates = 0 + +print(f"Crates per group: {crates_per_group}", file=sys.stderr) + +# Check each crate +for i in range(0, crates_per_group + overflow_crates): + crate = crates_per_group * target_group + i + + print(f"Checking {crates[crate]}", file=sys.stderr) + + res = subprocess.run(["cargo", "check", "--locked", "-p", crates[crate]]) + + if res.returncode != 0: + sys.exit(1) diff --git a/.gitlab/common/lib.sh b/.gitlab/common/lib.sh new file mode 100755 index 000000000000..ba5b17148728 --- /dev/null +++ b/.gitlab/common/lib.sh @@ -0,0 +1,195 @@ +#!/bin/sh + +api_base="https://api.github.com/repos" + +# Function to take 2 git tags/commits and get any lines from commit messages +# that contain something that looks like a PR reference: e.g., (#1234) +sanitised_git_logs(){ + git --no-pager log --pretty=format:"%s" "$1...$2" | + # Only find messages referencing a PR + grep -E '\(#[0-9]+\)' | + # Strip any asterisks + sed 's/^* //g' +} + +# Checks whether a tag on github has been verified +# repo: 'organization/repo' +# tagver: 'v1.2.3' +# Usage: check_tag $repo $tagver +check_tag () { + repo=$1 + tagver=$2 + if [ -n "$GITHUB_RELEASE_TOKEN" ]; then + echo '[+] Fetching tag using privileged token' + tag_out=$(curl -H "Authorization: token $GITHUB_RELEASE_TOKEN" -s "$api_base/$repo/git/refs/tags/$tagver") + else + echo '[+] Fetching tag using unprivileged token' + tag_out=$(curl -H "Authorization: token $GITHUB_PR_TOKEN" -s "$api_base/$repo/git/refs/tags/$tagver") + fi + tag_sha=$(echo "$tag_out" | jq -r .object.sha) + object_url=$(echo "$tag_out" | jq -r .object.url) + if [ "$tag_sha" = "null" ]; then + return 2 + fi + echo "[+] Tag object SHA: $tag_sha" + verified_str=$(curl -H "Authorization: token $GITHUB_RELEASE_TOKEN" -s "$object_url" | jq -r .verification.verified) + if [ "$verified_str" = "true" ]; then + # Verified, everything is good + return 0 + else + # Not verified. Bad juju. + return 1 + fi +} + +# Checks whether a given PR has a given label. +# repo: 'organization/repo' +# pr_id: 12345 +# label: B1-silent +# Usage: has_label $repo $pr_id $label +has_label(){ + repo="$1" + pr_id="$2" + label="$3" + + # These will exist if the function is called in Gitlab. + # If the function's called in Github, we should have GITHUB_ACCESS_TOKEN set + # already. 
+ if [ -n "$GITHUB_RELEASE_TOKEN" ]; then + GITHUB_TOKEN="$GITHUB_RELEASE_TOKEN" + elif [ -n "$GITHUB_PR_TOKEN" ]; then + GITHUB_TOKEN="$GITHUB_PR_TOKEN" + fi + + out=$(curl -H "Authorization: token $GITHUB_TOKEN" -s "$api_base/$repo/pulls/$pr_id") + [ -n "$(echo "$out" | tr -d '\r\n' | jq ".labels | .[] | select(.name==\"$label\")")" ] +} + +github_label () { + echo + echo "# run github-api job for labeling it ${1}" + curl -sS -X POST \ + -F "token=${CI_JOB_TOKEN}" \ + -F "ref=master" \ + -F "variables[LABEL]=${1}" \ + -F "variables[PRNO]=${CI_COMMIT_REF_NAME}" \ + -F "variables[PROJECT]=paritytech/polkadot" \ + "${GITLAB_API}/projects/${GITHUB_API_PROJECT}/trigger/pipeline" +} + +# Formats a message into a JSON string for posting to Matrix +# message: 'any plaintext message' +# formatted_message: 'optional message formatted in html' +# Usage: structure_message $content $formatted_content (optional) +structure_message() { + if [ -z "$2" ]; then + body=$(jq -Rs --arg body "$1" '{"msgtype": "m.text", $body}' < /dev/null) + else + body=$(jq -Rs --arg body "$1" --arg formatted_body "$2" '{"msgtype": "m.text", $body, "format": "org.matrix.custom.html", $formatted_body}' < /dev/null) + fi + echo "$body" +} + +# Post a message to a matrix room +# body: '{body: "JSON string produced by structure_message"}' +# room_id: !fsfSRjgjBWEWffws:matrix.parity.io +# access_token: see https://matrix.org/docs/guides/client-server-api/ +# Usage: send_message $body (json formatted) $room_id $access_token +send_message() { +curl -XPOST -d "$1" "https://matrix.parity.io/_matrix/client/r0/rooms/$2/send/m.room.message?access_token=$3" +} + +# Pretty-printing functions +boldprint () { printf "|\n| \033[1m%s\033[0m\n|\n" "${@}"; } +boldcat () { printf "|\n"; while read -r l; do printf "| \033[1m%s\033[0m\n" "${l}"; done; printf "|\n" ; } + +skip_if_companion_pr() { + url="https://api.github.com/repos/paritytech/polkadot/pulls/${CI_COMMIT_REF_NAME}" + echo "[+] API URL: $url" + + pr_title=$(curl -sSL -H "Authorization: token ${GITHUB_PR_TOKEN}" "$url" | jq -r .title) + echo "[+] PR title: $pr_title" + + if echo "$pr_title" | grep -qi '^companion'; then + echo "[!] PR is a companion PR. Build is already done in substrate" + exit 0 + else + echo "[+] PR is not a companion PR. Proceeding test" + fi +} + +# Fetches the tag name of the latest release from a repository +# repo: 'organisation/repo' +# Usage: latest_release 'paritytech/polkadot' +latest_release() { + curl -s "$api_base/$1/releases/latest" | jq -r '.tag_name' +} + +# Check for runtime changes between two commits. This is defined as any changes +# to /primitives/src/* and any *production* chains under /runtime +has_runtime_changes() { + from=$1 + to=$2 + + if git diff --name-only "${from}...${to}" \ + | grep -q -e '^runtime/polkadot' -e '^runtime/kusama' -e '^primitives/src/' -e '^runtime/common' + then + return 0 + else + return 1 + fi +} + +# given a bootnode and the path to a chainspec file, this function will create a new chainspec file +# with only the bootnode specified and test whether that bootnode provides peers +# The optional third argument is the index of the bootnode in the list of bootnodes, this is just used to pick an ephemeral +# port for the node to run on. 
If you're only testing one, it'll just use the first ephemeral port
+# BOOTNODE: /dns/polkadot-connect-0.parity.io/tcp/443/wss/p2p/12D3KooWEPmjoRpDSUuiTjvyNDd8fejZ9eNWH5bE965nyBMDrB4o
+# CHAINSPEC_FILE: /path/to/polkadot.json
+check_bootnode(){
+  BOOTNODE=$1
+  BASE_CHAINSPEC=$2
+  RUNTIME=$(basename "$BASE_CHAINSPEC" | cut -d '.' -f 1)
+  MIN_PEERS=1
+
+  # Generate a temporary chainspec file containing only the bootnode we care about
+  TMP_CHAINSPEC_FILE="$RUNTIME.$(echo "$BOOTNODE" | tr '/' '_').tmp.json"
+  jq ".bootNodes = [\"$BOOTNODE\"] " < "$BASE_CHAINSPEC" > "$TMP_CHAINSPEC_FILE"
+
+  # Grab an unused port by binding to port 0 and then immediately closing the socket
+  # This is a bit of a hack, but it's the only way to do it in the shell
+  RPC_PORT=$(python -c "import socket; s=socket.socket(); s.bind(('', 0)); print(s.getsockname()[1]); s.close()")
+
+  echo "[+] Checking bootnode $BOOTNODE"
+  polkadot --chain "$TMP_CHAINSPEC_FILE" --no-mdns --rpc-port="$RPC_PORT" --tmp > /dev/null 2>&1 &
+  POLKADOT_PID=$!
+  # Wait a few seconds for the node to start up
+  sleep 5
+
+  MAX_POLLS=10
+  TIME_BETWEEN_POLLS=3
+  for _ in $(seq 1 "$MAX_POLLS"); do
+    # Check the health endpoint of the RPC node
+    PEERS="$(curl -s -X POST -H "Content-Type: application/json" --data '{"jsonrpc":"2.0","method":"system_health","params":[],"id":1}' http://localhost:"$RPC_PORT" | jq -r '.result.peers')"
+    # Sometimes due to machine load or other reasons, we don't get a response from the RPC node
+    # If $PEERS is an empty variable, make it 0 so we can still do the comparison
+    if [ -z "$PEERS" ]; then
+      PEERS=0
+    fi
+    if [ "$PEERS" -ge $MIN_PEERS ]; then
+      echo "[+] $PEERS peers found for $BOOTNODE"
+      echo "    Bootnode appears contactable"
+      kill $POLKADOT_PID
+      # Delete the temporary chainspec file now we're done running the node
+      rm "$TMP_CHAINSPEC_FILE"
+      return 0
+    fi
+    sleep "$TIME_BETWEEN_POLLS"
+  done
+  kill $POLKADOT_PID
+  # Delete the temporary chainspec file now we're done running the node
+  rm "$TMP_CHAINSPEC_FILE"
+  echo "[!] No peers found for $BOOTNODE"
+  echo "    Bootnode appears unreachable"
+  return 1
+}
diff --git a/.gitlab/ensure-deps.sh b/.gitlab/ensure-deps.sh
new file mode 100755
index 000000000000..7087200cef51
--- /dev/null
+++ b/.gitlab/ensure-deps.sh
@@ -0,0 +1,80 @@
+#!/usr/bin/env bash
+
+# The script is meant to check if the rules regarding package
+# dependencies are satisfied.
+# The general format is:
+# [top-lvl-dir] MESSAGE/[other-top-dir]
+
+# For instance, no crate within the `./client` directory
+# is allowed to import any crate with a directory path containing `frame`.
+# Such a rule is written as: `client crates must not depend on anything in /frame`.
+
+# The script should be run from the main repo directory!
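+
+# Illustrative usage (assumed invocation; in this repo the check-dependency-rules
+# CI job runs the script from the workspace root):
+#
+#   ./.gitlab/ensure-deps.sh
+#
+# The exit status equals the number of MUST_NOT violations, so 0 means all hard
+# rules passed; PLEASE_DONT violations are only printed and never fail the run.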
+ +set -u + +# HARD FAILING +MUST_NOT=( + "client crates must not depend on anything in /frame" + "client crates must not depend on anything in /node" + "frame crates must not depend on anything in /node" + "frame crates must not depend on anything in /client" + "primitives crates must not depend on anything in /frame" +) + +# ONLY DISPLAYED, script still succeeds +PLEASE_DONT=( + "primitives crates should not depend on anything in /client" +) + +VIOLATIONS=() +PACKAGES=() + +function check_rule() { + rule=$1 + from=$(echo $rule | cut -f1 -d\ ) + to=$(echo $rule | cut -f2 -d\/) + + cd $from + echo "Checking rule '$rule'" + packages=$(find -name Cargo.toml | xargs grep -wn "path.*\.\.\/$to") + has_references=$(echo -n $packages | wc -c) + if [ "$has_references" != "0" ]; then + VIOLATIONS+=("$rule") + # Find packages that violate: + PACKAGES+=("$packages") + fi + cd - > /dev/null +} + +for rule in "${MUST_NOT[@]}" +do + check_rule "$rule"; +done + +# Only the MUST NOT will be counted towards failure +HARD_VIOLATIONS=${#VIOLATIONS[@]} + + +for rule in "${PLEASE_DONT[@]}" +do + check_rule "$rule"; +done + +# Display violations and fail +I=0 +for v in "${VIOLATIONS[@]}" +do + cat << EOF + +=========================================== +======= Violation of rule: $v +=========================================== +${PACKAGES[$I]} + + +EOF + I=$I+1 +done + +exit $HARD_VIOLATIONS diff --git a/.gitlab/lingua.dic b/.gitlab/lingua.dic new file mode 100644 index 000000000000..d9dad4540277 --- /dev/null +++ b/.gitlab/lingua.dic @@ -0,0 +1,342 @@ +150 +2D +A&V +accessor/MS +AccountId +activations +acyclic +adversary/SM +allocator/SM +annualised +anonymize/D +Apache-2.0/M +API +APIs +arg/MS +assignee/SM +async +asynchrony +autogenerated +backable +backend/MS +benchmark/DSMG +BFT/M +bitfield/MS +bitwise +blake2/MS +blockchain/MS +borked +broadcast/UDSMG +BTC/S +canonicalization +canonicalize/D +CentOS +CLI/MS +codebase/SM +codec/SM +commit/D +comparator +computable +conclude/UD +config/MS +could've +crowdfund +crowdloan/MSG +crypto/MS +CSM +Cucumber/MS +customizable/B +DDoS +Debian/M +decodable/MS +decrement +deduplicated +deduplication +deinitializing +dequeue/SD +dequeuing +deregister +deserialize/G +DHT +disincentivize/D +dispatchable/SM +DLEQ +DM +DMP/SM +DMQ +DoS +DOT +DOTs +ECDSA +ed25519 +encodable +enqueue/D +enqueue/DMSG +entrypoint/MS +enum +ERC-20 +ETH/S +ethereum/MS +externality/MS +extrinsic +extrinsics +fedora/M +finalize/B +FRAME/MS +FSMs +functor +fungibility +gameable +getter/MS +GiB/S +GKE +GNUNet +GPL/M +GPLv3/M +Grafana/MS +Gurke/MS +gurke/MS +Handler/MS +HMP/SM +HRMP +HSM +https +iff +implementer/MS +includable +include/BG +increment/DSMG +inherent +inherents +initialize/CRG +initializer +instantiate/B +instantiation/SM +intrinsic +intrinsics +invariant/MS +invariants +inverter/MS +invertible +io +IP/S +isn +isolatable +isolate/BG +iterable +jaeger/MS +js +judgement/S +keccak256/M +keypair/MS +keystore/MS +Kovan +KSM/S +Kubernetes/MS +kusama/S +KYC/M +lib +libp2p +lifecycle/MS +liveness +lookahead/MS +lookup/MS +LRU +mainnet/MS +malus/MS +MB/M +Mbit +merkle/MS +Merklized +metadata/M +middleware/MS +Millau +misbehavior/SM +misbehaviors +misvalidate/D +MIT/M +MMR +modularity +mpsc +MPSC +MQC/SM +msg +multisig/S +multivalidator/SM +mutators +mutex +natively +NFA +NFT/SM +no_std +nonces +NPoS +NTB +offboard/DMSG +onboard/DMSG +oneshot/MS +onwards +OOM/S +OPENISH +others' +ourself +overseer/MS +ownerless +p2p +parablock/MS +parachain/MS +ParaId +parameterization +parameterize/D +parathread/MS 
+participations +passthrough +PDK +peerset/MS +permission/D +pessimization +phragmen +picosecond/SM +PoA/MS +polkadot/MS +Polkadot/MS +PoS/MS +PoV/MS +PoW/MS +PR +precheck +prechecking +preconfigured +preimage/MS +preopen +prepend/G +prevalidating +prevalidation +preverify/G +programmatically +prometheus/MS +provisioner/MS +proxy/DMSG +proxy/G +proxying +PRs +PVF/S +querier +README/MS +redhat/M +register/CD +relayer +repo/MS +requesters +reservable +responder/SM +retriability +reverify +ROC +roundtrip/MS +routable +rpc +RPC/MS +runtime/MS +rustc/MS +SAFT +scalability +scalable +Schnorr +schnorrkel +SDF +sending/S +sharding +shareable +Simnet/MS +spawn/SR +spawner +sr25519 +SS58 +SSL +startup/MS +stateful +Statemine +str +struct/MS +subcommand/SM +substream +subsystem/MS +subsystems' +supermajority +SURI +sybil +systemwide +taskmanager/MS +TCP +teleport/D +teleport/RG +teleportation/SM +teleporter/SM +teleporters +template/GSM +testnet/MS +tera/M +teleports +timeframe +timestamp/MS +topologies +tradeoff +transitionary +trie/MS +trustless/Y +TTL +tuple/SM +typesystem +ubuntu/M +UDP +UI +unapplied +unassign +unconcluded +unexpectable +unfinalize/B +unfinalized +union/MSG +unordered +unreceived +unreserve +unreserving +unroutable +unservable/B +untrusted +untyped +unvested +URI +utilize +v0 +v1 +v2 +validator/SM +ve +vec +verifier +verify/R +versa +Versi +version/DMSG +versioned +VMP/SM +VPS +VRF/SM +w3f/MS +wakeup +wakeups +warming/S +wasm/M +wasmtime +Westend/M +wildcard/MS +WND/S +Wococo +WS +XCM/S +XCMP/M +yeet +yml +zsh diff --git a/.gitlab/pipeline/build.yml b/.gitlab/pipeline/build.yml new file mode 100644 index 000000000000..290ef8c8f72d --- /dev/null +++ b/.gitlab/pipeline/build.yml @@ -0,0 +1,342 @@ +# This file is part of .gitlab-ci.yml +# Here are all jobs that are executed during "build" stage + +# build jobs from polkadot + +build-linux-stable: + stage: build + extends: + - .docker-env + - .common-refs + - .run-immediately + - .collect-artifacts + variables: + RUST_TOOLCHAIN: stable + # Enable debug assertions since we are running optimized builds for testing + # but still want to have debug assertions. + RUSTFLAGS: "-Cdebug-assertions=y -Dwarnings" + # Ensure we run the UI tests. + RUN_UI_TESTS: 1 + script: + - time cargo build --locked --profile testnet --features pyroscope,fast-runtime --bin polkadot + # pack artifacts + - mkdir -p ./artifacts + - VERSION="${CI_COMMIT_REF_NAME}" # will be tag or branch name + - mv ./target/testnet/polkadot ./artifacts/. + - pushd artifacts + - sha256sum polkadot | tee polkadot.sha256 + - shasum -c polkadot.sha256 + - popd + - EXTRATAG="${CI_COMMIT_REF_NAME}-${CI_COMMIT_SHORT_SHA}" + - echo "Polkadot version = ${VERSION} (EXTRATAG = ${EXTRATAG})" + - echo -n ${VERSION} > ./artifacts/VERSION + - echo -n ${EXTRATAG} > ./artifacts/EXTRATAG + - echo -n ${CI_JOB_ID} > ./artifacts/BUILD_LINUX_JOB_ID + - RELEASE_VERSION=$(./artifacts/polkadot -V | awk '{print $2}'| awk -F "-" '{print $1}') + - echo -n "v${RELEASE_VERSION}" > ./artifacts/BUILD_RELEASE_VERSION + - cp -r docker/* ./artifacts + +build-test-collators: + stage: build + extends: + - .docker-env + - .common-refs + - .run-immediately + - .collect-artifacts + script: + - time cargo build --locked --profile testnet --verbose -p test-parachain-adder-collator + - time cargo build --locked --profile testnet --verbose -p test-parachain-undying-collator + # pack artifacts + - mkdir -p ./artifacts + - mv ./target/testnet/adder-collator ./artifacts/. 
+ - mv ./target/testnet/undying-collator ./artifacts/. + - echo -n "${CI_COMMIT_REF_NAME}" > ./artifacts/VERSION + - echo -n "${CI_COMMIT_REF_NAME}-${CI_COMMIT_SHORT_SHA}" > ./artifacts/EXTRATAG + - echo "adder-collator version = $(cat ./artifacts/VERSION) (EXTRATAG = $(cat ./artifacts/EXTRATAG))" + - echo "undying-collator version = $(cat ./artifacts/VERSION) (EXTRATAG = $(cat ./artifacts/EXTRATAG))" + - cp -r ./docker/* ./artifacts + +build-malus: + stage: build + extends: + - .docker-env + - .common-refs + - .run-immediately + - .collect-artifacts + script: + - time cargo build --locked --profile testnet --verbose -p polkadot-test-malus + # pack artifacts + - mkdir -p ./artifacts + - mv ./target/testnet/malus ./artifacts/. + - echo -n "${CI_COMMIT_REF_NAME}" > ./artifacts/VERSION + - echo -n "${CI_COMMIT_REF_NAME}-${CI_COMMIT_SHORT_SHA}" > ./artifacts/EXTRATAG + - echo "polkadot-test-malus = $(cat ./artifacts/VERSION) (EXTRATAG = $(cat ./artifacts/EXTRATAG))" + - cp -r ./docker/* ./artifacts + +build-staking-miner: + stage: build + extends: + - .docker-env + - .common-refs + - .run-immediately + - .collect-artifacts + script: + - time cargo build --locked --release --package staking-miner + # # pack artifacts + # - mkdir -p ./artifacts + # - mv ./target/release/staking-miner ./artifacts/. + # - echo -n "${CI_COMMIT_REF_NAME}" > ./artifacts/VERSION + # - echo -n "${CI_COMMIT_REF_NAME}-${CI_COMMIT_SHORT_SHA}" > ./artifacts/EXTRATAG + # - echo "staking-miner = $(cat ./artifacts/VERSION) (EXTRATAG = $(cat ./artifacts/EXTRATAG))" + # - cp -r ./scripts/* ./artifacts + +build-rustdoc: + stage: build + extends: + - .docker-env + - .common-refs + - .run-immediately + variables: + SKIP_WASM_BUILD: 1 + # artifacts: + # name: "${CI_JOB_NAME}_${CI_COMMIT_REF_NAME}-doc" + # when: on_success + # expire_in: 1 days + # paths: + # - ./crate-docs/ + script: + # FIXME: it fails with `RUSTDOCFLAGS="-Dwarnings"` and `--all-features` + # FIXME: return to stable when https://github.com/rust-lang/rust/issues/96937 gets into stable + - time cargo doc --workspace --verbose --no-deps + - rm -f ./target/doc/.lock + - mv ./target/doc ./crate-docs + # FIXME: remove me after CI image gets nonroot + - chown -R nonroot:nonroot ./crate-docs + - echo "" > ./crate-docs/index.html + +build-implementers-guide: + stage: build + extends: + - .kubernetes-env + - .common-refs + - .run-immediately + # - .collect-artifacts + # git depth is set on purpose: https://github.com/paritytech/polkadot/issues/6284 + variables: + GIT_STRATEGY: clone + GIT_DEPTH: 0 + CI_IMAGE: paritytech/mdbook-utils:e14aae4a-20221123 + script: + - mdbook build ./polkadot/roadmap/implementers-guide + - mkdir -p artifacts + - mv polkadot/roadmap/implementers-guide/book artifacts/ + +build-short-benchmark: + stage: build + extends: + - .docker-env + - .common-refs + - .run-immediately + - .collect-artifacts + script: + - cargo build --profile release --locked --features=runtime-benchmarks + - mkdir -p artifacts + - target/release/polkadot --version + - cp ./target/release/polkadot ./artifacts/ + +# build jobs from cumulus + +build-linux-stable-cumulus: + stage: build + extends: + - .docker-env + - .common-refs + - .run-immediately + - .collect-artifacts + variables: + # Enable debug assertions since we are running optimized builds for testing + # but still want to have debug assertions. 
+ RUSTFLAGS: "-Cdebug-assertions=y -Dwarnings" + script: + - echo "___Building a binary, please refrain from using it in production since it goes with the debug assertions.___" + - time cargo build --release --locked --bin polkadot-parachain + - echo "___Packing the artifacts___" + - mkdir -p ./artifacts + - mv ./target/release/polkadot-parachain ./artifacts/. + - echo "___The VERSION is either a tag name or the curent branch if triggered not by a tag___" + - echo ${CI_COMMIT_REF_NAME} | tee ./artifacts/VERSION + +build-test-parachain: + stage: build + extends: + - .docker-env + - .common-refs + - .run-immediately + - .collect-artifacts + variables: + # Enable debug assertions since we are running optimized builds for testing + # but still want to have debug assertions. + RUSTFLAGS: "-Cdebug-assertions=y -Dwarnings" + script: + - echo "___Building a binary, please refrain from using it in production since it goes with the debug assertions.___" + - time cargo build --release --locked --bin test-parachain + - echo "___Packing the artifacts___" + - mkdir -p ./artifacts + - mv ./target/release/test-parachain ./artifacts/. + - mkdir -p ./artifacts/zombienet + - mv ./target/release/wbuild/cumulus-test-runtime/wasm_binary_spec_version_incremented.rs.compact.compressed.wasm ./artifacts/zombienet/. + +# build runtime only if files in $RUNTIME_PATH/$RUNTIME_NAME were changed +.build-runtime-template: &build-runtime-template + stage: build + extends: + - .docker-env + - .test-refs-no-trigger-prs-only + - .run-immediately + variables: + RUNTIME_PATH: "parachains/runtimes/assets" + script: + - cd ${RUNTIME_PATH} + - for directory in $(echo */); do + echo "_____Running cargo check for ${directory} ______"; + cd ${directory}; + pwd; + SKIP_WASM_BUILD=1 cargo check --locked; + cd ..; + done + +# DAG: build-runtime-assets -> build-runtime-collectives -> build-runtime-bridge-hubs +# DAG: build-runtime-assets -> build-runtime-collectives -> build-runtime-contracts +# DAG: build-runtime-assets -> build-runtime-starters -> build-runtime-testing +build-runtime-assets: + <<: *build-runtime-template + variables: + RUNTIME_PATH: "cumulus/parachains/runtimes/assets" + +build-runtime-collectives: + <<: *build-runtime-template + variables: + RUNTIME_PATH: "cumulus/parachains/runtimes/collectives" + # this is an artificial job dependency, for pipeline optimization using GitLab's DAGs + needs: + - job: build-runtime-assets + artifacts: false + +build-runtime-bridge-hubs: + <<: *build-runtime-template + variables: + RUNTIME_PATH: "cumulus/parachains/runtimes/bridge-hubs" + # this is an artificial job dependency, for pipeline optimization using GitLab's DAGs + needs: + - job: build-runtime-collectives + artifacts: false + +build-runtime-contracts: + <<: *build-runtime-template + variables: + RUNTIME_PATH: "cumulus/parachains/runtimes/contracts" + # this is an artificial job dependency, for pipeline optimization using GitLab's DAGs + needs: + - job: build-runtime-collectives + artifacts: false + +build-runtime-starters: + <<: *build-runtime-template + variables: + RUNTIME_PATH: "cumulus/parachains/runtimes/starters" + # this is an artificial job dependency, for pipeline optimization using GitLab's DAGs + needs: + - job: build-runtime-assets + artifacts: false + +build-runtime-testing: + <<: *build-runtime-template + variables: + RUNTIME_PATH: "cumulus/parachains/runtimes/testing" + # this is an artificial job dependency, for pipeline optimization using GitLab's DAGs + needs: + - job: build-runtime-starters + artifacts: 
false + +# substrate + +build-linux-substrate: + stage: build + extends: + - .docker-env + - .common-refs + - .run-immediately + - .collect-artifacts + variables: + # this variable gets overriden by "rusty-cachier environment inject", use the value as default + CARGO_TARGET_DIR: "$CI_PROJECT_DIR/target" + before_script: + - mkdir -p ./artifacts/substrate/ + # tldr: we need to checkout the branch HEAD explicitly because of our dynamic versioning approach while building the substrate binary + # see https://github.com/paritytech/ci_cd/issues/682#issuecomment-1340953589 + - git checkout -B "$CI_COMMIT_REF_NAME" "$CI_COMMIT_SHA" + script: + - WASM_BUILD_NO_COLOR=1 time cargo build --locked --release --verbose + - mv $CARGO_TARGET_DIR/release/substrate-node ./artifacts/substrate/substrate + - echo -n "Substrate version = " + - if [ "${CI_COMMIT_TAG}" ]; then + echo "${CI_COMMIT_TAG}" | tee ./artifacts/substrate/VERSION; + else + ./artifacts/substrate/substrate --version | + cut -d ' ' -f 2 | tee ./artifacts/substrate/VERSION; + fi + - sha256sum ./artifacts/substrate/substrate | tee ./artifacts/substrate/substrate.sha256 + - cp -r ./docker/substrate_injected.Dockerfile ./artifacts/substrate/ + # - printf '\n# building node-template\n\n' + # - ./scripts/ci/node-template-release.sh ./artifacts/substrate/substrate-node-template.tar.gz + +.build-subkey: + stage: build + extends: + - .docker-env + - .common-refs + - .run-immediately + # - .collect-artifact + variables: + # this variable gets overriden by "rusty-cachier environment inject", use the value as default + CARGO_TARGET_DIR: "$CI_PROJECT_DIR/target" + before_script: + - mkdir -p ./artifacts/subkey + script: + - cd ./substrate/bin/utils/subkey + - SKIP_WASM_BUILD=1 time cargo build --locked --release --verbose + # - cd - + # - mv $CARGO_TARGET_DIR/release/subkey ./artifacts/subkey/. + # - echo -n "Subkey version = " + # - ./artifacts/subkey/subkey --version | + # sed -n -E 's/^subkey ([0-9.]+.*)/\1/p' | + # tee ./artifacts/subkey/VERSION; + # - sha256sum ./artifacts/subkey/subkey | tee ./artifacts/subkey/subkey.sha256 + # - cp -r ./scripts/ci/docker/subkey.Dockerfile ./artifacts/subkey/ + +build-subkey-linux: + extends: .build-subkey +# tbd +# build-subkey-macos: +# extends: .build-subkey +# # duplicating before_script & script sections from .build-subkey hidden job +# # to overwrite rusty-cachier integration as it doesn't work on macos +# before_script: +# # skip timestamp script, the osx bash doesn't support printf %()T +# - !reference [.job-switcher, before_script] +# - mkdir -p ./artifacts/subkey +# script: +# - cd ./bin/utils/subkey +# - SKIP_WASM_BUILD=1 time cargo build --locked --release --verbose +# - cd - +# - mv ./target/release/subkey ./artifacts/subkey/. +# - echo -n "Subkey version = " +# - ./artifacts/subkey/subkey --version | +# sed -n -E 's/^subkey ([0-9.]+.*)/\1/p' | +# tee ./artifacts/subkey/VERSION; +# - sha256sum ./artifacts/subkey/subkey | tee ./artifacts/subkey/subkey.sha256 +# - cp -r ./scripts/ci/docker/subkey.Dockerfile ./artifacts/subkey/ +# after_script: [""] +# tags: +# - osx + diff --git a/.gitlab/pipeline/check.yml b/.gitlab/pipeline/check.yml new file mode 100644 index 000000000000..8e917ab11e44 --- /dev/null +++ b/.gitlab/pipeline/check.yml @@ -0,0 +1,177 @@ +cargo-clippy: + stage: check + extends: + - .docker-env + - .common-refs + script: + - SKIP_WASM_BUILD=1 env -u RUSTFLAGS cargo clippy --all-targets --locked --workspace + # fixme! 
+ allow_failure: true + +check-try-runtime: + stage: check + extends: + - .docker-env + - .common-refs + script: + - time cargo check --locked --all --features try-runtime + # this is taken from cumulus + # Check that parachain-template will compile with `try-runtime` feature flag. + - time cargo check --locked -p parachain-template-node --features try-runtime + # add after https://github.com/paritytech/substrate/pull/14502 is merged + # experimental code may rely on try-runtime and vice-versa + # - time cargo check --locked --features try-runtime,experimental + +cargo-fmt-manifest: + stage: check + extends: + - .docker-env + - .common-refs + script: + - cargo install zepter --locked --version 0.10.0 -q -f --no-default-features && zepter --version + - echo "👉 Hello developer! If you see this CI check failing then it means that one of the your changes in a Cargo.toml file introduced ill-formatted or unsorted features. Please take a look at 'docs/STYLE_GUIDE.md#manifest-formatting' to find out more." + - zepter format features --check + allow_failure: true # Experimental + +cargo-deny-licenses: + stage: check + extends: + - .docker-env + - .test-pr-refs + variables: + CARGO_DENY_CMD: "cargo deny --all-features check licenses -c ./substrate/scripts/ci/deny.toml" + script: + - $CARGO_DENY_CMD --hide-inclusion-graph + after_script: + # - !reference [.rusty-cachier, after_script] + - echo "___The complete log is in the artifacts___" + - $CARGO_DENY_CMD 2> deny.log + - if [ $CI_JOB_STATUS != 'success' ]; then + echo 'Please check license of your crate or add an exception to scripts/ci/deny.toml'; + fi + allow_failure: true + artifacts: + name: $CI_COMMIT_SHORT_SHA + expire_in: 3 days + when: always + paths: + - deny.log + +spellcheck: + stage: check + extends: + - .kubernetes-env + - .common-refs + script: + - cargo spellcheck --version + # compare with the commit parent to the PR, given it's from a default branch + - git fetch origin +${CI_DEFAULT_BRANCH}:${CI_DEFAULT_BRANCH} + - echo "___Spellcheck is going to check your diff___" + - cargo spellcheck list-files -vvv $(git diff --diff-filter=AM --name-only $(git merge-base ${CI_COMMIT_SHA} ${CI_DEFAULT_BRANCH} -- :^bridges)) + - time cargo spellcheck check -vvv --cfg=.gitlab/spellcheck.toml --checkers hunspell --code 1 + $(git diff --diff-filter=AM --name-only $(git merge-base ${CI_COMMIT_SHA} ${CI_DEFAULT_BRANCH} -- :^bridges)) + allow_failure: true + +# from substrate +# not sure if it's needed in monorepo +check-dependency-rules: + stage: check + extends: + - .kubernetes-env + - .test-refs-no-trigger-prs-only + variables: + CI_IMAGE: "paritytech/tools:latest" + allow_failure: true + script: + - .gitlab/ensure-deps.sh + +test-rust-features: + stage: check + extends: + - .kubernetes-env + - .test-refs-no-trigger-prs-only + script: + - git clone + --depth=1 + --branch="master" + https://github.com/paritytech/pipeline-scripts + - bash ./pipeline-scripts/rust-features.sh . + +job-starter: + stage: check + image: paritytech/tools:latest + extends: + - .kubernetes-env + - .common-refs + allow_failure: true + script: + - echo ok + +test-rust-feature-propagation: + stage: check + extends: + - .kubernetes-env + - .test-pr-refs + script: + - cargo install --locked --version 0.10.0 -q -f zepter && zepter --version + - echo "👉 Hello developer! If you see this CI check failing then it means that one of the crates is missing a feature for one of its dependencies. The output below tells you which feature needs to be added for which dependency to which crate. 
You can do this by modifying the Cargo.toml file. For more context see the MR where this check was introduced https://github.com/paritytech/substrate/pull/14660" + - zepter lint propagate-feature --feature try-runtime --left-side-feature-missing=ignore --workspace --feature-enables-dep="try-runtime:frame-try-runtime" --locked + - zepter lint propagate-feature --feature runtime-benchmarks --left-side-feature-missing=ignore --workspace --feature-enables-dep="runtime-benchmarks:frame-benchmarking" --locked + - zepter lint propagate-feature --feature std --left-side-feature-missing=ignore --workspace --locked + allow_failure: true # Experimental + +# More info can be found here: https://github.com/paritytech/polkadot/pull/5865 +.check-runtime-migration: + stage: check + extends: + - .docker-env + - .test-pr-refs + script: + - | + export RUST_LOG=remote-ext=debug,runtime=debug + echo "---------- Running try-runtime for ${NETWORK} ----------" + time cargo install --locked --git https://github.com/paritytech/try-runtime-cli --rev a93c9b5abe5d31a4cf1936204f7e5c489184b521 + time cargo build --release --locked -p "$NETWORK"-runtime --features try-runtime + time try-runtime \ + --runtime ./target/release/wbuild/"$NETWORK"-runtime/target/wasm32-unknown-unknown/release/"$NETWORK"_runtime.wasm \ + on-runtime-upgrade --checks=pre-and-post live --uri wss://${NETWORK}-try-runtime-node.parity-chains.parity.io:443 + +check-runtime-migration-polkadot: + stage: check + extends: + - .docker-env + - .test-pr-refs + - .check-runtime-migration + variables: + NETWORK: "polkadot" + allow_failure: true # FIXME https://github.com/paritytech/substrate/issues/13107 + +check-runtime-migration-kusama: + stage: check + extends: + - .docker-env + - .test-pr-refs + - .check-runtime-migration + variables: + NETWORK: "kusama" + allow_failure: true # FIXME https://github.com/paritytech/substrate/issues/13107 + +check-runtime-migration-westend: + stage: check + extends: + - .docker-env + - .test-pr-refs + - .check-runtime-migration + variables: + NETWORK: "westend" + allow_failure: true # FIXME https://github.com/paritytech/substrate/issues/13107 + +check-runtime-migration-rococo: + stage: check + extends: + - .docker-env + - .test-pr-refs + - .check-runtime-migration + variables: + NETWORK: "rococo" + allow_failure: true # FIXME https://github.com/paritytech/substrate/issues/13107 diff --git a/.gitlab/pipeline/publish.yml b/.gitlab/pipeline/publish.yml new file mode 100644 index 000000000000..ed18082344f0 --- /dev/null +++ b/.gitlab/pipeline/publish.yml @@ -0,0 +1,315 @@ +# This file is part of .gitlab-ci.yml +# Here are all jobs that are executed during "publish" stage + +# cumulus + +.build-push-image: + image: $BUILDAH_IMAGE + variables: + DOCKERFILE: "" # docker/path-to.Dockerfile + IMAGE_NAME: "" # docker.io/paritypr/image_name + script: + # - test "$PARITYPR_USER" -a "$PARITYPR_PASS" || + # ( echo "no docker credentials provided"; exit 1 ) + - $BUILDAH_COMMAND build + --format=docker + --build-arg VCS_REF="${CI_COMMIT_SHA}" + --build-arg BUILD_DATE="$(date -u '+%Y-%m-%dT%H:%M:%SZ')" + --build-arg IMAGE_NAME="${IMAGE_NAME}" + --tag "$IMAGE_NAME:${DOCKER_IMAGES_VERSION}" + --file ${DOCKERFILE} . 
+ - echo "$PARITYPR_PASS" | + buildah login --username "$PARITYPR_USER" --password-stdin docker.io + - $BUILDAH_COMMAND info + - $BUILDAH_COMMAND push --format=v2s2 "$IMAGE_NAME:${DOCKER_IMAGES_VERSION}" + after_script: + - buildah logout --all + +build-push-image-polkadot-parachain-debug: + stage: publish + extends: + - .kubernetes-env + - .common-refs + - .build-push-image + needs: + - job: build-linux-stable-cumulus + artifacts: true + variables: + DOCKERFILE: "docker/polkadot-parachain-debug_unsigned_injected.Dockerfile" + IMAGE_NAME: "docker.io/paritypr/polkadot-parachain-debug" + +build-push-image-test-parachain: + stage: publish + extends: + - .kubernetes-env + - .common-refs + - .build-push-image + needs: + - job: build-test-parachain + artifacts: true + variables: + DOCKERFILE: "docker/test-parachain_injected.Dockerfile" + IMAGE_NAME: "docker.io/paritypr/test-parachain" +# publish-s3: +# stage: publish +# extends: +# - .kubernetes-env +# - .publish-refs +# image: paritytech/awscli:latest +# needs: +# - job: build-linux-stable-cumulus +# artifacts: true +# variables: +# GIT_STRATEGY: none +# BUCKET: "releases.parity.io" +# PREFIX: "cumulus/${ARCH}-${DOCKER_OS}" +# script: +# - echo "___Publishing a binary with debug assertions!___" +# - echo "___VERSION = $(cat ./artifacts/VERSION) ___" +# - aws s3 sync ./artifacts/ s3://${BUCKET}/${PREFIX}/$(cat ./artifacts/VERSION)/ +# - echo "___Updating objects in latest path___" +# - aws s3 sync s3://${BUCKET}/${PREFIX}/$(cat ./artifacts/VERSION)/ s3://${BUCKET}/${PREFIX}/latest/ +# after_script: +# - aws s3 ls s3://${BUCKET}/${PREFIX}/latest/ +# --recursive --human-readable --summarize + +# publish-benchmarks-assets-s3: &publish-benchmarks +# stage: publish +# extends: +# - .kubernetes-env +# - .benchmarks-refs +# image: paritytech/awscli:latest +# needs: +# - job: benchmarks-assets +# artifacts: true +# variables: +# GIT_STRATEGY: none +# BUCKET: "releases.parity.io" +# PREFIX: "cumulus/$CI_COMMIT_REF_NAME/benchmarks-assets" +# script: +# - echo "___Publishing benchmark results___" +# - aws s3 sync ./artifacts/ s3://${BUCKET}/${PREFIX}/ +# after_script: +# - aws s3 ls s3://${BUCKET}/${PREFIX}/ --recursive --human-readable --summarize + +# publish-benchmarks-collectives-s3: +# <<: *publish-benchmarks +# variables: +# GIT_STRATEGY: none +# BUCKET: "releases.parity.io" +# PREFIX: "cumulus/$CI_COMMIT_REF_NAME/benchmarks-collectives" +# needs: +# - job: benchmarks-collectives +# artifacts: true + +### Polkadot + +build-push-image-polkadot-debug: + stage: publish + extends: + - .kubernetes-env + - .common-refs + - .build-push-image + needs: + - job: build-linux-stable + artifacts: true + variables: + DOCKERFILE: "docker/polkadot_injected_debug.Dockerfile" + IMAGE_NAME: "docker.io/paritypr/polkadot-debug" + +build-push-image-colander: + stage: publish + extends: + - .kubernetes-env + - .common-refs + - .build-push-image + needs: + - job: build-test-collators + artifacts: true + variables: + DOCKERFILE: "docker/collator_injected.Dockerfile" + IMAGE_NAME: "docker.io/paritypr/colander" + +build-push-image-malus: + stage: publish + extends: + - .kubernetes-env + - .common-refs + - .build-push-image + needs: + - job: build-malus + artifacts: true + variables: + DOCKERFILE: "docker/malus_injected.Dockerfile" + IMAGE_NAME: "docker.io/paritypr/malus" + +build-push-image-substrate-pr: + stage: publish + extends: + - .kubernetes-env + - .common-refs + - .build-push-image + needs: + - job: build-linux-substrate + artifacts: true + variables: + DOCKERFILE: 
"docker/substrate_injected.Dockerfile" + IMAGE_NAME: "docker.io/paritypr/substrate" +# old way + +# .build-push-image-polkadot: +# before_script: +# # - test -s ./artifacts/VERSION || exit 1 +# # - test -s ./artifacts/EXTRATAG || exit 1 +# - VERSION="$(cat ./artifacts/VERSION)" +# - EXTRATAG="$(cat ./artifacts/EXTRATAG)" +# - echo "Polkadot version = ${VERSION} (EXTRATAG = ${EXTRATAG})" +# script: +# # - test "$DOCKER_USER" -a "$DOCKER_PASS" || +# # ( echo "no docker credentials provided"; exit 1 ) +# - cd ./artifacts +# - $BUILDAH_COMMAND build +# --format=docker +# --build-arg VCS_REF="${CI_COMMIT_SHA}" +# --build-arg BUILD_DATE="$(date -u '+%Y-%m-%dT%H:%M:%SZ')" +# --build-arg IMAGE_NAME="${IMAGE_NAME}" +# --tag "$IMAGE_NAME:$VERSION" +# --tag "$IMAGE_NAME:$EXTRATAG" +# --file ${DOCKERFILE} . +# # The job will success only on the protected branch +# # - echo "$DOCKER_PASS" | +# # buildah login --username "$DOCKER_USER" --password-stdin docker.io +# # - $BUILDAH_COMMAND info +# # - $BUILDAH_COMMAND push --format=v2s2 "$IMAGE_NAME:$VERSION" +# # - $BUILDAH_COMMAND push --format=v2s2 "$IMAGE_NAME:$EXTRATAG" +# after_script: +# - buildah logout --all + +# publish-polkadot-debug-image: +# stage: publish +# image: ${BUILDAH_IMAGE} +# extends: +# - .kubernetes-env +# - .build-push-image-polkadot +# rules: +# - if: $CI_PIPELINE_SOURCE == "web" +# - if: $CI_PIPELINE_SOURCE == "schedule" +# - if: $CI_COMMIT_REF_NAME == "master" +# - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs +# - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. v1.0, v2.1rc1 +# variables: +# GIT_STRATEGY: none +# DOCKER_USER: ${PARITYPR_USER} +# DOCKER_PASS: ${PARITYPR_PASS} +# # scripts/ci/dockerfiles/polkadot_injected_debug.Dockerfile +# DOCKERFILE: polkadot_injected_debug.Dockerfile +# IMAGE_NAME: docker.io/paritypr/polkadot-debug +# needs: +# - job: build-linux-stable +# artifacts: true +# after_script: +# # pass artifacts to the zombienet-tests job +# # https://docs.gitlab.com/ee/ci/multi_project_pipelines.html#with-variable-inheritance +# - echo "PARACHAINS_IMAGE_NAME=${IMAGE_NAME}" > ./artifacts/parachains.env +# - echo "PARACHAINS_IMAGE_TAG=$(cat ./artifacts/EXTRATAG)" >> ./artifacts/parachains.env +# artifacts: +# reports: +# # this artifact is used in zombienet-tests job +# dotenv: ./artifacts/parachains.env +# expire_in: 1 days + +# publish-test-collators-image: +# # service image for zombienet +# stage: publish +# extends: +# - .kubernetes-env +# - .build-push-image-polkadot +# - .zombienet-refs +# variables: +# CI_IMAGE: ${BUILDAH_IMAGE} +# GIT_STRATEGY: none +# DOCKER_USER: ${PARITYPR_USER} +# DOCKER_PASS: ${PARITYPR_PASS} +# # scripts/ci/dockerfiles/collator_injected.Dockerfile +# DOCKERFILE: collator_injected.Dockerfile +# IMAGE_NAME: docker.io/paritypr/colander +# needs: +# - job: build-test-collators +# artifacts: true +# after_script: +# - buildah logout --all +# # pass artifacts to the zombienet-tests job +# - echo "COLLATOR_IMAGE_NAME=${IMAGE_NAME}" > ./artifacts/collator.env +# - echo "COLLATOR_IMAGE_TAG=$(cat ./artifacts/EXTRATAG)" >> ./artifacts/collator.env +# artifacts: +# reports: +# # this artifact is used in zombienet-tests job +# dotenv: ./artifacts/collator.env + +# publish-malus-image: +# # service image for Simnet +# stage: publish +# extends: +# - .kubernetes-env +# - .build-push-image-polkadot +# - .zombienet-refs +# variables: +# CI_IMAGE: ${BUILDAH_IMAGE} +# GIT_STRATEGY: none +# DOCKER_USER: ${PARITYPR_USER} +# DOCKER_PASS: ${PARITYPR_PASS} +# # 
scripts/ci/dockerfiles/malus_injected.Dockerfile +# DOCKERFILE: malus_injected.Dockerfile +# IMAGE_NAME: docker.io/paritypr/malus +# needs: +# - job: build-malus +# artifacts: true +# after_script: +# - buildah logout "$IMAGE_NAME" +# # pass artifacts to the zombienet-tests job +# - echo "MALUS_IMAGE_NAME=${IMAGE_NAME}" > ./artifacts/malus.env +# - echo "MALUS_IMAGE_TAG=$(cat ./artifacts/EXTRATAG)" >> ./artifacts/malus.env +# artifacts: +# reports: +# # this artifact is used in zombienet-tests job +# dotenv: ./artifacts/malus.env + +# publish-staking-miner-image: +# stage: publish +# extends: +# - .kubernetes-env +# - .build-push-image +# - .publish-refs +# variables: +# CI_IMAGE: ${BUILDAH_IMAGE} +# # scripts/ci/dockerfiles/staking-miner/staking-miner_injected.Dockerfile +# DOCKERFILE: ci/dockerfiles/staking-miner/staking-miner_injected.Dockerfile +# IMAGE_NAME: docker.io/paritytech/staking-miner +# GIT_STRATEGY: none +# DOCKER_USER: ${Docker_Hub_User_Parity} +# DOCKER_PASS: ${Docker_Hub_Pass_Parity} +# needs: +# - job: build-staking-miner +# artifacts: true + +# substrate + +# publish-substrate-image-pr: +# # service image for zombienet +# stage: publish +# extends: +# - .kubernetes-env +# - .build-push-image-polkadot +# - .zombienet-refs +# variables: +# CI_IMAGE: ${BUILDAH_IMAGE} +# GIT_STRATEGY: none +# DOCKER_USER: ${PARITYPR_USER} +# DOCKER_PASS: ${PARITYPR_PASS} +# DOCKERFILE: substrate_injected.Dockerfile +# IMAGE_NAME: docker.io/paritypr/substrate +# needs: +# - job: build-linux-substrate +# artifacts: true +# after_script: +# - buildah logout "$IMAGE_NAME" diff --git a/.gitlab/pipeline/short-benchmarks.yml b/.gitlab/pipeline/short-benchmarks.yml new file mode 100644 index 000000000000..2993338cabb1 --- /dev/null +++ b/.gitlab/pipeline/short-benchmarks.yml @@ -0,0 +1,26 @@ +# This file is part of .gitlab-ci.yml +# Here are all jobs that are executed during "short-benchmarks" stage + +# Run all pallet benchmarks only once to check if there are any errors +short-benchmark-polkadot: &short-bench + stage: short-benchmarks + extends: + - .docker-env + - .common-refs + needs: + - job: build-short-benchmark + artifacts: true + variables: + RUNTIME: polkadot + script: + - ./artifacts/polkadot benchmark pallet --execution wasm --wasm-execution compiled --chain $RUNTIME-dev --pallet "*" --extrinsic "*" --steps 2 --repeat 1 + +short-benchmark-kusama: + <<: *short-bench + variables: + RUNTIME: kusama + +short-benchmark-westend: + <<: *short-bench + variables: + RUNTIME: westend diff --git a/.gitlab/pipeline/test.yml b/.gitlab/pipeline/test.yml new file mode 100644 index 000000000000..96927ac42f47 --- /dev/null +++ b/.gitlab/pipeline/test.yml @@ -0,0 +1,479 @@ +# this is an artificial job dependency, for pipeline optimization using GitLab's DAGs +# the job can be found in check.yml +.run-immediately: + needs: + - job: job-starter + artifacts: false + +test-runtime: + stage: test + extends: + - .docker-env + - .common-refs + - .run-immediately + variables: + RUST_TOOLCHAIN: stable + # Enable debug assertions since we are running optimized builds for testing + # but still want to have debug assertions. + RUSTFLAGS: "-Cdebug-assertions=y -Dwarnings" + script: + # Build all but only execute 'runtime' tests. 
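+    # (SKIP_WASM_BUILD=1 tells the Substrate wasm-builder to skip rebuilding the
+    # WASM runtime blobs, which this check does not need, so the job runs faster.)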
+    - SKIP_WASM_BUILD=1 cargo test "*-runtime"
+
+test-linux-stable:
+  stage: test
+  extends:
+    - .docker-env
+    - .common-refs
+    - .run-immediately
+  variables:
+    RUST_TOOLCHAIN: stable
+    # Enable debug assertions since we are running optimized builds for testing
+    # but still want to have debug assertions.
+    RUSTFLAGS: "-Cdebug-assertions=y -Dwarnings"
+  parallel: 3
+  script:
+    # Build all but only execute 'runtime' tests.
+    - echo "Node index - ${CI_NODE_INDEX}. Total amount - ${CI_NODE_TOTAL}"
+    # add experimental to features after https://github.com/paritytech/substrate/pull/14502 is merged
+    - |
+      time cargo nextest run \
+        -E 'all() & !test(upgrade_version_checks_should_work) & !test(receive_rate_limit_is_enforced) & !test(benchmark_block_works) & !test(rx::tests::sent_views_include_finalized_number_update) & !test(follow_chain_works) & !test(create_snapshot_works) & !test(block_execution_works)' \
+        --workspace \
+        --locked \
+        --release \
+        --verbose \
+        --no-fail-fast \
+        --features runtime-benchmarks,try-runtime \
+        --partition count:${CI_NODE_INDEX}/${CI_NODE_TOTAL}
+    # run runtime-api tests with `enable-staging-api` feature on the 1st node
+    - if [ ${CI_NODE_INDEX} == 1 ]; then time cargo nextest run -p sp-api-test --features enable-staging-api; fi
+    # todo: add flaky-test collector
+
+test-linux-stable-all:
+  stage: test
+  extends:
+    - .docker-env
+    - .common-refs
+    - .run-immediately
+  variables:
+    RUST_TOOLCHAIN: stable
+    # Enable debug assertions since we are running optimized builds for testing
+    # but still want to have debug assertions.
+    RUSTFLAGS: "-Cdebug-assertions=y -Dwarnings"
+  parallel: 3
+  script:
+    # Build all but only execute 'runtime' tests.
+    - echo "Node index - ${CI_NODE_INDEX}. Total amount - ${CI_NODE_TOTAL}"
+    - |
+      time cargo nextest run \
+        --workspace \
+        --locked \
+        --release \
+        --verbose \
+        --no-fail-fast \
+        --features runtime-benchmarks,try-runtime \
+        --partition count:${CI_NODE_INDEX}/${CI_NODE_TOTAL}
+    # todo: add flaky-test collector
+
+test-linux-oldkernel-stable:
+  extends: test-linux-stable
+  tags:
+    - oldkernel-vm
+
+# for some reason these tests fail if they are run together with all the other tests
+test-linux-stable-additional-tests:
+  stage: test
+  extends:
+    - .docker-env
+    - .common-refs
+    - .run-immediately
+  variables:
+    RUST_TOOLCHAIN: stable
+    # Enable debug assertions since we are running optimized builds for testing
+    # but still want to have debug assertions.
+    RUSTFLAGS: "-Cdebug-assertions=y -Dwarnings"
+  script:
+    - |
+      time cargo nextest run \
+        -E 'test(receive_rate_limit_is_enforced) + test(benchmark_block_works)' \
+        --workspace \
+        --locked \
+        --release \
+        --verbose \
+        --features runtime-benchmarks,try-runtime
+  allow_failure: true
+
+# these tests can be really slow, so it's better to run them separately
+test-linux-stable-slow:
+  stage: test
+  # remove after the cache is set up
+  timeout: 2h
+  extends:
+    - .docker-env
+    - .common-refs
+    - .run-immediately
+  variables:
+    RUST_TOOLCHAIN: stable
+    # Enable debug assertions since we are running optimized builds for testing
+    # but still want to have debug assertions.
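+    # (-Cdebug-assertions=y re-enables debug_assert! checks in the optimized
+    # build and -Dwarnings turns compiler warnings into hard errors.)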
+ RUSTFLAGS: "-Cdebug-assertions=y -Dwarnings" + script: + - | + time cargo nextest run \ + -E 'test(rx::tests::sent_views_include_finalized_number_update) + test(follow_chain_works) + test(create_snapshot_works) + test(block_execution_works)' \ + --workspace \ + --locked \ + --release \ + --verbose \ + --features runtime-benchmarks,try-runtime + allow_failure: true + +# takes about 1,5h without cache +# can be used to check that nextest works correctly +# test-linux-stable-polkadot: +# stage: test +# timeout: 2h +# extends: +# - .docker-env +# - .common-refs +# - .run-immediately +# - .collect-artifacts-short +# variables: +# RUST_TOOLCHAIN: stable +# # Enable debug assertions since we are running optimized builds for testing +# # but still want to have debug assertions. +# RUSTFLAGS: "-Cdebug-assertions=y -Dwarnings" +# script: +# - mkdir -p artifacts +# - time cargo test --workspace +# --locked +# --profile testnet +# --features=runtime-benchmarks,runtime-metrics,try-runtime -- +# --skip upgrade_version_checks_should_work +# --skip benchmarking::bench_migrate +# --skip benchmarking::bench_on_runtime_upgrade +# --skip migration::test::migration_v3_to_v4_too_large_calls_are_ignored +# --skip migration::test::migration_works + +test-doc: + stage: test + extends: + - .docker-env + - .common-refs + - .run-immediately + variables: + # Enable debug assertions since we are running optimized builds for testing + # but still want to have debug assertions. + RUSTFLAGS: "-Cdebug-assertions=y -Dwarnings" + script: + - time cargo test --doc + +test-rustdoc: + stage: test + extends: + - .docker-env + - .common-refs + - .run-immediately + variables: + SKIP_WASM_BUILD: 1 + RUSTDOCFLAGS: "-Dwarnings" + script: + - time cargo doc --workspace --all-features --verbose --no-deps + allow_failure: true + +cargo-check-all-benches: + stage: test + extends: + - .docker-env + - .common-refs + - .run-immediately + script: + - time cargo check --all --benches + +test-node-metrics: + stage: test + extends: + - .docker-env + - .common-refs + - .run-immediately + - .collect-artifacts-short + variables: + RUST_TOOLCHAIN: stable + # Enable debug assertions since we are running optimized builds for testing + # but still want to have debug assertions. + RUSTFLAGS: "-Cdebug-assertions=y -Dwarnings" + script: + - mkdir -p artifacts + - time cargo test --profile testnet + --locked + --features=runtime-metrics -p polkadot-node-metrics > artifacts/log.txt + # FIXME! + allow_failure: true + +test-deterministic-wasm: + stage: test + extends: + - .docker-env + - .common-refs + - .run-immediately + script: + - .gitlab/test_deterministic_wasm.sh + +cargo-check-benches: + stage: test + variables: + CI_JOB_NAME: "cargo-check-benches" + extends: + - .docker-env + - .common-refs + - .run-immediately + - .collect-artifacts + before_script: + # TODO: DON'T FORGET TO CHANGE FOR PROD VALUES!!! + # merges in the master branch on PRs. 
skip if base is not master + - 'if [ $CI_COMMIT_REF_NAME != "master" ]; then + BASE=$(curl -s -H "Authorization: Bearer ${GITHUB_PR_TOKEN}" https://api.github.com/repos/paritytech-stg/polkadot-sdk/pulls/${CI_COMMIT_REF_NAME} | jq -r .base.ref); + printf "Merging base branch %s\n" "${BASE:=master}"; + if [ $BASE != "master" ]; then + echo "$BASE is not master, skipping merge"; + else + git config user.email "ci@gitlab.parity.io"; + git fetch origin "refs/heads/${BASE}"; + git merge --verbose --no-edit FETCH_HEAD; + fi + fi' + parallel: 2 + script: + - mkdir -p ./artifacts/benches/$CI_COMMIT_REF_NAME-$CI_COMMIT_SHORT_SHA + # this job is executed in parallel on two runners + - echo "___Running benchmarks___"; + - case ${CI_NODE_INDEX} in + 1) + SKIP_WASM_BUILD=1 time cargo check --locked --benches --all; + cargo run --locked --release -p node-bench -- ::trie::read::small --json + | tee ./artifacts/benches/$CI_COMMIT_REF_NAME-$CI_COMMIT_SHORT_SHA/::trie::read::small.json; + echo "___Uploading cache for rusty-cachier___"; + ;; + 2) + cargo run --locked --release -p node-bench -- ::node::import::sr25519::transfer_keep_alive::paritydb::small --json + | tee ./artifacts/benches/$CI_COMMIT_REF_NAME-$CI_COMMIT_SHORT_SHA/::node::import::sr25519::transfer_keep_alive::paritydb::small.json + ;; + esac + +node-bench-regression-guard: + # it's not belong to `build` semantically, but dag jobs can't depend on each other + # within the single stage - https://gitlab.com/gitlab-org/gitlab/-/issues/30632 + # more: https://github.com/paritytech/substrate/pull/8519#discussion_r608012402 + stage: build + extends: + - .docker-env + - .common-refs + needs: + # this is a DAG + - job: cargo-check-benches + artifacts: true + # polls artifact from master to compare with current result + # need to specify both parallel jobs from master because of the bug + # https://gitlab.com/gitlab-org/gitlab/-/issues/39063 + - project: $CI_PROJECT_PATH + job: "cargo-check-benches 1/2" + ref: master + artifacts: true + - project: $CI_PROJECT_PATH + job: "cargo-check-benches 2/2" + ref: master + artifacts: true + variables: + CI_IMAGE: "paritytech/node-bench-regression-guard:latest" + before_script: [""] + script: + - echo "------- IMPORTANT -------" + - echo "node-bench-regression-guard depends on the results of a cargo-check-benches job" + - echo "In case of this job failure, check your pipeline's cargo-check-benches" + - "node-bench-regression-guard --reference artifacts/benches/master-* + --compare-with artifacts/benches/$CI_COMMIT_REF_NAME-$CI_COMMIT_SHORT_SHA" + after_script: [""] + +# if this fails (especially after rust version upgrade) run +# ./substrate/.maintain/update-rust-stable.sh +test-frame-support: + stage: test + extends: + - .docker-env + - .common-refs + - .run-immediately + variables: + # Enable debug assertions since we are running optimized builds for testing + # but still want to have debug assertions. + RUSTFLAGS: "-C debug-assertions -D warnings" + RUST_BACKTRACE: 1 + WASM_BUILD_NO_COLOR: 1 + WASM_BUILD_RUSTFLAGS: "-C debug-assertions -D warnings" + # Ensure we run the UI tests. 
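+    # (The frame-support UI tests compile small fixture crates and compare the
+    # compiler output against checked-in expectations; they are skipped unless
+    # this variable is set.)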
+ RUN_UI_TESTS: 1 + script: + - time cargo test --locked -p frame-support-test --features=frame-feature-testing,no-metadata-docs,try-runtime,experimental --manifest-path ./substrate/frame/support/test/Cargo.toml + - time cargo test --locked -p frame-support-test --features=frame-feature-testing,frame-feature-testing-2,no-metadata-docs,try-runtime,experimental --manifest-path ./substrate/frame/support/test/Cargo.toml + - SUBSTRATE_TEST_TIMEOUT=1 time cargo test -p substrate-test-utils --release --locked -- --ignored timeout + - cat /cargo_target_dir/debug/.fingerprint/memory_units-759eddf317490d2b/lib-memory_units.json || true + +# This job runs all benchmarks defined in the `/bin/node/runtime` once to check that there are no errors. +quick-benchmarks: + stage: test + extends: + - .docker-env + - .common-refs + - .run-immediately + variables: + # Enable debug assertions since we are running optimized builds for testing + # but still want to have debug assertions. + RUSTFLAGS: "-C debug-assertions -D warnings" + RUST_BACKTRACE: "full" + WASM_BUILD_NO_COLOR: 1 + WASM_BUILD_RUSTFLAGS: "-C debug-assertions -D warnings" + script: + - time cargo run --locked --release --features runtime-benchmarks -- benchmark pallet --execution wasm --wasm-execution compiled --chain dev --pallet "*" --extrinsic "*" --steps 2 --repeat 1 + +test-frame-examples-compile-to-wasm: + # into one job + stage: test + extends: + - .docker-env + - .common-refs + - .run-immediately + variables: + # Enable debug assertions since we are running optimized builds for testing + # but still want to have debug assertions. + RUSTFLAGS: "-C debug-assertions" + RUST_BACKTRACE: 1 + script: + - cd ./substrate/frame/examples/offchain-worker/ + - cargo build --locked --target=wasm32-unknown-unknown --no-default-features + - cd ../basic + - cargo build --locked --target=wasm32-unknown-unknown --no-default-features + # FIXME + allow_failure: true + +test-linux-stable-int: + stage: test + extends: + - .docker-env + - .common-refs + - .run-immediately + variables: + # Enable debug assertions since we are running optimized builds for testing + # but still want to have debug assertions. + RUSTFLAGS: "-C debug-assertions -D warnings" + RUST_BACKTRACE: 1 + WASM_BUILD_NO_COLOR: 1 + WASM_BUILD_RUSTFLAGS: "-C debug-assertions -D warnings" + # Ensure we run the UI tests. + RUN_UI_TESTS: 1 + script: + - WASM_BUILD_NO_COLOR=1 + RUST_LOG=sync=trace,consensus=trace,client=trace,state-db=trace,db=trace,forks=trace,state_db=trace,storage_cache=trace + time cargo test -p node-cli --release --locked -- --ignored + +# more information about this job can be found here: +# https://github.com/paritytech/substrate/pull/6916 +check-tracing: + stage: test + extends: + - .docker-env + - .common-refs + - .run-immediately + script: + # with-tracing must be explicitly activated, we run a test to ensure this works as expected in both cases + - time cargo test --locked --manifest-path ./substrate/primitives/tracing/Cargo.toml --no-default-features + - time cargo test --locked --manifest-path ./substrate/primitives/tracing/Cargo.toml --no-default-features --features=with-tracing + +# more information about this job can be found here: +# https://github.com/paritytech/substrate/pull/3778 +test-full-crypto-feature: + stage: test + extends: + - .docker-env + - .common-refs + - .run-immediately + variables: + # Enable debug assertions since we are running optimized builds for testing + # but still want to have debug assertions. 
+ RUSTFLAGS: "-C debug-assertions" + RUST_BACKTRACE: 1 + script: + - cd substrate/primitives/core/ + - time cargo build --locked --verbose --no-default-features --features full_crypto + - cd ../application-crypto + - time cargo build --locked --verbose --no-default-features --features full_crypto + +cargo-check-each-crate: + stage: test + extends: + - .docker-env + - .common-refs + - .run-immediately + # - .collect-artifacts + variables: + # $CI_JOB_NAME is set manually so that rusty-cachier can share the cache for all + # "cargo-check-each-crate I/N" jobs + CI_JOB_NAME: cargo-check-each-crate + timeout: 2h + script: + - PYTHONUNBUFFERED=x time .gitlab/check-each-crate.py "$CI_NODE_INDEX" "$CI_NODE_TOTAL" + parallel: 2 + +# todo: enable me +.cargo-check-each-crate-macos: + stage: test + extends: + - .docker-env + - .common-refs + - .run-immediately + # - .collect-artifacts + before_script: + # skip timestamp script, the osx bash doesn't support printf %()T + - !reference [.job-switcher, before_script] + - !reference [.rust-info-script, script] + - !reference [.pipeline-stopper-vars, script] + variables: + SKIP_WASM_BUILD: 1 + script: + # TODO: enable rusty-cachier once it supports Mac + # TODO: use parallel jobs, as per cargo-check-each-crate, once more Mac runners are available + # - time ./scripts/ci/gitlab/check-each-crate.py 1 1 + - time cargo check --workspace --locked + tags: + - osx + +cargo-hfuzz: + stage: test + extends: + - .docker-env + - .common-refs + - .run-immediately + variables: + # max 10s per iteration, 60s per file + HFUZZ_RUN_ARGS: > + --exit_upon_crash + --exit_code_upon_crash 1 + --timeout 10 + --run_time 60 + # use git version of honggfuzz-rs until v0.5.56 is out, we need a few recent changes: + # https://github.com/rust-fuzz/honggfuzz-rs/pull/75 to avoid breakage on debian + # https://github.com/rust-fuzz/honggfuzz-rs/pull/81 fix to the above pr + # https://github.com/rust-fuzz/honggfuzz-rs/pull/82 fix for handling rusty-cachier's absolute CARGO_TARGET_DIR + HFUZZ_BUILD_ARGS: > + --config=patch.crates-io.honggfuzz.git="https://github.com/altaua/honggfuzz-rs" + --config=patch.crates-io.honggfuzz.rev="205f7c8c059a0d98fe1cb912cdac84f324cb6981" + artifacts: + name: "hfuzz-$CI_COMMIT_SHORT_SHA" + expire_in: 7 days + when: on_failure + paths: + - substrate/primitives/arithmetic/fuzzer/hfuzz_workspace/ + script: + - cd ./substrate/primitives/arithmetic/fuzzer + - cargo hfuzz build + - for target in $(cargo read-manifest | jq -r '.targets | .[] | .name'); do + cargo hfuzz run "$target" || { printf "fuzzing failure for %s\n" "$target"; exit 1; }; done diff --git a/.gitlab/pipeline/zombienet.yml b/.gitlab/pipeline/zombienet.yml new file mode 100644 index 000000000000..64210d6a00ab --- /dev/null +++ b/.gitlab/pipeline/zombienet.yml @@ -0,0 +1,7 @@ +include: + # substrate tests + - .gitlab/pipeline/zombienet/substrate.yml + # cumulus tests + - .gitlab/pipeline/zombienet/cumulus.yml + # polkadot tests + - .gitlab/pipeline/zombienet/polkadot.yml diff --git a/.gitlab/pipeline/zombienet/cumulus.yml b/.gitlab/pipeline/zombienet/cumulus.yml new file mode 100644 index 000000000000..ca96828a1a55 --- /dev/null +++ b/.gitlab/pipeline/zombienet/cumulus.yml @@ -0,0 +1,143 @@ +# This file is part of .gitlab-ci.yml +# Here are all jobs that are executed during "zombienet" stage + +.zombienet-before-script: + before_script: + - echo "Zombie-net Tests Config" + - echo "${ZOMBIENET_IMAGE}" + - echo "${RELAY_IMAGE}" + - echo "${COL_IMAGE}" + - echo "${GH_DIR}" + - echo "${LOCAL_DIR}" + - export 
DEBUG=zombie + - export RELAY_IMAGE=${POLKADOT_IMAGE} + - export COL_IMAGE=${COL_IMAGE} + +.zombienet-after-script: + after_script: + - mkdir -p ./zombienet-logs + - cp /tmp/zombie*/logs/* ./zombienet-logs/ + +# common settings for all zombienet jobs +.zombienet-cumulus-common: + stage: zombienet + image: "${ZOMBIENET_IMAGE}" + needs: + - job: build-push-image-test-parachain + artifacts: true + variables: + POLKADOT_IMAGE: "docker.io/paritypr/polkadot-debug:master" + GH_DIR: "https://github.com/paritytech/cumulus/tree/${CI_COMMIT_SHORT_SHA}/zombienet/tests" + LOCAL_DIR: "/builds/parity/mirrors/polkadot-sdk/cumulus/zombienet/tests" + COL_IMAGE: "docker.io/paritypr/test-parachain:${DOCKER_IMAGES_VERSION}" + FF_DISABLE_UMASK_FOR_DOCKER_EXECUTOR: 1 + artifacts: + name: "${CI_JOB_NAME}_${CI_COMMIT_REF_NAME}" + when: always + expire_in: 2 days + paths: + - ./zombienet-logs + allow_failure: true + retry: 2 + tags: + - zombienet-polkadot-integration-test + +zombienet-cumulus-0001-sync_blocks_from_tip_without_connected_collator: + extends: + - .zombienet-cumulus-common + - .zombienet-refs + - .zombienet-before-script + - .zombienet-after-script + script: + - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh + --local-dir="${LOCAL_DIR}" + --concurrency=1 + --test="0001-sync_blocks_from_tip_without_connected_collator.zndsl" + +zombienet-cumulus-0002-pov_recovery: + extends: + - .zombienet-cumulus-common + - .zombienet-refs + - .zombienet-before-script + - .zombienet-after-script + script: + - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh + --local-dir="${LOCAL_DIR}" + --concurrency=1 + --test="0002-pov_recovery.zndsl" + +zombienet-cumulus-0003-full_node_catching_up: + extends: + - .zombienet-cumulus-common + - .zombienet-refs + - .zombienet-before-script + - .zombienet-after-script + script: + - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh + --local-dir="${LOCAL_DIR}" + --concurrency=1 + --test="0003-full_node_catching_up.zndsl" + +zombienet-cumulus-0004-runtime_upgrade: + extends: + - .zombienet-cumulus-common + - .zombienet-refs + - .zombienet-before-script + - .zombienet-after-script + needs: + - !reference [.zombienet-cumulus-common, needs] + - job: build-test-parachain + artifacts: true + before_script: + - ls -ltr * + - cp ./artifacts/zombienet/wasm_binary_spec_version_incremented.rs.compact.compressed.wasm /tmp/ + - ls /tmp + - !reference [.zombienet-before-script, before_script] + script: + - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh + --local-dir="${LOCAL_DIR}" + --concurrency=1 + --test="0004-runtime_upgrade.zndsl" + +zombienet-cumulus-0005-migrate_solo_to_para: + extends: + - .zombienet-cumulus-common + - .zombienet-refs + - .zombienet-before-script + - .zombienet-after-script + needs: + - !reference [.zombienet-cumulus-common, needs] + - job: build-test-parachain + artifacts: true + before_script: + - ls -ltr * + - !reference [.zombienet-before-script, before_script] + script: + - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh + --local-dir="${LOCAL_DIR}" + --concurrency=1 + --test="0005-migrate_solo_to_para.zndsl" + +zombienet-cumulus-0006-rpc_collator_builds_blocks: + extends: + - .zombienet-cumulus-common + - .zombienet-refs + - .zombienet-before-script + - .zombienet-after-script + script: + - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh + --local-dir="${LOCAL_DIR}" + --concurrency=1 + --test="0006-rpc_collator_builds_blocks.zndsl" + 
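+# To reproduce one of these tests outside CI (a sketch only; it assumes the
+# zombienet CLI from https://github.com/paritytech/zombienet is installed and
+# the images above are reachable):
+#   export DEBUG=zombie
+#   zombienet test --provider kubernetes cumulus/zombienet/tests/0006-rpc_collator_builds_blocks.zndsl
+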
+zombienet-cumulus-0007-full_node_warp_sync: + extends: + - .zombienet-cumulus-common + - .zombienet-refs + - .zombienet-before-script + - .zombienet-after-script + script: + - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh + --local-dir="${LOCAL_DIR}" + --concurrency=1 + --test="0007-full_node_warp_sync.zndsl" diff --git a/.gitlab/pipeline/zombienet/polkadot.yml b/.gitlab/pipeline/zombienet/polkadot.yml new file mode 100644 index 000000000000..82dd13cd290c --- /dev/null +++ b/.gitlab/pipeline/zombienet/polkadot.yml @@ -0,0 +1,169 @@ +# This file is part of .gitlab-ci.yml +# Here are all jobs that are executed during "zombienet" stage + +# common settings for all zombienet jobs +.zombienet-polkadot-common: + before_script: + - export DEBUG=zombie,zombie::network-node + - export ZOMBIENET_INTEGRATION_TEST_IMAGE="${POLKADOT_IMAGE}":${PIPELINE_IMAGE_TAG} + - export COL_IMAGE="${COLANDER_IMAGE}":${PIPELINE_IMAGE_TAG} + - export MALUS_IMAGE="${MALUS_IMAGE}":${PIPELINE_IMAGE_TAG} + - echo "Zombienet Tests Config" + - echo "gh-dir ${GH_DIR}" + - echo "local-dir ${LOCAL_DIR}" + - echo "polkadot image ${ZOMBIENET_INTEGRATION_TEST_IMAGE}" + - echo "colander image ${COL_IMAGE}" + - echo "malus image ${MALUS_IMAGE}" + stage: zombienet + image: "${ZOMBIENET_IMAGE}" + needs: + - job: build-push-image-malus + artifacts: true + - job: build-push-image-polkadot-debug + artifacts: true + - job: build-push-image-colander + artifacts: true + extends: + - .kubernetes-env + - .zombienet-refs + variables: + PIPELINE_IMAGE_TAG: ${DOCKER_IMAGES_VERSION} + POLKADOT_IMAGE: "docker.io/paritypr/polkadot-debug" + COLANDER_IMAGE: "docker.io/paritypr/colander" + MALUS_IMAGE: "docker.io/paritypr/malus" + GH_DIR: "https://github.com/paritytech/substrate/tree/${CI_COMMIT_SHA}/zombienet" + LOCAL_DIR: "/builds/parity/mirrors/polkadot-sdk/polkadot/zombienet_tests" + FF_DISABLE_UMASK_FOR_DOCKER_EXECUTOR: 1 + artifacts: + name: "${CI_JOB_NAME}_${CI_COMMIT_REF_NAME}" + when: always + expire_in: 2 days + paths: + - ./zombienet-logs + after_script: + - mkdir -p ./zombienet-logs + - cp /tmp/zombie*/logs/* ./zombienet-logs/ + retry: 2 + tags: + - zombienet-polkadot-integration-test + +zombienet-polkadot-functional-0001-parachains-pvf: + extends: + - .zombienet-polkadot-common + script: + - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh + --local-dir="${LOCAL_DIR}/functional" + --test="0001-parachains-pvf.zndsl" + +zombienet-polkadot-functional-0002-parachains-disputes: + extends: + - .zombienet-polkadot-common + script: + - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh + --local-dir="${LOCAL_DIR}/functional" + --test="0002-parachains-disputes.zndsl" + +zombienet-polkadot-functional-0003-parachains-disputes-garbage-candidate: + extends: + - .zombienet-polkadot-common + script: + - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh + --local-dir="${LOCAL_DIR}/functional" + --test="0003-parachains-garbage-candidate.zndsl" + +zombienet-polkadot-functional-0004-beefy-and-mmr: + extends: + - .zombienet-polkadot-common + script: + - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh + --local-dir="${LOCAL_DIR}/functional" + --test="0003-beefy-and-mmr.zndsl" + +zombienet-polkadot-smoke-0001-parachains-smoke-test: + extends: + - .zombienet-polkadot-common + before_script: + - export ZOMBIENET_INTEGRATION_TEST_IMAGE="${POLKADOT_IMAGE}":${PIPELINE_IMAGE_TAG} + - export COL_IMAGE="docker.io/paritypr/colander:4519" # The collator image is fixed + - echo 
"Zombienet Tests Config" + - echo "gh-dir ${GH_DIR}" + - echo "local-dir ${LOCAL_DIR}" + - echo "polkadot image ${ZOMBIENET_INTEGRATION_TEST_IMAGE}" + - echo "colander image ${COL_IMAGE}" + - echo "malus image ${MALUS_IMAGE}" + script: + - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh + --local-dir="${LOCAL_DIR}/smoke" + --test="0001-parachains-smoke-test.zndsl" + +zombienet-polkadot-smoke-0002-parachains-parachains-upgrade-smoke: + extends: + - .zombienet-polkadot-common + before_script: + - export ZOMBIENET_INTEGRATION_TEST_IMAGE="${POLKADOT_IMAGE}":${PIPELINE_IMAGE_TAG} + - export COL_IMAGE="docker.io/parity/polkadot-collator:latest" # Use cumulus lastest image + - echo "Zombienet Tests Config" + - echo "gh-dir ${GH_DIR}" + - echo "local-dir ${LOCAL_DIR}" + - echo "polkadot image ${ZOMBIENET_INTEGRATION_TEST_IMAGE}" + - echo "colander image ${COL_IMAGE}" + - echo "malus image ${MALUS_IMAGE}" + script: + - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh + --local-dir="${LOCAL_DIR}/smoke" + --test="0002-parachains-upgrade-smoke-test.zndsl" + +zombienet-polkadot-smoke-0003-deregister-register-validator: + extends: + - .zombienet-polkadot-common + script: + - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh + --local-dir="${LOCAL_DIR}/smoke" + --test="0003-deregister-register-validator-smoke.zndsl" + +zombienet-polkadot-misc-0001-parachains-paritydb: + extends: + - .zombienet-polkadot-common + script: + - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh + --local-dir="${LOCAL_DIR}/misc" + --test="0001-paritydb.zndsl" + +zombienet-polkadot-misc-0002-upgrade-node: + extends: + - .zombienet-polkadot-common + needs: + - job: build-push-image-malus + artifacts: true + - job: build-push-image-polkadot-debug + artifacts: true + - job: build-push-image-colander + artifacts: true + - job: build-linux-stable + artifacts: true + before_script: + - export ZOMBIENET_INTEGRATION_TEST_IMAGE="docker.io/parity/polkadot:latest" + - echo "Overrided poladot image ${ZOMBIENET_INTEGRATION_TEST_IMAGE}" + - export COL_IMAGE="${COLANDER_IMAGE}":${PIPELINE_IMAGE_TAG} + - BUILD_LINUX_JOB_ID="$(cat ./artifacts/BUILD_LINUX_JOB_ID)" + - export POLKADOT_PR_BIN_URL="https://gitlab-stg.parity.io/parity/mirrors/polkadot-sdk/-/jobs/${BUILD_LINUX_JOB_ID}/artifacts/raw/artifacts/polkadot" + - echo "Zombienet Tests Config" + - echo "gh-dir ${GH_DIR}" + - echo "local-dir ${LOCAL_DIR}" + - echo "polkadot image ${ZOMBIENET_INTEGRATION_TEST_IMAGE}" + - echo "colander image ${COL_IMAGE}" + - echo "malus image ${MALUS_IMAGE}" + script: + - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh + --local-dir="${LOCAL_DIR}/misc" + --test="0002-upgrade-node.zndsl" + +zombienet-polkadot-malus-0001-dispute-valid: + extends: + - .zombienet-polkadot-common + variables: + LOCAL_DIR: "/builds/parity/mirrors/polkadot-sdk/polkadot/node/malus" + script: + - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh + --local-dir="${LOCAL_DIR}/integrationtests" + --test="0001-dispute-valid-block.zndsl" diff --git a/.gitlab/pipeline/zombienet/substrate.yml b/.gitlab/pipeline/zombienet/substrate.yml new file mode 100644 index 000000000000..9a461ca41709 --- /dev/null +++ b/.gitlab/pipeline/zombienet/substrate.yml @@ -0,0 +1,70 @@ +# This file is part of .gitlab-ci.yml +# Here are all jobs that are executed during "zombienet" stage + +# common settings for all zombienet jobs +.zombienet-substrate-common: + before_script: + - echo "Zombienet Tests Config" + - 
echo "${ZOMBIENET_IMAGE}" + - echo "${GH_DIR}" + - echo "${LOCAL_DIR}" + - export DEBUG=zombie,zombie::network-node + - export ZOMBIENET_INTEGRATION_TEST_IMAGE="${SUBSTRATE_IMAGE}":${SUBSTRATE_IMAGE_TAG} + - echo "${ZOMBIENET_INTEGRATION_TEST_IMAGE}" + stage: zombienet + image: "${ZOMBIENET_IMAGE}" + needs: + - job: build-push-image-substrate-pr + extends: + - .kubernetes-env + - .zombienet-refs + variables: + SUBSTRATE_IMAGE_TAG: ${DOCKER_IMAGES_VERSION} + SUBSTRATE_IMAGE: "docker.io/paritypr/substrate" + GH_DIR: "https://github.com/paritytech/substrate/tree/${CI_COMMIT_SHA}/zombienet" + LOCAL_DIR: "/builds/parity/mirrors/polkadot-sdk/substrate/zombienet" + FF_DISABLE_UMASK_FOR_DOCKER_EXECUTOR: 1 + artifacts: + name: "${CI_JOB_NAME}_${CI_COMMIT_REF_NAME}" + when: always + expire_in: 2 days + paths: + - ./zombienet-logs + after_script: + - mkdir -p ./zombienet-logs + - cp /tmp/zombie*/logs/* ./zombienet-logs/ + retry: 2 + tags: + - zombienet-polkadot-integration-test + +zombienet-substrate-0000-block-building: + extends: + - .zombienet-substrate-common + script: + - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh + --local-dir="${LOCAL_DIR}/0000-block-building" + --test="block-building.zndsl" + +zombienet-substrate-0001-basic-warp-sync: + extends: + - .zombienet-substrate-common + script: + - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh + --local-dir="${LOCAL_DIR}/0001-basic-warp-sync" + --test="test-warp-sync.zndsl" + +zombienet-substrate-0002-validators-warp-sync: + extends: + - .zombienet-substrate-common + script: + - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh + --local-dir="${LOCAL_DIR}/0002-validators-warp-sync" + --test="test-validators-warp-sync.zndsl" + +zombienet-substrate-0003-block-building-warp-sync: + extends: + - .zombienet-substrate-common + script: + - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh + --local-dir="${LOCAL_DIR}/0003-block-building-warp-sync" + --test="test-block-building-warp-sync.zndsl" diff --git a/.gitlab/spellcheck.toml b/.gitlab/spellcheck.toml new file mode 100644 index 000000000000..025c7a0a461b --- /dev/null +++ b/.gitlab/spellcheck.toml @@ -0,0 +1,27 @@ +[hunspell] +lang = "en_US" +search_dirs = ["."] +extra_dictionaries = ["lingua.dic"] +skip_os_lookups = true +use_builtin = true + +[hunspell.quirks] +# He tagged it as 'TheGreatestOfAllTimes' +transform_regex = [ +# `Type`'s + "^'([^\\s])'$", +# 5x +# 10.7% + "^[0-9_]+(?:\\.[0-9]*)?(x|%)$", +# Transforms' + "^(.*)'$", +# backslashes + "^\\+$", + "^[0-9]*+k|MB|Mb|ms|Mbit|nd|th|rd$", +# single char `=` `>` `%` .. + "^=|>|<|%$", +# 22_100 + "^(?:[0-9]+_)+[0-9]+$" +] +allow_concatenation = true +allow_dashes = true diff --git a/.gitlab/test_deterministic_wasm.sh b/.gitlab/test_deterministic_wasm.sh new file mode 100755 index 000000000000..5b04013e1df3 --- /dev/null +++ b/.gitlab/test_deterministic_wasm.sh @@ -0,0 +1,15 @@ +#!/usr/bin/env bash + +#shellcheck source=../common/lib.sh +source "$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )/common/lib.sh" + +# build runtime +WASM_BUILD_NO_COLOR=1 cargo build --verbose --release -p kusama-runtime -p polkadot-runtime -p westend-runtime +# make checksum +sha256sum target/release/wbuild/*-runtime/target/wasm32-unknown-unknown/release/*.wasm > checksum.sha256 +# clean up - FIXME: can we reuse some of the artifacts? 
+cargo clean +# build again +WASM_BUILD_NO_COLOR=1 cargo build --verbose --release -p kusama-runtime -p polkadot-runtime -p westend-runtime +# confirm checksum +sha256sum -c checksum.sha256 diff --git a/Cargo.lock b/Cargo.lock index 4b0028a64513..c755be63042b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -11612,6 +11612,27 @@ dependencies = [ "plotters-backend", ] +[[package]] +name = "polkadot" +version = "1.0.0" +dependencies = [ + "assert_cmd", + "color-eyre", + "nix 0.26.2", + "polkadot-cli", + "polkadot-core-primitives", + "polkadot-node-core-pvf", + "polkadot-node-core-pvf-common", + "polkadot-node-core-pvf-execute-worker", + "polkadot-node-core-pvf-prepare-worker", + "polkadot-overseer", + "substrate-build-script-utils", + "substrate-rpc-client", + "tempfile", + "tikv-jemallocator", + "tokio", +] + [[package]] name = "polkadot-approval-distribution" version = "1.0.0" @@ -18713,6 +18734,16 @@ dependencies = [ "libc", ] +[[package]] +name = "tikv-jemallocator" +version = "0.5.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "965fe0c26be5c56c94e38ba547249074803efd52adfb66de62107d95aab3eaca" +dependencies = [ + "libc", + "tikv-jemalloc-sys", +] + [[package]] name = "time" version = "0.1.45" diff --git a/Cargo.toml b/Cargo.toml index f28ba6afbc82..fc7587c22b69 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -88,6 +88,7 @@ members = [ "cumulus/test/runtime", "cumulus/test/service", "cumulus/xcm/xcm-emulator", + "polkadot", "polkadot/cli", "polkadot/core-primitives", "polkadot/erasure-coding", diff --git a/docker/collator_injected.Dockerfile b/docker/collator_injected.Dockerfile new file mode 100644 index 000000000000..6472c240f332 --- /dev/null +++ b/docker/collator_injected.Dockerfile @@ -0,0 +1,49 @@ +# this file copies from scripts/ci/dockerfiles/Dockerfile and changes only the binary name +FROM docker.io/library/ubuntu:20.04 + +# metadata +ARG VCS_REF +ARG BUILD_DATE +ARG IMAGE_NAME + +LABEL io.parity.image.authors="devops-team@parity.io" \ + io.parity.image.vendor="Parity Technologies" \ + io.parity.image.title="${IMAGE_NAME}" \ + io.parity.image.description="Injected adder-collator Docker image" \ + io.parity.image.source="https://github.com/paritytech/polkadot/blob/${VCS_REF}/scripts/ci/dockerfiles/collator_injected.Dockerfile" \ + io.parity.image.revision="${VCS_REF}" \ + io.parity.image.created="${BUILD_DATE}" \ + io.parity.image.documentation="https://github.com/paritytech/polkadot/" + +# show backtraces +ENV RUST_BACKTRACE 1 + +# install tools and dependencies +RUN apt-get update && \ + DEBIAN_FRONTEND=noninteractive apt-get install -y \ + libssl1.1 \ + ca-certificates && \ + # apt cleanup + apt-get autoremove -y && \ + apt-get clean && \ + find /var/lib/apt/lists/ -type f -not -name lock -delete; \ + # add user and link ~/.local/share/adder-collator to /data + useradd -m -u 1000 -U -s /bin/sh -d /adder-collator adder-collator && \ + mkdir -p /data /adder-collator/.local/share && \ + chown -R adder-collator:adder-collator /data && \ + ln -s /data /adder-collator/.local/share/polkadot + +# add adder-collator binary to docker image +COPY ./artifacts/adder-collator /usr/local/bin +COPY ./artifacts/undying-collator /usr/local/bin + +USER adder-collator + +# check if executable works in this container +RUN /usr/local/bin/adder-collator --version +RUN /usr/local/bin/undying-collator --version + +EXPOSE 30333 9933 9944 +VOLUME ["/adder-collator"] + +ENTRYPOINT ["/usr/local/bin/adder-collator"] diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml 
new file mode 100644 index 000000000000..8344ad43bb4c --- /dev/null +++ b/docker/docker-compose.yml @@ -0,0 +1,129 @@ +version: '3.7' +services: + node_alice: + image: "polkadot:${BRANCH:-cumulus-branch}" + ports: + - "30333:30333" + - "9933:9933" + - "9944:9944" + volumes: + - "polkadot-data-alice:/data" + - type: bind + source: ./test/parachain/chain-specs/polkadot_chainspec.json + target: /chainspec.json + read_only: true + command: > + polkadot + --chain=/chainspec.json + --base-path=/data + --port 30333 + --rpc-port 9933 + --ws-port 9944 + --rpc-external + --rpc-cors all + --ws-external + --alice + networks: + testing_net: + ipv4_address: 172.28.1.1 + aliases: + - alice + + node_bob: + image: "polkadot:${BRANCH:-cumulus-branch}" + ports: + - "30344:30333" + - "9935:9933" + - "9945:9944" + volumes: + - "polkadot-data-bob:/data" + - type: bind + source: ./test/parachain/chain-specs/polkadot_chainspec.json + target: /chainspec.json + read_only: true + command: > + polkadot + --chain=/chainspec.json + --base-path=/data + --port 30333 + --rpc-port 9933 + --ws-port 9944 + --rpc-external + --ws-external + --rpc-cors all + --bob + networks: + testing_net: + ipv4_address: 172.28.1.2 + aliases: + - bob + + genesis_state: + build: + context: . + dockerfile: ./docker/test-parachain-collator.dockerfile + image: "ctpc:latest" + volumes: + - "genesis-state:/data" + command: > + polkadot-parachain + export-genesis-state + /data/genesis-state + + collator: + build: + context: . + dockerfile: ./docker/test-parachain-collator.dockerfile + target: collator + image: "ctpc:collator" + volumes: + - "collator-data:/data" + depends_on: + - node_alice + - node_bob + command: > + inject_bootnodes.sh + --base-path=/data + networks: + testing_net: + + runtime: + build: + context: . + dockerfile: ./docker/test-parachain-collator.dockerfile + target: runtime + image: "ctpc:runtime" + volumes: + - "parachain-runtime:/runtime" + + + registrar: + build: + context: . + dockerfile: ./docker/parachain-registrar.dockerfile + image: para-reg:latest + volumes: + - "genesis-state:/genesis" + - "parachain-runtime:/runtime" + depends_on: + - node_alice + - runtime + - genesis_state + networks: + testing_net: + + +volumes: + polkadot-data-alice: + polkadot-data-bob: + collator-data: + genesis-state: + parachain-runtime: + + +networks: + testing_net: + ipam: + driver: default + config: + - subnet: 172.28.0.0/16 diff --git a/docker/injected.Dockerfile b/docker/injected.Dockerfile new file mode 100644 index 000000000000..93d0561ca877 --- /dev/null +++ b/docker/injected.Dockerfile @@ -0,0 +1,51 @@ +FROM docker.io/library/ubuntu:20.04 + +# metadata +ARG VCS_REF +ARG BUILD_DATE +ARG IMAGE_NAME + +LABEL io.parity.image.authors="devops-team@parity.io" \ + io.parity.image.vendor="Parity Technologies" \ + io.parity.image.title="${IMAGE_NAME}" \ + io.parity.image.description="Cumulus, the Polkadot collator." 
\ + io.parity.image.source="https://github.com/paritytech/polkadot/blob/${VCS_REF}/scripts/docker/Dockerfile" \ + io.parity.image.revision="${VCS_REF}" \ + io.parity.image.created="${BUILD_DATE}" \ + io.parity.image.documentation="https://github.com/paritytech/cumulus/" + +# show backtraces +ENV RUST_BACKTRACE 1 + +# install tools and dependencies +RUN apt-get update && \ + DEBIAN_FRONTEND=noninteractive apt-get install -y \ + libssl1.1 \ + ca-certificates \ + curl && \ +# apt cleanup + apt-get autoremove -y && \ + apt-get clean && \ + find /var/lib/apt/lists/ -type f -not -name lock -delete; \ +# add user and link ~/.local/share/polkadot to /data + useradd -m -u 1000 -U -s /bin/sh -d /polkadot polkadot && \ + mkdir -p /data /polkadot/.local/share && \ + chown -R polkadot:polkadot /data && \ + ln -s /data /polkadot/.local/share/polkadot && \ + mkdir -p /specs + +# add polkadot-parachain binary to the docker image +COPY ./target/release/polkadot-parachain /usr/local/bin +COPY ./target/release/polkadot-parachain.asc /usr/local/bin +COPY ./target/release/polkadot-parachain.sha256 /usr/local/bin +COPY ./parachains/chain-specs/*.json /specs/ + +USER polkadot + +# check if executable works in this container +RUN /usr/local/bin/polkadot-parachain --version + +EXPOSE 30333 9933 9944 +VOLUME ["/polkadot"] + +ENTRYPOINT ["/usr/local/bin/polkadot-parachain"] diff --git a/docker/malus_injected.Dockerfile b/docker/malus_injected.Dockerfile new file mode 100644 index 000000000000..ecffd2c4f9b4 --- /dev/null +++ b/docker/malus_injected.Dockerfile @@ -0,0 +1,50 @@ +FROM debian:bullseye-slim + +# metadata +ARG VCS_REF +ARG BUILD_DATE +ARG IMAGE_NAME + +LABEL io.parity.image.authors="devops-team@parity.io" \ + io.parity.image.vendor="Parity Technologies" \ + io.parity.image.title="${IMAGE_NAME}" \ + io.parity.image.description="Malus - the nemesis of polkadot" \ + io.parity.image.source="https://github.com/paritytech/polkadot/blob/${VCS_REF}/scripts/ci/dockerfiles/malus.Dockerfile" \ + io.parity.image.revision="${VCS_REF}" \ + io.parity.image.created="${BUILD_DATE}" \ + io.parity.image.documentation="https://github.com/paritytech/polkadot/" + +# show backtraces +ENV RUST_BACKTRACE 1 + +# install tools and dependencies +RUN apt-get update && \ + DEBIAN_FRONTEND=noninteractive apt-get install -y \ + ca-certificates \ + curl \ + libssl1.1 \ + tini && \ +# apt cleanup + apt-get autoremove -y && \ + apt-get clean && \ + find /var/lib/apt/lists/ -type f -not -name lock -delete; \ +# add user + groupadd --gid 10000 nonroot && \ + useradd --home-dir /home/nonroot \ + --create-home \ + --shell /bin/bash \ + --gid nonroot \ + --groups nonroot \ + --uid 10000 nonroot + + +# add adder-collator binary to docker image +COPY ./artifacts/malus /usr/local/bin + +USER nonroot + +# check if executable works in this container +RUN /usr/local/bin/malus --version + +# Tini allows us to avoid several Docker edge cases, see https://github.com/krallin/tini. +ENTRYPOINT ["tini", "--", "/bin/bash"] diff --git a/docker/parachain-registrar.dockerfile b/docker/parachain-registrar.dockerfile new file mode 100644 index 000000000000..f7d77454a2b9 --- /dev/null +++ b/docker/parachain-registrar.dockerfile @@ -0,0 +1,27 @@ +FROM node:latest AS pjs + +# It would be great to depend on a more stable tag, but we need some +# as-yet-unreleased features. 
+RUN yarn global add @polkadot/api-cli@0.10.0-beta.14 + +ENTRYPOINT [ "polkadot-js-api" ] +CMD [ "--version" ] + +# To use the pjs build stage to access the blockchain from the host machine: +# +# docker build -f docker/parachain-registrar.dockerfile --target pjs -t parachain-registrar:pjs . +# alias pjs='docker run --rm --net cumulus_testing_net parachain-registrar:pjs --ws ws://172.28.1.1:9944' +# +# Then, as long as the chain is running, you can use the polkadot-js-api CLI like: +# +# pjs query.sudo.key + +FROM pjs +RUN apt-get update && apt-get install curl netcat -y && \ + curl -sSo /wait-for-it.sh https://raw.githubusercontent.com/vishnubob/wait-for-it/master/wait-for-it.sh && \ + chmod +x /wait-for-it.sh +# the only thing left to do is to actually run the transaction. +COPY ./docker/scripts/register_para.sh /usr/bin +# unset the previous stage's entrypoint +ENTRYPOINT [] +CMD [ "/usr/bin/register_para.sh" ] diff --git a/docker/polkadot-parachain-debug_unsigned_injected.Dockerfile b/docker/polkadot-parachain-debug_unsigned_injected.Dockerfile new file mode 100644 index 000000000000..e77563b8ebf2 --- /dev/null +++ b/docker/polkadot-parachain-debug_unsigned_injected.Dockerfile @@ -0,0 +1,49 @@ +FROM docker.io/library/ubuntu:20.04 + +# metadata +ARG VCS_REF +ARG BUILD_DATE +ARG IMAGE_NAME + +LABEL io.parity.image.authors="devops-team@parity.io" \ + io.parity.image.vendor="Parity Technologies" \ + io.parity.image.title="${IMAGE_NAME}" \ + io.parity.image.description="Cumulus, the Polkadot collator." \ + io.parity.image.source="https://github.com/paritytech/cumulus/blob/${VCS_REF}/scripts/docker/polkadot-parachain-debug_unsigned_injected.Dockerfile" \ + io.parity.image.revision="${VCS_REF}" \ + io.parity.image.created="${BUILD_DATE}" \ + io.parity.image.documentation="https://github.com/paritytech/cumulus/" + +# show backtraces +ENV RUST_BACKTRACE 1 + +# install tools and dependencies +RUN apt-get update && \ + DEBIAN_FRONTEND=noninteractive apt-get install -y \ + libssl1.1 \ + ca-certificates \ + curl && \ + # apt cleanup + apt-get autoremove -y && \ + apt-get clean && \ + find /var/lib/apt/lists/ -type f -not -name lock -delete; \ + # add user and link ~/.local/share/polkadot-parachain to /data + useradd -m -u 10000 -U -s /bin/sh -d /polkadot-parachain polkadot-parachain && \ + mkdir -p /data /polkadot-parachain/.local/share && \ + chown -R polkadot-parachain:polkadot-parachain /data && \ + ln -s /data /polkadot-parachain/.local/share/polkadot-parachain && \ + mkdir -p /specs + +# add polkadot-parachain binary to the docker image +COPY ./artifacts/polkadot-parachain /usr/local/bin +COPY ./cumulus/parachains/chain-specs/*.json /specs/ + +USER polkadot-parachain + +# check if executable works in this container +RUN /usr/local/bin/polkadot-parachain --version + +EXPOSE 30333 9933 9944 +VOLUME ["/polkadot-parachain"] + +ENTRYPOINT ["/usr/local/bin/polkadot-parachain"] diff --git a/docker/polkadot-parachain_builder.Containerfile b/docker/polkadot-parachain_builder.Containerfile new file mode 100644 index 000000000000..159bcb323693 --- /dev/null +++ b/docker/polkadot-parachain_builder.Containerfile @@ -0,0 +1,36 @@ +# This file is sourced from https://github.com/paritytech/polkadot/blob/master/scripts/ci/dockerfiles/polkadot/polkadot_builder.Dockerfile +# This is the build stage for polkadot-parachain. Here we create the binary in a temporary image. +FROM docker.io/paritytech/ci-linux:production as builder + +WORKDIR /cumulus +COPY . 
/cumulus + +RUN cargo build --release --locked -p polkadot-parachain + +# This is the 2nd stage: a very small image where we copy the Polkadot binary." +FROM docker.io/library/ubuntu:20.04 + +LABEL io.parity.image.type="builder" \ + io.parity.image.authors="devops-team@parity.io" \ + io.parity.image.vendor="Parity Technologies" \ + io.parity.image.description="Multistage Docker image for polkadot-parachain" \ + io.parity.image.source="https://github.com/paritytech/polkadot/blob/${VCS_REF}/scripts/ci/dockerfiles/polkadot/polkadot-parachain_builder.Dockerfile" \ + io.parity.image.documentation="https://github.com/paritytech/cumulus" + +COPY --from=builder /cumulus/target/release/polkadot-parachain /usr/local/bin + +RUN useradd -m -u 1000 -U -s /bin/sh -d /cumulus polkadot-parachain && \ + mkdir -p /data /cumulus/.local/share && \ + chown -R polkadot-parachain:polkadot-parachain /data && \ + ln -s /data /cumulus/.local/share/polkadot-parachain && \ +# unclutter and minimize the attack surface + rm -rf /usr/bin /usr/sbin && \ +# check if executable works in this container + /usr/local/bin/polkadot-parachain --version + +USER polkadot-parachain + +EXPOSE 30333 9933 9944 9615 +VOLUME ["/data"] + +ENTRYPOINT ["/usr/local/bin/polkadot-parachain"] diff --git a/docker/polkadot_injected_debug.Dockerfile b/docker/polkadot_injected_debug.Dockerfile new file mode 100644 index 000000000000..3dd62f7ba56f --- /dev/null +++ b/docker/polkadot_injected_debug.Dockerfile @@ -0,0 +1,46 @@ +FROM docker.io/library/ubuntu:20.04 + +# metadata +ARG VCS_REF +ARG BUILD_DATE +ARG IMAGE_NAME + +LABEL io.parity.image.authors="devops-team@parity.io" \ + io.parity.image.vendor="Parity Technologies" \ + io.parity.image.title="${IMAGE_NAME}" \ + io.parity.image.description="Polkadot: a platform for web3" \ + io.parity.image.source="https://github.com/paritytech/polkadot/blob/${VCS_REF}/scripts/ci/dockerfiles/polkadot_injected_debug.Dockerfile" \ + io.parity.image.revision="${VCS_REF}" \ + io.parity.image.created="${BUILD_DATE}" \ + io.parity.image.documentation="https://github.com/paritytech/polkadot/" + +# show backtraces +ENV RUST_BACKTRACE 1 + +# install tools and dependencies +RUN apt-get update && \ + DEBIAN_FRONTEND=noninteractive apt-get install -y \ + libssl1.1 \ + ca-certificates && \ + # apt cleanup + apt-get autoremove -y && \ + apt-get clean && \ + find /var/lib/apt/lists/ -type f -not -name lock -delete; \ + # add user and link ~/.local/share/polkadot to /data + useradd -m -u 1000 -U -s /bin/sh -d /polkadot polkadot && \ + mkdir -p /data /polkadot/.local/share && \ + chown -R polkadot:polkadot /data && \ + ln -s /data /polkadot/.local/share/polkadot + +# add polkadot binary to docker image +COPY ./artifacts/polkadot /usr/local/bin + +USER polkadot + +# check if executable works in this container +RUN /usr/local/bin/polkadot --version + +EXPOSE 30333 9933 9944 +VOLUME ["/polkadot"] + +ENTRYPOINT ["/usr/local/bin/polkadot"] diff --git a/docker/polkadot_injected_release.Dockerfile b/docker/polkadot_injected_release.Dockerfile new file mode 100644 index 000000000000..ba0a79e78187 --- /dev/null +++ b/docker/polkadot_injected_release.Dockerfile @@ -0,0 +1,51 @@ +FROM docker.io/library/ubuntu:20.04 + +# metadata +ARG VCS_REF +ARG BUILD_DATE +ARG POLKADOT_VERSION +ARG POLKADOT_GPGKEY=9D4B2B6EB8F97156D19669A9FF0812D491B96798 +ARG GPG_KEYSERVER="keyserver.ubuntu.com" + +LABEL io.parity.image.authors="devops-team@parity.io" \ + io.parity.image.vendor="Parity Technologies" \ + io.parity.image.title="parity/polkadot" \ + 
io.parity.image.description="Polkadot: a platform for web3. This is the official Parity image with an injected binary." \ + io.parity.image.source="https://github.com/paritytech/polkadot/blob/${VCS_REF}/scripts/ci/dockerfiles/polkadot_injected_release.Dockerfile" \ + io.parity.image.revision="${VCS_REF}" \ + io.parity.image.created="${BUILD_DATE}" \ + io.parity.image.documentation="https://github.com/paritytech/polkadot/" + +# show backtraces +ENV RUST_BACKTRACE 1 + +# install tools and dependencies +RUN apt-get update && \ + DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \ + libssl1.1 \ + ca-certificates \ + gnupg && \ + useradd -m -u 1000 -U -s /bin/sh -d /polkadot polkadot && \ +# add repo's gpg keys and install the published polkadot binary + gpg --keyserver ${GPG_KEYSERVER} --recv-keys ${POLKADOT_GPGKEY} && \ + gpg --export ${POLKADOT_GPGKEY} > /usr/share/keyrings/parity.gpg && \ + echo 'deb [signed-by=/usr/share/keyrings/parity.gpg] https://releases.parity.io/deb release main' > /etc/apt/sources.list.d/parity.list && \ + apt-get update && \ + apt-get install -y --no-install-recommends polkadot=${POLKADOT_VERSION#?} && \ +# apt cleanup + apt-get autoremove -y && \ + apt-get clean && \ + rm -rf /var/lib/apt/lists/* ; \ + mkdir -p /data /polkadot/.local/share && \ + chown -R polkadot:polkadot /data && \ + ln -s /data /polkadot/.local/share/polkadot + +USER polkadot + +# check if executable works in this container +RUN /usr/bin/polkadot --version + +EXPOSE 30333 9933 9944 +VOLUME ["/polkadot"] + +ENTRYPOINT ["/usr/bin/polkadot"] diff --git a/docker/staking-miner/staking-miner_builder.Dockerfile b/docker/staking-miner/staking-miner_builder.Dockerfile new file mode 100644 index 000000000000..a1932095fd4c --- /dev/null +++ b/docker/staking-miner/staking-miner_builder.Dockerfile @@ -0,0 +1,46 @@ +FROM paritytech/ci-linux:production as builder + +# metadata +ARG VCS_REF +ARG BUILD_DATE +ARG IMAGE_NAME="staking-miner" +ARG PROFILE=release + +LABEL description="This is the build stage. Here we create the binary." + +WORKDIR /app +COPY . /app +RUN cargo build --locked --$PROFILE --package staking-miner + +# ===== SECOND STAGE ====== + +FROM docker.io/library/ubuntu:20.04 +LABEL description="This is the 2nd stage: a very small image where we copy the binary." 
+LABEL io.parity.image.authors="devops-team@parity.io" \ + io.parity.image.vendor="Parity Technologies" \ + io.parity.image.title="${IMAGE_NAME}" \ + io.parity.image.description="${IMAGE_NAME} for substrate based chains" \ + io.parity.image.source="https://github.com/paritytech/polkadot/blob/${VCS_REF}/scripts/ci/dockerfiles/${IMAGE_NAME}/${IMAGE_NAME}_builder.Dockerfile" \ + io.parity.image.revision="${VCS_REF}" \ + io.parity.image.created="${BUILD_DATE}" \ + io.parity.image.documentation="https://github.com/paritytech/polkadot/" + +ARG PROFILE=release +COPY --from=builder /app/target/$PROFILE/staking-miner /usr/local/bin + +RUN useradd -u 1000 -U -s /bin/sh miner && \ + rm -rf /usr/bin /usr/sbin + +# show backtraces +ENV RUST_BACKTRACE 1 + +USER miner + +ENV SEED="" +ENV URI="wss://rpc.polkadot.io" +ENV RUST_LOG="info" + +# check if the binary works in this container +RUN /usr/local/bin/staking-miner --version + +ENTRYPOINT [ "/usr/local/bin/staking-miner" ] diff --git a/docker/staking-miner/staking-miner_injected.Dockerfile b/docker/staking-miner/staking-miner_injected.Dockerfile new file mode 100644 index 000000000000..4901ab4a3736 --- /dev/null +++ b/docker/staking-miner/staking-miner_injected.Dockerfile @@ -0,0 +1,43 @@ +FROM docker.io/library/ubuntu:20.04 + +# metadata +ARG VCS_REF +ARG BUILD_DATE +ARG IMAGE_NAME="staking-miner" + +LABEL io.parity.image.authors="devops-team@parity.io" \ + io.parity.image.vendor="Parity Technologies" \ + io.parity.image.title="${IMAGE_NAME}" \ + io.parity.image.description="${IMAGE_NAME} for substrate based chains" \ + io.parity.image.source="https://github.com/paritytech/polkadot/blob/${VCS_REF}/scripts/ci/dockerfiles/${IMAGE_NAME}/${IMAGE_NAME}_injected.Dockerfile" \ + io.parity.image.revision="${VCS_REF}" \ + io.parity.image.created="${BUILD_DATE}" \ + io.parity.image.documentation="https://github.com/paritytech/polkadot/" + +# show backtraces +ENV RUST_BACKTRACE 1 + +# install tools and dependencies +RUN apt-get update && \ + DEBIAN_FRONTEND=noninteractive apt-get install -y \ + libssl1.1 \ + ca-certificates && \ +# apt cleanup + apt-get autoremove -y && \ + apt-get clean && \ + find /var/lib/apt/lists/ -type f -not -name lock -delete; \ + useradd -u 1000 -U -s /bin/sh miner + +# add binary to docker image +COPY ./staking-miner /usr/local/bin + +USER miner + +ENV SEED="" +ENV URI="wss://rpc.polkadot.io" +ENV RUST_LOG="info" + +# check if the binary works in this container +RUN /usr/local/bin/staking-miner --version + +ENTRYPOINT [ "/usr/local/bin/staking-miner" ] diff --git a/docker/substrate_injected.Dockerfile b/docker/substrate_injected.Dockerfile new file mode 100644 index 000000000000..2d825f5c6bb3 --- /dev/null +++ b/docker/substrate_injected.Dockerfile @@ -0,0 +1,45 @@ +FROM docker.io/library/ubuntu:20.04 + +# metadata +ARG VCS_REF +ARG BUILD_DATE +ARG IMAGE_NAME + +LABEL io.parity.image.authors="devops-team@parity.io" \ + io.parity.image.vendor="Parity Technologies" \ + io.parity.image.title="${IMAGE_NAME}" \ + io.parity.image.description="Substrate: The platform for blockchain innovators." 
\ + io.parity.image.source="https://github.com/paritytech/substrate/blob/${VCS_REF}/scripts/ci/docker/Dockerfile" \ + io.parity.image.revision="${VCS_REF}" \ + io.parity.image.created="${BUILD_DATE}" \ + io.parity.image.documentation="https://wiki.parity.io/Parity-Substrate" + +# show backtraces +ENV RUST_BACKTRACE 1 + +# install tools and dependencies +RUN apt-get update && \ + DEBIAN_FRONTEND=noninteractive apt-get upgrade -y && \ + DEBIAN_FRONTEND=noninteractive apt-get install -y \ + libssl1.1 \ + ca-certificates \ + curl && \ +# apt cleanup + apt-get autoremove -y && \ + apt-get clean && \ + find /var/lib/apt/lists/ -type f -not -name lock -delete; \ +# add user + useradd -m -u 1000 -U -s /bin/sh -d /substrate substrate + +# add substrate binary to docker image +COPY ./artifacts/substrate /usr/local/bin + +USER substrate + +# check if executable works in this container +RUN /usr/local/bin/substrate --version + +EXPOSE 30333 9933 9944 +VOLUME ["/substrate"] + +ENTRYPOINT ["/usr/local/bin/substrate"] diff --git a/docker/test-parachain-collator.dockerfile b/docker/test-parachain-collator.dockerfile new file mode 100644 index 000000000000..9c2d8fbe5818 --- /dev/null +++ b/docker/test-parachain-collator.dockerfile @@ -0,0 +1,46 @@ +# This file is sourced from https://github.com/paritytech/polkadot/blob/master/scripts/ci/dockerfiles/polkadot/polkadot_builder.Dockerfile +FROM docker.io/paritytech/ci-linux:production as builder + +WORKDIR /cumulus +COPY . /cumulus + +RUN cargo build --release --locked -p polkadot-parachain + +# the collator stage is normally built once, cached, and then ignored, but can +# be specified with the --target build flag. This adds some extra tooling to the +# image, which is required for a launcher script. The script simply adds two +# arguments to the list passed in: +# +# --bootnodes /ip4/127.0.0.1/tcp/30333/p2p/PEER_ID +# +# with the appropriate ip and ID for both Alice and Bob +FROM debian:buster-slim as collator +RUN apt-get update && apt-get install jq curl bash -y && \ + curl -sSo /wait-for-it.sh https://raw.githubusercontent.com/vishnubob/wait-for-it/master/wait-for-it.sh && \ + chmod +x /wait-for-it.sh && \ + curl -sL https://deb.nodesource.com/setup_12.x | bash - && \ + apt-get install -y nodejs && \ + npm install --global yarn && \ + yarn global add @polkadot/api-cli@0.10.0-beta.14 +COPY --from=builder \ + /paritytech/cumulus/target/release/polkadot-parachain /usr/bin +COPY ./docker/scripts/inject_bootnodes.sh /usr/bin +CMD ["/usr/bin/inject_bootnodes.sh"] +COPY ./docker/scripts/healthcheck.sh /usr/bin/ +HEALTHCHECK --interval=300s --timeout=75s --start-period=30s --retries=3 \ + CMD ["/usr/bin/healthcheck.sh"] + +# the runtime stage is normally built once, cached, and ignored, but can be +# specified with the --target build flag. 
This just preserves one of the builder's +# outputs, which can then be moved into a volume at runtime +FROM debian:buster-slim as runtime +COPY --from=builder \ + /paritytech/cumulus/target/release/wbuild/cumulus-test-parachain-runtime/cumulus_test_parachain_runtime.compact.wasm \ + /var/opt/ +CMD ["cp", "-v", "/var/opt/cumulus_test_parachain_runtime.compact.wasm", "/runtime/"] + +FROM debian:buster-slim +COPY --from=builder \ + /paritytech/cumulus/target/release/polkadot-parachain /usr/bin + +CMD ["/usr/bin/polkadot-parachain"] diff --git a/docker/test-parachain_injected.Dockerfile b/docker/test-parachain_injected.Dockerfile new file mode 100644 index 000000000000..0b345e16e4af --- /dev/null +++ b/docker/test-parachain_injected.Dockerfile @@ -0,0 +1,49 @@ +FROM docker.io/library/ubuntu:20.04 + +# metadata +ARG VCS_REF +ARG BUILD_DATE +ARG IMAGE_NAME + +LABEL io.parity.image.authors="devops-team@parity.io" \ + io.parity.image.vendor="Parity Technologies" \ + io.parity.image.title="${IMAGE_NAME}" \ + io.parity.image.description="Test parachain for Zombienet" \ + io.parity.image.source="https://github.com/paritytech/cumulus/blob/${VCS_REF}/docker/test-parachain_injected.Dockerfile" \ + io.parity.image.revision="${VCS_REF}" \ + io.parity.image.created="${BUILD_DATE}" \ + io.parity.image.documentation="https://github.com/paritytech/cumulus/" + +# show backtraces +ENV RUST_BACKTRACE 1 + +# install tools and dependencies +RUN apt-get update && \ + DEBIAN_FRONTEND=noninteractive apt-get install -y \ + libssl1.1 \ + ca-certificates \ + curl && \ + # apt cleanup + apt-get autoremove -y && \ + apt-get clean && \ + find /var/lib/apt/lists/ -type f -not -name lock -delete; \ + # add user and link ~/.local/share/test-parachain to /data + useradd -m -u 10000 -U -s /bin/sh -d /test-parachain test-parachain && \ + mkdir -p /data /test-parachain/.local/share && \ + chown -R test-parachain:test-parachain /data && \ + ln -s /data /test-parachain/.local/share/test-parachain && \ + mkdir -p /specs + +# add test-parachain binary to the docker image +COPY ./artifacts/test-parachain /usr/local/bin +COPY ./cumulus/parachains/chain-specs/*.json /specs/ + +USER test-parachain + +# check if executable works in this container +RUN /usr/local/bin/test-parachain --version + +EXPOSE 30333 9933 9944 +VOLUME ["/test-parachain"] + +ENTRYPOINT ["/usr/local/bin/test-parachain"] diff --git a/polkadot/Cargo.toml b/polkadot/Cargo.toml new file mode 100644 index 000000000000..69d7ce724f95 --- /dev/null +++ b/polkadot/Cargo.toml @@ -0,0 +1,171 @@ +[[bin]] +name = "polkadot" +path = "src/main.rs" + +[[bin]] +name = "polkadot-execute-worker" +path = "src/bin/execute-worker.rs" + +[[bin]] +name = "polkadot-prepare-worker" +path = "src/bin/prepare-worker.rs" + +[package] +name = "polkadot" +description = "Implementation of a `https://polkadot.network` node in Rust based on the Substrate framework." +license = "GPL-3.0-only" +rust-version = "1.64.0" # workspace properties +readme = "README.md" +authors.workspace = true +edition.workspace = true +version.workspace = true + +[dependencies] +color-eyre = { version = "0.6.1", default-features = false } +tikv-jemallocator = "0.5.0" + +# Crates in our workspace, defined as dependencies so we can pass them feature flags. 
+polkadot-cli = { path = "cli", features = [ + "kusama-native", + "westend-native", + "rococo-native", +] } +polkadot-node-core-pvf = { path = "node/core/pvf" } +polkadot-node-core-pvf-prepare-worker = { path = "node/core/pvf/prepare-worker" } +polkadot-overseer = { path = "node/overseer" } + +# Needed for worker binaries. +polkadot-node-core-pvf-common = { path = "node/core/pvf/common" } +polkadot-node-core-pvf-execute-worker = { path = "node/core/pvf/execute-worker" } + +[dev-dependencies] +assert_cmd = "2.0.4" +nix = { version = "0.26.1", features = ["signal"] } +tempfile = "3.2.0" +tokio = "1.24.2" +substrate-rpc-client = { path = "../substrate/utils/frame/rpc/client/" } +polkadot-core-primitives = { path = "core-primitives" } + +[build-dependencies] +substrate-build-script-utils = { path = "../substrate/utils/build-script-utils" } + + +[badges] +maintenance = { status = "actively-developed" } + +# The list of dependencies below (which can be both direct and indirect dependencies) are crates +# that are suspected to be CPU-intensive, and that are unlikely to require debugging (as some of +# their debug info might be missing) or to require to be frequently recompiled. We compile these +# dependencies with `opt-level=3` even in "dev" mode in order to make "dev" mode more usable. +# The majority of these crates are cryptographic libraries. +# +# If you see an error mentioning "profile package spec ... did not match any packages", it +# probably concerns this list. +# +# This list is ordered alphabetically. +[profile.dev.package] +blake2 = { opt-level = 3 } +blake2b_simd = { opt-level = 3 } +chacha20poly1305 = { opt-level = 3 } +cranelift-codegen = { opt-level = 3 } +cranelift-wasm = { opt-level = 3 } +crc32fast = { opt-level = 3 } +crossbeam-deque = { opt-level = 3 } +crypto-mac = { opt-level = 3 } +curve25519-dalek = { opt-level = 3 } +ed25519-dalek = { opt-level = 3 } +flate2 = { opt-level = 3 } +futures-channel = { opt-level = 3 } +hash-db = { opt-level = 3 } +hashbrown = { opt-level = 3 } +hmac = { opt-level = 3 } +httparse = { opt-level = 3 } +integer-sqrt = { opt-level = 3 } +keccak = { opt-level = 3 } +libm = { opt-level = 3 } +librocksdb-sys = { opt-level = 3 } +libsecp256k1 = { opt-level = 3 } +libz-sys = { opt-level = 3 } +mio = { opt-level = 3 } +nalgebra = { opt-level = 3 } +num-bigint = { opt-level = 3 } +parking_lot = { opt-level = 3 } +parking_lot_core = { opt-level = 3 } +percent-encoding = { opt-level = 3 } +primitive-types = { opt-level = 3 } +reed-solomon-novelpoly = { opt-level = 3 } +ring = { opt-level = 3 } +rustls = { opt-level = 3 } +sha2 = { opt-level = 3 } +sha3 = { opt-level = 3 } +smallvec = { opt-level = 3 } +snow = { opt-level = 3 } +substrate-bip39 = { opt-level = 3 } +twox-hash = { opt-level = 3 } +uint = { opt-level = 3 } +wasmi = { opt-level = 3 } +x25519-dalek = { opt-level = 3 } +yamux = { opt-level = 3 } +zeroize = { opt-level = 3 } + +[profile.release] +# Polkadot runtime requires unwinding. 
+panic = "unwind" +opt-level = 3 + +# make sure dev builds with backtrace do +# not slow us down +[profile.dev.package.backtrace] +inherits = "release" + +[profile.production] +inherits = "release" +lto = true +codegen-units = 1 + +[profile.testnet] +inherits = "release" +debug = 1 # debug symbols are useful for profilers +debug-assertions = true +overflow-checks = true + +[features] +runtime-benchmarks = ["polkadot-cli/runtime-benchmarks"] +try-runtime = ["polkadot-cli/try-runtime"] +fast-runtime = ["polkadot-cli/fast-runtime"] +runtime-metrics = ["polkadot-cli/runtime-metrics"] +pyroscope = ["polkadot-cli/pyroscope"] +jemalloc-allocator = [ + "polkadot-node-core-pvf-prepare-worker/jemalloc-allocator", + "polkadot-overseer/jemalloc-allocator", +] + +# Enables timeout-based tests supposed to be run only in CI environment as they may be flaky +# when run locally depending on system load +ci-only-tests = ["polkadot-node-core-pvf/ci-only-tests"] + +# Configuration for building a .deb package - for use with `cargo-deb` +[package.metadata.deb] +name = "polkadot" +extended-description = "Implementation of a https://polkadot.network node in Rust based on the Substrate framework." +section = "misc" +maintainer = "security@parity.io" +license-file = ["LICENSE", "0"] +# https://www.debian.org/doc/debian-policy/ch-maintainerscripts.html +maintainer-scripts = "scripts/packaging/deb-maintainer-scripts" +assets = [ + [ + "target/release/polkadot", + "/usr/bin/", + "755", + ], + [ + "scripts/packaging/polkadot.service", + "/lib/systemd/system/", + "644", + ], +] +conf-files = ["/etc/default/polkadot"] + +[package.metadata.spellcheck] +config = "./scripts/ci/gitlab/spellcheck.toml" diff --git a/rustfmt.toml b/rustfmt.toml new file mode 100644 index 000000000000..10568e496801 --- /dev/null +++ b/rustfmt.toml @@ -0,0 +1,27 @@ +# Basic +hard_tabs = true +max_width = 100 +use_small_heuristics = "Max" +# Imports +imports_granularity = "Crate" +reorder_imports = true +# Consistency +newline_style = "Unix" +# Misc +chain_width = 80 +spaces_around_ranges = false +binop_separator = "Back" +reorder_impl_items = false +match_arm_leading_pipes = "Preserve" +match_arm_blocks = false +match_block_trailing_comma = true +trailing_comma = "Vertical" +trailing_semicolon = false +use_field_init_shorthand = true +ignore = [ + "bridges", +] +edition = "2021" +# Format comments +comment_width = 100 +wrap_comments = true \ No newline at end of file
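A note on docker/polkadot_injected_release.Dockerfile above: the shell expansion `${POLKADOT_VERSION#?}` drops the first character of the build argument, so the version can be supplied with a leading `v` while the Debian package is pinned without it. A minimal build invocation might look like the sketch below; the image tag and version value are illustrative, not taken from this repository.

docker build \
    --file docker/polkadot_injected_release.Dockerfile \
    --build-arg POLKADOT_VERSION=v1.0.0 \
    --build-arg VCS_REF=$(git rev-parse HEAD) \
    --build-arg BUILD_DATE=$(date -u +%Y-%m-%dT%H:%M:%SZ) \
    --tag parity/polkadot:local \
    .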
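The staking-miner Dockerfiles above declare SEED, URI and RUST_LOG environment defaults that can be overridden when the container is started. A possible invocation, assuming an image tagged staking-miner:local has been built from docker/staking-miner/staking-miner_injected.Dockerfile:

docker run --rm \
    -e URI="wss://rpc.polkadot.io" \
    -e RUST_LOG="debug" \
    staking-miner:local \
    --help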
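As the comments in docker/test-parachain-collator.dockerfile explain, the collator and runtime stages are only produced when selected with Docker's --target flag. A sketch of building and using them follows; the image tags and host path are illustrative.

docker build --file docker/test-parachain-collator.dockerfile --target collator --tag test-parachain-collator:local .
docker build --file docker/test-parachain-collator.dockerfile --target runtime --tag test-parachain-runtime:local .
# the runtime image's default command copies the compact runtime wasm into /runtime,
# so mount a host directory there to receive it
docker run --rm --volume "$(pwd)/runtime-out:/runtime" test-parachain-runtime:local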
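polkadot/Cargo.toml above defines the additional production and testnet profiles and a set of opt-in features that are forwarded to polkadot-cli. Assuming the commands are run from the workspace root, builds selecting them might look like:

# optimised build with LTO and a single codegen unit, as used for releases
cargo build --locked --profile production -p polkadot
# release build with runtime benchmarking support compiled in
cargo build --locked --release --features runtime-benchmarks -p polkadot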
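Several options in rustfmt.toml above (for example imports_granularity and wrap_comments) are unstable and, at the time of writing, only honoured by a nightly rustfmt, so formatting is typically invoked through the nightly toolchain:

cargo +nightly fmt --all
# verify formatting without rewriting files, e.g. in CI
cargo +nightly fmt --all -- --check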