diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 21f45ccfd59..a6f62e17848 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -1,6 +1,20 @@ ## Description +## How Has This Been Tested? + + +## Key Areas to Review + + ## Type of Change - [ ] New feature - [ ] Bug fix @@ -18,22 +32,9 @@ - [ ] Aptos Framework - [ ] Aptos CLI/SDK - [ ] Developer Infrastructure +- [ ] Move Compiler - [ ] Other (specify) -## How Has This Been Tested? - - -## Key Areas to Review - - ## Checklist - [ ] I have read and followed the [CONTRIBUTING](https://github.com/aptos-labs/aptos-core/blob/main/CONTRIBUTING.md) doc - [ ] I have performed a self-review of my own code diff --git a/.github/actions/determine-or-use-target-branch-and-get-last-released-image/action.yaml b/.github/actions/determine-or-use-target-branch-and-get-last-released-image/action.yaml new file mode 100644 index 00000000000..1c3cf1b3a12 --- /dev/null +++ b/.github/actions/determine-or-use-target-branch-and-get-last-released-image/action.yaml @@ -0,0 +1,88 @@ +name: "Determine or Use Target Branch and Get Latest Docker Image" +description: | + Determine the branch to fetch the latest docker image tag from, or use a target branch directly. 
+ - If base-branch is set, determine the target branch to fetch the latest docker image tag from + - Determine the IMAGE_TAG based on the latest release R and check for images built on branch R - 1: + - For commits on the aptos-release-v1.19 branch, the IMAGE_TAG should be the latest commit built on aptos-release-v1.18 + - For commits on the main branch, the IMAGE_TAG should be the latest commit on the max release aptos-release-v branch + - For commits on other branches, the IMAGE_TAG should be the latest commit on the branch's last release aptos-release-v branch + - If branch is set, use it directly + +inputs: + base-branch: + description: "The base branch to determine the target from, or use a target branch directly" + required: false + branch: + description: "Use this branch directly if set" + required: false + variants: + description: "The variants to check, as a space-separated string, e.g. 'performance failpoints'" + required: false + +outputs: + TARGET_BRANCH: + description: "The determined or target target branch" + value: ${{ steps.set-target-branch.outputs.TARGET_BRANCH }} + IMAGE_TAG: + description: "The latest docker image tag for the given branch and variants" + value: ${{ steps.determine-test-image-tag.outputs.IMAGE_TAG }} + +runs: + using: composite + steps: + # Checkout repository based on base branch or target branch + - name: Checkout branch + uses: actions/checkout@v4 + with: + ref: ${{ inputs.base-branch || inputs.branch }} + path: checkout_branch + fetch-depth: 0 + + - name: Setup Python environment + uses: ./checkout_branch/.github/actions/python-setup + with: + pyproject_directory: checkout_branch/testsuite + + # Determine the target branch if base-branch is used + - name: Set target branch + id: set-target-branch + run: | + if [[ -n "${{ inputs.base-branch }}" ]]; then + base_branch="${{ inputs.base-branch }}" + echo "Determining target branch from base branch: $base_branch" + ./testrun determine_target_branch_to_fetch_last_released_image.py 
"$base_branch" + else + echo "Using target branch: ${{ inputs.branch }}" + echo "TARGET_BRANCH=${{ inputs.branch }}" >> $GITHUB_OUTPUT + fi + shell: bash + working-directory: checkout_branch/testsuite + + # Checkout the determined or target branch + - name: Checkout target branch + if: ${{ steps.set-target-branch.outputs.TARGET_BRANCH != inputs.branch }} + uses: actions/checkout@v4 + with: + ref: ${{ steps.set-target-branch.outputs.TARGET_BRANCH }} + path: checkout_branch + fetch-depth: 0 + + # Setup Python environment again after second checkout, as branches are different + - name: Setup Python environment again + if: ${{ steps.set-target-branch.outputs.TARGET_BRANCH != inputs.branch }} + uses: ./checkout_branch/.github/actions/python-setup + with: + pyproject_directory: checkout_branch/testsuite + + # Determine image tag using the target branch + - name: Determine image tag + id: determine-test-image-tag + run: | + variants=(${{ inputs.variants }}) # split the variants string into an array + variants_args=() + for variant in "${variants[@]}"; do + variants_args+=("--variant" "$variant") + done + ./testrun find_latest_image.py "${variants_args[@]}" + shell: bash + working-directory: checkout_branch/testsuite diff --git a/.github/actions/general-lints/action.yaml b/.github/actions/general-lints/action.yaml index aa01b32c9d5..ac9bcfd334a 100644 --- a/.github/actions/general-lints/action.yaml +++ b/.github/actions/general-lints/action.yaml @@ -16,27 +16,3 @@ runs: sudo apt-get install shellcheck --assume-yes --no-install-recommends shellcheck scripts/dev_setup.sh shell: bash - - # Run the python lints and tests - - name: Run python lints and tests - uses: ./.github/actions/python-lint-tests - - # Setup node - - name: Setup node - uses: actions/setup-node@v3 - with: - node-version-file: .node-version - - # Setup pnpm - - name: Setup pnpm - uses: pnpm/action-setup@v4 - - # Install packages for examples and run package build, lint and tests - - name: Run ecosystem lint 
- run: | - cd ./ecosystem/typescript/sdk/examples/typescript && pnpm install && cd - - cd ./ecosystem/typescript/sdk/examples/javascript && pnpm install && cd - - cd ./ecosystem/typescript/sdk && pnpm install && cd - - cd ./ecosystem/typescript/sdk && pnpm lint && cd - - cd ./ecosystem/typescript/sdk && pnpm fmt:check && cd - - shell: bash diff --git a/.github/actions/get-latest-docker-image-tag/action.yml b/.github/actions/get-latest-docker-image-tag/action.yml index f0c7940529f..f42a7062f50 100644 --- a/.github/actions/get-latest-docker-image-tag/action.yml +++ b/.github/actions/get-latest-docker-image-tag/action.yml @@ -18,7 +18,7 @@ outputs: runs: using: composite steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 with: ref: ${{ inputs.branch }} path: checkout_branch diff --git a/.github/actions/move-prover-setup/action.yaml b/.github/actions/move-prover-setup/action.yaml index 233347965a3..28171d6e783 100644 --- a/.github/actions/move-prover-setup/action.yaml +++ b/.github/actions/move-prover-setup/action.yaml @@ -14,7 +14,7 @@ runs: # rust-cache action will cache ~/.cargo and ./target # https://github.com/Swatinem/rust-cache#cache-details - name: Run cargo cache - uses: Swatinem/rust-cache@359a70e43a0bb8a13953b04a90f76428b4959bb6 # pin@v2.2.0 + uses: Swatinem/rust-cache@23bce251a8cd2ffc3c1075eaa2367cf899916d84 # pin@v2.7.3 - name: install related tools and prover dependencies shell: bash diff --git a/.github/actions/python-lint-tests/action.yaml b/.github/actions/python-lint-tests/action.yaml deleted file mode 100644 index 6d9b3f9e821..00000000000 --- a/.github/actions/python-lint-tests/action.yaml +++ /dev/null @@ -1,43 +0,0 @@ -name: Run Python Tests -description: Runs all Python tests -inputs: - GIT_SHA: - description: "Optional git sha to checkout" - required: false - -runs: - using: composite - steps: - # The source code must be checkout out by the workflow that invokes this action. 
- - - name: Get changed files - id: changed-files - uses: tj-actions/changed-files@v42 - - - uses: ./.github/actions/python-setup - with: - pyproject_directory: testsuite - - - name: Should run tests - run: ./testrun determinator.py changed-files --github-output-key SHOULD_RUN --pattern 'testsuite/.*py' ${{steps.changed-files.outputs.all_changed_files }} - id: should-run-tests - working-directory: testsuite - shell: bash - - - name: Run python static type checker - if: steps.should-run-tests.outputs.SHOULD_RUN == 'true' - run: poetry run pyright - working-directory: testsuite - shell: bash - - - name: Run python fmt - if: steps.should-run-tests.outputs.SHOULD_RUN == 'true' - run: poetry run black --check --diff . - working-directory: testsuite - shell: bash - - - name: Run python unit tests - if: steps.should-run-tests.outputs.SHOULD_RUN == 'true' - run: find . -name '*test.py' | xargs poetry run python -m unittest - working-directory: testsuite - shell: bash diff --git a/.github/actions/run-ts-sdk-e2e-tests/action.yaml b/.github/actions/run-ts-sdk-e2e-tests/action.yaml deleted file mode 100644 index 342164652e6..00000000000 --- a/.github/actions/run-ts-sdk-e2e-tests/action.yaml +++ /dev/null @@ -1,75 +0,0 @@ -name: "Run SDK E2E tests" -description: | - Run the SDK E2E tests against a local testnet built from a particular release branch -inputs: - BRANCH: - description: "The branch to use for running the local testnet" - required: true - GCP_DOCKER_ARTIFACT_REPO: - description: "The GCP Docker artifact repository" - required: true - -runs: - using: composite - steps: - # Find a docker image to use for the testnet. - - uses: ./.github/actions/get-latest-docker-image-tag - id: get-docker-image-tag - with: - branch: ${{ inputs.BRANCH }} - - # Set up the necessary env vars for the test suite. 
- - run: echo "DOCKER_IMAGE=${{ inputs.GCP_DOCKER_ARTIFACT_REPO }}/tools:${{ steps.get-docker-image-tag.outputs.IMAGE_TAG }}" >>.env - shell: bash - working-directory: ./ecosystem/typescript/sdk - - run: echo "APTOS_NODE_URL=http://127.0.0.1:8080/v1" >> .env - shell: bash - working-directory: ./ecosystem/typescript/sdk - - run: echo "APTOS_FAUCET_URL=http://127.0.0.1:8081" >> .env - shell: bash - working-directory: ./ecosystem/typescript/sdk - - run: echo "ANS_TEST_ACCOUNT_PRIVATE_KEY=0x37368b46ce665362562c6d1d4ec01a08c8644c488690df5a17e13ba163e20221" >> .env - shell: bash - working-directory: ./ecosystem/typescript/sdk - - run: echo "ANS_TEST_ACCOUNT_ADDRESS=585fc9f0f0c54183b039ffc770ca282ebd87307916c215a3e692f2f8e4305e82" >> .env - shell: bash - working-directory: ./ecosystem/typescript/sdk - - # Run a local testnet. This installs node and pnpm too. - - uses: aptos-labs/actions/run-local-testnet@main - with: - PNPM_VERSION: 8.3.1 - DOCKER_ARTIFACT_REPO: ${{ inputs.GCP_DOCKER_ARTIFACT_REPO }} - CLI_GIT_REF: ${{ steps.get-docker-image-tag.outputs.IMAGE_TAG }} - - # Run package install. If install fails, it probably means the updated lockfile was - # not included in the commit. - - run: pnpm install --frozen-lockfile - shell: bash - working-directory: ./ecosystem/typescript/sdk - - # Run the non indexer TS SDK tests. - - uses: nick-fields/retry@7f8f3d9f0f62fe5925341be21c2e8314fd4f7c7c # pin@v2 - name: sdk-pnpm-test - env: - # This is important, it ensures that the tempdir we create for cloning the ANS - # repo and mounting it into the CLI container is created in a location that - # actually supports mounting. Learn more here: https://stackoverflow.com/a/76523941/3846032. - TMPDIR: ${{ runner.temp }} - with: - max_attempts: 3 - timeout_minutes: 25 - command: cd ./ecosystem/typescript/sdk && pnpm run test:ci - - # Run the indexer TS SDK tests. 
- - uses: nick-fields/retry@7f8f3d9f0f62fe5925341be21c2e8314fd4f7c7c # pin@v2 - name: ts-sdk-indexer-test - env: - # This is important, it ensures that the tempdir we create for cloning the ANS - # repo and mounting it into the CLI container is created in a location that - # actually supports mounting. Learn more here: https://stackoverflow.com/a/76523941/3846032. - TMPDIR: ${{ runner.temp }} - with: - max_attempts: 3 - timeout_minutes: 20 - command: cd ./ecosystem/typescript/sdk && pnpm run test:indexer diff --git a/.github/actions/rust-setup/action.yaml b/.github/actions/rust-setup/action.yaml index 1749a6e32ff..acc81bf4bf0 100644 --- a/.github/actions/rust-setup/action.yaml +++ b/.github/actions/rust-setup/action.yaml @@ -20,7 +20,7 @@ runs: # rust-cache action will cache ~/.cargo and ./target # https://github.com/Swatinem/rust-cache#cache-details - name: Run cargo cache - uses: Swatinem/rust-cache@359a70e43a0bb8a13953b04a90f76428b4959bb6 # pin@v2.2.0 + uses: Swatinem/rust-cache@23bce251a8cd2ffc3c1075eaa2367cf899916d84 # pin@v2.7.3 with: key: ${{ inputs.ADDITIONAL_KEY }} diff --git a/.github/actions/rust-unit-tests/action.yaml b/.github/actions/rust-unit-tests/action.yaml index e88a2c2e9a0..dffc54d028d 100644 --- a/.github/actions/rust-unit-tests/action.yaml +++ b/.github/actions/rust-unit-tests/action.yaml @@ -4,6 +4,9 @@ inputs: GIT_CREDENTIALS: description: "Optional credentials to pass to git. 
Useful if you need to pull private repos for dependencies" required: false + TRUNK_API_TOKEN: + description: "Api key for uploading test results to trunk.io" + required: false runs: using: composite @@ -28,11 +31,22 @@ runs: # Run the rust unit tests - name: Run all unit tests run: | - NEXTEST_EXPERIMENTAL_LIBTEST_JSON=1 cargo nextest run --profile ci --cargo-profile ci --locked --workspace --exclude smoke-test --exclude aptos-testcases --exclude aptos-keyless-circuit --retries 3 --no-fail-fast --message-format libtest-json > nextest_output.json || python3 .github/actions/rust-unit-tests/nextest_summary.py nextest_output.json "$GITHUB_STEP_SUMMARY" -f + cargo nextest run \ + --profile ci \ + --cargo-profile ci \ + --locked \ + --workspace \ + --exclude smoke-test \ + --exclude aptos-testcases \ + --exclude aptos-keyless-circuit \ + --retries 3 \ + --no-fail-fast \ + --message-format libtest-json > nextest_output.json || python3 .github/actions/rust-unit-tests/nextest_summary.py nextest_output.json "$GITHUB_STEP_SUMMARY" -f python3 .github/actions/rust-unit-tests/nextest_summary.py nextest_output.json "$GITHUB_STEP_SUMMARY" || echo "summary generation had an error" rm nextest_output.json shell: bash env: + NEXTEST_EXPERIMENTAL_LIBTEST_JSON: 1 INDEXER_DATABASE_URL: postgresql://postgres@localhost/postgres RUST_MIN_STACK: "4297152" MVP_TEST_ON_CI: "true" @@ -41,3 +55,14 @@ runs: CVC5_EXE: /home/runner/bin/cvc5 DOTNET_ROOT: /home/runner/.dotnet BOOGIE_EXE: /home/runner/.dotnet/tools/boogie + + - name: Upload results + # Run this step even if the test step ahead fails + if: "!cancelled() && ${{ inputs.TRUNK_API_TOKEN }}" + uses: trunk-io/analytics-uploader@main + with: + # Configured in the nextest.toml file + junit-paths: target/nextest/ci/junit.xml + org-slug: aptoslabs + token: ${{ inputs.TRUNK_API_TOKEN }} + continue-on-error: true diff --git a/.github/runs-on.yml b/.github/runs-on.yml index 5394f6bda60..721b25e3ab8 100644 --- a/.github/runs-on.yml +++ 
b/.github/runs-on.yml @@ -2,7 +2,8 @@ images: aptos-ubuntu-x64: platform: "linux" arch: "x64" - ami: "ami-07ce6402e4a205d44" + ami: "ami-09f7d4bfb08b771be" # image for testing, with gcloud sdk and kubectl + owner: "058264343338" # aptos-ci AWS account id aptos-ubuntu-x64-latest: platform: "linux" arch: "x64" diff --git a/.github/workflows/aptos-node-release.yaml b/.github/workflows/aptos-node-release.yaml index b665ace3f1c..e0aed3c5ed3 100644 --- a/.github/workflows/aptos-node-release.yaml +++ b/.github/workflows/aptos-node-release.yaml @@ -19,7 +19,7 @@ jobs: release-aptos-node: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 with: ref: ${{ inputs.branch }} diff --git a/.github/workflows/cargo-metadata-upload.yaml b/.github/workflows/cargo-metadata-upload.yaml index f6654df4755..81c702e9c93 100644 --- a/.github/workflows/cargo-metadata-upload.yaml +++ b/.github/workflows/cargo-metadata-upload.yaml @@ -11,7 +11,7 @@ permissions: id-token: write jobs: cargo-metadata: - runs-on: ubuntu-latest + runs-on: runs-on,cpu=4,ram=16,family=m7a+m7i-flex,image=aptos-ubuntu-x64,run-id=${{ github.run_id }},spot=co steps: - uses: actions/checkout@v4 - uses: dsherret/rust-toolchain-file@v1 @@ -20,7 +20,7 @@ jobs: with: workload_identity_provider: ${{ secrets.GCP_WORKLOAD_IDENTITY_PROVIDER }} service_account: ${{ secrets.GCP_SERVICE_ACCOUNT_EMAIL }} - - uses: "google-github-actions/setup-gcloud@v2" - shell: bash run: | - cargo metadata --all-features | gsutil cp - gs://aptos-core-cargo-metadata-public/metadata-${{ github.sha }}.json + set -ex + cargo metadata --all-features | gcloud storage cp - gs://aptos-core-cargo-metadata-public/metadata-${{ github.sha }}.json diff --git a/.github/workflows/check-minimum-revision.yaml b/.github/workflows/check-minimum-revision.yaml index a8ff5b694a0..9460df24645 100644 --- a/.github/workflows/check-minimum-revision.yaml +++ b/.github/workflows/check-minimum-revision.yaml @@ -20,7 +20,7 @@ jobs: 
check-minimum-revision: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 with: ref: ${{ env.GIT_SHA }} fetch-depth: 1000 diff --git a/.github/workflows/check-protos.yaml b/.github/workflows/check-protos.yaml index adc7cae232c..e3f9ac69c81 100644 --- a/.github/workflows/check-protos.yaml +++ b/.github/workflows/check-protos.yaml @@ -34,7 +34,7 @@ jobs: github.event.pull_request.auto_merge != null ) steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 # Install buf, which we use to generate code from the protos for Rust and TS. - name: Install buf diff --git a/.github/workflows/check-sdk-examples.yaml b/.github/workflows/check-sdk-examples.yaml deleted file mode 100644 index b45b52f1126..00000000000 --- a/.github/workflows/check-sdk-examples.yaml +++ /dev/null @@ -1,46 +0,0 @@ -name: "Check SDK examples" -on: - pull_request: - types: [labeled, opened, synchronize, reopened, auto_merge_enabled] - push: - branches: - - devnet - -jobs: - # Run the TS SDK examples. Note: There are small windows where these examples - # might be able to fail. For example, if we released a new devnet and SDK with - # an incompatible change, but haven't updated the examples to use the new SDK. - # That's why this is a separate job, because there are times when it could fail, - # whereas there is no reason why the test-sdk-confirm-client-generated-publish - # job should fail. These could also fail because we run them against devnet, - # whereas we run the test-sdk-confirm-client-generated-publish against a node - # built from the same commit and run as part of that CI job. 
- run-examples: - if: contains(github.event.pull_request.labels.*.name, 'CICD:non-required-tests') - runs-on: ubuntu-latest - env: - APTOS_NODE_URL: https://fullnode.devnet.aptoslabs.com - APTOS_FAUCET_URL: https://faucet.devnet.aptoslabs.com - FAUCET_AUTH_TOKEN: ${{ secrets.DEVNET_TAP_AUTH_TOKEN }} - steps: - - uses: actions/checkout@v3 - - uses: actions/setup-node@v3 - with: - node-version-file: .node-version - - uses: pnpm/action-setup@v4 - - # Run example code in typescript. - - uses: nick-fields/retry@7f8f3d9f0f62fe5925341be21c2e8314fd4f7c7c # pin@v2 - name: ts-example-test - with: - max_attempts: 5 - timeout_minutes: 20 - command: cd ./ecosystem/typescript/sdk/examples/typescript && pnpm install && pnpm test - - # Run example code in javascript. - - uses: nick-fields/retry@7f8f3d9f0f62fe5925341be21c2e8314fd4f7c7c # pin@v2 - name: js-example-test - with: - max_attempts: 5 - timeout_minutes: 20 - command: cd ./ecosystem/typescript/sdk/examples/javascript && pnpm install && pnpm test diff --git a/.github/workflows/cli-e2e-tests.yaml b/.github/workflows/cli-e2e-tests.yaml index 70a1415ba97..8799874e48c 100644 --- a/.github/workflows/cli-e2e-tests.yaml +++ b/.github/workflows/cli-e2e-tests.yaml @@ -30,7 +30,7 @@ jobs: contents: read id-token: write steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 if: ${{ !inputs.SKIP_JOB }} with: ref: ${{ inputs.GIT_SHA }} diff --git a/.github/workflows/cli-external-deps.yaml b/.github/workflows/cli-external-deps.yaml index edb70583557..50b44a423c8 100644 --- a/.github/workflows/cli-external-deps.yaml +++ b/.github/workflows/cli-external-deps.yaml @@ -11,7 +11,7 @@ jobs: check-dynamic-deps: runs-on: macos-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 if: ${{ !inputs.SKIP_JOB }} with: ref: ${{ inputs.GIT_SHA }} diff --git a/.github/workflows/cli-release.yaml b/.github/workflows/cli-release.yaml index 478c606d779..de2c8ae1b3b 100644 --- a/.github/workflows/cli-release.yaml +++ 
b/.github/workflows/cli-release.yaml @@ -25,7 +25,7 @@ jobs: name: "Build Ubuntu 20.04 binary" runs-on: ubuntu-20.04 steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 with: ref: ${{ github.event.inputs.source_git_ref_override }} - uses: aptos-labs/aptos-core/.github/actions/rust-setup@main @@ -41,7 +41,7 @@ jobs: name: "Build Ubuntu 22.04 binary" runs-on: ubuntu-22.04 steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 with: ref: ${{ github.event.inputs.source_git_ref_override }} - uses: aptos-labs/aptos-core/.github/actions/rust-setup@main @@ -57,7 +57,7 @@ jobs: name: "Build Windows binary" runs-on: windows-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 with: ref: ${{ github.event.inputs.source_git_ref_override }} - name: Build CLI diff --git a/.github/workflows/codeql-analysis.yaml b/.github/workflows/codeql-analysis.yaml index 7591c4c3577..3dd336c0c04 100644 --- a/.github/workflows/codeql-analysis.yaml +++ b/.github/workflows/codeql-analysis.yaml @@ -20,7 +20,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 # Initializes the CodeQL tools for scanning. 
- name: Initialize CodeQL diff --git a/.github/workflows/copy-images-to-dockerhub-nightly.yaml b/.github/workflows/copy-images-to-dockerhub-nightly.yaml index a38aad9e0d1..c9596eda0e1 100644 --- a/.github/workflows/copy-images-to-dockerhub-nightly.yaml +++ b/.github/workflows/copy-images-to-dockerhub-nightly.yaml @@ -13,7 +13,7 @@ jobs: check-repo: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: aptos-labs/aptos-core/.github/actions/check-aptos-core@main with: cancel-workflow: ${{ github.event_name == 'schedule' }} # Cancel the workflow if it is scheduled on a fork diff --git a/.github/workflows/coverage-move-only.yaml b/.github/workflows/coverage-move-only.yaml index a195452d5e0..40e29bb0a5d 100644 --- a/.github/workflows/coverage-move-only.yaml +++ b/.github/workflows/coverage-move-only.yaml @@ -32,7 +32,7 @@ jobs: timeout-minutes: 60 runs-on: runs-on,cpu=64,family=c7,hdd=500,image=aptos-ubuntu-x64,run-id=${{ github.run_id }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: aptos-labs/aptos-core/.github/actions/rust-setup@main - name: prepare move lang prover tooling. shell: bash diff --git a/.github/workflows/coverage.yaml b/.github/workflows/coverage.yaml index 2490c54b302..d5251473d8f 100644 --- a/.github/workflows/coverage.yaml +++ b/.github/workflows/coverage.yaml @@ -29,7 +29,7 @@ jobs: timeout-minutes: 720 runs-on: runs-on,cpu=64,family=c7,hdd=500,image=aptos-ubuntu-x64,run-id=${{ github.run_id }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 with: fetch-depth: 0 # get all the history because cargo xtest --change-since origin/main requires it. 
- uses: aptos-labs/aptos-core/.github/actions/rust-setup@main @@ -60,7 +60,7 @@ jobs: timeout-minutes: 720 # incremented from 240 due to execution time limit hit in cron runs-on: runs-on,cpu=64,family=c7,hdd=500,image=aptos-ubuntu-x64,run-id=${{ github.run_id }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 with: fetch-depth: 0 # get all the history because cargo xtest --change-since origin/main requires it. - uses: aptos-labs/aptos-core/.github/actions/rust-setup@main @@ -95,7 +95,7 @@ jobs: env: CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: actions/download-artifact@v4 with: name: lcov_unit diff --git a/.github/workflows/cut-release-branch.yaml b/.github/workflows/cut-release-branch.yaml index 46021d7b780..5eb8c793529 100644 --- a/.github/workflows/cut-release-branch.yaml +++ b/.github/workflows/cut-release-branch.yaml @@ -31,7 +31,7 @@ jobs: cut-release-branch: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 with: token: ${{ secrets.CUT_RELEASE_BRANCH_CREDENTIALS }} fetch-depth: 0 diff --git a/.github/workflows/docker-build-rosetta.yaml b/.github/workflows/docker-build-rosetta.yaml index 6139c8d8ed1..1aed7aa6680 100644 --- a/.github/workflows/docker-build-rosetta.yaml +++ b/.github/workflows/docker-build-rosetta.yaml @@ -19,7 +19,7 @@ jobs: build: runs-on: runs-on,cpu=64,family=c7,hdd=500,image=aptos-ubuntu-x64,run-id=${{ github.run_id }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: aptos-labs/aptos-core/.github/actions/buildx-setup@main diff --git a/.github/workflows/docker-build-test.yaml b/.github/workflows/docker-build-test.yaml index 22b1936207e..df603a423e0 100644 --- a/.github/workflows/docker-build-test.yaml +++ b/.github/workflows/docker-build-test.yaml @@ -120,7 +120,7 @@ jobs: outputs: only_docs_changed: ${{ steps.determine_file_changes.outputs.only_docs_changed }} steps: - - uses: 
actions/checkout@v3 + - uses: actions/checkout@v4 - name: Run the file change determinator id: determine_file_changes uses: ./.github/actions/file-change-determinator @@ -132,7 +132,7 @@ jobs: outputs: run_framework_upgrade_test: ${{ steps.determine_test_targets.outputs.run_framework_upgrade_test }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Run the test target determinator id: determine_test_targets uses: ./.github/actions/test-target-determinator @@ -288,11 +288,61 @@ jobs: # by this GHA. If there is a Forge namespace collision, Forge will pre-empt the existing test running in the namespace. FORGE_NAMESPACE: forge-e2e-${{ needs.determine-docker-build-metadata.outputs.targetCacheId }} SKIP_JOB: ${{ needs.file_change_determinator.outputs.only_docs_changed == 'true' }} + + # This job determines the last released docker image tag, which is used by forge compat test. + fetch-last-released-docker-image-tag: + needs: + - permission-check + # runs only when need to run forge-compat-test or forge-framework-upgrade-test + if: | + !failure() && !cancelled() && needs.permission-check.result == 'success' && ( + (github.event_name == 'push' && github.ref_name != 'main') || + github.event_name == 'workflow_dispatch' || + contains(github.event.pull_request.labels.*.name, 'CICD:run-e2e-tests') || + contains(github.event.pull_request.labels.*.name, 'CICD:run-framework-upgrade-test') || + github.event.pull_request.auto_merge != null || + contains(github.event.pull_request.body, '#e2e') + ) + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + fetch-depth: 1 + ref: ${{ github.ref }} + + # actions/get-latest-docker-image-tag requires docker utilities and having authenticated to internal docker image registries + - uses: aptos-labs/aptos-core/.github/actions/docker-setup@main + id: docker-setup + with: + GCP_WORKLOAD_IDENTITY_PROVIDER: ${{ secrets.GCP_WORKLOAD_IDENTITY_PROVIDER }} + 
GCP_SERVICE_ACCOUNT_EMAIL: ${{ secrets.GCP_SERVICE_ACCOUNT_EMAIL }} + EXPORT_GCP_PROJECT_VARIABLES: "false" + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + AWS_DOCKER_ARTIFACT_REPO: ${{ secrets.AWS_DOCKER_ARTIFACT_REPO }} + GIT_CREDENTIALS: ${{ secrets.GIT_CREDENTIALS }} + - name: Get Docker Image Tag + uses: ./.github/actions/determine-or-use-target-branch-and-get-last-released-image + id: get-docker-image-tag + with: + base-branch: ${{ github.base_ref }} + variants: "failpoints performance" + + - name: Add Image Tag to Step Summary + run: | + echo "## Image Tag for compat tests" >> $GITHUB_STEP_SUMMARY + echo "IMAGE_TAG: ${{ steps.get-docker-image-tag.outputs.IMAGE_TAG }}" >> $GITHUB_STEP_SUMMARY + echo "TARGET_BRANCH: ${{ steps.get-docker-image-tag.outputs.TARGET_BRANCH }}" >> $GITHUB_STEP_SUMMARY + outputs: + IMAGE_TAG: ${{ steps.get-docker-image-tag.outputs.IMAGE_TAG }} + # Run e2e compat test against testnet branch. This is a PR required job. forge-compat-test: - needs: + needs: - permission-check + - fetch-last-released-docker-image-tag - determine-docker-build-metadata - rust-images - rust-images-failpoints @@ -312,16 +362,17 @@ jobs: with: GIT_SHA: ${{ needs.determine-docker-build-metadata.outputs.gitSha }} FORGE_TEST_SUITE: compat - IMAGE_TAG: d1bf834728a0cf166d993f4728dfca54f3086fb0 #aptos-node-v1.18.0 + IMAGE_TAG: ${{ needs.fetch-last-released-docker-image-tag.outputs.IMAGE_TAG }} FORGE_RUNNER_DURATION_SECS: 300 COMMENT_HEADER: forge-compat FORGE_NAMESPACE: forge-compat-${{ needs.determine-docker-build-metadata.outputs.targetCacheId }} SKIP_JOB: ${{ needs.file_change_determinator.outputs.only_docs_changed == 'true' }} - + # Run forge framework upgradability test. This is a PR required job. 
forge-framework-upgrade-test: needs: - permission-check + - fetch-last-released-docker-image-tag - determine-docker-build-metadata - rust-images - rust-images-failpoints @@ -340,7 +391,7 @@ jobs: with: GIT_SHA: ${{ needs.determine-docker-build-metadata.outputs.gitSha }} FORGE_TEST_SUITE: framework_upgrade - IMAGE_TAG: d1bf834728a0cf166d993f4728dfca54f3086fb0 #aptos-node-v1.18.0 + IMAGE_TAG: ${{ needs.fetch-last-released-docker-image-tag.outputs.IMAGE_TAG }} FORGE_RUNNER_DURATION_SECS: 3600 COMMENT_HEADER: forge-framework-upgrade FORGE_NAMESPACE: forge-framework-upgrade-${{ needs.determine-docker-build-metadata.outputs.targetCacheId }} diff --git a/.github/workflows/docker-indexer-grpc-test.yaml b/.github/workflows/docker-indexer-grpc-test.yaml index dc78a9dc769..8cfb8cf1b1b 100644 --- a/.github/workflows/docker-indexer-grpc-test.yaml +++ b/.github/workflows/docker-indexer-grpc-test.yaml @@ -23,7 +23,7 @@ jobs: IMAGE_TAG: ${{ inputs.GIT_SHA || 'devnet' }} # hardcode to a known good build when not running on workflow_call steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 with: ref: ${{ inputs.GIT_SHA || github.event.pull_request.head.sha || github.sha }} diff --git a/.github/workflows/faucet-tests-main.yaml b/.github/workflows/faucet-tests-main.yaml index 0ca51a2f847..dafa08ff592 100644 --- a/.github/workflows/faucet-tests-main.yaml +++ b/.github/workflows/faucet-tests-main.yaml @@ -51,7 +51,7 @@ jobs: if: contains(github.event.pull_request.labels.*.name, 'CICD:non-required-tests') runs-on: runs-on,cpu=64,family=c7,hdd=500,image=aptos-ubuntu-x64,run-id=${{ github.run_id }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 if: ${{ !inputs.SKIP_JOB }} with: ref: ${{ env.GIT_SHA }} diff --git a/.github/workflows/faucet-tests-prod.yaml b/.github/workflows/faucet-tests-prod.yaml index 940ef883a1e..c731966c090 100644 --- a/.github/workflows/faucet-tests-prod.yaml +++ b/.github/workflows/faucet-tests-prod.yaml @@ -39,7 +39,7 @@ jobs: needs: 
[permission-check] runs-on: runs-on,cpu=64,family=c7,hdd=500,image=aptos-ubuntu-x64,run-id=${{ github.run_id }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: aptos-labs/aptos-core/.github/actions/docker-setup@main with: GCP_WORKLOAD_IDENTITY_PROVIDER: ${{ secrets.GCP_WORKLOAD_IDENTITY_PROVIDER }} @@ -62,7 +62,7 @@ jobs: contents: read id-token: write steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: aptos-labs/aptos-core/.github/actions/docker-setup@main with: GCP_WORKLOAD_IDENTITY_PROVIDER: ${{ secrets.GCP_WORKLOAD_IDENTITY_PROVIDER }} diff --git a/.github/workflows/find-packages-with-undeclared-feature-dependencies.yaml b/.github/workflows/find-packages-with-undeclared-feature-dependencies.yaml index 45dfe907189..0087168d290 100644 --- a/.github/workflows/find-packages-with-undeclared-feature-dependencies.yaml +++ b/.github/workflows/find-packages-with-undeclared-feature-dependencies.yaml @@ -6,6 +6,6 @@ jobs: find-packages-with-undeclared-feature-dependencies: runs-on: runs-on,cpu=64,family=c7,hdd=500,image=aptos-ubuntu-x64,run-id=${{ github.run_id }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: aptos-labs/aptos-core/.github/actions/rust-setup@main - run: scripts/find-packages-with-undeclared-feature-dependencies.sh diff --git a/.github/workflows/forge-pfn.yaml b/.github/workflows/forge-pfn.yaml index 7db14dd04ab..660ed535217 100644 --- a/.github/workflows/forge-pfn.yaml +++ b/.github/workflows/forge-pfn.yaml @@ -40,7 +40,7 @@ jobs: IMAGE_TAG: ${{ steps.get-docker-image-tag.outputs.IMAGE_TAG }} BRANCH: ${{ steps.determine-test-branch.outputs.BRANCH }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Determine branch based on cadence id: determine-test-branch diff --git a/.github/workflows/forge-stable.yaml b/.github/workflows/forge-stable.yaml index 99387204867..82d9aef76ea 100644 --- a/.github/workflows/forge-stable.yaml +++ 
b/.github/workflows/forge-stable.yaml @@ -47,10 +47,11 @@ jobs: runs-on: ubuntu-latest outputs: IMAGE_TAG: ${{ steps.get-docker-image-tag.outputs.IMAGE_TAG }} + IMAGE_TAG_FOR_COMPAT_TEST: ${{ steps.get-last-released-image-tag-for-compat-test.outputs.IMAGE_TAG }} BRANCH: ${{ steps.determine-test-branch.outputs.BRANCH }} BRANCH_HASH: ${{ steps.hash-branch.outputs.BRANCH_HASH }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Determine branch based on cadence id: determine-test-branch @@ -110,9 +111,17 @@ jobs: branch: ${{ steps.determine-test-branch.outputs.BRANCH }} variants: "failpoints performance" + - uses: ./.github/actions/determine-or-use-target-branch-and-get-last-released-image + id: get-last-released-image-tag-for-compat-test + with: + base-branch: ${{ steps.determine-test-branch.outputs.BRANCH }} + variants: "failpoints performance" + - name: Write summary run: | IMAGE_TAG=${{ steps.get-docker-image-tag.outputs.IMAGE_TAG }} + IMAGE_TAG_FOR_COMPAT_TEST=${{ steps.get-last-released-image-tag-for-compat-test.outputs.IMAGE_TAG }} + TARGET_BRANCH_TO_FETCH_IMAGE_FOR_COMPAT_TEST=${{ steps.get-last-released-image-tag-for-compat-test.outputs.TARGET_BRANCH }} BRANCH=${{ steps.determine-test-branch.outputs.BRANCH }} if [ -n "${BRANCH}" ]; then echo "BRANCH: [${BRANCH}](https://github.com/${{ github.repository }}/tree/${BRANCH})" >> $GITHUB_STEP_SUMMARY @@ -128,7 +137,7 @@ jobs: uses: aptos-labs/aptos-core/.github/workflows/workflow-run-forge.yaml@main secrets: inherit with: - IMAGE_TAG: d1bf834728a0cf166d993f4728dfca54f3086fb0 #aptos-node-v1.18.0 + IMAGE_TAG: ${{ needs.determine-test-metadata.outputs.IMAGE_TAG_FOR_COMPAT_TEST }} FORGE_NAMESPACE: forge-framework-upgrade-${{ needs.determine-test-metadata.outputs.BRANCH_HASH }} FORGE_RUNNER_DURATION_SECS: 7200 # Run for 2 hours FORGE_TEST_SUITE: framework_upgrade @@ -269,7 +278,7 @@ jobs: FORGE_RUNNER_DURATION_SECS: 300 # Run for 5 minutes # This will upgrade from testnet branch to the latest 
main FORGE_TEST_SUITE: compat - IMAGE_TAG: d1bf834728a0cf166d993f4728dfca54f3086fb0 #aptos-node-v1.18.0 + IMAGE_TAG: ${{ needs.determine-test-metadata.outputs.IMAGE_TAG_FOR_COMPAT_TEST }} GIT_SHA: ${{ needs.determine-test-metadata.outputs.IMAGE_TAG }} # this is the git ref to checkout POST_TO_SLACK: true diff --git a/.github/workflows/forge-unstable.yaml b/.github/workflows/forge-unstable.yaml index bfc92600722..5ff54030cf4 100644 --- a/.github/workflows/forge-unstable.yaml +++ b/.github/workflows/forge-unstable.yaml @@ -40,7 +40,7 @@ jobs: IMAGE_TAG: ${{ steps.get-docker-image-tag.outputs.IMAGE_TAG }} BRANCH: ${{ steps.determine-test-branch.outputs.BRANCH }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Determine branch based on cadence id: determine-test-branch diff --git a/.github/workflows/fullnode-execute-devnet-main.yaml b/.github/workflows/fullnode-execute-devnet-main.yaml index b800d1f6f10..4c9553695cb 100644 --- a/.github/workflows/fullnode-execute-devnet-main.yaml +++ b/.github/workflows/fullnode-execute-devnet-main.yaml @@ -17,7 +17,7 @@ jobs: check-repo: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: aptos-labs/aptos-core/.github/actions/check-aptos-core@main with: cancel-workflow: ${{ github.event_name == 'schedule' }} # Cancel the workflow if it is scheduled on a fork diff --git a/.github/workflows/fullnode-execute-devnet-stable.yaml b/.github/workflows/fullnode-execute-devnet-stable.yaml index e0d013b1193..ea82a0d469b 100644 --- a/.github/workflows/fullnode-execute-devnet-stable.yaml +++ b/.github/workflows/fullnode-execute-devnet-stable.yaml @@ -17,7 +17,7 @@ jobs: check-repo: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: aptos-labs/aptos-core/.github/actions/check-aptos-core@main with: cancel-workflow: ${{ github.event_name == 'schedule' }} # Cancel the workflow if it is scheduled on a fork diff --git 
a/.github/workflows/fullnode-fast-mainnet-main.yaml b/.github/workflows/fullnode-fast-mainnet-main.yaml index 2fe43beb801..e298c82ee39 100644 --- a/.github/workflows/fullnode-fast-mainnet-main.yaml +++ b/.github/workflows/fullnode-fast-mainnet-main.yaml @@ -17,7 +17,7 @@ jobs: check-repo: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: aptos-labs/aptos-core/.github/actions/check-aptos-core@main with: cancel-workflow: ${{ github.event_name == 'schedule' }} # Cancel the workflow if it is scheduled on a fork diff --git a/.github/workflows/fullnode-fast-mainnet-stable.yaml b/.github/workflows/fullnode-fast-mainnet-stable.yaml index 53afaa9d554..d1bc46d01c0 100644 --- a/.github/workflows/fullnode-fast-mainnet-stable.yaml +++ b/.github/workflows/fullnode-fast-mainnet-stable.yaml @@ -17,7 +17,7 @@ jobs: check-repo: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: aptos-labs/aptos-core/.github/actions/check-aptos-core@main with: cancel-workflow: ${{ github.event_name == 'schedule' }} # Cancel the workflow if it is scheduled on a fork diff --git a/.github/workflows/fullnode-fast-testnet-main.yaml b/.github/workflows/fullnode-fast-testnet-main.yaml index 56dbeefa84a..651669603d9 100644 --- a/.github/workflows/fullnode-fast-testnet-main.yaml +++ b/.github/workflows/fullnode-fast-testnet-main.yaml @@ -17,7 +17,7 @@ jobs: check-repo: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: aptos-labs/aptos-core/.github/actions/check-aptos-core@main with: cancel-workflow: ${{ github.event_name == 'schedule' }} # Cancel the workflow if it is scheduled on a fork diff --git a/.github/workflows/fullnode-fast-testnet-stable.yaml b/.github/workflows/fullnode-fast-testnet-stable.yaml index 924ef153d5b..5ef7ead938a 100644 --- a/.github/workflows/fullnode-fast-testnet-stable.yaml +++ b/.github/workflows/fullnode-fast-testnet-stable.yaml @@ -17,7 
+17,7 @@ jobs: check-repo: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: aptos-labs/aptos-core/.github/actions/check-aptos-core@main with: cancel-workflow: ${{ github.event_name == 'schedule' }} # Cancel the workflow if it is scheduled on a fork diff --git a/.github/workflows/fullnode-intelligent-devnet-main.yaml b/.github/workflows/fullnode-intelligent-devnet-main.yaml index 465ce361276..ed42c4ab600 100644 --- a/.github/workflows/fullnode-intelligent-devnet-main.yaml +++ b/.github/workflows/fullnode-intelligent-devnet-main.yaml @@ -18,7 +18,7 @@ jobs: check-repo: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: aptos-labs/aptos-core/.github/actions/check-aptos-core@main with: cancel-workflow: ${{ github.event_name == 'schedule' }} # Cancel the workflow if it is scheduled on a fork diff --git a/.github/workflows/fullnode-intelligent-mainnet-main.yaml b/.github/workflows/fullnode-intelligent-mainnet-main.yaml index 4475d6dc60a..b6df15fd54e 100644 --- a/.github/workflows/fullnode-intelligent-mainnet-main.yaml +++ b/.github/workflows/fullnode-intelligent-mainnet-main.yaml @@ -18,7 +18,7 @@ jobs: check-repo: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: aptos-labs/aptos-core/.github/actions/check-aptos-core@main with: cancel-workflow: ${{ github.event_name == 'schedule' }} # Cancel the workflow if it is scheduled on a fork diff --git a/.github/workflows/fullnode-intelligent-mainnet-stable.yaml b/.github/workflows/fullnode-intelligent-mainnet-stable.yaml index 34a16ed2833..bb32af33046 100644 --- a/.github/workflows/fullnode-intelligent-mainnet-stable.yaml +++ b/.github/workflows/fullnode-intelligent-mainnet-stable.yaml @@ -18,7 +18,7 @@ jobs: check-repo: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: aptos-labs/aptos-core/.github/actions/check-aptos-core@main with: 
cancel-workflow: ${{ github.event_name == 'schedule' }} # Cancel the workflow if it is scheduled on a fork diff --git a/.github/workflows/fullnode-intelligent-testnet-main.yaml b/.github/workflows/fullnode-intelligent-testnet-main.yaml index 407bec2f47a..7ddc46d3a45 100644 --- a/.github/workflows/fullnode-intelligent-testnet-main.yaml +++ b/.github/workflows/fullnode-intelligent-testnet-main.yaml @@ -18,7 +18,7 @@ jobs: check-repo: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: aptos-labs/aptos-core/.github/actions/check-aptos-core@main with: cancel-workflow: ${{ github.event_name == 'schedule' }} # Cancel the workflow if it is scheduled on a fork diff --git a/.github/workflows/indexer-grpc-in-memory-cache-benchmark.yaml b/.github/workflows/indexer-grpc-in-memory-cache-benchmark.yaml index c3548c30b08..5f8028973ef 100644 --- a/.github/workflows/indexer-grpc-in-memory-cache-benchmark.yaml +++ b/.github/workflows/indexer-grpc-in-memory-cache-benchmark.yaml @@ -8,7 +8,7 @@ jobs: run-indexer-grpc-in-memory-cache-benchmark: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Install grpcurl run: curl -sSL "https://github.com/fullstorydev/grpcurl/releases/download/v1.8.7/grpcurl_1.8.7_linux_x86_64.tar.gz" | sudo tar -xz -C /usr/local/bin - name: Rust setup diff --git a/.github/workflows/indexer-grpc-integration-tests.yaml b/.github/workflows/indexer-grpc-integration-tests.yaml index 4a1c2aa93b1..e9d73371831 100644 --- a/.github/workflows/indexer-grpc-integration-tests.yaml +++ b/.github/workflows/indexer-grpc-integration-tests.yaml @@ -39,7 +39,7 @@ jobs: IMAGE_TAG: devnet steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Install grpcurl run: curl -sSL "https://github.com/fullstorydev/grpcurl/releases/download/v1.8.7/grpcurl_1.8.7_linux_x86_64.tar.gz" | sudo tar -xz -C /usr/local/bin diff --git a/.github/workflows/keyless-circuit-daily-test.yaml 
b/.github/workflows/keyless-circuit-daily-test.yaml index 8fecb6df2c5..8ddc60e6dfe 100644 --- a/.github/workflows/keyless-circuit-daily-test.yaml +++ b/.github/workflows/keyless-circuit-daily-test.yaml @@ -23,7 +23,7 @@ jobs: runs-on: runs-on,cpu=64,family=c7,hdd=500,image=aptos-ubuntu-x64,run-id=${{ github.run_id }} timeout-minutes: 30 steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 with: fetch-depth: 0 # get all the history because cargo xtest --change-since origin/main requires it. - uses: ./.github/actions/rust-setup diff --git a/.github/workflows/links.yml b/.github/workflows/links.yml index a556b8731c4..ccb77ca91de 100644 --- a/.github/workflows/links.yml +++ b/.github/workflows/links.yml @@ -17,7 +17,7 @@ jobs: linkChecker: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Link Checker id: lychee diff --git a/.github/workflows/lint-test.yaml b/.github/workflows/lint-test.yaml index 67c046d6a2b..963729aa4ad 100644 --- a/.github/workflows/lint-test.yaml +++ b/.github/workflows/lint-test.yaml @@ -164,20 +164,7 @@ jobs: ref: ${{ github.event.pull_request.head.sha }} fetch-depth: 0 # Fetch all git history for accurate target determination - - name: Check if dev_setup.sh has changed - id: check_changes - run: | - git fetch origin ${{ github.event.pull_request.base.sha }} - if git diff --name-only ${{ github.event.pull_request.base.sha }} ${{ github.event.pull_request.head.sha }} | grep -q 'scripts/dev_setup.sh'; then - echo "scripts/dev_setup.sh has changed" - echo "dev_setup_changed=true" >> $GITHUB_ENV - else - echo "scripts/dev_setup.sh has not changed" - echo "dev_setup_changed=false" >> $GITHUB_ENV - fi - - name: Run dev_setup.sh - if: env.dev_setup_changed == 'true' run: | scripts/dev_setup.sh -b -p -r -y -P -t @@ -202,6 +189,7 @@ jobs: uses: ./.github/actions/rust-unit-tests with: GIT_CREDENTIALS: ${{ secrets.GIT_CREDENTIALS }} + TRUNK_API_TOKEN: ${{ secrets.TRUNK_API_TOKEN }} # Run the 
cached packages build. This is a PR required job. rust-build-cached-packages: diff --git a/.github/workflows/move-test-compiler-v2.yaml b/.github/workflows/move-test-compiler-v2.yaml index 449892ab72f..047acb6fa44 100644 --- a/.github/workflows/move-test-compiler-v2.yaml +++ b/.github/workflows/move-test-compiler-v2.yaml @@ -28,7 +28,7 @@ jobs: rust-move-tests: runs-on: runs-on,cpu=64,family=c7,hdd=500,image=aptos-ubuntu-x64,run-id=${{ github.run_id }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Run Aptos Move tests with compiler V2 uses: ./.github/actions/move-tests-compiler-v2 with: diff --git a/.github/workflows/node-api-compatibility-tests.yaml b/.github/workflows/node-api-compatibility-tests.yaml index 0593a7ec533..1d5631e6514 100644 --- a/.github/workflows/node-api-compatibility-tests.yaml +++ b/.github/workflows/node-api-compatibility-tests.yaml @@ -48,7 +48,7 @@ jobs: contents: read id-token: write steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 if: ${{ !inputs.SKIP_JOB }} with: ref: ${{ env.GIT_SHA }} @@ -108,21 +108,4 @@ jobs: git diff --no-index --ignore-space-at-eol --ignore-blank-lines ${{ runner.temp }}/specs/spec.json api/doc/spec.json if: ${{ !inputs.SKIP_JOB }} - # Run package install. If install fails, it probably means the lockfile - # was not included in the commit. - - run: cd ./ecosystem/typescript/sdk && pnpm install --frozen-lockfile - if: ${{ !inputs.SKIP_JOB }} - - # Ensure any changes to the generated client were checked in. - - run: cd ./ecosystem/typescript/sdk && pnpm generate-client -o /tmp/generated_client - if: ${{ !inputs.SKIP_JOB }} - - - run: - echo "If this step fails, run the following command locally to fix it:" - echo "cd ecosystem/typescript/sdk && pnpm generate-client" - git diff --no-index --ignore-space-at-eol --ignore-blank-lines ./ecosystem/typescript/sdk/src/generated/ /tmp/generated_client/ - if: ${{ !inputs.SKIP_JOB }} - - # Print out whether the job was skipped. 
- - run: echo "Skipping node API compatibility tests!" - if: ${{ inputs.SKIP_JOB }} + # TODO: Need to use the other SDKs here to verify correctness \ No newline at end of file diff --git a/.github/workflows/prover-daily-test.yaml b/.github/workflows/prover-daily-test.yaml index 5ed59ec5389..fcaece5daf2 100644 --- a/.github/workflows/prover-daily-test.yaml +++ b/.github/workflows/prover-daily-test.yaml @@ -25,7 +25,7 @@ jobs: runs-on: runs-on,cpu=64,family=c7,hdd=500,image=aptos-ubuntu-x64,run-id=${{ github.run_id }} timeout-minutes: ${{ github.event_name == 'pull_request' && 10 || 480}} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 with: fetch-depth: 0 # get all the history because cargo xtest --change-since origin/main requires it. - uses: ./.github/actions/move-prover-setup diff --git a/.github/workflows/prune-old-workflow-runs.yaml b/.github/workflows/prune-old-workflow-runs.yaml index 8f715eb35b5..e7e736f195c 100644 --- a/.github/workflows/prune-old-workflow-runs.yaml +++ b/.github/workflows/prune-old-workflow-runs.yaml @@ -16,7 +16,7 @@ jobs: if: github.repository == 'aptos-labs/aptos-core' runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: actions/setup-node@v3 with: node-version-file: .node-version diff --git a/.github/workflows/replay-verify.yaml b/.github/workflows/replay-verify.yaml index ce6f9a48aa1..ae44009412a 100644 --- a/.github/workflows/replay-verify.yaml +++ b/.github/workflows/replay-verify.yaml @@ -18,9 +18,20 @@ on: options: [testnet, mainnet, all] default: all description: The chain name to test. If not specified, it will test both testnet and mainnet. + TESTNET_BUCKET: + required: false + type: string + description: The bucket to use for testnet replay. If not specified, it will use aptos-testnet-backup. + default: aptos-testnet-backup + MAINNET_BUCKET: + required: false + type: string + description: The bucket to use for mainnet replay. 
If not specified, it will use aptos-mainnet-backup. + default: aptos-mainnet-backup pull_request: paths: - ".github/workflows/replay-verify.yaml" + - ".github/workflows/workflow-run-replay-verify.yaml" - "testsuite/replay_verify.py" schedule: - cron: "0 22 * * 0,2,4" # The main branch cadence. This runs every Sun,Tues,Thurs @@ -36,7 +47,7 @@ jobs: runs-on: ubuntu-latest steps: # checkout the repo first, so check-aptos-core can use it and cancel the workflow if necessary - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: ./.github/actions/check-aptos-core with: cancel-workflow: ${{ github.event_name == 'schedule' }} # Cancel the workflow if it is scheduled on a fork @@ -52,14 +63,18 @@ jobs: with: GIT_SHA: ${{ inputs.GIT_SHA }} # replay-verify config - BUCKET: aptos-testnet-backup + BUCKET: ${{ inputs.TESTNET_BUCKET || 'aptos-testnet-backup' }} SUB_DIR: e1 - HISTORY_START: 250000000 # TODO: We need an exhaustive list of txns_to_skip before we can set this to 0. - TXNS_TO_SKIP: 46874937 151020059 409163615 409163669 409163708 409163774 409163845 409163955 409164059 409164191 414625832 761241943 761247376 761483628 761492625 761494608 761500902 761500975 761504816 761508349 761508397 761508969 761509180 761515997 761531553 761531645 761553168 761553288 761659735 761660965 761661629 761662335 761662951 761664420 761666676 761666846 761667070 761667337 761669444 761670483 761671140 761671243 761671650 761672111 761672821 761674579 761674907 761675526 761709313 761709478 761709835 761846021 761846029 761846031 761846038 761846039 761846040 761846041 761846042 761846043 761846044 761846045 761846046 761846047 761846048 761846049 761846050 761846051 761846052 761846053 761846054 761846055 761846056 761846057 761846058 761846059 761846060 761846061 761846062 761846063 761846064 761846065 761846066 761846067 761846068 761846069 761846070 761846071 761846072 761846073 761846074 761846075 761846076 761846077 761846078 761846079 761846080 761846081 761846082 
761846083 761846084 761846085 761846086 761846087 761846088 761846089 761846090 761846091 761846092 761846093 761846094 761846095 761846096 761846097 761846098 761846099 761846100 761846101 761846102 761846103 761846104 761846105 761846106 761846107 761846108 761846109 761846110 761846111 761846112 761846113 761846114 761846115 761846137 761846138 761846139 761846140 761846141 761846142 761846143 761846144 761846145 761846146 761846147 761846148 761846149 761846150 761846151 761846152 761846153 761846154 761846155 761846156 761846157 761846158 761846159 761846160 761846161 761846162 761846163 761846164 761846165 761846166 761846167 761846168 761846169 761846170 761846171 761846172 761846173 761846174 761846175 761846176 761846177 761846178 761846179 761846180 761846181 761846182 761846183 761846184 761846185 761846186 761846187 761846188 761846189 761846190 761846191 761846192 761846193 761846194 761846195 761846196 761846197 761846198 761846199 761846200 761846201 761846202 761846203 761846204 761846205 761846206 761846207 761846208 761846209 761846210 761846211 761846212 761846213 761846214 761846215 761846216 761846217 761846218 761846219 761846220 761846223 761846224 761846225 761846226 761846227 761846228 761846229 761846230 761846231 761846232 761846233 761846234 761846235 761846236 761846237 761846238 761846239 761846240 761846241 761846242 761846243 761846244 761846245 761846246 761846247 761846248 761846249 761846250 761846251 761846252 761846253 761846254 761846255 761846256 761846257 761846258 761846259 761846260 761846261 761846262 761846263 761846264 761846265 761846266 761846267 761846268 761846269 761846270 761846271 761846272 761846273 761846274 761846275 761846276 761846277 761846278 761846279 761846280 761846281 761846282 761846283 761846284 761846285 761846286 761846287 761846288 761846289 761846290 761846291 761846292 761846293 761846294 761846295 761846296 761846297 761846298 761846299 761846300 761846301 761846302 761846303 761846304 761846305 
761846306 761846307 761846308 761846309 761846310 761846311 761846312 761846313 761846314 761846315 761846316 761846317 761846318 761846319 761846320 761846321 761846322 761846323 761846324 761846325 761846326 761846327 761846328 761846329 761846330 761846331 761846332 761846333 761846334 761846335 761846336 761846337 761846338 761846339 761846340 761846341 761846342 761846343 761846344 761846345 761846346 761846347 761846348 761846349 761846350 761846351 761846352 761846353 761846354 761846355 761846356 761846357 761846358 761846359 761846360 761846361 761846362 761846363 761846364 761846365 761846366 761846367 761846368 761846369 761846370 761846371 761846372 761846373 761846374 761846375 761846376 761846377 761846378 761846379 761846380 761846381 761846382 761846383 761846384 761846385 761846386 761846387 761846388 761846389 761846390 761846391 761846392 761846393 761846394 761846395 761846396 761846397 761846398 761846399 761846400 761846401 761846402 761846403 761846404 761846405 761846406 761846407 761846408 761846409 761846410 761846411 761846412 761846413 761846414 761846415 761846416 761846417 761846418 761846419 761846420 761846421 761846422 761846423 761846424 761846425 761846426 761846427 761846428 761846429 761846430 761846431 761846432 761846433 761846434 761846435 761846436 761846437 761846438 761846439 761846440 761846441 761846442 761846443 761846444 761846445 761846446 761846447 761846448 761846449 761846450 761846451 761846452 761846453 761846454 761846455 761846456 761846457 761846458 761846459 761846460 761846461 761846462 761846463 761846464 761846465 761846466 761846467 761846468 761846469 761846470 761846471 761846472 761846473 761846474 761846475 761846476 761846477 761846478 761846479 761846480 761846481 761846482 761846483 761846484 761846485 761846486 761846487 761846488 761846489 761846490 761846491 761846492 761846493 761846494 761846495 761846496 761846497 761846498 761846499 761846500 761846501 761846502 761846503 761846504 761846505 
761846506 761846507 761846508 761846509 761846510 761846511 761846512 761846513 761846514 761846515 761846516 761846517 761846518 761846519 761846520 761846521 761846522 761846523 761846524 761846525 761846526 761846527 761846528 761846529 761846530 761846538 761846539 761846540 761846541 761846542 761846543 761846544 761846545 761846546 761846547 761846548 761846549 761846550 761846551 761846552 761846553 761846554 761846555 761846556 761846557 761846558 761846559 761846560 761846561 761846562 761846563 761846564 761846565 761846566 761846567 761846568 761846569 761846570 761846571 761846572 761846573 761846574 761846575 761846576 761846577 761846578 761846579 761846580 761846581 761846582 761846583 761846584 761846585 761846586 761846587 761846588 761846589 761846590 761846591 761846592 761846593 761846594 761846595 761846596 761846597 761846598 761846599 761846600 761846601 761846602 761846603 761846604 761846605 761846606 761846607 761846608 761846609 761846610 761846611 761846612 761846613 761846614 761846615 761846616 761846617 761846618 761846619 761846620 761846621 761846622 761846623 761846624 761846625 761846626 761846627 761846628 761846629 761846630 761846631 761846632 761846633 761846634 761846635 761846636 761846637 761846638 761846639 761846640 761846641 761846642 761846643 761846644 761846645 761846646 761846647 761846648 761846649 761846650 761846651 761846652 761846653 761846654 761846655 761846656 761846657 761888898 761888908 761888909 761888910 761888911 761888912 761888913 761888914 761888915 761888916 761888917 761888918 761888919 761888920 761888921 761888922 761888923 761888924 761888925 761888926 761888927 761888928 761888929 761888930 761888931 761888932 761888940 761888941 761888943 761888944 761888945 761888946 761888947 761888948 761888949 761888950 761888951 761888952 761888953 761888954 761888955 761888956 761888957 761888958 761888959 761888960 761888961 761888962 761888963 761888964 761888965 761888966 761888967 761888968 761888969 
761888970 761888971 761888972 761888973 761888974 761888975 761888976 761888977 761888978 761888979 761888980 761888981 761888982 761888983 761888984 761888985 761888986 761888987 761888988 761888993 761888994 761888995 761888996 761888997 761888998 761888999 761889000 761889001 761889002 761889003 761889004 761889005 761889006 761889007 761889008 761889009 761889010 761889011 761889012 761889013 761889014 761889015 761889016 761889017 761889018 761889019 761889020 761889021 761889022 761889023 761889024 761889025 761889026 761889027 761889028 761889029 761889030 761889031 761889032 761889033 761889034 761889035 761889036 761889037 761889038 761889039 761889040 761889041 761889042 761889043 761889046 761889047 761889048 761889049 761889050 761889051 761889052 761889053 761889054 761889055 761889056 761889057 761889058 761889059 761889060 761889061 761889062 761889063 761889064 761889065 761889066 761889067 761889068 761889069 761889070 761889071 761889072 761889073 761889074 761889075 761889076 761889077 761889078 761889079 761889080 761889081 761889082 761889083 761889084 761889085 761889086 761889087 761889088 761889089 761889090 761889091 761889092 761889093 761889094 761889095 761889096 761889097 761889098 761889099 761889100 761889101 761889102 761889103 761889104 761889105 761889106 761889107 761889108 761889109 761889110 761889111 761889112 761889113 761889114 761889115 761889116 761889117 761889118 761889119 761889120 761889121 761889122 761889123 761889124 761889125 761889126 761889127 761889128 761889129 761889130 761889131 761889132 761889133 761889134 761889135 761889136 761889137 761889138 761889139 761889140 761889141 761889142 761889143 761889146 761889147 761889148 761889149 761889150 761889151 761889152 761889153 761889154 761889155 761889156 761889157 761889158 761889159 761889160 761889161 761889162 761889163 761889164 761889165 761889166 761889167 761889168 761889169 761889170 761889171 761889172 761889173 761889174 761889175 761889176 761889177 
761889178 761889179 761889180 761889181 761889182 761889183 761889184 761889185 761889186 761889187 761889188 761889189 761889190 761889191 761889192 761889193 761889194 761889195 761889196 761889197 761889198 761889199 761889200 761889201 761889202 761889203 761889204 761889205 761889206 761889207 761889208 761889209 761889210 761889211 761889212 761889213 761889214 761889215 761889216 761889217 761889218 761889219 761889220 761889221 761889222 761889223 761889224 761889225 761889226 761889227 761889228 761889229 761889230 761889231 761889232 761889233 761889234 761889235 761889236 761889237 761889238 761889239 761889240 761889241 761889242 761889243 761889244 761889245 761889246 761889247 761889248 761889249 761889250 761889251 761889252 761889253 761889254 761889255 761889256 761889257 761889258 761889259 761889260 761889261 761889262 761889263 761889264 761889265 761889266 761889267 761889268 761889269 761889270 761889271 761889272 761889273 761889274 761889275 761889276 761889277 761889278 761889279 761889280 761889281 761889282 761889283 761889284 761889285 761889286 761889287 761889288 761889289 761889290 761889291 761889292 761889293 761889294 761889295 761889296 761889297 761889298 761889299 761889300 761889301 761889302 761889303 761889304 761889305 761889306 761889307 761889308 761889309 761889310 761889311 761889312 761889313 761889314 761889315 761889316 761889317 761889318 761889319 761889320 761889321 761889322 761889323 761889324 761889325 761889326 761889327 761889328 761889329 761889330 761889331 761889332 761889333 761889334 761889335 761889336 761889337 761889338 761889339 761889340 761889341 761889342 761889343 761889344 761889345 761889372 761889373 761889374 761889375 761889376 761889377 761889379 761889380 761889381 761889391 761889392 761889393 761889394 761889395 761889396 761889397 761889398 761889399 761889400 761889401 761889402 761889403 761889404 761889405 761889406 761889407 761889408 761889409 761889410 761889411 761889412 761889413 
761889414 761889415 761889417 761889418 761889419 761889420 761889421 761889422 761889423 761889424 761889425 761889426 761889427 761889428 761889429 761889430 761889431 761889432 761889433 761889434 761889435 761889436 761889437 761889438 761889439 761889440 761889441 761889442 761889443 761889444 761889445 761889446 761889447 761889448 761889449 761889450 761889451 761889452 761889453 761889454 761889455 761889456 761889457 761889458 761889459 761889460 761889461 761889462 761889463 761889464 761889465 761889466 761889467 761889468 761889469 761889470 761889471 761889472 761889473 761889474 761889475 761889476 761889477 761889478 761889479 761889480 761889481 761889482 761889483 761889484 761889485 761889486 761889487 761889488 761889489 761889490 761889491 761889492 761889493 761889494 761889495 761889496 761889497 761889498 761889499 761889500 761889501 761889502 761889503 761889504 761889505 761889506 761889507 761889508 761889509 761889510 761889511 761889512 761889513 761889514 761889515 761889516 761889517 761889518 761889519 761889520 761889521 761889522 761889523 761889524 761889525 761889526 761889527 761889528 761889529 761889530 761889531 761889532 761889533 761889534 761889535 761889536 761889537 761889538 761889539 761889540 761889541 761889542 761889543 761889544 761889545 761889546 761889547 761889548 761889549 761889550 761889551 761889552 761889553 761889554 761889555 761889556 761889557 761889558 761889559 761889560 761889561 761889562 761889563 761889564 761889565 761889566 761889567 761889568 761889569 761889570 761889571 761889572 761889573 761889574 761889575 761889576 761889577 761889578 761889579 761889580 761889581 761889582 761889583 761889584 761889585 761889586 761889587 761889588 761889589 761889590 761889591 761889592 761889593 761889594 761889595 761889596 761889597 761889598 761889599 761889600 761889601 761889602 761889603 761889604 761889605 761889606 761889607 761889608 761889609 761889610 761889611 761889612 761889613 761889614 
761889615 761889616 761889617 761889618 761889619 761889620 761889621 761889622 761889623 761889624 761889625 761889626 761889627 761889628 761889629 761889630 761889631 761889632 761889633 761889634 761889635 761889636 761889637 761889638 761889639 761889640 761889641 761889642 761889643 761889644 761889645 761889646 761889647 761889648 761889649 761889650 761889651 761889652 761889653 761889654 761889655 761889656 761889657 761889658 761889659 761889660 761889661 761889662 761889663 761889664 761889665 761889666 761889667 761889668 761889669 761889670 761889671 761889672 761889673 761889674 761889675 761889676 761889677 761889678 761889679 761889680 761889681 761889682 761889683 761889684 761889685 761889686 761889687 761889688 761889689 761889690 761889691 761889692 761889693 761889694 761889695 761889696 761889697 761889698 761889699 761889700 761889701 761889702 761889703 761889704 761889705 761889706 761889707 761889708 761889709 761889710 761889711 761889712 761889713 761889714 761889715 761889716 761889717 761889718 761889719 761889720 761889721 761889722 761889723 761889724 761889725 761889726 761889727 761889728 761889729 761889730 761889731 761889732 761889733 761889734 761889735 761889736 761889737 761889738 761889739 761889740 761889741 761889742 761889743 761889744 761889745 761889746 761889747 761889748 761889749 761889750 761889751 761889752 761889753 761889754 761889755 761889756 761889757 761889758 761889759 761889760 761889761 761889762 761889763 761889764 761889765 761889766 761889767 761889768 761889769 761889770 761889771 761889772 761889773 761889774 761889775 761889776 761889777 761889778 761889779 761889780 761889781 761889782 761889783 761889784 761889785 761889786 761889787 761889788 761889789 761889790 761889791 761889792 761889793 761889794 761889795 761889796 761889797 761889798 761889799 761889800 761889801 761889802 761889803 761889804 761889805 761889806 761889807 761889808 761889809 761889810 761889811 761889812 761889813 761889814 
761889815 761889816 761889817 761889818 761889819 761889820 761889821 761889822 761889823 761889824 761889825 761889826 761889827 761889828 761889829 761889830 761889831 761889832 761889833 761889834 761889835 761889836 761889837 761889838 761889839 761889840 761889841 761889842 761889843 761889844 761889845 761889846 761889847 761889848 761889849 761889850 761889851 761889852 761889853 761889854 761889855 761889856 761889857 761889858 761889859 761889860 761889861 761889862 761889863 761889864 761889865 761889866 761889867 761889868 761889869 761889870 761889871 761889872 761889873 761889874 761889875 761889876 761889877 761889878 761889879 761889880 761889881 761889882 761889883 761889884 761889885 761889886 761889887 761889888 761889889 761889890 761889891 761889892 761889893 761889894 761889895 761889896 761889897 761889898 761889899 761889900 761889901 761889902 761889903 761889904 761889905 761889906 761889907 761889908 761889909 761889910 761889911 761889912 761889913 761889914 761889915 761889916 761889917 761889918 761889919 761889920 761889921 761889922 761889923 761889924 761889925 761889926 761889927 761889928 761889929 761889930 761889931 761889932 761889933 761889934 761889935 761889936 761889937 761889938 761889939 761889940 761889941 761889942 761889943 761889944 761889945 761889946 761889947 761889948 761889949 761889950 761889951 761889952 761889953 761889954 761889955 761889956 761889957 761889958 761889959 761889960 761889961 761889962 761889963 761889964 761889965 761889966 761889967 761889968 761889969 761889970 761889971 761889972 761889973 761889974 761889975 761889976 761889977 761889978 761889979 761889980 761889981 761889982 761889983 761889984 761889985 761889986 761889987 761889988 761889989 761889990 761889991 761889992 761889993 761889994 761889995 761889996 761889997 761889998 761889999 761890000 761890001 761890002 761890003 761890004 761890005 761890006 761890007 761890008 761890009 761890010 761890011 761890012 761890013 761890014 
761890015 761890016 761890017 761890018 761890019 761890020 761890021 761890022 761890023 761890024 761890025 761890026 761890027 761890028 761890029 761890030 761890031 761890032 761890033 761890034 761890035 761890036 761890037 761890038 761890039 761890040 761890041 761890042 761890043 761890044 761890045 761890046 761890047 761890048 761890049 761890050 761890051 761890052 761890053 761890054 761890055 761890056 761890057 761890058 761890059 761890060 761890061 761890062 761890063 761890064 761890065 761890066 761890067 761890068 761890069 761890070 761890071 761890072 761890073 761890074 761890075 761890076 761890077 761890078 761890079 761890080 761890081 761890082 761890083 761890084 761890085 761890086 761890087 761890088 761890089 761890090 761890091 761890092 761890093 761890094 761890095 761890096 761890097 761890098 761890099 761890100 761890101 761890102 761890103 761890104 761890105 761890106 761890107 761890108 761890109 761890110 761890111 761890112 761890113 761890114 761890115 761890116 761890117 761890118 761890119 761890120 761890121 761890122 761890123 761890124 761890125 761890126 761890127 761890128 761890129 761890130 761890131 761890132 761890133 761890134 761890137 761890140 761890141 761890142 761890145 761890146 761890148 761890149 761890150 761890151 761890152 761890153 761890154 761890155 761890156 761890157 761890158 761890159 761890160 761890161 761890162 761890163 761890164 761890165 761890166 761890167 761890168 761890169 761890170 761890171 761890172 761890173 761890174 761890175 761890176 761890177 761890178 761890179 761890180 761890181 761890182 761890183 761890184 761890185 761890186 761890187 761890188 761890189 761890190 761890191 761890192 761890193 761890194 761890195 761890196 761890197 761890198 761890199 761890200 761890201 761890202 761890203 761890204 761890205 761890206 761890207 761890208 761890209 761890210 761890211 761890212 761890213 761890214 761890215 761890216 761890217 761890218 761890219 761890220 761890221 
761890222 761890223 761890224 761890225 761890226 761890227 761890228 761890229 761890230 761890231 761890232 761890233 761890234 761890235 761890236 761890237 761890238 761890239 761890240 761890241 761890242 761890243 761890244 761890245 761890246 761890247 761890248 761890249 761890250 761890251 761890252 761890253 761890254 761890255 761890256 761890257 761890258 761890259 761890260 761890261 761890262 761890263 761890264 761890265 761890266 761890267 761890268 761890269 761890270 761890271 761890272 761890273 761890274 761890275 761890276 761890277 761890278 761890279 761890280 761890281 761890282 761890283 761890284 761890285 761890286 761890287 761890288 761890289 761890290 761890291 761890292 761890293 761890294 761890295 761890296 761890297 761890298 761890299 761890300 761890301 761890302 761890303 761890304 761890305 761890306 761890307 761890308 761890309 761890310 761890313 761890314 761890315 761890317 761890318 761890319 761890320 761890321 761890322 761890323 761890324 761890325 761890326 761890327 761890328 761890329 761890330 761890331 761890332 761890333 761890334 761890335 761890336 761890337 761890338 761890339 761890340 761890341 761890342 761890343 761890344 761890345 761890346 761890347 761890348 761890349 761890350 761890351 761890352 761890353 761890354 761890355 761890356 761890357 761890358 761890359 761890360 761890361 761890362 761890363 761890364 761890365 761890366 761890367 761890368 761890369 761890370 761890371 761890372 761890373 761890374 761890375 761890376 761890377 761890378 761890379 761890380 761890381 761890382 761890383 761890384 761890385 761890386 761890387 761890388 761890389 761890390 761890391 761890392 761890393 761890394 761890395 761890396 761890397 761890398 761890399 761890400 761890401 761890402 761890403 761890404 761890405 761890406 761890407 761890408 761890409 761890410 761890411 761890412 761890413 761890414 761890415 761890416 761890417 761890418 761890419 761890420 761890421 761890422 761890423 761890424 
761890425 761890426 761890427 761890428 761890429 761890430 761890431 761890432 761890433 761890434 761890435 761890436 761890437 761890438 761890439 761890440 761890441 761890442 761890443 761890444 761890445 761890446 761890447 761890448 761890449 761890450 761890451 761890452 761890453 761890454 761890455 761890456 761890457 761890458 761890459 761890460 761890461 761890462 761890463 761890464 761890465 761890466 761890467 761890468 761890469 761890470 761890471 761890472 761890473 761890474 761890475 761890476 761890477 761890478 761890479 761890480 761890481 761890482 761890483 761890484 761890485 761890486 761890487 761890488 761890489 761890490 761890491 761890492 761890493 761890494 761890495 761890496 761890497 761890498 761890499 761890500 761890501 761890502 761890503 761890504 761890505 761890506 761890507 761890508 761890509 761890510 761890511 761890512 761890513 761890514 761890515 761890516 761890517 761890518 761890519 761890520 761890521 761890522 761890523 761890524 761890525 761890526 761890527 761890528 761890529 761890530 761890531 761890532 761890533 761890534 761890535 761890536 761890537 761890538 761890539 761890540 761890541 761890542 761890543 761890544 761890545 761890546 761890547 761890548 761890549 761890550 761890551 761890552 761890553 761890554 761890555 761890556 761890557 761890558 761890559 761890560 761890561 761890562 761890563 761890564 761890565 761890566 761890567 761890568 761890569 761943777 761959265 762339040 762340585 762368931 762417190 762417238 762417259 762417640 762417968 762417969 762418255 762419031 762419045 762419057 762419144 762419290 762419770 762419908 762419961 762419968 762419976 762419999 762420000 762420045 762420264 762420282 762420297 762420337 762420397 762420400 762420508 762420526 762420537 762420645 762420753 762420779 762420817 762420963 762421011 762421020 762421217 762421412 762755085 762755086 762755087 762755088 762755089 762755090 762755091 762755092 762755093 762755094 762755095 762755103 
762755104 762755105 762755106 762755107 762755108 762755109 762755110 762755111 762755112 762755113 762755114 762755115 762755116 762755117 762755118 762755119 762755120 762755121 762755122 762755123 762755124 762755125 762755126 762755127 762755128 762755129 762755130 762755131 762755132 762755133 762755134 762755135 762755136 762755137 762755138 762755139 762755140 762755141 762755142 762755143 762755144 762755145 762755146 762755147 762755148 762755149 762755150 762755151 762755152 762755153 762755154 762755155 762755156 762755157 762755158 762755159 762755160 762755161 762755162 762755170 762755171 762755172 762755173 762755174 762755175 762755176 762755177 762755178 762755179 762755180 762755181 762755182 762755183 762755184 762755185 762755186 762755187 762755188 762755189 762755190 762755191 762755192 762755193 762755194 762755195 762755196 762755197 762755198 762755199 762755200 762755201 762755202 762755203 762755204 762755205 762755206 762755207 762755208 762755209 762755210 762755211 762755212 762755213 762755214 762755215 762755216 762755217 762755218 762755219 762755220 762755221 762755222 762755223 762755224 762755225 762755226 762755227 762755228 762755229 762755230 762755231 762755232 762755233 762755234 762755235 762755236 762755237 762755238 762755239 762755240 762755241 762755242 762755243 762755244 762755245 762755246 762755247 762755248 762755249 762755250 762755251 762755252 762755253 762755254 762755255 762755256 762755257 762755258 762755259 762755260 762755261 762755262 762755263 762755264 762755265 762755266 762755267 762755268 762755269 762755270 762755271 762755272 762755273 762755274 762755275 762755276 762755277 762755278 762755279 762755280 762755281 762755282 762755283 762755284 762755285 762755286 762755287 762755293 762755294 762755295 762755296 762755297 762755298 762755299 762755300 762755301 762755302 762755303 762755304 762755305 762755306 762755307 762755308 762755309 762755310 762755311 762755312 762755313 762755314 762755315 
762755316 762755317 762755318 762755319 762755320 762755321 762755322 762755323 762755324 762755325 762755326 762755327 762755328 762755329 762755330 762755331 762755332 762755333 762755334 762755335 762755336 762755337 762755338 762755339 762755340 762755341 762755342 762755343 762755344 762755345 762755346 762755347 762755348 762755349 762755350 762755351 762755352 762755353 762755354 762755355 762755356 762755357 762755358 762755359 762755360 762755361 762755362 762755363 762755364 762755365 762755366 762755367 762755368 762755369 762755370 762755371 762755372 762755373 762755374 762755375 762755376 762755377 762755378 762755379 762755380 762755381 762755382 762755383 762755384 762755385 762755386 762755387 762755388 762755389 762755390 762755391 762755392 762755393 762755394 762755395 762755396 762755397 762755398 762755399 762755400 762755401 762755402 762755403 762755404 762755405 762755406 762755407 762755408 762755409 762755410 762755411 762755412 762755413 762755414 762755415 762755416 762755417 762755418 762755419 762755420 762755421 762755422 762755423 762755424 762755425 762755426 762755427 762755428 762755429 762755430 762755431 762755432 762755433 762755434 762755435 762755436 762755437 762755438 762755439 762755440 762755441 762755442 762755443 762755444 762755445 762755446 762755447 762755448 762755449 762755450 762755451 762755452 762755453 762755454 762755455 762755456 762755457 762755458 762755459 762755460 762755461 762755462 762755463 762755464 762755465 762755466 762755467 762755468 762755469 762755470 762755471 762755472 762755473 762755474 762755475 762755476 762755477 762755478 762755479 762755480 762755481 762755482 762755483 762755484 762755485 762755486 762755487 762755488 762755489 762755490 762755491 762755492 762755493 762755494 762755495 762755496 762755497 762755498 762755499 762755500 762755501 762755502 762755503 762755504 762755505 762755506 762755507 762755508 762755509 762755510 762755511 762755512 762755513 762755514 762755515 
762755516 762755517 762755518 762755519 762755520 762755521 762755522 762755523 762755524 762755525 762755526 762755527 762755528 762755529 762755530 762755531 762755532 762755533 762755534 762755535 762755536 762755537 762755538 762755539 762755540 762755541 762755542 762755543 762755544 762755545 762755546 762755547 762755548 762755549 762755550 762755551 762755552 762755553 762755554 762755555 762755556 762755557 762755558 762755559 762755560 762755561 762755562 762755563 762755564 762755565 762755566 762755567 762755568 762755569 762755570 762755571 762755572 762755573 762755574 762755575 762755576 762755577 762755578 762755579 762755580 762755581 762755582 762755583 762755584 762755585 762755586 762755587 762755588 762755589 762755590 762755591 762755592 762755593 762755594 762755595 762755596 762755597 762755598 762755599 762755600 762755601 762755602 762755603 762758490 762758499 762758500 762758501 762758504 762758505 762758506 762758507 762758508 762758509 762758510 762758511 762758512 762758513 762758514 762758515 762758516 762758517 762758518 762758519 762758520 762758521 762758522 762758523 762758524 762758525 762758526 762758527 762758528 762758529 762758530 762758531 762758532 762758533 762758534 762758535 762758536 762758537 762758538 762758539 762758540 762758541 762758542 762758543 762758544 762758545 762758546 762758547 762758548 762758549 762758550 762758551 762758552 762758553 762758554 762758555 762758556 762758557 762758558 762758559 762758560 762758561 762758562 762758563 762758564 762758565 762758566 762758567 762758568 762758569 762758570 762758571 762758572 762758573 762758574 762758575 762758576 762758577 762758578 762758579 762758580 762758581 762758582 762758583 762758584 762758585 762758586 762758587 762758588 762758589 762758590 762758591 762758596 762758597 762758598 762758599 762758600 762758601 762758602 762758603 762758604 762758605 762758606 762758607 762758608 762758609 762758610 762758611 762758612 762758613 762758614 762758615 
762758616 762758617 762758618 762758619 762758620 762758621 762758622 762758623 762758624 762758625 762758626 762758627 762758628 762758629 762758630 762758631 762758632 762758633 762758634 762758635 762758636 762758637 762758638 762758639 762758640 762758641 762758642 762758643 762758644 762758645 762758646 762758647 762758648 762758649 762758650 762758651 762758652 762758653 762758654 762758655 762758656 762758657 762758658 762758659 762758660 762758661 762758662 762758663 762758664 762758665 762758666 762758669 762758670 762758671 762758672 762758673 762758675 762758676 762758677 762758678 762758679 762758680 762758681 762758682 762758683 762758684 762758685 762758686 762758687 762758688 762758689 762758690 762758691 762758692 762758693 762758694 762758695 762758696 762758697 762758698 762758699 762758700 762758701 762758702 762758703 762758704 762758705 762758706 762758707 762758708 762758709 762758710 762758711 762758712 762758713 762758714 762758715 762758716 762758717 762758718 762758719 762758720 762758721 762758722 762758723 762758724 762758725 762758726 762758727 762758728 762758729 762758730 762758731 762758732 762758733 762758734 762758735 762758736 762758737 762758738 762758739 762758740 762758741 762758742 762758743 762758744 762758745 762758746 762758747 762758748 762758749 762758750 762758751 762758752 762758753 762758754 762758755 762758756 762758757 762758758 762758759 762758760 762758761 762758762 762758763 762758764 762758765 762758766 762758767 762758768 762758769 762758770 762758771 762758772 762758773 762758774 762758775 762758776 762758777 762758778 762758779 762758780 762758781 762758782 762758783 762758784 762758785 762758786 762758787 762758788 762758789 762758790 762758791 762758792 762758793 762758794 762758795 762758796 762758797 762758798 762758799 762758800 762758801 762758802 762758803 762758804 762758805 762758806 762758807 762758808 762758809 762758810 762758811 762758812 762758813 762758814 762758815 762758816 762758817 762758818 
762758819 762758820 762758821 762758822 762758823 762758824 762758825 762758826 762758827 762758828 762758829 762758830 762758831 762758832 762758833 762758834 762758835 762758836 762758837 762758838 762758839 762758840 762758841 762758842 762758843 762758844 762758845 762758846 762758847 762758848 762758849 762758850 762758851 762758852 762758853 762758854 762758855 762758856 762758857 762758858 762758859 762758860 762758861 762758862 762758863 762758864 762758865 762758866 762758867 762758868 762758869 762758870 762758871 762758872 762758873 762758874 762758875 762758876 762758877 762758878 762758879 762758880 762758881 762758882 762758883 762758884 762758885 762758886 762758887 762758888 762758889 762758890 762758891 762758892 762758893 762758894 762758895 762758896 762758897 762758898 762758899 762758900 762758901 762758902 762758903 762758904 762758905 762758906 762758907 762758908 762758909 762758910 762758911 762758912 762758913 762758914 762758915 762758916 762758917 762758918 762758919 762758920 762758921 762758922 762758923 762758924 762758925 762758926 762758927 762758928 762758929 762758930 762758931 762758932 762758933 762758934 762758935 762758936 762758937 762758938 762758939 762758940 762758941 762758942 762758943 762758944 762758945 762758946 762758947 762758948 762758949 762758950 762758951 762758952 762758953 762758954 762758955 762758956 762758957 762758958 762758959 762758960 762758961 762758962 762758963 762758964 762758965 762758966 762758967 762758968 762758969 762758970 762758971 762758972 762758973 762758974 762758975 762758976 762758977 762758978 762758979 762758980 762758981 762758982 762758983 762758984 762758985 762758986 762758987 762758988 762758989 762758990 762758991 762758992 762758993 762758994 762758995 762758996 762758997 762758998 762758999 762759000 762759001 762759002 762759003 762759004 762759005 762759006 762814859 762814862 762814863 762814865 762814866 762814867 762814868 762814869 762814870 762814871 762814872 762814873 
762814874 762814875 762814876 762814877 762814878 762814879 762814880 762814881 762814882 762814883 762814884 762814885 762814886 762814887 762814888 762814889 762814890 762814891 762814892 762814893 762814894 762814895 762814896 762814897 762814898 762814899 762814900 762814901 762814902 762814903 762814904 762814905 762814906 762814907 762814908 762814909 762814910 762814911 762814912 762814913 762814914 762814915 762814916 762814917 762814918 762814919 762814920 762814921 762814922 762814923 762814924 762814925 762814926 762814927 762814928 762814929 762814930 762814931 762814932 762814933 762814934 762814935 762814936 762814937 762814938 762814943 762814944 762814945 762814946 762814947 762814948 762814949 762814950 762814951 762814952 762814953 762814954 762814955 762814956 762814957 762814958 762814959 762814960 762814961 762814962 762814963 762814964 762814965 762814966 762814967 762814968 762814969 762814970 762814971 762814972 762814973 762814974 762814975 762814976 762814977 762814978 762814979 762814980 762814981 762814982 762814983 762814984 762814985 762814986 762814987 762814988 762814989 762814990 762814991 762814992 762814993 762814994 762814995 762814996 762814997 762814998 762814999 762815000 762815001 762815002 762815003 762815004 762815005 762815006 762815007 762815008 762815009 762815010 762815011 762815012 762815013 762815014 762815015 762815016 762815017 762815018 762815019 762815020 762815021 762815022 762815023 762815024 762815025 762815026 762815027 762815028 762815029 762815030 762815031 762815032 762815033 762815034 762815035 762815036 762815037 762815038 762815039 762815040 762815041 762815042 762815043 762815044 762815045 762815046 762815047 762815048 762815049 762815050 762815051 762815052 762815053 762815054 762815055 762815056 762815057 762815058 762815059 762815060 762815061 762815062 762815063 762815064 762815065 762815066 762815067 762815068 762815069 762815070 762815071 762815072 762815073 762815074 762815075 762815076 762815077 
762815078 762815079 762815080 762815081 762815082 762815083 762815084 762815085 762815086 762815087 762815088 762815089 762815090 762815091 762815092 762815093 762815094 762815095 762815096 762815097 762815098 762815099 762815100 762815101 762815102 762815103 762815104 762815105 762815106 762815107 762815108 762815109 762815110 762815111 762815112 762815113 762815114 762815115 762815116 762815117 762815118 762815119 762815120 762815121 762815122 762815123 762815124 762815125 762815126 762815127 762815128 762815129 762815130 762815131 762815132 762815133 762815134 762815135 762815136 762815137 762815138 762815139 762815140 762815141 762815142 762815143 762815144 762815145 762815146 762815147 762815148 762815149 762815150 762815151 762815152 762815153 762815154 762815155 762815156 762815157 762815158 762815159 762815160 762815161 762815162 762815163 762815164 762815165 762815166 762815167 762815168 762815169 762815170 762815171 762815172 762815173 762815174 762815175 762815176 762815177 762815178 762815183 762815184 762815185 762815186 762815187 762815188 762815189 762815190 762815191 762815192 762815193 762815194 762815195 762815196 762815197 762815198 762815199 762815200 762815201 762815202 762815203 762815204 762815205 762815206 762815207 762815208 762815209 762815210 762815211 762815212 762815213 762815214 762815215 762815216 762815217 762815218 762815219 762815220 762815221 762815222 762815223 762815224 762815225 762815226 762815227 762815228 762815229 762815230 762815231 762815232 762815233 762815234 762815235 762815236 762815237 762815238 762815239 762815240 762815241 762815242 762815243 762815244 762815245 762815246 762815247 762815248 762815249 762815250 762815251 762815252 762815253 762815254 762815255 762815256 762815257 762815258 762815259 762815260 762815261 762815262 762815263 762815264 762815265 762815266 762815267 762815268 762815269 762815270 762815271 762815272 762815273 762815274 762815275 762815276 762815277 762815278 762815279 762815280 762815281 
762815282 762815283 762815284 762815285 762815286 762815287 762815288 762815289 762815290 762815291 762815292 762815293 762815294 762815295 762815296 762815297 762815298 762815299 762815300 762815301 762815302 762815303 762815304 762815305 762815306 762815307 762815308 762815309 762815310 762815311 762815312 762815313 762815314 762815315 762815316 762815317 762815318 762815319 762815320 762815321 762815322 762815323 762815324 762815325 762815326 762815327 762815328 762815329 762815330 762815331 762815332 762815333 762815334 762815335 762815336 762815337 762815338 762815339 762815340 762815341 762815342 762815343 762815344 762815345 762815346 762815347 762815348 762815349 762815350 762815351 762815352 762815353 762815354 762815355 762815356 762815357 762815358 762815359 762815360 762815361 762815362 762815363 762815364 762815365 762815366 762815367 762815368 762815369 762817220 762817229 762817230 762817231 762817233 762817234 762817235 762817236 762817237 762817238 762817239 762817240 762817241 762817242 762817243 762817244 762817245 762817246 762817247 762817248 762817249 762817250 762817251 762817252 762817253 762817254 762817255 762817256 762817257 762817258 762817259 762817260 762817261 762817262 762817263 762817264 762817265 762817266 762817267 762817268 762817269 762817270 762817271 762817272 762817273 762817274 762817275 762817276 762817277 762817278 762817279 762817280 762817281 762817282 762817283 762817284 762817285 762817286 762817287 762817288 762817289 762817290 762817291 762817292 762817293 762817294 762817295 762817296 762817297 762817298 762817299 762817300 762817301 762817302 762817303 762817304 762817309 762817310 762817311 762817312 762817313 762817314 762817315 762817316 762817317 762817318 762817319 762817320 762817321 762817322 762817323 762817324 762817325 762817326 762817327 762817328 762817329 762817330 762817331 762817332 762817333 762817334 762817335 762817336 762817337 762817338 762817339 762817340 762817341 762817342 762817343 762817344 
762817345 762817346 762817347 762817348 762817349 762817350 762817351 762817352 762817353 762817354 762817355 762817356 762817357 762817358 762817359 762817360 762817361 762817362 762817363 762817364 762817365 762817366 762817367 762817368 762817369 762817370 762817371 762817372 762817373 762817374 762817375 762817376 762817377 762817378 762817379 762817380 762817381 762817382 762817383 762817384 762817385 762817386 762817387 762817388 762817389 762817390 762817391 762817392 762817393 762817394 762817395 762817396 762817397 762817398 762817399 762817400 762817405 762817406 762817407 762817408 762817409 762817410 762817411 762817412 762817413 762817414 762817415 762817416 762817417 762817418 762817419 762817420 762817421 762817422 762817423 762817424 762817425 762817426 762817427 762817428 762817429 762817430 762817431 762817432 762817433 762817434 762817435 762817436 762817437 762817438 762817439 762817440 762817441 762817442 762817443 762817444 762817445 762817446 762817447 762817448 762817449 762817450 762817451 762817452 762817453 762817454 762817455 762817456 762817457 762817458 762817459 762817460 762817461 762817462 762817463 762817464 762817465 762817466 762817467 762817468 762817469 762817470 762817471 762817472 762817473 762817474 762817475 762817476 762817477 762817478 762817479 762817480 762817481 762817482 762817483 762817484 762817485 762817486 762817487 762817488 762817489 762817490 762817491 762817492 762817493 762817494 762817495 762817496 762817497 762817498 762817499 762817500 762817501 762817502 762817503 762817504 762817505 762817506 762817507 762817508 762817509 762817510 762817511 762817512 762817513 762817514 762817515 762817516 762817517 762817518 762817519 762817520 762817521 762817522 762817523 762817524 762817525 762817526 762817527 762817528 762817529 762817530 762817531 762817532 762817533 762817534 762817535 762817536 762817537 762817538 762817539 762817540 762817541 762817542 762817543 762817544 762817545 762817546 762817547 762817548 
762817549 762817550 762817551 762817552 762817553 762817554 762817555 762817556 762817557 762817558 762817559 762817560 762817561 762817562 762817563 762817564 762817565 762817566 762817567 762817568 762817569 762817570 762817571 762817572 762817573 762817574 762817575 762817576 762817577 762817578 762817579 762817580 762817581 762817582 762817583 762817584 762817585 762817586 762817587 762817588 762817589 762817590 762817591 762817592 762817593 762817594 762817595 762817596 762817597 762817598 762817599 762817600 762817601 762817602 762817603 762817604 762817605 762817606 762817607 762817608 762817609 762817610 762817611 762817612 762817613 762817614 762817615 762817616 762817617 762817618 762817619 762817620 762817621 762817622 762817623 762817624 762817625 762817626 762817627 762817628 762817629 762817630 762817631 762817632 762817633 762817634 762817635 762817636 762817637 762817638 762817639 762817640 762817641 762817642 762817643 762817644 762817645 762817646 762817647 762817648 762817649 762817650 762817651 762817652 762817653 762817654 762817655 762817656 762817657 762817658 762817659 762817660 762817661 762817662 762817663 762817664 762817665 762817666 762817667 762817668 762817669 762817670 762817671 762817672 762817673 762817674 762817675 762817676 762817677 762817678 762817679 762817680 762817681 762817682 762817683 762817684 762817685 762817686 762817687 762817688 762817689 762817690 762817691 762817692 762817693 762817694 762817695 762817696 762817697 762817698 762817699 762817700 762817701 762817702 762817703 762817704 762817705 762817706 762817707 762817708 762817709 762817710 762817711 762817712 762817713 762817714 762817715 762817716 762817717 762817718 762817719 762817720 762817721 762817722 762817723 762817724 762817725 762817726 762817727 762817728 762817729 762817730 762817731 762817732 762817733 762817734 762817735 762817736 762828406 762828412 762828413 762828414 762828415 762828416 762828417 762828418 762828419 762828420 762828421 762828422 
762828423 762828424 762828425 762828426 762828427 762828428 762828429 762828430 762828431 762828432 762828433 762828434 762828435 762828436 762828437 762828438 762828439 762828440 762828441 762828442 762828443 762828444 762828445 762828446 762828447 762828448 762828449 762828450 762828451 762828452 762828453 762828454 762828455 762828456 762828457 762828458 762828459 762828460 762828461 762828462 762828463 762828464 762828465 762828466 762828467 762828468 762828469 762828470 762828471 762828472 762828473 762828474 762828475 762828476 762828477 762828478 762828479 762828480 762828481 762828482 762828483 762828484 762828485 762828486 762828493 762828494 762828495 762828496 762828497 762828498 762828499 762828500 762828501 762828502 762828503 762828504 762828505 762828506 762828507 762828508 762828509 762828510 762828511 762828512 762828513 762828514 762828515 762828516 762828517 762828518 762828519 762828520 762828521 762828522 762828523 762828524 762828525 762828526 762828527 762828528 762828529 762828530 762828531 762828532 762828533 762828534 762828535 762828536 762828537 762828538 762828539 762828540 762828541 762828542 762828543 762828544 762828545 762828546 762828547 762828548 762828549 762828550 762828551 762828552 762828553 762828554 762828555 762828556 762828557 762828558 762828559 762828560 762828561 762828562 762828563 762828564 762828565 762828566 762828567 762828568 762828569 762828570 762828571 762828572 762828573 762828574 762828575 762828576 762828577 762828578 762828579 762828580 762828581 762828582 762828583 762828584 762828585 762828586 762828587 762828588 762828589 762828590 762828591 762828592 762828593 762828594 762828595 762828596 762828597 762828598 762828599 762828600 762828601 762828602 762828603 762828604 762828605 762828606 762828607 762828608 762828609 762828610 762828611 762828612 762828613 762828614 762828615 762828616 762828617 762828618 762828619 762828620 762828621 762828622 762828623 762828624 762828625 762828626 762828627 762828628 
762828629 762828630 762828631 762828632 762828633 762828634 762828635 762828636 762828637 762828638 762828639 762828640 762828641 762828642 762828643 762828644 762828645 762828646 762828647 762828648 762828649 762828650 762828651 762828652 762828653 762828654 762828655 762828656 762828657 762828658 762828659 762828660 762828661 762828662 762828663 762828664 762828665 762828666 762828667 762828668 762828669 762828670 762828671 762828672 762828673 762828674 762828675 762828676 762828677 762828678 762828679 762828680 762828681 762828682 762828683 762828684 762828685 762828686 762828687 762828688 762828689 762828690 762828691 762828692 762828693 762828694 762828695 762828696 762828697 762828698 762828699 762828700 762828701 762828702 762828703 762828704 762828705 762828708 762828709 762828710 762828712 762828713 762828714 762828715 762828716 762828717 762828718 762828719 762828720 762828721 762828722 762828723 762828724 762828725 762828726 762828727 762828728 762828729 762828730 762828731 762828732 762828733 762828734 762828735 762828736 762828737 762828738 762828739 762828740 762828741 762828742 762828743 762828744 762828745 762828746 762828747 762828748 762828749 762828750 762828751 762828752 762828753 762828754 762828755 762828756 762828757 762828758 762828759 762828760 762828761 762828762 762828763 762828764 762828765 762828766 762828767 762828768 762828769 762828770 762828771 762828772 762828773 762828774 762828775 762828776 762828777 762828778 762828779 762828780 762828781 762828782 762828783 762828784 762828785 762828786 762828787 762828788 762828789 762828790 762828791 762828792 762828793 762828794 762828795 762828796 762828797 762828798 762828799 762828800 762828801 762828802 762828803 762828804 762828805 762828806 762828807 762828808 762828809 762828810 762828811 762828812 762828813 762828814 762828815 762828816 762828817 762828818 762828819 762828820 762828821 762828822 762828823 762828824 762828825 762828826 762828827 762828828 762828829 762828830 762828831 
762828832 762828833 762828834 762828835 762828836 762828837 762828838 762828839 762828840 762828841 762828842 762828843 762828844 762828845 762828846 762828847 762828848 762828849 762828850 762828851 762828852 762828853 762828854 762828855 762828856 762828857 762828858 762828859 762828860 762828861 762828862 762828863 762828864 762828865 762828866 762828867 762828868 762828869 762828870 762828871 762828872 762828873 762828874 762828875 762828876 762828877 762828878 762828879 762828880 762828881 762828882 762828883 762828884 762828885 762828886 762828887 762828888 762828889 762828890 762828891 762828892 762828893 762828894 762828895 762828896 762828897 762828898 762828899 762828900 762828901 762828902 762828903 762828904 762828905 762828906 762828907 762828908 762828909 762828910 762828911 762828912 762828913 762828914 762828915 762828916 762828917 762828918 762828919 762829159 762829164 762829165 762829166 762829167 762829168 762829169 762829170 762829171 762829172 762829173 762829174 762829175 762829176 762829177 762829178 762829184 762829185 762829186 762829187 762829189 762829190 762829191 762829192 762829193 762829194 762829195 762829196 762829197 762829198 762829199 762829200 762829201 762829202 762829203 762829204 762829205 762829206 762829207 762829208 762829209 762829210 762829211 762829212 762829213 762829214 762829215 762829216 762829217 762829218 762829219 762829220 762829221 762829222 762829223 762829224 762829225 762829226 762829227 762829228 762829229 762829230 762829231 762829232 762829233 762829234 762829235 762829236 762829237 762829238 762829239 762829240 762829241 762829242 762829243 762829244 762829245 762829246 762829247 762829248 762829249 762829250 762829251 762829252 762829253 762829254 762829255 762829256 762829257 762829258 762829259 762829260 762829261 762829262 762829263 762829264 762829265 762829266 762829267 762829268 762829269 762829270 762829271 762829272 762829273 762829274 762829275 762829276 762829277 762829278 762829279 762829280 
762829281 762829282 762829283 762829284 762829285 762829286 762829287 762829288 762829289 762829290 762829291 762829292 762829293 762829294 762829295 762829296 762829297 762829298 762829299 762829300 762829301 762829302 762829303 762829304 762829305 762829306 762829307 762829308 762829309 762829310 762829311 762829312 762829313 762829314 762829315 762829316 762829317 762829318 762829319 762829320 762829321 762829322 762829323 762829324 762829325 762829326 762829327 762829328 762829329 762829330 762829331 762829332 762829333 762829334 762829335 762829336 762829337 762829338 762829339 762829340 762829341 762829342 762829343 762829344 762829345 762829346 762829347 762829352 762829353 762829354 762829355 762829356 762829357 762829358 762829359 762829360 762829361 762829362 762829363 762829364 762829365 762829366 762829367 762829368 762829369 762829370 762829371 762829372 762829373 762829374 762829375 762829376 762829377 762829378 762829379 762829380 762829381 762829382 762829383 762829384 762829385 762829386 762829387 762829388 762829389 762829390 762829391 762829392 762829393 762829394 762829395 762829396 762829397 762829398 762829399 762829400 762829401 762829402 762829403 762829404 762829405 762829406 762829407 762829408 762829409 762829410 762829411 762829412 762829413 762829414 762829415 762829416 762829417 762829418 762829419 762829420 762829421 762829422 762829423 762829424 762829425 762829426 762829427 762829428 762829429 762829430 762829431 762829432 762829433 762829434 762829435 762829436 762829437 762829438 762829439 762829440 762829441 762829442 762829443 762829444 762829445 762829446 762829447 762829448 762829449 762829450 762829451 762829452 762829453 762829454 762829455 762829456 762829457 762829458 762829459 762829460 762829461 762829462 762829463 762829464 762829465 762829466 762829467 762829468 762829469 762829470 762829471 762829472 762829473 762829474 762829475 762829476 762829477 762829478 762829479 762829480 762829481 762829482 762829483 762829484 
762829485 762829486 762829487 762829488 762829489 762829490 762829491 762829492 762829493 762829494 762829495 762829496 762829497 762829498 762829499 762829500 762829501 762829502 762829503 762829504 762829505 762829506 762829507 762829508 762829509 762829510 762829511 762829512 762829513 762829514 762829515 762829516 762829517 762829518 762829519 762829520 762829521 762829522 762829523 762829524 762829525 762829526 762829527 762829528 762829529 762829530 762829531 762829532 762829533 762829534 762829535 762829536 762829537 762829538 762829539 762829540 762829541 762829542 762829543 762829544 762829545 762829546 762829547 762829548 762829549 762829550 762829551 762829552 762829553 762829554 762829555 762829556 762829557 762829558 762829559 762829560 762829561 762829562 762829563 762829564 762829565 762829566 762829567 762829568 762829569 762829570 762829571 762829572 762829573 762829574 762829575 762829576 762829577 762829578 762829579 762829580 762829581 762829582 762829583 762829584 762829585 762829586 762829587 762829588 762829593 762829594 762829595 762829596 762829597 762829598 762829599 762829600 762829601 762829602 762829603 762829604 762829605 762829606 762829607 762829608 762829609 762829610 762829611 762829612 762829613 762829614 762829615 762829616 762829617 762829618 762829619 762829620 762829621 762829622 762829623 762829624 762829625 762829626 762829627 762829628 762829629 762829630 762829631 762829632 762829633 762829634 762829635 762829636 762829637 762829638 762829639 762829640 762829641 762829642 762829643 762829644 762829645 762829646 762829647 762829648 762829649 762829650 762829651 762829652 762829653 762829654 762829655 762829656 762829657 762829658 762829659 762829660 762829661 762829662 762829663 762829664 762829665 762829666 762829667 762829668 762829669 762829670 762829671 762829672 762829673 762829674 762829675 762829676 762831055 762831060 762831061 762831062 762831063 762831064 762831065 762831066 762831067 762831068 762831069 762831070 
762831071 762831072 762831073 762831074 762831075 762831076 762831077 762831078 762831079 762831080 762831081 762831082 762831083 762831084 762831085 762831086 762831087 762831088 762831089 762831090 762831091 762831092 762831093 762831094 762831095 762831096 762831097 762831105 762831106 762831107 762831108 762831109 762831110 762831111 762831112 762831113 762831114 762831115 762831116 762831117 762831118 762831119 762831120 762831121 762831122 762831123 762831124 762831125 762831126 762831127 762831128 762831129 762831130 762831131 762831132 762831133 762831134 762831135 762831136 762831137 762831138 762831139 762831140 762831141 762831142 762831143 762831144 762831145 762831146 762831147 762831148 762831149 762831150 762831151 762831152 762831153 762831154 762831155 762831156 762831157 762831158 762831159 762831160 762831161 762831162 762831163 762831164 762831165 762831166 762831167 762831168 762831169 762831170 762831171 762831172 762831173 762831174 762831175 762831176 762831177 762831178 762831179 762831180 762831189 762831190 762831191 762831192 762831193 762831195 762831196 762831197 762831198 762831199 762831200 762831201 762831202 762831203 762831204 762831205 762831206 762831207 762831208 762831209 762831210 762831211 762831212 762831213 762831214 762831215 762831216 762831217 762831218 762831219 762831220 762831221 762831222 762831223 762831224 762831225 762831226 762831227 762831228 762831229 762831230 762831231 762831232 762831233 762831234 762831235 762831236 762831237 762831238 762831239 762831240 762831241 762831242 762831243 762831244 762831245 762831246 762831247 762831248 762831249 762831250 762831251 762831252 762831253 762831254 762831255 762831256 762831257 762831258 762831259 762831260 762831261 762831262 762831263 762831264 762831265 762831266 762831267 762831268 762831269 762831270 762831271 762831272 762831273 762831274 762831275 762831276 762831277 762831278 762831279 762831280 762831281 762831282 762831283 762831284 762831285 762831286 
762831287 762831288 762831289 762831290 762831291 762831292 762831293 762831294 762831295 762831296 762831297 762831298 762831299 762831300 762831301 762831302 762831303 762831304 762831305 762831306 762831307 762831308 762831309 762831310 762831311 762831312 762831313 762831314 762831315 762831316 762831317 762831318 762831319 762831320 762831321 762831322 762831323 762831324 762831325 762831326 762831327 762831328 762831329 762831330 762831331 762831332 762831333 762831334 762831335 762831336 762831337 762831338 762831339 762831340 762831341 762831342 762831343 762831344 762831345 762831346 762831347 762831348 762831349 762831350 762831351 762831352 762831353 762831354 762831355 762831356 762831357 762831358 762831359 762831362 762831363 762831364 762831365 762831367 762831368 762831369 762831370 762831371 762831372 762831373 762831374 762831375 762831376 762831377 762831378 762831379 762831380 762831381 762831382 762831383 762831384 762831385 762831386 762831387 762831388 762831389 762831390 762831391 762831392 762831393 762831394 762831395 762831396 762831397 762831398 762831399 762831400 762831401 762831402 762831403 762831404 762831405 762831406 762831407 762831408 762831409 762831410 762831411 762831412 762831413 762831414 762831415 762831416 762831417 762831418 762831419 762831420 762831421 762831422 762831423 762831424 762831425 762831426 762831427 762831428 762831429 762831430 762831431 762831432 762831433 762831434 762831435 762831436 762831437 762831438 762831439 762831440 762831441 762831442 762831443 762831444 762831445 762831446 762831447 762831448 762831449 762831450 762831451 762831452 762831453 762831454 762831455 762831456 762831457 762831458 762831459 762831460 762831461 762831462 762831463 762831464 762831465 762831466 762831467 762831468 762831469 762831470 762831471 762831472 762831473 762831474 762831475 762831476 762831477 762831478 762831479 762831480 762831481 762831482 762831483 762831484 762831485 762831486 762831487 762831488 762831489 
762831490 762831491 762831492 762831493 762831494 762831495 762831496 762831497 762831498 762831499 762831500 762831501 762831502 762831503 762831504 762831505 762831506 762831507 762831508 762831509 762831510 762831511 762831512 762831513 762831514 762831515 762831516 762831517 762831518 762831519 762831520 762831521 762831522 762831523 762831524 762831525 762831526 762831527 762831528 762831529 762831530 762831531 762831532 762831533 762831534 762831535 762831536 762831537 762831538 762831539 762831540 762831541 762831542 762831543 762831544 762831545 762831546 762831547 762831548 762831549 762831550 762831551 762831552 762831553 762831554 762831555 762831556 762831557 762831558 762831559 762831560 762831561 762831562 762831563 762831564 762831565 762831566 762831567 762831568 762831569 762831570 762831571 762831572 762831573 762831574 762831575 762831576 762831577 762831578 762831579 762831580 762831581 762831582 762831583 762831584 762831585 762831586 762831587 762831588 762831589 762831590 762831591 762831592 762831593 762831594 762831595 762831596 762831597 762831598 762831599 762831600 762831601 762831602 762831603 762831604 762831605 762831606 762831607 762831608 762831609 762831610 762831611 762831612 762831613 762831614 762831615 762831616 762831617 762831618 762831619 762831620 762831621 762831622 762831623 762831624 762831625 762831626 762831627 762831628 762831629 762831630 762831631 762831632 762831633 762831634 762831635 762831636 762831637 762831638 762831639 762831640 762831641 762831642 762831643 762831644 762831645 762831646 762831647 762831648 762831649 762831650 762831651 762831652 762831653 762831654 762831655 762831656 762831657 762831658 762831659 762831660 762831661 762831662 762831663 762831664 762831665 762831666 762831667 762831668 762831669 762831670 762831671 762831672 762831673 762831674 762831675 762831676 762831677 762831678 762831679 762831680 762831681 762831682 762831683 762831684 762831685 762831686 762831687 762831688 762831689 
762831690 762831691 762831692 762831693 762831694 762831695 762831696 762831697 762831698 762831699 762831700 762831701 762831702 762831703 762831704 762831705 762831706 762831707 762831708 762831709 762831710 762831711 762831712 762831713 762831714 762831715 762831716 762831717 762831718 762831719 762831720 762831721 762831722 762831723 762831724 762831725 762831726 762831727 762831728 762831729 762831730 762831731 762831732 762831733 762831734 762831735 762831736 762831737 762831738 762831739 762831740 762831741 762831742 762831743 762831744 762831745 762831746 762831747 762831752 762831753 762831754 762831755 762831756 762831757 762831758 762831759 762831760 762831779 762831780 762831781 762831782 762831784 762831785 762831786 762831787 762831788 762831789 762831790 762831791 762831792 762831793 762831794 762831795 762831796 762831797 762831798 762831799 762831800 762831801 762831802 762831803 762831804 762831805 762831806 762831807 762831808 762831809 762831810 762831811 762831812 762831813 762831814 762831815 762831816 762831817 762831818 762831819 762831820 762831821 762831822 762831823 762831824 762831825 762831826 762831827 762831828 762831829 762831830 762831831 762831832 762831833 762831834 762831835 762831836 762831837 762831838 762831839 762831840 762831841 762831842 762831843 762831844 762831845 762831846 762831847 762831848 762831849 762831850 762831851 762831852 762831853 762831854 762831855 762831856 762831857 762831858 762831859 762831860 762831861 762831862 762831863 762831864 762831865 762831866 762831867 762831868 762831869 762831870 762831871 762831872 762831873 762831874 762831875 762831876 762831877 762831878 762831879 762831880 762831881 762831882 762831883 762831884 762831885 762831886 762831887 762831888 762831889 762831890 762831891 762831892 762831893 762831894 762831895 762831896 762831897 762831898 762831899 762831900 762831901 762831902 762831903 762831904 762831905 762831906 762831907 762831908 762831909 762831910 762831911 762831912 
762831913 762831914 762831915 762831916 762831917 762831918 762831919 762831920 762831921 762831922 762831923 762831924 762831925 762831926 762831927 762831928 762831929 762831930 762831931 762831932 762831933 762831934 762831935 762831936 762831937 762831938 762831941 762831943 762831944 762831945 762831946 762831947 762831948 762831949 762831950 762831951 762831952 762831953 762831954 762831955 762831956 762831957 762831958 762831959 762831960 762831961 762831962 762831963 762831964 762831965 762831966 762831967 762831968 762831969 762831970 762831971 762831972 762831973 762831974 762831975 762831976 762831977 762831978 762831979 762831980 762831981 762831982 762831983 762831984 762831985 762831986 762831987 762831988 762831989 762831990 762831991 762831992 762831993 762831994 762831995 762831996 762831997 762831998 762831999 762832000 762832001 762832002 762832003 762832004 762832005 762832006 762832007 762832008 762832009 762832010 762832011 762832012 762832013 762832014 762832015 762832016 762832017 762832018 762832019 762832020 762832021 762832022 762832023 762832024 762832025 762832026 762832027 762832028 762832029 762832030 762832031 762832032 762832033 762832034 762832035 762832036 762832037 762832038 762832039 762832040 762832041 762832042 762832043 762832044 762832045 762832046 762832047 762832048 762832049 762832050 762832051 762832052 762832053 762832054 762832055 762832056 762832057 762832058 762832059 762832060 762832061 762832062 762832063 762832064 762832065 762832066 762832067 762832068 762832069 762832070 762832071 762832072 762832073 762832074 762832075 762832076 762832077 762832078 762832079 762832080 762832081 762832082 762832083 762832084 762832085 762832086 762832087 762832088 762832089 762832090 762832091 762832092 762832093 762832094 762832095 762832096 762832097 762832098 762832099 762832100 762832101 762832102 762832103 762844335 762844348 762844349 762844350 762844351 762844352 762844353 762844354 762844355 762844356 762844357 762844358 
762844359 762844360 762844368 762844369 762844370 762844371 762844372 762844373 762844374 762844375 762844376 762844377 762844378 762844379 762844380 762844381 762844382 762844383 762844384 762844385 762844386 762844387 762844388 762844389 762844390 762844391 762844394 762844395 762844396 762844397 762844398 762844399 762844400 762844401 762844402 762844403 762844404 762844405 762844406 762844407 762844408 762844409 762844410 762844411 762844412 762844413 762844414 762844415 762844416 762844417 762844418 762844419 762844420 762844421 762844422 762844423 762844424 762844425 762844426 762844427 762844428 762844429 762844430 762844431 762844432 762844433 762844434 762844435 762844436 762844437 762844438 762844439 762844440 762844441 762844442 762844443 762844444 762844445 762844446 762844447 762844448 762844449 762844450 762844451 762844452 762844453 762844454 762844455 762844456 762844457 762844458 762844459 762844460 762844461 762844462 762844463 762844464 762844465 762844466 762844467 762844468 762844469 762844470 762844471 762844472 762844473 762844474 762844479 762844480 762844481 762844482 762844483 762844484 762844485 762844486 762844487 762844488 762844489 762844490 762844491 762844492 762844493 762844494 762844495 762844496 762844497 762844498 762844499 762844500 762844501 762844502 762844503 762844504 762844505 762844506 762844507 762844508 762844509 762844510 762844511 762844512 762844513 762844514 762844515 762844516 762844517 762844518 762844519 762844520 762844521 762844522 762844523 762844524 762844525 762844526 762844527 762844528 762844529 762844530 762844531 762844532 762844533 762844534 762844535 762844536 762844537 762844538 762844539 762844540 762844541 762844542 762844543 762844544 762844545 762844546 762844547 762844548 762844549 762844550 762844551 762844552 762844553 762844554 762844555 762844556 762844557 762844558 762844559 762844560 762844561 762844562 762844563 762844564 762844565 762844566 762844567 762844568 762844569 762844570 762844571 
762844572 762844573 762844574 762844575 762844576 762844577 762844578 762844579 762844580 762844581 762844582 762844583 762844584 762844585 762844586 762844587 762844588 762844589 762844590 762844591 762844592 762844593 762844594 762844595 762844596 762844597 762844598 762844599 762844600 762844601 762844602 762844603 762844604 762844605 762844606 762844607 762844608 762844609 762844610 762844611 762844612 762844613 762844614 762844615 762844616 762844617 762844618 762844619 762844620 762844621 762844622 762844623 762844624 762844625 762844626 762844627 762844628 762844629 762844630 762844631 762844632 762844633 762844634 762844635 762844636 762844637 762844638 762844639 762844640 762844641 762844642 762844643 762844644 762844645 762844646 762844647 762844648 762844649 762844650 762844651 762844652 762844653 762844654 762844655 762844656 762844657 762844658 762844659 762844660 762844661 762844662 762844663 762844664 762844665 762844666 762844667 762844668 762844669 762844670 762844671 762844672 762844673 762844674 762844675 762844676 762844677 762844678 762844679 762844680 762844681 762844682 762844683 762844684 762844685 762844686 762844687 762844688 762844689 762844690 762844691 762844692 762844693 762844694 762844695 762844696 762844697 762844698 762844699 762844700 762844701 762844702 762844703 762844704 762844705 762844706 762844707 762844708 762844709 762844710 762844711 762844712 762844713 762844714 762844715 762844716 762844717 762844718 762844719 762844720 762844721 762844722 762844723 762844724 762844725 762844726 762844727 762844728 762844729 762844730 762844731 762844732 762844733 762844734 762844735 762844736 762844774 762844775 762844776 762844777 762844778 762844779 762844780 762844781 762844782 762844783 762844784 762844785 762844786 762844787 762844792 762844793 762844795 762844796 762844797 762844798 762844799 762844800 762844801 762844802 762844803 762844804 762844805 762844806 762844807 762844808 762844809 762844810 762844811 762844812 762844813 
762844814 762844815 762844816 762844817 762844818 762844819 762844820 762844821 762844822 762844823 762844824 762844825 762844826 762844827 762844828 762844829 762844830 762844831 762844832 762844833 762844834 762844835 762844836 762844837 762844838 762844839 762844840 762844841 762844842 762844843 762844844 762844851 762844852 762844853 762844854 762844855 762844856 762844857 762844858 762844859 762844860 762844861 762844862 762844863 762844864 762844865 762844866 762844867 762844868 762844869 762844870 762844871 762844872 762844873 762844874 762844875 762844876 762844877 762844878 762844879 762844880 762844881 762844882 762844883 762844884 762844885 762844886 762844887 762844888 762844889 762844890 762844891 762844892 762844893 762844894 762844895 762844896 762844897 762844898 762844899 762844900 762844901 762844902 762844903 762844904 762844905 762844906 762844907 762844908 762844909 762844910 762844911 762844912 762844913 762844914 762844915 762844916 762844917 762844918 762844919 762844920 762844921 762844922 762844923 762844924 762844925 762844926 762844927 762844928 762844929 762844930 762844931 762844932 762844933 762844934 762844935 762844936 762844937 762844938 762844939 762844940 762844941 762844942 762844943 762844944 762844945 762844946 762844947 762844948 762844949 762844950 762844951 762844952 762844953 762844954 762844955 762844956 762844957 762844958 762844959 762844960 762844961 762844962 762844963 762844964 762844965 762844966 762844967 762844968 762844969 762844970 762844971 762844972 762844973 762844974 762844975 762844976 762844977 762844978 762844979 762844980 762844981 762844982 762844983 762844984 762844987 762844989 762844990 762844991 762844992 762844993 762844994 762844995 762844996 762844997 762844998 762844999 762845000 762845001 762845002 762845003 762845004 762845005 762845006 762845007 762845008 762845009 762845010 762845011 762845012 762845013 762845014 762845015 762845016 762845017 762845018 762845019 762845020 762845021 762845022 
762845023 762845024 762845025 762845026 762845027 762845028 762845029 762845030 762845031 762845032 762845033 762845034 762845035 762845036 762845037 762845038 762845039 762845044 762845045 762845047 762845048 762845049 762845050 762845051 762845052 762845053 762845054 762845055 762845056 762845057 762845058 762845059 762845060 762845061 762845062 762845063 762845064 762845065 762845066 762845067 762845068 762845069 762845070 762845071 762845072 762845073 762845074 762845075 762845076 762845077 762845078 762845079 762845080 762845081 762845082 762845083 762845084 762845085 762845086 762845087 762845088 762845089 762845090 762845091 762845092 762845093 762845094 762845095 762845096 762845097 762845098 762845099 762845100 762845101 762845102 762845103 762845104 762845105 762845106 762845107 762845108 762845109 762845110 762845111 762845112 762845113 762845114 762845115 762845116 762845117 762845118 762845119 762845120 762845121 762845122 762845123 762845124 762845125 762845126 762845127 762845128 762845129 762845130 762845131 762845132 762845133 762845134 762845135 762845136 762845137 762845138 762845139 762845140 762845141 762845142 762845143 762845144 762845145 762845146 762845147 762845148 762845149 762845150 762845151 762845152 762845153 762845154 762845155 762845156 762845157 762845158 762845159 762845160 762845161 762845162 762845163 762845164 762845165 762845166 762845167 762845168 762845169 762845170 762845171 762845172 762845173 762845174 762845175 762845176 762845177 762845178 762845179 762845180 762845181 762845182 762845183 762845184 762845185 762845186 762845187 762845188 762845189 762845190 762845191 762845192 762845193 762845194 762845195 762845196 762845197 762845198 762845199 762845200 762845201 762845202 762845203 762845204 762845205 762845206 762845207 762845208 762845209 762845210 762845211 762845212 762845213 762845214 762845215 762845216 762845217 762845218 762845219 762845220 762845221 762845222 762845223 762845224 762845225 762845226 762845227 
762845228 762845229 762845230 762845231 762845232 762845233 762845234 762845235 762845236 762845237 762845238 762845239 762845240 762845241 762845242 762845243 762845244 762845245 762845246 762845247 762845248 762845249 762845250 762845251 762845252 762845253 762845254 762845255 762845256 762845257 762845258 762845259 762845260 762845261 762845262 762845263 762845264 762845265 762845266 762845267 762845268 762845269 762845270 762845271 762845272 762845273 762845274 762845275 762845276 762845277 762845278 762845279 762845280 762845281 762845282 762845283 762845287 762845288 762845289 762845290 762845291 762845292 762845293 762845294 762845295 762845296 762845297 762845298 762845299 762845300 762845301 762845302 762845303 762845304 762845305 762845306 762845307 762845308 762845309 762845310 762845311 762845312 762845313 762845314 762845315 762845316 762845317 762845318 762845319 762845320 762845321 762845322 762845323 762845324 762845325 762845326 762845327 762845328 762845329 762845330 762845331 762845332 762845333 762845334 762845335 762845336 762845337 762845338 762845339 762845340 762845341 762845342 762845343 762845344 762845345 762845346 762845347 762845348 762845349 762845350 762845351 762845352 762845353 762845354 762845355 762845356 762845357 762845358 762845359 762845360 762845361 762845362 762845363 762845364 762845365 762845366 762845367 762845368 762845369 762845370 762845371 762845372 762845373 762845374 762845375 762845376 762845377 762845378 762845379 762845380 762845381 762845382 762845383 762845384 762845385 762845386 762845387 762845388 762845389 762845390 762845391 762845392 762845393 762845394 762845395 762845396 762845397 762845398 762845399 762845400 762845401 762845402 762845403 762845404 762845405 762845406 762845407 762845408 762845409 762845410 762845411 762845412 762845413 762845414 762845415 762845416 762845417 762845418 762848830 762848837 762848838 762848839 762848842 762848843 762848844 762848846 762848847 762848848 762848849 762848850 
762848851 762848852 762848853 762848854 762848855 762848856 762848857 762848858 762848859 762848860 762848861 762848862 762848863 762848864 762848865 762848866 762848867 762848868 762848869 762848870 762848871 762848872 762848873 762848874 762848875 762848876 762848877 762848878 762848879 762848880 762848881 762848882 762848883 762848884 762848885 762848886 762848887 762848888 762848889 762848890 762848891 762848892 762848893 762848894 762848895 762848906 762848907 762848908 762848909 762848910 762848911 762848912 762848913 762848914 762848924 762848925 762848926 762848927 762848928 762848929 762848930 762848931 762848932 762848933 762848934 762848935 762848936 762848937 762848938 762848939 762848940 762848941 762848942 762848943 762848944 762848945 762848946 762848947 762848948 762848949 762848950 762848951 762848952 762848953 762848954 762848955 762848956 762848957 762848958 762848959 762848960 762848961 762848962 762848963 762848964 762848965 762848966 762848967 762848968 762848969 762848970 762848971 762848972 762848973 762848974 762848975 762848976 762848977 762848978 762848979 762848980 762848981 762848982 762848983 762848984 762848985 762848986 762848987 762848988 762848989 762848990 762848991 762848992 762848993 762848994 762848995 762848996 762848997 762848998 762848999 762849000 762849001 762849002 762849003 762849004 762849005 762849006 762849007 762849008 762849009 762849010 762849011 762849012 762849013 762849014 762849015 762849016 762849017 762849018 762849019 762849020 762849021 762849022 762849023 762849024 762849025 762849026 762849027 762849028 762849029 762849030 762849031 762849032 762849033 762849034 762849035 762849036 762849037 762849038 762849039 762849040 762849041 762849042 762849043 762849044 762849045 762849046 762849047 762849048 762849049 762849050 762849051 762849052 762849053 762849054 762849055 762849056 762849057 762849058 762849059 762849060 762849061 762849062 762849063 762849064 762849065 762849066 762849067 762849068 762849069 
762849070 762849071 762849072 762849073 762849074 762849075 762849076 762849077 762849078 762849079 762849080 762849081 762849082 762849083 762849084 762849085 762849086 762849087 762849088 762849089 762849090 762849091 762849092 762849093 762849094 762849095 762849096 762849097 762849098 762849099 762849100 762849101 762849102 762849103 762849104 762849105 762849106 762849107 762849108 762849109 762849110 762849111 762849112 762849113 762849114 762849115 762849116 762849117 762849118 762849119 762849120 762849121 762849122 762849123 762849124 762849125 762849126 762849127 762849128 762849129 762849130 762849131 762849132 762849133 762849134 762849135 762849136 762849137 762849138 762849139 762849140 762849141 762849142 762849143 762849144 762849145 762849146 762849147 762849148 762849149 762849150 762849151 762849152 762849153 762849154 762849155 762849156 762849157 762849158 762849159 762849160 762849161 762849162 762849163 762849164 762849165 762849166 762849167 762849168 762849169 762849170 762849171 762849172 762849173 762849174 762849175 762849176 762849177 762849178 762849179 762849180 762849181 762849182 762849183 762849184 762849185 762849186 762849187 762849188 762849189 762849190 762849191 762849192 762849193 762849194 762849195 762849196 762849197 762849198 762849199 762849200 762849201 762849202 762849203 762849204 762849205 762849206 762849207 762849208 762849209 762849210 762849211 762849212 762849213 762849214 762849215 762849216 762849217 762849218 762849219 762849220 762849221 762849222 762849223 762849224 762849225 762849226 762849227 762849228 762849229 762849230 762849231 762849232 762849233 762849234 762849235 762849236 762849237 762849238 762849239 762849240 762849241 762849242 762849243 762849244 762849245 762849246 762849247 762849248 762849249 762849250 762849251 762849252 762849253 762849254 762849255 762849256 762849257 762849258 762849259 762849260 762849261 762849262 762849263 762849264 762849265 762849266 762849267 762849268 762849269 
762849270 762849271 762849272 762849273 762849274 762849275 762849276 762849277 762849278 762849279 762849280 762849281 762849282 762849283 762849284 762849285 762849286 762849287 762849288 762849289 762849290 762849291 762849292 762849293 762849294 762849295 762849296 762849297 762849298 762849299 762849300 762849301 762849302 762849303 762849304 762849305 762849306 762849307 762849308 762849309 762849310 762849311 762849312 762849313 762849314 762849315 762849316 762849317 762849318 762849319 762849320 762849321 762849322 762849323 762849324 762849325 762849326 762849327 762849328 762849329 762849330 762849331 762849332 762849333 762849334 762849335 762849336 762849337 762849338 762849339 762849340 762849341 762849342 762849343 762849344 762849345 762849346 762849347 762849348 762849349 762849350 762849351 762849352 762849353 762849354 762849355 762849356 762849357 762849358 762849359 762849360 762849361 762849362 762849363 762849364 762849365 762849366 762849367 762849368 762849369 762849370 762849371 762849372 762849373 762849374 762849375 762849376 762849377 762849378 762849379 762849380 762849381 762849382 762849383 762849384 762849385 762849386 762849387 762849388 762849389 762849390 762849391 762849392 762849393 762849394 762849395 762849396 762849397 762849398 762849399 762849400 762849401 762849402 762849403 762849404 762849405 762849406 762849407 762849408 762849409 762849410 762849411 762849412 762849413 762849414 762849415 762849416 762849417 762849418 762849419 762849420 762849421 762849422 762849423 762849424 762849425 762849426 762849427 762849428 762849429 762849430 762849431 762849432 762849433 762849434 762849435 762849436 762849437 762849438 762849439 762849440 762849441 762849442 762849443 762849444 762849445 762849446 762849447 762849448 762849449 762849450 762849451 762849452 762849453 762849454 762849455 762849456 762849457 762849458 762849459 762849460 762849461 762849462 762849463 762849464 762849465 762849466 762849467 762849468 762849469 
762849470 762849471 762849472 762849473 762849474 762849475 762849476 762849477 762849478 762849479 762849480 762849481 762849482 762849483 762849484 762849485 762849486 762849487 762849488 762849489 762849490 762849491 762849492 762849493 762849494 762849495 762849496 762849497 762849498 762849499 762849500 762849501 762849502 762849503 762849504 762849505 762849506 762849507 762849508 762849509 762849510 762849511 762849512 762849513 762849514 762849515 762849516 762849517 762849518 762849519 762849520 762849521 762849522 762849523 762849524 762849525 762849526 762849527 762849528 762849529 762849530 762849531 762849532 762849533 762849534 762849535 762849536 762849537 762849538 762849539 762849540 762849541 762849542 762849543 762849544 762849545 762849546 762849547 762849548 762849549 762849550 762849551 762849552 762849553 762849554 762849555 762849556 762849557 762849558 762849559 762849560 762849561 762849562 762849563 762849564 762849565 762849566 762849567 762849568 762849569 762849570 762849571 762849572 762849573 762849574 762849575 762849576 762849577 762849578 762849579 762849580 762849581 762849582 762849583 762849584 762849585 762849586 762849587 762849588 762849589 762849590 762849591 762849592 762849593 762849594 762849595 762849596 762849597 762849598 762849599 762849600 762849601 762849602 762849603 762849604 762849605 762849606 762849607 762849608 762849609 762849610 762849611 762849612 762849613 762849614 762849615 762849616 762849617 762849618 762849619 762849620 762849621 762849622 762849623 762849624 762849625 762849626 762849627 762849628 762849629 762849630 762849631 762849632 762849633 762849634 762849635 762849636 762849637 762849638 762849639 762849640 762849641 762849642 762849643 762849644 762849645 762849646 762849647 762849648 762849649 762849650 762849651 762849652 762849653 762849654 762849655 762849656 762849657 762849658 762849659 762849660 762849661 762849662 762849663 762849664 762849665 762849666 762849667 762849668 762849669 
762849670 762849671 762849672 762849673 762849674 762849675 762849676 762849677 762849678 762849679 762849680 762849681 762849682 762849683 762849684 762849685 762849686 762849687 762849688 762849689 762849690 762849691 762849692 762849693 762849694 762849695 762849696 762849697 762849698 762849699 762849700 762849701 762849702 762849703 762849704 762849705 762849706 762849707 762849708 762849709 762849710 762849711 762849712 762849713 762849714 762849715 762849716 762849717 762849718 762849719 762849720 762849721 762849722 762849723 762849724 762849725 762849726 762849727 762849728 762849729 762849730 762849731 762849732 762849733 762849734 762849735 762849736 762849737 762849738 762849739 762849740 762849741 762849742 762849743 762849744 762849745 762849746 762849747 762849748 762849749 762849750 762849751 762849752 762849753 762849754 762849755 762849756 762849757 762849758 762849759 762849760 762849761 762849762 762849763 762849764 762849765 762849766 762849767 762849768 762849769 762849770 762849771 762849772 762849773 762849774 762849775 762849776 762849777 762849778 762849779 762849780 762849781 762849782 762849783 762849784 762849785 762849786 762849787 762849788 762849789 762849790 762849791 762849792 762849793 762849794 762849795 762849796 762849797 762849798 762849799 762849800 762849801 762849802 762849803 762849804 762849805 762849806 762849807 762849808 762849809 762849810 762849811 762849812 762849813 762849814 762849815 762849816 762849817 762849818 762849819 762849820 762849821 762849822 762849823 762849824 762849825 762849826 762849827 762849828 762849829 762849830 762849831 762849832 762849833 762849834 762849835 762849836 762849837 762849838 762849839 762849840 762849841 762849842 762849843 762849844 762849845 762849846 762849847 762849848 762849849 762849850 762849851 762849852 762849853 762849854 762849855 762849856 762849857 762850239 762850247 762850248 762850249 762850250 762850251 762850252 762850253 762850254 762850255 762850256 762850257 
762850258 762850259 762850260 762850261 762850262 762850263 762850264 762850265 762850266 762850267 762850268 762850269 762850270 762850271 762850272 762850273 762850274 762850275 762850276 762850277 762850278 762850279 762850280 762850281 762850282 762850283 762850284 762850285 762850286 762850287 762850288 762850289 762850290 762850291 762850292 762850293 762850294 762850295 762850296 762850297 762850298 762850299 762850300 762850301 762850302 762850303 762850304 762850305 762850306 762850307 762850308 762850309 762850310 762850311 762850312 762850313 762850314 762850324 762850325 762850326 762850327 762850328 762850329 762850330 762850331 762850332 762850333 762850334 762850335 762850336 762850337 762850338 762850339 762850340 762850341 762850342 762850343 762850344 762850345 762850346 762850347 762850348 762850349 762850350 762850351 762850352 762850353 762850354 762850355 762850356 762850357 762850358 762850359 762850360 762850361 762850362 762850363 762850364 762850365 762850366 762850367 762850368 762850369 762850370 762850371 762850372 762850373 762850374 762850375 762850376 762850377 762850378 762850379 762850380 762850381 762850382 762850383 762850384 762850385 762850386 762850387 762850388 762850389 762850390 762850391 762850392 762850393 762850394 762850395 762850396 762850397 762850398 762850399 762850400 762850401 762850402 762850403 762850404 762850405 762850406 762850407 762850408 762850409 762850410 762850411 762850412 762850413 762850414 762850415 762850416 762850417 762850418 762850419 762850420 762850421 762850422 762850423 762850424 762850425 762850426 762850427 762850428 762850429 762850430 762850431 762850432 762850433 762850434 762850435 762850436 762850437 762850438 762850439 762850440 762850441 762850442 762850443 762850444 762850445 762850446 762850447 762850448 762850449 762850450 762850451 762850452 762850453 762850454 762850455 762850456 762850457 762850458 762850459 762850460 762850461 762850462 762850463 762850464 762850465 762850466 
762850467 762850468 762850469 762850470 762850471 762850477 762850478 762850479 762850480 762850481 762850482 762850483 762850484 762850485 762850486 762850487 762850488 762850489 762850490 762850491 762850492 762850493 762850494 762850495 762850496 762850497 762850498 762850499 762850500 762850501 762850502 762850503 762850504 762850505 762850506 762850507 762850508 762850509 762850510 762850511 762850512 762850513 762850514 762850515 762850516 762850517 762850518 762850519 762850520 762850521 762850522 762850523 762850524 762850525 762850526 762850527 762850528 762850529 762850530 762850531 762850532 762850533 762850534 762850535 762850536 762850537 762850538 762850539 762850540 762850541 762850542 762850543 762850544 762850545 762850546 762850547 762850548 762850549 762850550 762850551 762850552 762850553 762850554 762850555 762850556 762850557 762850558 762850559 762850560 762850561 762850562 762850563 762850564 762850565 762850566 762850567 762850568 762850569 762850570 762850571 762850572 762850573 762850574 762850575 762850576 762850577 762850578 762850579 762850580 762850581 762850582 762850583 762850584 762850585 762850586 762850587 762850588 762850589 762850590 762850591 762850592 762850593 762850594 762850595 762850596 762850597 762850598 762850599 762850600 762850601 762850602 762850603 762850604 762850605 762850606 762850607 762850608 762850609 762850610 762850611 762850612 762850613 762850614 762850615 762850616 762850617 762850618 762850619 762850620 762850621 762850622 762850623 762850624 762850625 762850626 762850627 762850628 762850629 762850630 762850631 762850632 762850633 762850634 762850635 762850636 762850637 762850638 762850639 762850640 762850641 762850642 762850643 762850644 762850645 762850646 762850647 762850648 762850649 762850650 762850651 762850652 762850653 762850654 762850655 762850656 762850657 762850658 762850659 762850660 762850661 762850662 762850663 762850664 762850665 762850666 762850667 762850668 762850669 762850670 762850671 
762850672 762850673 762850674 762850675 762850676 762850677 762850678 762850679 762850680 762850681 762850682 762850683 762850684 762850685 762850686 762850687 762850688 762850689 762850690 762850691 762850692 762850693 762850694 762850695 762850696 762850697 762850698 762850699 762850700 762850701 762850702 762850703 762850704 762850705 762850706 762850707 762850708 762850709 762850710 762850711 762850712 762850713 762850714 762850715 762850716 762850717 762850718 762850719 762850720 762850721 762850722 762850723 762850724 762850725 762850726 762850727 762850728 762850729 762850730 762850731 762850732 762850733 762850734 762850735 762850736 762850737 762850738 762850739 762850740 762850741 762850742 762850743 762850744 762850745 762850746 762850747 762850748 762850749 762850750 762850751 762850752 762850753 762850754 762850755 762850756 762850757 762850758 762850759 763270975 763294957 763295806 763304366 763304599 763305228 763305975 763309274 763314274 763315741 763315937 763316917 763317970 763318262 763318713 763319742 763319967 763321380 763336881 763339989 763351734 763351765 763351871 763360683 763410221 763413396 763416641 763536557 763536771 766222412 766339175 766339582 766339650 766339968 767275060 767418536 767420776 767441363 767444452 767466774 767466940 767551096 767563766 767567102 767567402 767568483 767573397 767573611 767574307 767584105 767648675 767648768 767652306 767745369 767746068 767746144 767746400 767746701 767746818 767746984 767747089 767748183 767770522 767772992 767773075 767833797 767834276 767843728 767843821 767843852 767844245 767844488 767857001 768418552 768419091 768425649 768457230 768457380 768457832 768789133 768794659 768958184 768963625 768965338 768973188 768976109 768982034 768985197 768986819 768990217 768994175 768995538 768997916 768998913 769002807 769027017 769048826 769050666 769051132 769058425 769082134 769127357 769144978 769153262 769169525 769171105 769177044 769191207 769197221 769216318 769929638 769950853 
769981712 770063212 770069820 770201269 770510343 770753083 770756615 770756781 770756985 770769011 770769288 770769577 771035940 771721274 771747500 771749540 771755771 771768633 771777548 771788692 771791931 771872333 771903016 772028804 772039370 772040639 772043141 772043294 772082121 772357462 772358371 772362182 772362353 772365767 772366177 772366443 772367711 772368528 772369703 772370388 772373012 773312959 773348895 773397229 773402075 773464227 773499667 773872671 773873397 775135682 777785696 777812887 778002106 778094454 778134090 778151583 778151694 778151803 778151876 778151957 778152803 778201138 778728117 778728174 778732183 779474346 779497098 779516634 779656559 779779542 780314340 780338135 780338279 780338588 780339353 780339504 780339762 780398143 780398726 780398938 780399138 780399256 780399389 780399458 780400026 780400414 780400884 780401272 780414871 780415746 780446363 780535892 780537177 780557520 780561690 780595424 780595772 780596021 780596253 780596429 780596849 780609559 780619772 780619898 780627268 780627914 780628657 780629613 780630479 780720139 780720518 780721907 780784516 780974266 781007670 781021997 781022497 781111901 781112242 781113723 781114147 781127241 781127397 781141576 781142254 781142572 781147762 781149873 781151224 781151527 781151679 781151788 781152240 781152378 781152547 781152773 781264416 781264732 781265659 781266717 781267761 781268027 781268257 781268516 781332051 781335821 781335941 781377995 781378776 781398771 781415826 781416195 781591415 781682416 781684854 781777222 781780775 781781320 781781398 781785482 781788723 781790875 781796084 781796677 781813396 781828030 781831345 781831363 781831520 781834712 781834764 781834908 781835262 781835308 781836198 781836224 781836814 782007167 782015883 782235282 782318761 782517335 782540644 782607444 782608006 782691329 782691602 782692774 782697560 782698614 782698880 782699232 782699675 782700119 782712300 782719930 782729269 782742656 782749674 782778334 
782778538 782808055 783375549 783403095 784059803 784164785 784197322 784254025 784266252 784267718 784268194 784268871 784272349 784275706 784278129 784283081 784283453 784334494 784348544 784488645 784536788 784545210 784552713 784558658 784571272 784599015 784600709 785960951 788595916 788633436 788635458 788664118 788693715 788699805 788707956 788749572 788838807 788849100 788679341 788868020 788693958 788876992 788888813 788890371 788890949 788984747 788989296 789037336 789082306 789082864 789085691 789086886 789163259 789171837 789188381 789201584 789266452 789274573 789585530 789586214 789587678 789699287 789699829 789700285 789702536 789741706 789742472 789744618 789746031 789809918 790129701 790202843 790219215 790226114 790302196 790346824 790347843 790448348 790449712 790450793 790454280 790454991 790456043 790456198 790456489 790458026 790461802 790462129 790467686 790475869 790478765 790483030 790594802 790595374 790603419 790613884 790627324 790629249 790630227 790631189 790632816 790684454 790727188 790742549 790764198 791569889 791801937 791834203 791834864 791893587 791895662 791896230 791897550 791907491 791912434 791993458 792002157 792005358 792150978 792159831 792172891 792178211 792179905 792188386 792195026 792195393 792217694 792256655 792415465 793233316 793233485 793233497 793233498 793273291 793287666 793329077 793331256 793342139 793359112 793369887 793413268 793436815 793522877 793524226 793533662 793537252 793539145 793553592 793554140 793556649 793557384 793557441 793564039 793582318 793662814 793846730 793853488 793860667 793870317 793888558 793892063 793892701 793936849 793956349 793974918 794307557 794804812 794810548 794813736 794813739 794813741 794813742 794813743 794827053 794889353 794922499 795033516 795035012 795036904 795037280 795048241 795050633 795050636 795053207 795059807 795062154 795068927 795074658 795076542 795078367 795094908 795096604 795097989 795100710 795101685 795149590 795149591 795152046 795152051 795152052 
795152055 795152206 795152214 795152215 795152237 795152374 795152380 795164685 795164692 795164693 795164694 795164899 795164903 795164904 795164905 795165064 795165073 795165074 795165075 795170338 795170339 795170348 795170587 795170594 795170595 795170752 795170770 795170775 795200226 795238998 795239188 795239352 795239474 795239478 795239506 795239646 795239794 795243883 795243887 795243888 795244099 795244116 795244263 795244271 795244541 795244553 795244554 795244707 795244728 795244743 795244907 795245226 795245232 795245243 795245395 795245407 795247806 795247813 795248432 795248558 795248587 795249607 795249608 795249609 795249792 795249890 795250189 795257288 795257289 795257327 795257463 795257474 795257631 795257644 795257814 795257825 795257831 795258007 795258183 795259274 795259628 795259804 795259833 795259966 795259977 795259984 795260027 795260254 795260510 795260518 795260576 795263903 795265804 795265810 795265811 795265950 795265955 795265973 795265983 795266110 795266171 795266172 795266232 795266290 795266369 795266391 795266401 795266524 795266564 795266570 795266637 795266741 795267110 795267163 795267298 795267375 795267461 795267475 795267478 795267698 795267827 795269596 795269656 795269659 795269670 795269677 795269940 795270103 795283071 796142365 796142738 796267570 797185151 797185862 799007429 799009186 799009195 799009368 799009373 799009388 799009389 799009532 799009540 799009556 799009910 799009916 799009917 799010050 799010071 799010072 799010076 799017560 799017561 799017562 799017564 799017804 799017822 799017823 799017959 799017960 799018006 799019324 799019325 799019328 799019329 799019497 799019511 799019664 799019665 799019677 799019824 799019845 799020707 799020711 799020713 799020714 799020889 799020895 799020896 799020897 799020898 799021070 799021074 799021075 799021077 799021078 799021246 799021247 799021248 799021249 799021263 799021395 799021420 799115390 799240619 799324945 799324946 799324947 799324951 799324952 
799325162 799325163 799325168 799325169 799325170 799325348 799325349 799325350 799325518 799325519 799325520 799325526 799325673 799325674 799325677 799325678 799325679 799328639 799334506 799351963 799359944 799361781 799368836 799387625 799434137 799436846 799469053 799493665 799526556 799595188 761164252 761169140 761202002 762417283 762417843 762417919 762418292 763351332 763861811 763863842 771749851 772372212 779410956 779433203 780314210 780462539 780595024 781787054 782273035 782315213 783390405 790536843 791697683 791775969 791919567 793233310 793233317 793233318 793233320 793376458 793523679 794893134 794918134 795259635 795265799 589866662 587489153 610736373 + HISTORY_START: 862000000 + # to see historical TXNS_TO_SKIP, check out ce6158ac2764ee9d4c8738a85f3bcdc6bd0cadc1 + TXNS_TO_SKIP: "0" + # 1195000000-122000000: https://github.com/aptos-labs/aptos-core/pull/13832 + RANGES_TO_SKIP: "1195000000-1220000000" BACKUP_CONFIG_TEMPLATE_PATH: terraform/helm/fullnode/files/backup/gcs.yaml # workflow config RUNS_ON: "high-perf-docker-with-local-ssd" - TIMEOUT_MINUTES: 480 + TIMEOUT_MINUTES: 180 + MAX_VERSIONS_PER_RANGE: 2000000 replay-mainnet: if: | @@ -72,14 +87,18 @@ jobs: with: GIT_SHA: ${{ inputs.GIT_SHA }} # replay-verify config - BUCKET: aptos-mainnet-backup + BUCKET: ${{ inputs.MAINNET_BUCKET || 'aptos-mainnet-backup' }} SUB_DIR: e1 - HISTORY_START: 0 - TXNS_TO_SKIP: 12253479 12277499 148358668 + HISTORY_START: 518000000 + #TXNS_TO_SKIP: 12253479 12277499 148358668 + TXNS_TO_SKIP: "0" + # 1197378568-1198492648: https://github.com/aptos-labs/aptos-core/pull/13832 + RANGES_TO_SKIP: "1197378568-1198492648" BACKUP_CONFIG_TEMPLATE_PATH: terraform/helm/fullnode/files/backup/gcs.yaml # workflow config RUNS_ON: "high-perf-docker-with-local-ssd" - TIMEOUT_MINUTES: 480 + TIMEOUT_MINUTES: 180 + MAX_VERSIONS_PER_RANGE: 800000 test-replay: if: ${{ (github.event_name == 'pull_request') && contains(github.event.pull_request.labels.*.name, 'CICD:test-replay')}} @@ 
-89,11 +108,15 @@ jobs: with: GIT_SHA: ${{ github.event.pull_request.head.sha }} # replay-verify config - BUCKET: aptos-testnet-backup + BUCKET: ${{ inputs.TESTNET_BUCKET || 'aptos-testnet-backup' }} SUB_DIR: e1 - HISTORY_START: 250000000 # TODO: We need an exhaustive list of txns_to_skip before we can set this to 0. - TXNS_TO_SKIP: 46874937 151020059 409163615 409163669 409163708 409163774 409163845 409163955 409164059 409164191 414625832 + HISTORY_START: 862000000 + # to see historical TXNS_TO_SKIP, check out ce6158ac2764ee9d4c8738a85f3bcdc6bd0cadc1 + TXNS_TO_SKIP: "0" + # 1195000000-1220000000: https://github.com/aptos-labs/aptos-core/pull/13832 + RANGES_TO_SKIP: "1195000000-1220000000" BACKUP_CONFIG_TEMPLATE_PATH: terraform/helm/fullnode/files/backup/gcs.yaml # workflow config RUNS_ON: "high-perf-docker-with-local-ssd" TIMEOUT_MINUTES: 120 # increase test replay timeout to capture more flaky errors + MAX_VERSIONS_PER_RANGE: 2000000 diff --git a/.github/workflows/run-fullnode-sync.yaml b/.github/workflows/run-fullnode-sync.yaml index 0b5d27b6f97..82295e882bc 100644 --- a/.github/workflows/run-fullnode-sync.yaml +++ b/.github/workflows/run-fullnode-sync.yaml @@ -60,7 +60,7 @@ jobs: runs-on: medium-perf-docker-with-local-ssd timeout-minutes: ${{ inputs.TIMEOUT_MINUTES || 300 }} # the default run is 300 minutes (5 hours). Specified here because workflow_dispatch uses string rather than number steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: ./.github/actions/fullnode-sync with: @@ -105,6 +105,6 @@ jobs: # Because we have to checkout the actions and then check out a different # git ref, it's possible the actions directory will be modified. So, we # need to check it out again for the Post Run actions/checkout to succeed. 
- - uses: actions/checkout@v3 + - uses: actions/checkout@v4 with: path: actions diff --git a/.github/workflows/run-gas-calibration.yaml b/.github/workflows/run-gas-calibration.yaml index 1f91062a4f6..9d613140b8a 100644 --- a/.github/workflows/run-gas-calibration.yaml +++ b/.github/workflows/run-gas-calibration.yaml @@ -27,7 +27,7 @@ jobs: if: contains(github.event.pull_request.labels.*.name, 'CICD:non-required-tests') runs-on: runs-on,cpu=64,family=c7,hdd=500,image=aptos-ubuntu-x64,run-id=${{ github.run_id }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 with: fetch-depth: 0 # get all the history because cargo xtest --change-since origin/main requires it. - uses: aptos-labs/aptos-core/.github/actions/rust-setup@main diff --git a/.github/workflows/rust-client-tests.yaml b/.github/workflows/rust-client-tests.yaml index 0b5ce8e5a9c..d612fc824c5 100644 --- a/.github/workflows/rust-client-tests.yaml +++ b/.github/workflows/rust-client-tests.yaml @@ -33,7 +33,7 @@ jobs: needs: [permission-check] runs-on: runs-on,cpu=64,family=c7,hdd=500,image=aptos-ubuntu-x64,run-id=${{ github.run_id }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: aptos-labs/aptos-core/.github/actions/docker-setup@main with: GCP_WORKLOAD_IDENTITY_PROVIDER: ${{ secrets.GCP_WORKLOAD_IDENTITY_PROVIDER }} @@ -52,7 +52,7 @@ jobs: needs: [permission-check] runs-on: runs-on,cpu=64,family=c7,hdd=500,image=aptos-ubuntu-x64,run-id=${{ github.run_id }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: aptos-labs/aptos-core/.github/actions/docker-setup@main with: GCP_WORKLOAD_IDENTITY_PROVIDER: ${{ secrets.GCP_WORKLOAD_IDENTITY_PROVIDER }} @@ -71,7 +71,7 @@ jobs: needs: [permission-check] runs-on: runs-on,cpu=64,family=c7,hdd=500,image=aptos-ubuntu-x64,run-id=${{ github.run_id }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: aptos-labs/aptos-core/.github/actions/docker-setup@main with: 
GCP_WORKLOAD_IDENTITY_PROVIDER: ${{ secrets.GCP_WORKLOAD_IDENTITY_PROVIDER }} diff --git a/.github/workflows/semgrep.yaml b/.github/workflows/semgrep.yaml index 19e8577fb38..d05dbe27073 100644 --- a/.github/workflows/semgrep.yaml +++ b/.github/workflows/semgrep.yaml @@ -20,7 +20,7 @@ jobs: if: (github.actor != 'dependabot[bot]') steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - run: semgrep ci env: SEMGREP_RULES: >- diff --git a/.github/workflows/test-copy-images-to-dockerhub.yaml b/.github/workflows/test-copy-images-to-dockerhub.yaml index b488d771f04..25c9e8387fa 100644 --- a/.github/workflows/test-copy-images-to-dockerhub.yaml +++ b/.github/workflows/test-copy-images-to-dockerhub.yaml @@ -18,7 +18,7 @@ jobs: test-copy-images: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: actions/setup-node@v3 with: node-version-file: .node-version diff --git a/.github/workflows/ts-sdk-e2e-tests.yaml b/.github/workflows/ts-sdk-e2e-tests.yaml deleted file mode 100644 index c906451abd7..00000000000 --- a/.github/workflows/ts-sdk-e2e-tests.yaml +++ /dev/null @@ -1,77 +0,0 @@ -# Each of these jobs runs the TS SDK E2E tests from this commit against a local testnet -# built from one of the aptos-core branches. Currently we only test against a local -# testnet in a CLI built from main. 
- -env: - GIT_SHA: ${{ github.event.pull_request.head.sha || github.sha }} - -name: "TS SDK E2E Tests" -on: - pull_request_target: - types: [labeled, opened, synchronize, reopened, auto_merge_enabled] - push: - branches: - - main - -permissions: - contents: read - id-token: write # Required for GCP Workload Identity federation which we use to login into Google Artifact Registry - -# cancel redundant builds -concurrency: - # cancel redundant builds on PRs (only on PR, not on branches) - group: ${{ github.workflow }}-${{ (github.event_name == 'pull_request_target' && github.event.pull_request.head.sha) || github.sha }} - cancel-in-progress: true - -jobs: - # Note on the job-level `if` conditions: - # This workflow is designed such that we run subsequent jobs only when a 'push' - # triggered the workflow or on 'pull_request's which have set auto_merge=true - # or have the label "CICD:run-e2e-tests". - permission-check: - runs-on: ubuntu-latest - steps: - - name: Check repository permission for user which triggered workflow - uses: sushichop/action-repository-permission@13d208f5ae7a6a3fc0e5a7c2502c214983f0241c - with: - required-permission: write - comment-not-permitted: Sorry, you don't have permission to trigger this workflow. - - # This job determines which files were changed - file_change_determinator: - needs: [permission-check] - runs-on: ubuntu-latest - outputs: - only_docs_changed: ${{ steps.determine_file_changes.outputs.only_docs_changed }} - steps: - - uses: actions/checkout@v3 - - name: Run the file change determinator - id: determine_file_changes - uses: ./.github/actions/file-change-determinator - - # This is a PR required job. This runs both the non-indexer and indexer TS SDK tests. - # Now that the latter runs against the local testnet too we make these land blocking. 
- run-tests-main-branch: - needs: [permission-check, file_change_determinator] - runs-on: runs-on,cpu=64,family=c7,hdd=500,image=aptos-ubuntu-x64,run-id=${{ github.run_id }} - steps: - - uses: actions/checkout@v3 - if: needs.file_change_determinator.outputs.only_docs_changed != 'true' - with: - ref: ${{ env.GIT_SHA }} - - uses: aptos-labs/aptos-core/.github/actions/docker-setup@main - if: needs.file_change_determinator.outputs.only_docs_changed != 'true' - with: - GCP_WORKLOAD_IDENTITY_PROVIDER: ${{ secrets.GCP_WORKLOAD_IDENTITY_PROVIDER }} - GCP_SERVICE_ACCOUNT_EMAIL: ${{ secrets.GCP_SERVICE_ACCOUNT_EMAIL }} - AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} - AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} - AWS_DOCKER_ARTIFACT_REPO: ${{ secrets.AWS_DOCKER_ARTIFACT_REPO }} - GIT_CREDENTIALS: ${{ secrets.GIT_CREDENTIALS }} - - uses: ./.github/actions/run-ts-sdk-e2e-tests - if: needs.file_change_determinator.outputs.only_docs_changed != 'true' - with: - BRANCH: main - GCP_DOCKER_ARTIFACT_REPO: ${{ vars.GCP_DOCKER_ARTIFACT_REPO }} - - run: echo "Skipping the tests on the main branch! Unrelated changes detected." - if: needs.file_change_determinator.outputs.only_docs_changed == 'true' diff --git a/.github/workflows/windows-build.yaml b/.github/workflows/windows-build.yaml index 920d5d8f942..43cdc00fc8e 100644 --- a/.github/workflows/windows-build.yaml +++ b/.github/workflows/windows-build.yaml @@ -36,7 +36,7 @@ jobs: # this case). 
See more here: # https://github.com/Swatinem/rust-cache#cache-details - name: Run cargo cache - uses: Swatinem/rust-cache@359a70e43a0bb8a13953b04a90f76428b4959bb6 # pin@v2.2.0 + uses: Swatinem/rust-cache@23bce251a8cd2ffc3c1075eaa2367cf899916d84 # pin@v2.7.3 - name: Install the Developer Tools run: Set-Variable ProgressPreference SilentlyContinue ; PowerShell -ExecutionPolicy Bypass -File scripts/windows_dev_setup.ps1 -t diff --git a/.github/workflows/workflow-run-docker-rust-build.yaml b/.github/workflows/workflow-run-docker-rust-build.yaml index c21a5e10710..50e6e14ee19 100644 --- a/.github/workflows/workflow-run-docker-rust-build.yaml +++ b/.github/workflows/workflow-run-docker-rust-build.yaml @@ -75,7 +75,7 @@ jobs: rust-all: runs-on: runs-on,cpu=64,family=c7,hdd=1024,image=aptos-ubuntu-x64,run-id=${{ github.run_id }},spot=co steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 with: ref: ${{ env.GIT_SHA }} diff --git a/.github/workflows/workflow-run-execution-performance.yaml b/.github/workflows/workflow-run-execution-performance.yaml index 48c5c1ee6ff..086b59b700d 100644 --- a/.github/workflows/workflow-run-execution-performance.yaml +++ b/.github/workflows/workflow-run-execution-performance.yaml @@ -55,7 +55,7 @@ jobs: outputs: run_execution_performance_test: ${{ steps.determine_test_targets.outputs.run_execution_performance_test }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Run the test target determinator id: determine_test_targets uses: ./.github/actions/test-target-determinator @@ -66,7 +66,7 @@ jobs: timeout-minutes: 60 runs-on: ${{ inputs.RUNNER_NAME }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 with: ref: ${{ inputs.GIT_SHA }} if: ${{ inputs.IGNORE_TARGET_DETERMINATION || needs.test-target-determinator.outputs.run_execution_performance_test == 'true' }} diff --git a/.github/workflows/workflow-run-forge.yaml b/.github/workflows/workflow-run-forge.yaml index 08735140d6c..f6c84b31002 
100644 --- a/.github/workflows/workflow-run-forge.yaml +++ b/.github/workflows/workflow-run-forge.yaml @@ -119,7 +119,7 @@ env: # at the call site, and don't need to wrap each step in an if statement? jobs: forge: - runs-on: ubuntu-latest + runs-on: runs-on,cpu=4,ram=16,family=m7a+m7i-flex,image=aptos-ubuntu-x64,run-id=${{ github.run_id }},spot=co timeout-minutes: ${{ inputs.TIMEOUT_MINUTES }} steps: - uses: actions/checkout@v4 @@ -165,13 +165,6 @@ jobs: GIT_CREDENTIALS: ${{ secrets.GIT_CREDENTIALS }} GCP_AUTH_DURATION: ${{ steps.calculate-auth-duration.outputs.auth_duration }} - - name: "Install GCloud SDK" - if: ${{ !inputs.SKIP_JOB }} - uses: "google-github-actions/setup-gcloud@v2" - with: - version: ">= 418.0.0" - install_components: "kubectl,gke-gcloud-auth-plugin" - - name: "Export GCloud auth token" if: ${{ !inputs.SKIP_JOB }} id: gcloud-auth diff --git a/.github/workflows/workflow-run-module-verify.yaml b/.github/workflows/workflow-run-module-verify.yaml index 2dd3a9ac7b0..de16bbade2f 100644 --- a/.github/workflows/workflow-run-module-verify.yaml +++ b/.github/workflows/workflow-run-module-verify.yaml @@ -38,7 +38,7 @@ jobs: timeout-minutes: ${{ inputs.TIMEOUT_MINUTES }} runs-on: ${{ inputs.RUNS_ON }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 with: ref: ${{ inputs.GIT_SHA }} diff --git a/.github/workflows/workflow-run-replay-verify.yaml b/.github/workflows/workflow-run-replay-verify.yaml index 29dbdf1c88c..cb7af2922a6 100644 --- a/.github/workflows/workflow-run-replay-verify.yaml +++ b/.github/workflows/workflow-run-replay-verify.yaml @@ -25,6 +25,10 @@ on: required: false type: string description: The list of transaction versions to skip. If not specified, it will use the default list. + RANGES_TO_SKIP: + required: false + type: string + description: The optional list of transaction ranges to skip.. BACKUP_CONFIG_TEMPLATE_PATH: description: "The path to the backup config template to use." 
type: string @@ -39,7 +43,11 @@ on: description: "Github job timeout in minutes" type: number required: true - default: 720 + default: 180 + MAX_VERSIONS_PER_RANGE: + description: "The maximum number of versions to process in a single job." + type: number + required: true # This allows the workflow to be triggered manually from the Github UI or CLI # NOTE: because the "number" type is not supported, we default to 720 minute timeout workflow_dispatch: @@ -65,6 +73,10 @@ on: required: false type: string description: The list of transaction versions to skip. If not specified, it will use the default list. + RANGES_TO_SKIP: + required: false + type: string + description: The optional list of transaction ranges to skip.. BACKUP_CONFIG_TEMPLATE_PATH: description: "The path to the backup config template to use." type: string @@ -75,25 +87,128 @@ on: type: string required: true default: "high-perf-docker-with-local-ssd" - + MAX_VERSIONS_PER_RANGE: + description: "The maximum number of versions to process in a single job." 
+ type: number + required: true jobs: + prepare: + runs-on: ${{ inputs.RUNS_ON }} + outputs: + job_ids: ${{ steps.gen-jobs.outputs.job_ids }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ inputs.GIT_SHA }} + + - name: Load cached aptos-debugger binary + id: cache-aptos-debugger-binary + uses: actions/cache@v4 + with: + # copy the binary to the root of the repo and cache it there, because rust-setup calls a cache-rust action + # which cleans up the target directory in its post action + path: | + aptos-debugger + testsuite/replay_verify.py + key: aptos-debugger-${{ inputs.GIT_SHA || github.sha }} + + - name: Prepare for build if not cached + if: steps.cache-aptos-debugger-binary.outputs.cache-hit != 'true' + uses: aptos-labs/aptos-core/.github/actions/rust-setup@main + with: + GIT_CREDENTIALS: ${{ inputs.GIT_CREDENTIALS }} + + - name: Build and strip aptos-debugger binary if not cached + if: steps.cache-aptos-debugger-binary.outputs.cache-hit != 'true' + shell: bash + run: | + cargo build --release -p aptos-debugger + strip -s target/release/aptos-debugger + cp target/release/aptos-debugger . 
+ + - name: Install GCloud SDK + uses: "google-github-actions/setup-gcloud@v2" + with: + version: ">= 418.0.0" + install_components: "kubectl,gke-gcloud-auth-plugin" + + - name: get timestamp to use in cache key + id: get-timestamp + run: echo "ts=$(date +%s)" >> $GITHUB_OUTPUT + + - name: Load cached backup storage metadata cache dir (and save back afterwards) + uses: actions/cache@v4 + with: + path: metadata_cache + key: metadata-cache-${{ inputs.BUCKET }}/${{ inputs.SUB_DIR }}-${{ steps.get-timestamp.outputs.ts }} + restore-keys: metadata-cache-${{ inputs.BUCKET }}/${{ inputs.SUB_DIR }}- + + - name: Generate job ranges + id: gen-jobs + env: + BUCKET: ${{ inputs.BUCKET }} + SUB_DIR: ${{ inputs.SUB_DIR }} + run: | + ./aptos-debugger aptos-db gen-replay-verify-jobs \ + --metadata-cache-dir ./metadata_cache \ + --command-adapter-config ${{ inputs.BACKUP_CONFIG_TEMPLATE_PATH }} \ + --start-version ${{ inputs.HISTORY_START }} \ + --ranges-to-skip "${{ inputs.RANGES_TO_SKIP }}" \ + --max-versions-per-range ${{ inputs.MAX_VERSIONS_PER_RANGE }} \ + \ + --max-ranges-per-job 16 \ + --output-json-file jobs.json \ + + + jq -c 'length as $N | [range(0; $N)]' jobs.json > job_ids.json + + cat job_ids.json + jq . 
jobs.json + + echo "job_ids=$(cat job_ids.json)" >> $GITHUB_OUTPUT + + - name: Cache backup storage config and job definition + uses: actions/cache/save@v4 + with: + path: | + ${{ inputs.BACKUP_CONFIG_TEMPLATE_PATH }} + jobs.json + key: backup-config-${{ inputs.BUCKET }}/${{ inputs.SUB_DIR }}-${{ github.run_id }} + replay-verify: - timeout-minutes: ${{ inputs.TIMEOUT_MINUTES || 720 }} + needs: prepare + timeout-minutes: ${{ inputs.TIMEOUT_MINUTES || 180 }} runs-on: ${{ inputs.RUNS_ON }} strategy: fail-fast: false matrix: - number: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18] # runner number + job_id: ${{ fromJson(needs.prepare.outputs.job_ids) }} steps: - - name: Echo Runner Number - run: echo "Runner is ${{ matrix.number }}" - - uses: actions/checkout@v4 + - name: Load cached aptos-debugger binary and replay_verify.py script + uses: actions/cache/restore@v4 with: - ref: ${{ inputs.GIT_SHA }} + path: | + aptos-debugger + testsuite/replay_verify.py + key: aptos-debugger-${{ inputs.GIT_SHA || github.sha }} + fail-on-cache-miss: true + + - name: Load cached backup storage metadata cache dir + uses: actions/cache/restore@v4 + with: + path: metadata_cache + key: metadata-cache-${{ inputs.BUCKET }}/${{ inputs.SUB_DIR }}- + fail-on-cache-miss: true - - uses: aptos-labs/aptos-core/.github/actions/rust-setup@main + - name: Load cached backup storage config and job definitions + uses: actions/cache/restore@v4 with: - GIT_CREDENTIALS: ${{ secrets.GIT_CREDENTIALS }} + path: | + ${{ inputs.BACKUP_CONFIG_TEMPLATE_PATH }} + jobs.json + key: backup-config-${{ inputs.BUCKET }}/${{ inputs.SUB_DIR }}-${{ github.run_id }} + fail-on-cache-miss: true - name: Install GCloud SDK uses: "google-github-actions/setup-gcloud@v2" @@ -101,16 +216,79 @@ jobs: version: ">= 418.0.0" install_components: "kubectl,gke-gcloud-auth-plugin" - - name: Build CLI binaries in release mode - shell: bash - run: cargo build --release -p aptos-debugger - - name: Run replay-verify in 
parallel - shell: bash - run: testsuite/replay_verify.py ${{ matrix.number }} 19 # first argument is the runner number, second argument is the total number of runners env: BUCKET: ${{ inputs.BUCKET }} SUB_DIR: ${{ inputs.SUB_DIR }} - HISTORY_START: ${{ inputs.HISTORY_START }} - TXNS_TO_SKIP: ${{ inputs.TXNS_TO_SKIP }} - BACKUP_CONFIG_TEMPLATE_PATH: ${{ inputs.BACKUP_CONFIG_TEMPLATE_PATH }} + shell: bash + run: | + set -o nounset -o errexit -o pipefail + replay() { + idx=$1 + id=$2 + begin=$3 + end=$4 + desc=$5 + + echo --------- + echo Job start. $id: $desc + echo --------- + + MC=metadata_cache_$idx + cp -r metadata_cache $MC + DB=db_$idx + + for try in {0..6} + do + if [ $try -gt 0 ]; then + SLEEP=$((10 * $try)) + echo "sleeping for $SLEEP seconds before retry #$try" >&2 + sleep $SLEEP + fi + + res=0 + ./aptos-debugger aptos-db replay-verify \ + --metadata-cache-dir $MC \ + --command-adapter-config ${{ inputs.BACKUP_CONFIG_TEMPLATE_PATH }} \ + --start-version $begin \ + --end-version $end \ + \ + --lazy-quit \ + --enable-storage-sharding \ + --target-db-dir $DB \ + --concurrent-downloads 8 \ + --replay-concurrency-level 4 \ + || res=$? + + if [[ $res == 0 || $res == 2 ]] + then + return $res + fi + done + return 1 + } + + pids=() + idx=0 + while read id begin end desc; do + + replay $idx $id $begin $end "$desc" 2>&1 | sed "s/^/[partition $idx]: /" & + + pids[$idx]=$! + idx=$((idx+1)) + done < <(jq '.[${{ matrix.job_id }}][]' jobs.json) + + res=0 + for idx in `seq 0 $((idx-1))` + do + range_res=0 + wait ${pids[$idx]} || range_res=$? 
+ echo partition $idx returned $range_res + if [[ $range_res != 0 ]] + then + res=$range_res + fi + done + + echo All partitions done, returning $res + exit $res diff --git a/.mailmap b/.mailmap deleted file mode 100644 index c938eba08a5..00000000000 --- a/.mailmap +++ /dev/null @@ -1,27 +0,0 @@ -0xbe1 <0xbetrue@gmail.com> <101405096+0xbe1@users.noreply.github.com> -0xchloe <79347459+0xchloe@users.noreply.github.com> <79347459+chloeqjz@users.noreply.github.com> -Bowen Yang -Clay Murphy <114445310+clay-aptos@users.noreply.github.com> -Clay Murphy <114445310+clay-aptos@users.noreply.github.com> -Clay Murphy <114445310+clay-aptos@users.noreply.github.com> -Clay Murphy <114445310+clay-aptos@users.noreply.github.com> -Daniel Porteous (dport) -David Wolinsky -Greg Nazario -Jill Xu -Jill Xu <121921928+jillxuu@users.noreply.github.com> -Jijun Leng -Joshua Lind -Joshua Lind -Joseph Hughes <98909677+moonclavedev@users.noreply.github.com> -Joseph Hughes <98909677+moonclavedev@users.noreply.github.com> <41088824+d2Dreamer@users.noreply.github.com> -Junkil Park -Kevin Hoang -Kevin Hoang <105028215+movekevin@users.noreply.github.com> -Maayan Savir -Maayan Savir -Max Kaplan -Max Kaplan <1482859+CapCap@users.noreply.github.com> -Max Kaplan -Raj Karamchedu -Wolfgang Grieskamp diff --git a/Cargo.lock b/Cargo.lock index 1317bed0c8f..b9a3ffcc14f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -244,9 +244,9 @@ checksum = "34fde25430d87a9388dadbe6e34d7f72a462c8b43ac8d309b42b0a8505d7e2a5" [[package]] name = "anyhow" -version = "1.0.79" +version = "1.0.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "080e9890a082662b09c1ad45f567faeeb47f22b5fb23895fbe1e651e718e25ca" +checksum = "86fdf8605db99b54d3cd748a44c6d04df638eb5dafb219b135d0149bd0db01f6" dependencies = [ "backtrace", ] @@ -262,7 +262,7 @@ dependencies = [ [[package]] name = "aptos" -version = "4.1.0" +version = "4.2.3" dependencies = [ "anyhow", "aptos-api-types", @@ -306,6 +306,7 @@ dependencies = [ 
"chrono", "clap 4.4.14", "clap_complete", + "colored", "dashmap", "diesel", "diesel-async", @@ -918,7 +919,7 @@ dependencies = [ "maplit", "mini-moka", "mirai-annotations", - "mockall", + "mockall 0.11.4", "move-core-types", "num-derive", "num-traits", @@ -1098,7 +1099,7 @@ dependencies = [ "futures", "itertools 0.13.0", "maplit", - "mockall", + "mockall 0.11.4", "ordered-float 3.9.2", "rand 0.8.5", "serde", @@ -1183,6 +1184,7 @@ dependencies = [ "serde", "static_assertions", "status-line", + "tokio", ] [[package]] @@ -1244,6 +1246,7 @@ dependencies = [ "bcs 0.1.4", "clap 4.4.14", "itertools 0.13.0", + "serde_json", "tokio", ] @@ -1667,7 +1670,6 @@ dependencies = [ "serde_json", "serde_yaml 0.8.26", "tokio", - "url", ] [[package]] @@ -2280,6 +2282,38 @@ dependencies = [ "url", ] +[[package]] +name = "aptos-indexer-processor-sdk" +version = "0.1.0" +source = "git+https://github.com/aptos-labs/aptos-indexer-processor-sdk.git?rev=9ecd252ccff53023664562001dd04c2886488c0d#9ecd252ccff53023664562001dd04c2886488c0d" +dependencies = [ + "anyhow", + "aptos-indexer-transaction-stream", + "aptos-protos 1.3.1 (git+https://github.com/aptos-labs/aptos-core.git?rev=5c48aee129b5a141be2792ffa3d9bd0a1a61c9cb)", + "async-trait", + "bcs 0.1.4", + "bigdecimal", + "chrono", + "derive_builder", + "futures", + "hex", + "instrumented-channel", + "kanal", + "mockall 0.12.1", + "num_cpus", + "once_cell", + "petgraph 0.6.5", + "prometheus", + "prometheus-client", + "serde", + "serde_json", + "thiserror", + "tiny-keccak", + "tokio", + "tracing", + "url", +] + [[package]] name = "aptos-indexer-test-transactions" version = "1.0.0" @@ -2288,6 +2322,52 @@ dependencies = [ "serde_json", ] +[[package]] +name = "aptos-indexer-transaction-generator" +version = "1.0.0" +dependencies = [ + "anyhow", + "aptos", + "aptos-config", + "aptos-faucet-core", + "aptos-indexer-grpc-utils", + "aptos-protos 1.3.1", + "clap 4.4.14", + "futures", + "itertools 0.13.0", + "rand 0.7.3", + "serde", + "serde_json", + 
"serde_yaml 0.8.26", + "tempfile", + "tokio", + "tokio-stream", + "toml 0.7.8", + "tonic 0.11.0", + "url", +] + +[[package]] +name = "aptos-indexer-transaction-stream" +version = "0.1.0" +source = "git+https://github.com/aptos-labs/aptos-indexer-processor-sdk.git?rev=9ecd252ccff53023664562001dd04c2886488c0d#9ecd252ccff53023664562001dd04c2886488c0d" +dependencies = [ + "anyhow", + "aptos-moving-average 0.1.0 (git+https://github.com/aptos-labs/aptos-indexer-processor-sdk.git?rev=9ecd252ccff53023664562001dd04c2886488c0d)", + "aptos-protos 1.3.1 (git+https://github.com/aptos-labs/aptos-core.git?rev=5c48aee129b5a141be2792ffa3d9bd0a1a61c9cb)", + "chrono", + "futures-util", + "once_cell", + "prometheus", + "prost 0.12.3", + "sample", + "serde", + "tokio", + "tonic 0.11.0", + "tracing", + "url", +] + [[package]] name = "aptos-infallible" version = "0.1.0" @@ -2794,7 +2874,15 @@ dependencies = [ [[package]] name = "aptos-moving-average" version = "0.1.0" -source = "git+https://github.com/aptos-labs/aptos-indexer-processors.git?rev=fa1ce4947f4c2be57529f1c9732529e05a06cb7f#fa1ce4947f4c2be57529f1c9732529e05a06cb7f" +source = "git+https://github.com/aptos-labs/aptos-indexer-processors.git?rev=51a34901b40d7f75767ac907b4d2478104d6a515#51a34901b40d7f75767ac907b4d2478104d6a515" +dependencies = [ + "chrono", +] + +[[package]] +name = "aptos-moving-average" +version = "0.1.0" +source = "git+https://github.com/aptos-labs/aptos-indexer-processor-sdk.git?rev=9ecd252ccff53023664562001dd04c2886488c0d#9ecd252ccff53023664562001dd04c2886488c0d" dependencies = [ "chrono", ] @@ -3242,7 +3330,7 @@ dependencies = [ "claims", "futures", "maplit", - "mockall", + "mockall 0.11.4", "once_cell", "rand 0.7.3", "serde", @@ -3276,7 +3364,7 @@ dependencies = [ [[package]] name = "aptos-profiler" version = "0.1.0" -source = "git+https://github.com/aptos-labs/aptos-core.git?rev=4541add3fd29826ec57f22658ca286d2d6134b93#4541add3fd29826ec57f22658ca286d2d6134b93" +source = 
"git+https://github.com/aptos-labs/aptos-core.git?rev=202bdccff2b2d333a385ae86a4fcf23e89da9f62#202bdccff2b2d333a385ae86a4fcf23e89da9f62" dependencies = [ "anyhow", "backtrace", @@ -3741,7 +3829,7 @@ dependencies = [ "bcs 0.1.4", "claims", "futures", - "mockall", + "mockall 0.11.4", "move-core-types", "ntest", "once_cell", @@ -3830,7 +3918,7 @@ dependencies = [ "futures", "maplit", "mini-moka", - "mockall", + "mockall 0.11.4", "once_cell", "rand 0.7.3", "serde", @@ -3878,10 +3966,10 @@ dependencies = [ [[package]] name = "aptos-system-utils" version = "0.1.0" -source = "git+https://github.com/aptos-labs/aptos-core.git?rev=4541add3fd29826ec57f22658ca286d2d6134b93#4541add3fd29826ec57f22658ca286d2d6134b93" +source = "git+https://github.com/aptos-labs/aptos-core.git?rev=202bdccff2b2d333a385ae86a4fcf23e89da9f62#202bdccff2b2d333a385ae86a4fcf23e89da9f62" dependencies = [ "anyhow", - "aptos-profiler 0.1.0 (git+https://github.com/aptos-labs/aptos-core.git?rev=4541add3fd29826ec57f22658ca286d2d6134b93)", + "aptos-profiler 0.1.0 (git+https://github.com/aptos-labs/aptos-core.git?rev=202bdccff2b2d333a385ae86a4fcf23e89da9f62)", "async-mutex", "http 0.2.11", "hyper 0.14.28", @@ -4204,6 +4292,7 @@ dependencies = [ "claims", "coset", "criterion", + "dashmap", "derivative", "fixed", "fxhash", @@ -4811,9 +4900,9 @@ dependencies = [ [[package]] name = "async-compression" -version = "0.4.12" +version = "0.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fec134f64e2bc57411226dfc4e52dec859ddfc7e711fc5e07b612584f000e4aa" +checksum = "cd066d0b4ef8ecb03a55319dc13aa6910616d0f44008a045bb1835af830abff5" dependencies = [ "flate2", "futures-core", @@ -5031,9 +5120,9 @@ checksum = "fbb36e985947064623dbd357f727af08ffd077f93d696782f3c56365fa2e2799" [[package]] name = "async-trait" -version = "0.1.77" +version = "0.1.82" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c980ee35e870bd1a4d2c8294d4c04d0499e67bca1e4b5cefcc693c2fa00caea9" 
+checksum = "a27b8a3a6e1a44fa4c8baf1f653e4172e81486d4941f2237e20dc2d0cf4ddff1" dependencies = [ "proc-macro2", "quote", @@ -5380,7 +5469,7 @@ dependencies = [ "proc-macro2", "quote", "regex", - "rustc-hash 1.1.0", + "rustc-hash", "shlex", "syn 2.0.48", ] @@ -5702,7 +5791,7 @@ dependencies = [ "move-binary-format", "move-bytecode-verifier", "move-core-types", - "petgraph 0.5.1", + "petgraph 0.6.5", "proptest", ] @@ -6585,7 +6674,7 @@ dependencies = [ "bitflags 1.3.2", "crossterm_winapi", "libc", - "mio", + "mio 0.8.11", "parking_lot 0.12.1", "signal-hook", "signal-hook-mio", @@ -6601,7 +6690,7 @@ dependencies = [ "bitflags 1.3.2", "crossterm_winapi", "libc", - "mio", + "mio 0.8.11", "parking_lot 0.12.1", "signal-hook", "signal-hook-mio", @@ -6962,6 +7051,17 @@ dependencies = [ "uuid", ] +[[package]] +name = "delegate" +version = "0.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4e018fccbeeb50ff26562ece792ed06659b9c2dae79ece77c4456bb10d9bf79b" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.48", +] + [[package]] name = "der" version = "0.5.1" @@ -7077,7 +7177,7 @@ dependencies = [ "guppy", "guppy-workspace-hack", "once_cell", - "petgraph 0.6.4", + "petgraph 0.6.5", "rayon", "serde", "toml 0.5.11", @@ -7281,6 +7381,12 @@ version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1435fa1053d8b2fbbe9be7e97eca7f33d37b28409959813daefc1446a14247f1" +[[package]] +name = "dtoa" +version = "1.0.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dcbb2bf8e87535c23f7a8a321e364ce21462d0ff10cb6407820e8e96dfff6653" + [[package]] name = "dunce" version = "1.0.4" @@ -8286,9 +8392,9 @@ checksum = "8f5f3913fa0bfe7ee1fd8248b6b9f42a5af4b9d65ec2dd2c3c26132b950ecfc2" [[package]] name = "gcloud-sdk" -version = "0.25.5" +version = "0.25.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"77045256cd0d2075e09d62c4c9f27c2b664e2cc806d7ddf3a4293bb0c20b4728" +checksum = "898e349fb0fabc16892de7858e5650b70a8044edeee13469cb2f7649040bf3c2" dependencies = [ "async-trait", "bytes", @@ -8297,14 +8403,14 @@ dependencies = [ "hyper 1.4.1", "jsonwebtoken 9.3.0", "once_cell", - "prost 0.13.2", - "prost-types 0.13.2", + "prost 0.13.1", + "prost-types 0.13.1", "reqwest 0.12.5", "secret-vault-value", "serde", "serde_json", "tokio", - "tonic 0.12.2", + "tonic 0.12.1", "tower", "tower-layer", "tower-util", @@ -8682,7 +8788,7 @@ dependencies = [ "nested", "once_cell", "pathdiff", - "petgraph 0.6.4", + "petgraph 0.6.5", "rayon", "semver", "serde", @@ -8918,9 +9024,9 @@ dependencies = [ [[package]] name = "hermit-abi" -version = "0.3.3" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d77f7ec81a6d05a3abb01ab6eb7590f6083d08449fe5a1c8b1e620283546ccb7" +checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024" [[package]] name = "hex" @@ -9229,16 +9335,16 @@ dependencies = [ [[package]] name = "hyper-rustls" -version = "0.27.3" +version = "0.27.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08afdbb5c31130e3034af566421053ab03787c640246a446327f550d11bcb333" +checksum = "5ee4be2c948921a1a5320b629c4193916ed787a7f7f293fd3f7f5a6c9de74155" dependencies = [ "futures-util", "http 1.1.0", "hyper 1.4.1", "hyper-util", "rustls 0.23.7", - "rustls-native-certs 0.8.0", + "rustls-native-certs 0.7.0", "rustls-pki-types", "tokio", "tokio-rustls 0.26.0", @@ -9602,6 +9708,19 @@ dependencies = [ "web-sys", ] +[[package]] +name = "instrumented-channel" +version = "0.1.0" +source = "git+https://github.com/aptos-labs/aptos-indexer-processor-sdk.git?rev=9ecd252ccff53023664562001dd04c2886488c0d#9ecd252ccff53023664562001dd04c2886488c0d" +dependencies = [ + "delegate", + "derive_builder", + "kanal", + "once_cell", + "prometheus", + "prometheus-client", +] + [[package]] name = "integer-encoding" version 
= "3.0.4" @@ -9636,7 +9755,7 @@ version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2" dependencies = [ - "hermit-abi 0.3.3", + "hermit-abi 0.3.9", "libc", "windows-sys 0.48.0", ] @@ -9662,7 +9781,7 @@ version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0bad00257d07be169d870ab665980b06cdb366d792ad690bf2e76876dc503455" dependencies = [ - "hermit-abi 0.3.3", + "hermit-abi 0.3.9", "rustix 0.38.28", "windows-sys 0.52.0", ] @@ -10001,7 +10120,7 @@ dependencies = [ "is-terminal", "itertools 0.10.5", "lalrpop-util", - "petgraph 0.6.4", + "petgraph 0.6.5", "regex", "regex-syntax 0.6.29", "string_cache", @@ -10547,6 +10666,18 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "mio" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "80e04d1dcff3aae0704555fe5fee3bcfaf3d1fdf8a7e521d5b9d2b42acb52cec" +dependencies = [ + "hermit-abi 0.3.9", + "libc", + "wasi 0.11.0+wasi-snapshot-preview1", + "windows-sys 0.52.0", +] + [[package]] name = "mirai-annotations" version = "1.12.0" @@ -10563,8 +10694,23 @@ dependencies = [ "downcast", "fragile", "lazy_static", - "mockall_derive", - "predicates", + "mockall_derive 0.11.4", + "predicates 2.1.5", + "predicates-tree", +] + +[[package]] +name = "mockall" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43766c2b5203b10de348ffe19f7e54564b64f3d6018ff7648d1e2d6d3a0f0a48" +dependencies = [ + "cfg-if", + "downcast", + "fragile", + "lazy_static", + "mockall_derive 0.12.1", + "predicates 3.1.2", "predicates-tree", ] @@ -10580,6 +10726,18 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "mockall_derive" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af7cbce79ec385a1d4f54baa90a76401eb15d9cab93685f62e7e9f942aa00ae2" +dependencies = [ 
+ "cfg-if", + "proc-macro2", + "quote", + "syn 2.0.48", +] + [[package]] name = "module-generation" version = "0.1.0" @@ -10706,7 +10864,7 @@ dependencies = [ "anyhow", "move-binary-format", "move-core-types", - "petgraph 0.5.1", + "petgraph 0.6.5", "serde-reflection", ] @@ -10720,7 +10878,7 @@ dependencies = [ "move-binary-format", "move-borrow-graph", "move-core-types", - "petgraph 0.5.1", + "petgraph 0.6.5", "serde", "typed-arena", ] @@ -10808,7 +10966,7 @@ dependencies = [ "move-symbol-pool", "once_cell", "pathdiff", - "petgraph 0.5.1", + "petgraph 0.6.5", "regex", "sha3 0.9.1", "tempfile", @@ -10853,7 +11011,7 @@ dependencies = [ "move-symbol-pool", "num 0.4.1", "once_cell", - "petgraph 0.5.1", + "petgraph 0.6.5", "strum 0.24.1", "strum_macros 0.24.3", "walkdir", @@ -10913,7 +11071,7 @@ dependencies = [ "move-command-line-common", "move-core-types", "move-ir-types", - "petgraph 0.5.1", + "petgraph 0.6.5", "serde", ] @@ -11113,7 +11271,7 @@ dependencies = [ "move-to-yul", "named-lock", "once_cell", - "petgraph 0.5.1", + "petgraph 0.6.5", "regex", "serde", "serde_yaml 0.8.26", @@ -11245,7 +11403,7 @@ dependencies = [ "move-stackless-bytecode-test-utils", "num 0.4.1", "paste", - "petgraph 0.5.1", + "petgraph 0.6.5", ] [[package]] @@ -11467,7 +11625,6 @@ dependencies = [ "proptest", "serde", "sha3 0.9.1", - "tracing", "triomphe", "typed-arena", ] @@ -11713,9 +11870,9 @@ dependencies = [ [[package]] name = "ntest" -version = "0.9.0" +version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da8ec6d2b73d45307e926f5af46809768581044384637af6b3f3fe7c3c88f512" +checksum = "fb183f0a1da7a937f672e5ee7b7edb727bf52b8a52d531374ba8ebb9345c0330" dependencies = [ "ntest_test_cases", "ntest_timeout", @@ -11723,9 +11880,9 @@ dependencies = [ [[package]] name = "ntest_test_cases" -version = "0.9.0" +version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"be7d33be719c6f4d09e64e27c1ef4e73485dc4cc1f4d22201f89860a7fe22e22" +checksum = "16d0d3f2a488592e5368ebbe996e7f1d44aa13156efad201f5b4d84e150eaa93" dependencies = [ "proc-macro2", "quote", @@ -11734,11 +11891,11 @@ dependencies = [ [[package]] name = "ntest_timeout" -version = "0.9.0" +version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "066b468120587a402f0b47d8f80035c921f6a46f8209efd0632a89a16f5188a4" +checksum = "fcc7c92f190c97f79b4a332f5e81dcf68c8420af2045c936c9be0bc9de6f63b5" dependencies = [ - "proc-macro-crate 1.3.1", + "proc-macro-crate 3.1.0", "proc-macro2", "quote", "syn 1.0.109", @@ -11953,7 +12110,7 @@ version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" dependencies = [ - "hermit-abi 0.3.3", + "hermit-abi 0.3.9", "libc", ] @@ -12308,7 +12465,7 @@ dependencies = [ "async-trait", "coset", "log", - "mockall", + "mockall 0.11.4", "p256", "passkey-types", "rand 0.8.5", @@ -12523,9 +12680,9 @@ dependencies = [ [[package]] name = "petgraph" -version = "0.6.4" +version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1d3afd2628e69da2be385eb6f2fd57c8ac7977ceeff6dc166ff1657b0e386a9" +checksum = "b4c5cc86750666a3ed20bdaf5ca2a0344f9c67674cae0515bec2da16fbaa47db" dependencies = [ "fixedbitset 0.4.2", "indexmap 2.2.5", @@ -13017,6 +13174,16 @@ dependencies = [ "regex", ] +[[package]] +name = "predicates" +version = "3.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7e9086cc7640c29a356d1a29fd134380bee9d8f79a17410aa76e7ad295f42c97" +dependencies = [ + "anstyle", + "predicates-core", +] + [[package]] name = "predicates-core" version = "1.0.6" @@ -13191,13 +13358,14 @@ dependencies = [ [[package]] name = "processor" version = "1.0.0" -source = 
"git+https://github.com/aptos-labs/aptos-indexer-processors.git?rev=fa1ce4947f4c2be57529f1c9732529e05a06cb7f#fa1ce4947f4c2be57529f1c9732529e05a06cb7f" +source = "git+https://github.com/aptos-labs/aptos-indexer-processors.git?rev=51a34901b40d7f75767ac907b4d2478104d6a515#51a34901b40d7f75767ac907b4d2478104d6a515" dependencies = [ "ahash 0.8.11", "allocative", "allocative_derive", "anyhow", - "aptos-moving-average 0.1.0 (git+https://github.com/aptos-labs/aptos-indexer-processors.git?rev=fa1ce4947f4c2be57529f1c9732529e05a06cb7f)", + "aptos-indexer-processor-sdk", + "aptos-moving-average 0.1.0 (git+https://github.com/aptos-labs/aptos-indexer-processors.git?rev=51a34901b40d7f75767ac907b4d2478104d6a515)", "aptos-protos 1.3.1 (git+https://github.com/aptos-labs/aptos-core.git?rev=5c48aee129b5a141be2792ffa3d9bd0a1a61c9cb)", "async-trait", "bcs 0.1.4", @@ -13231,12 +13399,13 @@ dependencies = [ "postgres-native-tls", "prometheus", "prost 0.12.3", + "rayon", "regex", "serde", "serde_json", "server-framework", - "sha2 0.9.9", - "sha3 0.9.1", + "sha2 0.10.8", + "sha3 0.10.8", "strum 0.24.1", "tiny-keccak", "tokio", @@ -13279,9 +13448,33 @@ dependencies = [ "lazy_static", "memchr", "parking_lot 0.12.1", + "protobuf", "thiserror", ] +[[package]] +name = "prometheus-client" +version = "0.22.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "504ee9ff529add891127c4827eb481bd69dc0ebc72e9a682e187db4caa60c3ca" +dependencies = [ + "dtoa", + "itoa", + "parking_lot 0.12.1", + "prometheus-client-derive-encode", +] + +[[package]] +name = "prometheus-client-derive-encode" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.48", +] + [[package]] name = "prometheus-http-query" version = "0.5.2" @@ -13360,12 +13553,12 @@ dependencies = [ [[package]] name = "prost" -version = "0.13.2" +version = 
"0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b2ecbe40f08db5c006b5764a2645f7f3f141ce756412ac9e1dd6087e6d32995" +checksum = "e13db3d3fde688c61e2446b4d843bc27a7e8af269a69440c0308021dc92333cc" dependencies = [ "bytes", - "prost-derive 0.13.2", + "prost-derive 0.13.1", ] [[package]] @@ -13396,9 +13589,9 @@ dependencies = [ [[package]] name = "prost-derive" -version = "0.13.2" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "acf0c195eebb4af52c752bec4f52f645da98b6e92077a04110c7f349477ae5ac" +checksum = "18bec9b0adc4eba778b33684b7ba3e7137789434769ee3ce3930463ef904cfca" dependencies = [ "anyhow", "itertools 0.13.0", @@ -13427,11 +13620,11 @@ dependencies = [ [[package]] name = "prost-types" -version = "0.13.2" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60caa6738c7369b940c3d49246a8d1749323674c65cb13010134f5c9bad5b519" +checksum = "cee5168b05f49d4b0ca581206eb14a7b22fafd963efe729ac48eb03266e25cc2" dependencies = [ - "prost 0.13.2", + "prost 0.13.1", ] [[package]] @@ -13578,17 +13771,16 @@ dependencies = [ [[package]] name = "quinn" -version = "0.11.5" +version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c7c5fdde3cdae7203427dc4f0a68fe0ed09833edc525a03456b153b79828684" +checksum = "e4ceeeeabace7857413798eb1ffa1e9c905a9946a57d81fb69b4b71c4d8eb3ad" dependencies = [ "bytes", "pin-project-lite", "quinn-proto", "quinn-udp", - "rustc-hash 2.0.0", + "rustc-hash", "rustls 0.23.7", - "socket2 0.5.5", "thiserror", "tokio", "tracing", @@ -13596,14 +13788,14 @@ dependencies = [ [[package]] name = "quinn-proto" -version = "0.11.8" +version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fadfaed2cd7f389d0161bb73eeb07b7b78f8691047a6f3e73caaeae55310a4a6" +checksum = "ddf517c03a109db8100448a4be38d498df8a210a99fe0e1b9eaf39e78c640efe" dependencies = [ "bytes", "rand 
0.8.5", "ring 0.17.7", - "rustc-hash 2.0.0", + "rustc-hash", "rustls 0.23.7", "slab", "thiserror", @@ -13620,7 +13812,6 @@ dependencies = [ "libc", "once_cell", "socket2 0.5.5", - "tracing", "windows-sys 0.52.0", ] @@ -13979,7 +14170,7 @@ dependencies = [ "http-body 1.0.0", "http-body-util", "hyper 1.4.1", - "hyper-rustls 0.27.3", + "hyper-rustls 0.27.2", "hyper-util", "ipnet", "js-sys", @@ -14279,12 +14470,6 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" -[[package]] -name = "rustc-hash" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "583034fd73374156e66797ed8e5b0d5690409c9226b22d87cb7f19821c05d152" - [[package]] name = "rustc-hex" version = "2.1.0" @@ -14419,19 +14604,6 @@ dependencies = [ "security-framework", ] -[[package]] -name = "rustls-native-certs" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcaf18a4f2be7326cd874a5fa579fae794320a0f388d365dca7e480e55f83f8a" -dependencies = [ - "openssl-probe", - "rustls-pemfile 2.1.1", - "rustls-pki-types", - "schannel", - "security-framework", -] - [[package]] name = "rustls-pemfile" version = "0.2.1" @@ -14559,6 +14731,11 @@ dependencies = [ "winapi-util", ] +[[package]] +name = "sample" +version = "0.1.0" +source = "git+https://github.com/aptos-labs/aptos-indexer-processor-sdk.git?rev=9ecd252ccff53023664562001dd04c2886488c0d#9ecd252ccff53023664562001dd04c2886488c0d" + [[package]] name = "scale-info" version = "1.0.0" @@ -14985,10 +15162,10 @@ dependencies = [ [[package]] name = "server-framework" version = "1.0.0" -source = "git+https://github.com/aptos-labs/aptos-indexer-processors.git?rev=fa1ce4947f4c2be57529f1c9732529e05a06cb7f#fa1ce4947f4c2be57529f1c9732529e05a06cb7f" +source = 
"git+https://github.com/aptos-labs/aptos-indexer-processors.git?rev=51a34901b40d7f75767ac907b4d2478104d6a515#51a34901b40d7f75767ac907b4d2478104d6a515" dependencies = [ "anyhow", - "aptos-system-utils 0.1.0 (git+https://github.com/aptos-labs/aptos-core.git?rev=4541add3fd29826ec57f22658ca286d2d6134b93)", + "aptos-system-utils 0.1.0 (git+https://github.com/aptos-labs/aptos-core.git?rev=202bdccff2b2d333a385ae86a4fcf23e89da9f62)", "async-trait", "backtrace", "clap 4.4.14", @@ -15143,7 +15320,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "29ad2e15f37ec9a6cc544097b78a1ec90001e9f71b81338ca39f430adaca99af" dependencies = [ "libc", - "mio", + "mio 0.8.11", "signal-hook", ] @@ -15339,7 +15516,6 @@ dependencies = [ "aptos-db", "aptos-db-indexer", "aptos-db-indexer-schemas", - "aptos-debugger", "aptos-dkg", "aptos-faucet-core", "aptos-forge", @@ -16081,7 +16257,7 @@ dependencies = [ "once_cell", "pbkdf2", "rand 0.7.3", - "rustc-hash 1.1.0", + "rustc-hash", "sha2 0.9.9", "thiserror", "unicode-normalization", @@ -16125,22 +16301,21 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.35.1" +version = "1.40.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c89b4efa943be685f629b149f53829423f8f5531ea21249408e8e2f8671ec104" +checksum = "e2b070231665d27ad9ec9b8df639893f46727666c6767db40317fbe920a5d998" dependencies = [ "backtrace", "bytes", "libc", - "mio", - "num_cpus", + "mio 1.0.2", "parking_lot 0.12.1", "pin-project-lite", "signal-hook-registry", "socket2 0.5.5", "tokio-macros", "tracing", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] @@ -16155,9 +16330,9 @@ dependencies = [ [[package]] name = "tokio-macros" -version = "2.2.0" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" +checksum = 
"693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" dependencies = [ "proc-macro2", "quote", @@ -16463,9 +16638,9 @@ dependencies = [ [[package]] name = "tonic" -version = "0.12.2" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6f6ba989e4b2c58ae83d862d3a3e27690b6e3ae630d0deb59f3697f32aa88ad" +checksum = "38659f4a91aba8598d27821589f5db7dddd94601e7a01b1e485a50e5484c7401" dependencies = [ "async-stream", "async-trait", @@ -16481,7 +16656,7 @@ dependencies = [ "hyper-util", "percent-encoding", "pin-project 1.1.3", - "prost 0.13.2", + "prost 0.13.1", "rustls-native-certs 0.7.0", "rustls-pemfile 2.1.1", "socket2 0.5.5", @@ -17022,9 +17197,9 @@ dependencies = [ [[package]] name = "url" -version = "2.5.0" +version = "2.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31e6302e3bb753d46e83516cae55ae196fc0c309407cf11ab35cc51a4c2a4633" +checksum = "22784dbdf76fdde8af1aeda5622b546b422b6fc585325248a2bf9f5e41e94d6c" dependencies = [ "form_urlencoded", "idna 0.5.0", diff --git a/Cargo.toml b/Cargo.toml index 8835db03e4b..f58a1e238a2 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -123,6 +123,7 @@ members = [ "ecosystem/indexer-grpc/indexer-grpc-table-info", "ecosystem/indexer-grpc/indexer-grpc-utils", "ecosystem/indexer-grpc/indexer-test-transactions", + "ecosystem/indexer-grpc/indexer-transaction-generator", "ecosystem/indexer-grpc/transaction-filter", "ecosystem/nft-metadata-crawler-parser", "ecosystem/node-checker", @@ -364,6 +365,7 @@ aptos-indexer-grpc-table-info = { path = "ecosystem/indexer-grpc/indexer-grpc-ta aptos-indexer-test-transactions = { path = "ecosystem/indexer-grpc/indexer-test-transactions" } aptos-indexer-grpc-utils = { path = "ecosystem/indexer-grpc/indexer-grpc-utils" } aptos-indexer-grpc-server-framework = { path = "ecosystem/indexer-grpc/indexer-grpc-server-framework" } +aptos-indexer-transaction-generator = { path = 
"ecosystem/indexer-grpc/indexer-transaction-generator" } aptos-infallible = { path = "crates/aptos-infallible" } aptos-inspection-service = { path = "crates/aptos-inspection-service" } aptos-jellyfish-merkle = { path = "storage/jellyfish-merkle" } @@ -639,7 +641,7 @@ more-asserts = "0.3.0" named-lock = "0.2.0" native-tls = "0.2.10" neptune = { version = "13.0.0", default_features = false } -ntest = "0.9.0" +ntest = "0.9.3" num = "0.4.0" num-bigint = { version = "0.3.2", features = ["rand"] } num_cpus = "1.13.1" @@ -664,7 +666,7 @@ passkey-client = { version = "0.2.0" } passkey-types = { version = "0.2.0" } pbjson = "0.5.1" percent-encoding = "2.1.0" -petgraph = "0.5.1" +petgraph = "0.6.5" pin-project = "1.0.10" plotters = { version = "0.3.5", default-features = false } # We're using git deps until https://github.com/poem-web/poem/pull/829 gets formally released. diff --git a/api/README.md b/api/README.md index 1da7eacfcf2..fb2c6e9207b 100644 --- a/api/README.md +++ b/api/README.md @@ -8,13 +8,12 @@ See spec source: - [HTML in doc/spec.html](doc/spec.html). ## Regenerating docs / code based on API changes -With our API setup, the spec files (`api/doc/spec.yaml` / `api/doc/spec.json`) are generated from the API in code, and the TS SDK client (`ecosystem/typescript/sdk`) is generated from that spec. We have CI that ensures that all of these are updated together. As such, if you want to make a change to the API, do it in this order. +With our API setup, the spec files (`api/doc/spec.yaml` / `api/doc/spec.json`) are generated from the API in code. We have CI that ensures that all of these are updated together. As such, if you want to make a change to the API, do it in this order. 
![API + spec + TS SDK generation diagram](doc/api_spec_ts_sdk_diagram.png) This process updates the docs at: - https://fullnode.devnet.aptoslabs.com/v1/spec#/ (and testnet / mainnet, based on the API rollout schedule) -- https://aptos-labs.github.io/ts-sdk-doc/ All commands here are relative to the root of `aptos-core`. @@ -24,17 +23,6 @@ All commands here are relative to the root of `aptos-core`. cargo run -p aptos-openapi-spec-generator -- -f yaml -o api/doc/spec.yaml cargo run -p aptos-openapi-spec-generator -- -f json -o api/doc/spec.json ``` -3. Regenerate the TypeScript SDK client files based upon the new API spec: -``` -cd ecosystem/typescript/sdk -pnpm install -pnpm generate-client -``` -4. Manually update the helper methods in the TypeScript SDK in: `ecosystem/typescript/sdk/src/aptos_client.ts`. Note: This is necessary because we wrap the generated client, so the docs on the methods in that file are written by hand. For example, if you change `/accounts//resources` in the API, the `getAccountResources` method in the generated client will be different. You must therefore then change `getAccountResources` in `ecosystem/typescript/sdk/src/aptos_client.ts`, which wraps the generated method. -5. Update the TS SDK docs site (https://aptos-labs.github.io/ts-sdk-doc/): -``` -pnpm generate-ts-docs -``` ### Sanity checks Double check that the spec looks good by running these commands and then visit http://127.0.0.1:8888/spec.html. 
diff --git a/api/src/accounts.rs b/api/src/accounts.rs index 3ac9a13005e..d94454f6b34 100644 --- a/api/src/accounts.rs +++ b/api/src/accounts.rs @@ -66,7 +66,7 @@ impl AccountsApi { let context = self.context.clone(); api_spawn_blocking(move || { - let account = Account::new(context, address.0, ledger_version.0, None, None, false)?; + let account = Account::new(context, address.0, ledger_version.0, None, None)?; account.account(&accept_type) }) .await @@ -118,7 +118,6 @@ impl AccountsApi { ledger_version.0, start.0.map(StateKey::from), limit.0, - true, )?; account.resources(&accept_type) }) @@ -171,7 +170,6 @@ impl AccountsApi { ledger_version.0, start.0.map(StateKey::from), limit.0, - true, )?; account.modules(&accept_type) }) @@ -201,24 +199,11 @@ impl Account { requested_ledger_version: Option, start: Option, limit: Option, - require_state_indices: bool, ) -> Result { - let sharding_enabled = context - .node_config - .storage - .rocksdb_configs - .enable_storage_sharding; - - let (latest_ledger_info, requested_version) = if sharding_enabled && require_state_indices { - context.get_latest_ledger_info_and_verify_internal_indexer_lookup_version( + let (latest_ledger_info, requested_version) = context + .get_latest_ledger_info_and_verify_lookup_version( requested_ledger_version.map(|inner| inner.0), - )? - } else { - // Use the latest ledger version, or the requested associated version - context.get_latest_ledger_info_and_verify_lookup_version( - requested_ledger_version.map(|inner| inner.0), - )? 
- }; + )?; Ok(Self { context, diff --git a/api/src/context.rs b/api/src/context.rs index aa9e5984854..3ba77fe8923 100644 --- a/api/src/context.rs +++ b/api/src/context.rs @@ -16,7 +16,7 @@ use aptos_api_types::{ AptosErrorCode, AsConverter, BcsBlock, GasEstimation, LedgerInfo, ResourceGroup, TransactionOnChainData, }; -use aptos_config::config::{NodeConfig, RoleType}; +use aptos_config::config::{GasEstimationConfig, NodeConfig, RoleType}; use aptos_crypto::HashValue; use aptos_gas_schedule::{AptosGasParameters, FromOnChainGasSchedule}; use aptos_logger::{error, info, Schema}; @@ -29,7 +29,6 @@ use aptos_types::{ access_path::{AccessPath, Path}, account_address::AccountAddress, account_config::{AccountResource, NewBlockEvent}, - block_executor::config::BlockExecutorConfigFromOnchain, chain_id::ChainId, contract_event::EventWithVersion, event::EventKey, @@ -42,7 +41,9 @@ use aptos_types::{ TStateView, }, transaction::{ - block_epilogue::BlockEndInfo, SignedTransaction, Transaction, TransactionWithProof, Version, + block_epilogue::BlockEndInfo, + use_case::{UseCaseAwareTransaction, UseCaseKey}, + SignedTransaction, Transaction, TransactionWithProof, Version, }, }; use futures::{channel::oneshot, SinkExt}; @@ -124,8 +125,7 @@ impl Context { })), gas_limit_cache: Arc::new(RwLock::new(GasLimitCache { last_updated_epoch: None, - block_executor_onchain_config: OnChainExecutionConfig::default_if_missing() - .block_executor_onchain_config(), + execution_onchain_config: OnChainExecutionConfig::default_if_missing(), })), view_function_stats, simulate_txn_stats, @@ -221,20 +221,26 @@ impl Context { .map_err(|e| e.into()) } - pub fn get_latest_ledger_info(&self) -> Result { + pub fn get_oldest_version_and_block_height( + &self, + ) -> Result<(Version, u64), E> { + self.db + .get_first_viable_block() + .context("Failed to retrieve oldest block information") + .map_err(|e| E::service_unavailable_with_code_no_info(e, AptosErrorCode::InternalError)) + } + + pub fn 
get_latest_storage_ledger_info( + &self, + ) -> Result { let ledger_info = self .get_latest_ledger_info_with_signatures() .context("Failed to retrieve latest ledger info") .map_err(|e| { E::service_unavailable_with_code_no_info(e, AptosErrorCode::InternalError) })?; - let (oldest_version, oldest_block_height) = self - .db - .get_first_viable_block() - .context("Failed to retrieve oldest block information") - .map_err(|e| { - E::service_unavailable_with_code_no_info(e, AptosErrorCode::InternalError) - })?; + + let (oldest_version, oldest_block_height) = self.get_oldest_version_and_block_height()?; let (_, _, newest_block_event) = self .db .get_block_info_by_version(ledger_info.ledger_info().version()) @@ -252,33 +258,13 @@ impl Context { )) } - pub fn get_latest_ledger_info_and_verify_internal_indexer_lookup_version( - &self, - requested_ledger_version: Option, - ) -> Result<(LedgerInfo, Version), E> { - if self.indexer_reader.is_none() { - return Err(E::internal_with_code_no_info( - "Indexer reader doesn't exist", - AptosErrorCode::InternalError, - )); - } - - let (latest_ledger_info, latest_internal_indexer_ledger_version) = - self.get_latest_internal_indexer_ledger_version_and_main_db_info()?; - if let Some(version) = requested_ledger_version { - let request_ledger_version = Version::from(version); - if latest_internal_indexer_ledger_version < request_ledger_version { - return Err(version_not_found( - request_ledger_version, - &latest_ledger_info, - )); - } else if request_ledger_version < latest_ledger_info.oldest_ledger_version.0 { - return Err(version_pruned(request_ledger_version, &latest_ledger_info)); + pub fn get_latest_ledger_info(&self) -> Result { + if let Some(indexer_reader) = self.indexer_reader.as_ref() { + if indexer_reader.is_internal_indexer_enabled() { + return self.get_latest_internal_indexer_ledger_info(); } - Ok((latest_ledger_info, request_ledger_version)) - } else { - Ok((latest_ledger_info, latest_internal_indexer_ledger_version)) } + 
self.get_latest_storage_ledger_info() } pub fn get_latest_ledger_info_and_verify_lookup_version( @@ -306,21 +292,53 @@ impl Context { Ok((latest_ledger_info, requested_ledger_version)) } - pub fn get_latest_internal_indexer_ledger_version_and_main_db_info( + pub fn get_latest_internal_indexer_ledger_info( &self, - ) -> Result<(LedgerInfo, Version), E> { + ) -> Result { if let Some(indexer_reader) = self.indexer_reader.as_ref() { - if let Some(latest_version) = indexer_reader - .get_latest_internal_indexer_ledger_version() - .map_err(|err| E::internal_with_code_no_info(err, AptosErrorCode::InternalError))? - { - let latest_ledger_info = self.get_latest_ledger_info()?; - return Ok((latest_ledger_info, latest_version)); + if indexer_reader.is_internal_indexer_enabled() { + if let Some(mut latest_version) = indexer_reader + .get_latest_internal_indexer_ledger_version() + .map_err(|err| { + E::service_unavailable_with_code_no_info(err, AptosErrorCode::InternalError) + })? + { + // The internal indexer version can be ahead of the storage committed version since it syncs to db's latest synced version + let last_storage_version = + self.get_latest_storage_ledger_info()?.ledger_version.0; + latest_version = std::cmp::min(latest_version, last_storage_version); + let (_, block_end_version, new_block_event) = self + .db + .get_block_info_by_version(latest_version) + .map_err(|_| { + E::service_unavailable_with_code_no_info( + "Failed to get block", + AptosErrorCode::InternalError, + ) + })?; + let (oldest_version, oldest_block_height) = + self.get_oldest_version_and_block_height()?; + return Ok(LedgerInfo::new_ledger_info( + &self.chain_id(), + new_block_event.epoch(), + block_end_version, + oldest_version, + oldest_block_height, + new_block_event.height(), + new_block_event.proposed_time(), + )); + } else { + // Indexer doesn't have data yet as DB is boostrapping. 
+ return Err(E::service_unavailable_with_code_no_info( + "DB is bootstrapping", + AptosErrorCode::InternalError, + )); + } } } - Err(E::internal_with_code_no_info( - "Indexer reader doesn't exist, or doesn't have data.", + Err(E::service_unavailable_with_code_no_info( + "Indexer reader doesn't exist", AptosErrorCode::InternalError, )) } @@ -992,9 +1010,10 @@ impl Context { start_version: Version, limit: u64, ledger_version: Version, - ) -> Result<(Vec<(u64, u64)>, Vec)> { + count_majority_use_case: bool, + ) -> Result<(Vec<(u64, u64)>, Vec, Option)> { if start_version > ledger_version || limit == 0 { - return Ok((vec![], vec![])); + return Ok((vec![], vec![], None)); } // This is just an estimation, so we can just skip over errors @@ -1006,11 +1025,16 @@ impl Context { let mut gas_prices = Vec::new(); let mut block_end_infos = Vec::new(); + let mut count_by_use_case = HashMap::new(); for (txn, info) in txns.zip(infos) { match txn.as_ref() { Ok(Transaction::UserTransaction(txn)) => { if let Ok(info) = info.as_ref() { gas_prices.push((txn.gas_unit_price(), info.gas_used())); + if count_majority_use_case { + let use_case_key = txn.parse_use_case(); + *count_by_use_case.entry(use_case_key).or_insert(0) += 1; + } } }, Ok(Transaction::BlockEpilogue(txn)) => { @@ -1022,7 +1046,80 @@ impl Context { } } - Ok((gas_prices, block_end_infos)) + let majority_use_case_fraction = if count_majority_use_case { + count_by_use_case.iter().max_by_key(|(_, v)| *v).and_then( + |(max_use_case, max_value)| { + if let UseCaseKey::ContractAddress(_) = max_use_case { + Some(*max_value as f32 / count_by_use_case.values().sum::() as f32) + } else { + None + } + }, + ) + } else { + None + }; + Ok((gas_prices, block_end_infos, majority_use_case_fraction)) + } + + fn block_min_inclusion_price( + &self, + ledger_info: &LedgerInfo, + first: Version, + last: Version, + gas_estimation_config: &GasEstimationConfig, + execution_config: &OnChainExecutionConfig, + ) -> Option { + let 
user_use_case_spread_factor = if gas_estimation_config.incorporate_reordering_effects { + execution_config + .transaction_shuffler_type() + .user_use_case_spread_factor() + } else { + None + }; + + match self.get_gas_prices_and_used( + first, + last - first, + ledger_info.ledger_version.0, + user_use_case_spread_factor.is_some(), + ) { + Ok((prices_and_used, block_end_infos, majority_use_case_fraction)) => { + let is_full_block = + if majority_use_case_fraction.map_or(false, |fraction| fraction > 0.5) { + // If majority use case is above half of transactions, UseCaseAware block reordering + // will allow other transactions to get in the block (AIP-68) + false + } else if prices_and_used.len() >= gas_estimation_config.full_block_txns { + true + } else if !block_end_infos.is_empty() { + assert_eq!(1, block_end_infos.len()); + block_end_infos.first().unwrap().limit_reached() + } else if let Some(block_gas_limit) = + execution_config.block_gas_limit_type().block_gas_limit() + { + let gas_used = prices_and_used.iter().map(|(_, used)| *used).sum::(); + gas_used >= block_gas_limit + } else { + false + }; + + if is_full_block { + Some( + self.next_bucket( + prices_and_used + .iter() + .map(|(price, _)| *price) + .min() + .unwrap(), + ), + ) + } else { + None + } + }, + Err(_) => None, + } } pub fn estimate_gas_price( @@ -1031,7 +1128,7 @@ impl Context { ) -> Result { let config = &self.node_config.api.gas_estimation; let min_gas_unit_price = self.min_gas_unit_price(ledger_info)?; - let block_config = self.block_executor_onchain_config(ledger_info)?; + let execution_config = self.execution_onchain_config(ledger_info)?; if !config.enabled { return Ok(self.default_gas_estimation(min_gas_unit_price)); } @@ -1112,40 +1209,9 @@ impl Context { let mut min_inclusion_prices = vec![]; // TODO: if multiple calls to db is a perf issue, combine into a single call and then split for (first, last) in blocks { - let min_inclusion_price = match self.get_gas_prices_and_used( - first, - last 
- first, - ledger_info.ledger_version.0, - ) { - Ok((prices_and_used, block_end_infos)) => { - let is_full_block = if prices_and_used.len() >= config.full_block_txns { - true - } else if !block_end_infos.is_empty() { - assert_eq!(1, block_end_infos.len()); - block_end_infos.first().unwrap().limit_reached() - } else if let Some(block_gas_limit) = - block_config.block_gas_limit_type.block_gas_limit() - { - let gas_used = prices_and_used.iter().map(|(_, used)| *used).sum::(); - gas_used >= block_gas_limit - } else { - false - }; - - if is_full_block { - self.next_bucket( - prices_and_used - .iter() - .map(|(price, _)| *price) - .min() - .unwrap(), - ) - } else { - min_gas_unit_price - } - }, - Err(_) => min_gas_unit_price, - }; + let min_inclusion_price = self + .block_min_inclusion_price(ledger_info, first, last, config, &execution_config) + .unwrap_or(min_gas_unit_price); min_inclusion_prices.push(min_inclusion_price); cache .min_inclusion_prices @@ -1313,16 +1379,16 @@ impl Context { } } - pub fn block_executor_onchain_config( + pub fn execution_onchain_config( &self, ledger_info: &LedgerInfo, - ) -> Result { + ) -> Result { // If it's the same epoch, use the cached results { let cache = self.gas_limit_cache.read().unwrap(); if let Some(ref last_updated_epoch) = cache.last_updated_epoch { if *last_updated_epoch == ledger_info.epoch.0 { - return Ok(cache.block_executor_onchain_config.clone()); + return Ok(cache.execution_onchain_config.clone()); } } } @@ -1333,7 +1399,7 @@ impl Context { // If a different thread updated the cache, we can exit early if let Some(ref last_updated_epoch) = cache.last_updated_epoch { if *last_updated_epoch == ledger_info.epoch.0 { - return Ok(cache.block_executor_onchain_config.clone()); + return Ok(cache.execution_onchain_config.clone()); } } @@ -1345,14 +1411,13 @@ impl Context { E::internal_with_code(e, AptosErrorCode::InternalError, ledger_info) })?; - let block_executor_onchain_config = 
OnChainExecutionConfig::fetch_config(&state_view) - .unwrap_or_else(OnChainExecutionConfig::default_if_missing) - .block_executor_onchain_config(); + let execution_onchain_config = OnChainExecutionConfig::fetch_config(&state_view) + .unwrap_or_else(OnChainExecutionConfig::default_if_missing); // Update the cache - cache.block_executor_onchain_config = block_executor_onchain_config.clone(); + cache.execution_onchain_config = execution_onchain_config.clone(); cache.last_updated_epoch = Some(ledger_info.epoch.0); - Ok(block_executor_onchain_config) + Ok(execution_onchain_config) } } @@ -1412,7 +1477,7 @@ pub struct GasEstimationCache { pub struct GasLimitCache { last_updated_epoch: Option, - block_executor_onchain_config: BlockExecutorConfigFromOnchain, + execution_onchain_config: OnChainExecutionConfig, } /// This function just calls tokio::task::spawn_blocking with the given closure and in diff --git a/api/src/events.rs b/api/src/events.rs index 5c9266df373..49c4fad21ce 100644 --- a/api/src/events.rs +++ b/api/src/events.rs @@ -77,7 +77,7 @@ impl EventsApi { // Ensure that account exists let api = self.clone(); api_spawn_blocking(move || { - let account = Account::new(api.context.clone(), address.0, None, None, None, true)?; + let account = Account::new(api.context.clone(), address.0, None, None, None)?; account.verify_account_or_object_resource()?; api.list( account.latest_ledger_info, @@ -144,7 +144,7 @@ impl EventsApi { let api = self.clone(); api_spawn_blocking(move || { - let account = Account::new(api.context.clone(), address.0, None, None, None, true)?; + let account = Account::new(api.context.clone(), address.0, None, None, None)?; let key = account.find_event_key(event_handle.0, field_name.0.into())?; api.list(account.latest_ledger_info, accept_type, page, key) }) diff --git a/api/src/index.rs b/api/src/index.rs index 94b52896364..ba91cbb34c3 100644 --- a/api/src/index.rs +++ b/api/src/index.rs @@ -33,7 +33,6 @@ impl IndexApi { self.context 
.check_api_output_enabled("Get ledger info", &accept_type)?; let ledger_info = self.context.get_latest_ledger_info()?; - let node_role = self.context.node_role(); api_spawn_blocking(move || match accept_type { diff --git a/api/src/tests/multisig_transactions_test.rs b/api/src/tests/multisig_transactions_test.rs index 9cd59d00e41..a716b9d0091 100644 --- a/api/src/tests/multisig_transactions_test.rs +++ b/api/src/tests/multisig_transactions_test.rs @@ -50,6 +50,66 @@ async fn test_multisig_transaction_with_payload_succeeds() { assert_eq!(0, context.get_apt_balance(multisig_account).await); } +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn test_multisig_transaction_with_existing_account() { + let mut context = new_test_context(current_function_name!()); + let multisig_account = &mut context.create_account().await; + let owner_account_1 = &mut context.create_account().await; + let owner_account_2 = &mut context.create_account().await; + let owner_account_3 = &mut context.create_account().await; + let owners = vec![ + owner_account_1.address(), + owner_account_2.address(), + owner_account_3.address(), + ]; + context + .create_multisig_account_with_existing_account(multisig_account, owners.clone(), 2, 1000) + .await; + assert_owners(&context, multisig_account.address(), owners).await; + assert_signature_threshold(&context, multisig_account.address(), 2).await; + + let multisig_payload = construct_multisig_txn_transfer_payload(owner_account_1.address(), 1000); + context + .create_multisig_transaction( + owner_account_1, + multisig_account.address(), + multisig_payload.clone(), + ) + .await; + // Owner 2 approves and owner 3 rejects. There are still 2 approvals total (owners 1 and 2) so + // the transaction can still be executed. 
+ context + .approve_multisig_transaction(owner_account_2, multisig_account.address(), 1) + .await; + context + .reject_multisig_transaction(owner_account_3, multisig_account.address(), 1) + .await; + + let org_multisig_balance = context.get_apt_balance(multisig_account.address()).await; + let org_owner_1_balance = context.get_apt_balance(owner_account_1.address()).await; + + context + .execute_multisig_transaction(owner_account_2, multisig_account.address(), 202) + .await; + + // The multisig tx that transfers away 1000 APT should have succeeded. + assert_multisig_tx_executed( + &mut context, + multisig_account.address(), + multisig_payload, + 1, + ) + .await; + assert_eq!( + org_multisig_balance - 1000, + context.get_apt_balance(multisig_account.address()).await + ); + assert_eq!( + org_owner_1_balance + 1000, + context.get_apt_balance(owner_account_1.address()).await + ); +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn test_multisig_transaction_to_update_owners() { let mut context = new_test_context(current_function_name!()); diff --git a/api/src/tests/transactions_test.rs b/api/src/tests/transactions_test.rs index 82fabded83b..292c5318f8e 100644 --- a/api/src/tests/transactions_test.rs +++ b/api/src/tests/transactions_test.rs @@ -21,7 +21,7 @@ use aptos_types::{ authenticator::{AuthenticationKey, TransactionAuthenticator}, EntryFunction, Script, SignedTransaction, }, - utility_coin::APTOS_COIN_TYPE, + utility_coin::{AptosCoinType, CoinType}, }; use move_core_types::{ identifier::Identifier, @@ -879,7 +879,7 @@ async fn test_get_txn_execute_failed_by_invalid_entry_function_address() { "0x1222", "Coin", "transfer", - vec![APTOS_COIN_TYPE.clone()], + vec![AptosCoinType::type_tag()], vec![ bcs::to_bytes(&AccountAddress::from_hex_literal("0xdd").unwrap()).unwrap(), bcs::to_bytes(&1u64).unwrap(), @@ -898,7 +898,7 @@ async fn test_get_txn_execute_failed_by_invalid_entry_function_module_name() { "0x1", "CoinInvalid", "transfer", - 
vec![APTOS_COIN_TYPE.clone()], + vec![AptosCoinType::type_tag()], vec![ bcs::to_bytes(&AccountAddress::from_hex_literal("0xdd").unwrap()).unwrap(), bcs::to_bytes(&1u64).unwrap(), @@ -917,7 +917,7 @@ async fn test_get_txn_execute_failed_by_invalid_entry_function_name() { "0x1", "Coin", "transfer_invalid", - vec![APTOS_COIN_TYPE.clone()], + vec![AptosCoinType::type_tag()], vec![ bcs::to_bytes(&AccountAddress::from_hex_literal("0xdd").unwrap()).unwrap(), bcs::to_bytes(&1u64).unwrap(), @@ -936,7 +936,7 @@ async fn test_get_txn_execute_failed_by_invalid_entry_function_arguments() { "0x1", "Coin", "transfer", - vec![APTOS_COIN_TYPE.clone()], + vec![AptosCoinType::type_tag()], vec![ bcs::to_bytes(&AccountAddress::from_hex_literal("0xdd").unwrap()).unwrap(), bcs::to_bytes(&1u8).unwrap(), // invalid type @@ -955,7 +955,7 @@ async fn test_get_txn_execute_failed_by_missing_entry_function_arguments() { "0x1", "Coin", "transfer", - vec![APTOS_COIN_TYPE.clone()], + vec![AptosCoinType::type_tag()], vec![ bcs::to_bytes(&AccountAddress::from_hex_literal("0xdd").unwrap()).unwrap(), // missing arguments @@ -978,7 +978,7 @@ async fn test_get_txn_execute_failed_by_entry_function_validation() { "0x1", "Coin", "transfer", - vec![APTOS_COIN_TYPE.clone()], + vec![AptosCoinType::type_tag()], vec![ bcs::to_bytes(&AccountAddress::from_hex_literal("0xdd").unwrap()).unwrap(), bcs::to_bytes(&123u64).unwrap(), // exceed limit, account balance is 0. @@ -1001,7 +1001,7 @@ async fn test_get_txn_execute_failed_by_entry_function_invalid_module_name() { "0x1", "coin", "transfer::what::what", - vec![APTOS_COIN_TYPE.clone()], + vec![AptosCoinType::type_tag()], vec![ bcs::to_bytes(&AccountAddress::from_hex_literal("0xdd").unwrap()).unwrap(), bcs::to_bytes(&123u64).unwrap(), // exceed limit, account balance is 0. 
@@ -1024,7 +1024,7 @@ async fn test_get_txn_execute_failed_by_entry_function_invalid_function_name() { "0x1", "coin::coin", "transfer", - vec![APTOS_COIN_TYPE.clone()], + vec![AptosCoinType::type_tag()], vec![ bcs::to_bytes(&AccountAddress::from_hex_literal("0xdd").unwrap()).unwrap(), bcs::to_bytes(&123u64).unwrap(), // exceed limit, account balance is 0. @@ -1565,7 +1565,7 @@ async fn test_simulation_failure_with_detail_error() { Identifier::new("MemeCoin").unwrap(), ), Identifier::new("transfer").unwrap(), - vec![APTOS_COIN_TYPE.clone()], + vec![AptosCoinType::type_tag()], vec![ bcs::to_bytes(&AccountAddress::from_hex_literal("0xdd").unwrap()).unwrap(), bcs::to_bytes(&1u64).unwrap(), diff --git a/api/src/transactions.rs b/api/src/transactions.rs index 7a11b73288a..1e121436196 100644 --- a/api/src/transactions.rs +++ b/api/src/transactions.rs @@ -37,7 +37,7 @@ use aptos_types::{ RawTransactionWithData, SignedTransaction, TransactionPayload, }, vm_status::StatusCode, - APTOS_COIN_TYPE, + AptosCoinType, CoinType, }; use aptos_vm::{AptosSimulationVM, AptosVM}; use move_core_types::{ident_str, language_storage::ModuleId, vm_status::VMStatus}; @@ -568,7 +568,7 @@ impl TransactionsApi { &state_view, ModuleId::new(AccountAddress::ONE, ident_str!("coin").into()), ident_str!("balance").into(), - vec![APTOS_COIN_TYPE.clone()], + vec![AptosCoinType::type_tag()], vec![signed_transaction.sender().to_vec()], context.node_config.api.max_gas_view_function, ); @@ -986,7 +986,7 @@ impl TransactionsApi { address: Address, ) -> BasicResultWith404> { // Verify the account exists - let account = Account::new(self.context.clone(), address, None, None, None, true)?; + let account = Account::new(self.context.clone(), address, None, None, None)?; account.get_account_resource()?; let latest_ledger_info = account.latest_ledger_info; diff --git a/api/test-context/src/test_context.rs b/api/test-context/src/test_context.rs index b9f938de52b..759358708bd 100644 --- 
a/api/test-context/src/test_context.rs +++ b/api/test-context/src/test_context.rs @@ -43,7 +43,7 @@ use aptos_types::{ ledger_info::{LedgerInfo, LedgerInfoWithSignatures}, transaction::{ signature_verified_transaction::into_signature_verified_block, Transaction, - TransactionPayload, TransactionStatus, + TransactionPayload, TransactionStatus, Version, }, }; use aptos_vm::AptosVM; @@ -53,6 +53,7 @@ use hyper::{HeaderMap, Response}; use rand::SeedableRng; use serde_json::{json, Value}; use std::{boxed::Box, net::SocketAddr, path::PathBuf, sync::Arc, time::Duration}; +use tokio::sync::watch::channel; use warp::{http::header::CONTENT_TYPE, Filter, Rejection, Reply}; use warp_reverse_proxy::reverse_proxy_filter; @@ -118,12 +119,14 @@ pub fn new_test_context( let (root_key, genesis, genesis_waypoint, validators) = builder.build(&mut rng).unwrap(); let (validator_identity, _, _, _) = validators[0].get_key_objects(None).unwrap(); let validator_owner = validator_identity.account_address.unwrap(); - + let (sender, recver) = channel::(0); let (db, db_rw) = if use_db_with_indexer { - DbReaderWriter::wrap(AptosDB::new_for_test_with_indexer( + let mut aptos_db = AptosDB::new_for_test_with_indexer( &tmp_dir, node_config.storage.rocksdb_configs.enable_storage_sharding, - )) + ); + aptos_db.add_version_update_subscriber(sender).unwrap(); + DbReaderWriter::wrap(aptos_db) } else { DbReaderWriter::wrap( AptosDB::open( @@ -155,7 +158,7 @@ pub fn new_test_context( .storage .set_data_dir(tmp_dir.path().to_path_buf()); let mock_indexer_service = - MockInternalIndexerDBService::new_for_test(db_rw.reader.clone(), &node_config); + MockInternalIndexerDBService::new_for_test(db_rw.reader.clone(), &node_config, recver); let context = Context::new( ChainId::test(), @@ -448,6 +451,26 @@ impl TestContext { multisig_address } + pub async fn create_multisig_account_with_existing_account( + &mut self, + account: &mut LocalAccount, + owners: Vec, + signatures_required: u64, + initial_balance: u64, + ) 
{ + let factory = self.transaction_factory(); + let txn = account.sign_with_transaction_builder( + factory + .create_multisig_account_with_existing_account(owners, signatures_required) + .expiration_timestamp_secs(u64::MAX), + ); + self.commit_block(&vec![ + txn, + self.account_transfer_to(account, account.address(), initial_balance), + ]) + .await; + } + pub async fn create_multisig_transaction( &mut self, owner: &mut LocalAccount, diff --git a/api/types/src/convert.rs b/api/types/src/convert.rs index 6fdbbc7e3ac..85fecd9ec32 100644 --- a/api/types/src/convert.rs +++ b/api/types/src/convert.rs @@ -1011,14 +1011,9 @@ impl<'a, S: StateView> MoveConverter<'a, S> { fn get_table_info(&self, handle: TableHandle) -> Result> { if let Some(indexer_reader) = self.indexer_reader.as_ref() { - // Attempt to get table_info from the indexer_reader if it exists - Ok(indexer_reader.get_table_info(handle)?) - } else if self.db.indexer_enabled() { - // Attempt to get table_info from the db if indexer is enabled - Ok(Some(self.db.get_table_info(handle)?)) - } else { - Ok(None) + return Ok(indexer_reader.get_table_info(handle).unwrap_or(None)); } + Ok(None) } fn explain_vm_status( diff --git a/api/types/src/error.rs b/api/types/src/error.rs index cb0d61bc67f..3f7454f50c9 100644 --- a/api/types/src/error.rs +++ b/api/types/src/error.rs @@ -53,7 +53,7 @@ impl AptosError { /// These codes provide more granular error information beyond just the HTTP /// status code of the response. 
-#[derive(Copy, Clone, Debug, Serialize, Deserialize, Enum)] +#[derive(Copy, Clone, Debug, PartialEq, Serialize, Deserialize, Enum)] #[oai(rename_all = "snake_case")] #[serde(rename_all = "snake_case")] #[repr(u32)] diff --git a/api/types/src/ledger_info.rs b/api/types/src/ledger_info.rs index ef912190c94..97438ae1040 100644 --- a/api/types/src/ledger_info.rs +++ b/api/types/src/ledger_info.rs @@ -40,6 +40,26 @@ impl LedgerInfo { } } + pub fn new_ledger_info( + chain_id: &ChainId, + epoch: u64, + ledger_version: u64, + oldest_ledger_version: u64, + oldest_block_height: u64, + block_height: u64, + ledger_timestamp: u64, + ) -> Self { + Self { + chain_id: chain_id.id(), + epoch: epoch.into(), + ledger_version: ledger_version.into(), + oldest_ledger_version: oldest_ledger_version.into(), + block_height: block_height.into(), + oldest_block_height: oldest_block_height.into(), + ledger_timestamp: ledger_timestamp.into(), + } + } + pub fn epoch(&self) -> u64 { self.epoch.into() } diff --git a/aptos-move/aptos-gas-schedule/src/gas_schedule/instr.rs b/aptos-move/aptos-gas-schedule/src/gas_schedule/instr.rs index 26557162fb1..1e7f1a48601 100644 --- a/aptos-move/aptos-gas-schedule/src/gas_schedule/instr.rs +++ b/aptos-move/aptos-gas-schedule/src/gas_schedule/instr.rs @@ -50,15 +50,15 @@ crate::gas_schedule::macros::define_gas_parameters!( [mut_borrow_variant_field: InternalGas, { RELEASE_V1_18.. => "mut_borrow_variant_field" }, 835], [imm_borrow_variant_field_generic: InternalGas, - { RELEASE_V1_18 => "imm_borrow_variant_field_generic" }, 835], + { RELEASE_V1_18.. => "imm_borrow_variant_field_generic" }, 835], [mut_borrow_variant_field_generic: InternalGas, - { RELEASE_V1_18 => "mut_borrow_variant_field_generic" }, 835], + { RELEASE_V1_18.. => "mut_borrow_variant_field_generic" }, 835], // variant testing [test_variant: InternalGas, - { RELEASE_V1_18 => "test_variant" }, 535], + { RELEASE_V1_18.. 
=> "test_variant" }, 535], [test_variant_generic: InternalGas, - { RELEASE_V1_18 => "test_variant_generic" }, 535], + { RELEASE_V1_18.. => "test_variant_generic" }, 535], // locals [copy_loc_base: InternalGas, "copy_loc.base", 294], diff --git a/aptos-move/aptos-gas-schedule/src/ver.rs b/aptos-move/aptos-gas-schedule/src/ver.rs index f8b5d761715..f798c42b401 100644 --- a/aptos-move/aptos-gas-schedule/src/ver.rs +++ b/aptos-move/aptos-gas-schedule/src/ver.rs @@ -69,7 +69,7 @@ /// global operations. /// - V1 /// - TBA -pub const LATEST_GAS_FEATURE_VERSION: u64 = gas_feature_versions::RELEASE_V1_18; +pub const LATEST_GAS_FEATURE_VERSION: u64 = gas_feature_versions::RELEASE_V1_21; pub mod gas_feature_versions { pub const RELEASE_V1_8: u64 = 11; diff --git a/aptos-move/aptos-transactional-test-harness/src/aptos_test_harness.rs b/aptos-move/aptos-transactional-test-harness/src/aptos_test_harness.rs index 1d66cc644cf..fb52220d7c2 100644 --- a/aptos-move/aptos-transactional-test-harness/src/aptos_test_harness.rs +++ b/aptos-move/aptos-transactional-test-harness/src/aptos_test_harness.rs @@ -26,6 +26,7 @@ use aptos_types::{ Script as TransactionScript, Transaction, TransactionOutput, TransactionStatus, }, vm::configs::set_paranoid_type_checks, + AptosCoinType, }; use aptos_vm::{AptosVM, VMExecutor}; use aptos_vm_genesis::GENESIS_KEYPAIR; @@ -433,7 +434,7 @@ impl<'a> AptosTestAdapter<'a> { /// Obtain the AptosCoin amount under address `signer_addr` fn fetch_account_balance(&self, signer_addr: &AccountAddress) -> Result { - let aptos_coin_tag = CoinStoreResource::struct_tag(); + let aptos_coin_tag = CoinStoreResource::::struct_tag(); let balance_blob = self .storage diff --git a/aptos-move/e2e-move-tests/src/aggregator_v2.rs b/aptos-move/e2e-move-tests/src/aggregator_v2.rs index de32570c756..930e2316164 100644 --- a/aptos-move/e2e-move-tests/src/aggregator_v2.rs +++ b/aptos-move/e2e-move-tests/src/aggregator_v2.rs @@ -77,17 +77,25 @@ fn initialize_harness( let mut harness = 
MoveHarness::new_with_executor(executor); // Reduce gas scaling, so that smaller differences in gas are caught in comparison testing. harness.modify_gas_scaling(1000); + + let common_features = vec![ + FeatureFlag::AGGREGATOR_V2_API, + FeatureFlag::NEW_ACCOUNTS_DEFAULT_TO_FA_APT_STORE, + FeatureFlag::OPERATIONS_DEFAULT_TO_FA_APT_STORE, + FeatureFlag::DEFAULT_TO_CONCURRENT_FUNGIBLE_BALANCE, + ]; + if aggregator_execution_enabled { harness.enable_features( - vec![ - FeatureFlag::AGGREGATOR_V2_API, + [common_features, vec![ FeatureFlag::AGGREGATOR_V2_DELAYED_FIELDS, FeatureFlag::RESOURCE_GROUPS_SPLIT_IN_VM_CHANGE_SET, - ], + ]] + .concat(), vec![], ); } else { - harness.enable_features(vec![FeatureFlag::AGGREGATOR_V2_API], vec![ + harness.enable_features(common_features, vec![ FeatureFlag::AGGREGATOR_V2_DELAYED_FIELDS, FeatureFlag::RESOURCE_GROUPS_SPLIT_IN_VM_CHANGE_SET, ]); diff --git a/aptos-move/e2e-move-tests/src/harness.rs b/aptos-move/e2e-move-tests/src/harness.rs index 709f2b884b0..eff6c4bde58 100644 --- a/aptos-move/e2e-move-tests/src/harness.rs +++ b/aptos-move/e2e-move-tests/src/harness.rs @@ -9,7 +9,7 @@ use aptos_gas_schedule::{ AptosGasParameters, FromOnChainGasSchedule, InitialGasSchedule, ToOnChainGasSchedule, }; use aptos_language_e2e_tests::{ - account::{Account, AccountData, TransactionBuilder}, + account::{Account, TransactionBuilder}, executor::FakeExecutor, }; use aptos_types::{ @@ -32,6 +32,7 @@ use aptos_types::{ TransactionArgument, TransactionOutput, TransactionPayload, TransactionStatus, ViewFunctionOutput, }, + AptosCoinType, }; use claims::assert_ok; use move_core_types::{ @@ -154,8 +155,9 @@ impl MoveHarness { } pub fn store_and_fund_account(&mut self, acc: &Account, balance: u64, seq_num: u64) -> Account { - let data = AccountData::with_account(acc.clone(), balance, seq_num); - self.executor.add_account_data(&data); + let data = self + .executor + .store_and_fund_account(acc.clone(), balance, seq_num); 
self.txn_seq_no.insert(*acc.address(), seq_num); data.account().clone() } @@ -253,8 +255,8 @@ impl MoveHarness { account: &Account, payload: TransactionPayload, ) -> TransactionBuilder { - let on_chain_seq_no = self.sequence_number(account.address()); - let seq_no_ref = self.txn_seq_no.get_mut(account.address()).unwrap(); + let on_chain_seq_no = self.sequence_number_opt(account.address()).unwrap_or(0); + let seq_no_ref = self.txn_seq_no.entry(*account.address()).or_insert(0); let seq_no = std::cmp::max(on_chain_seq_no, *seq_no_ref); *seq_no_ref = seq_no + 1; account @@ -790,12 +792,15 @@ impl MoveHarness { } pub fn read_aptos_balance(&self, addr: &AccountAddress) -> u64 { - self.read_resource::(addr, CoinStoreResource::struct_tag()) - .map(|c| c.coin()) - .unwrap_or(0) + self.read_resource::>( + addr, + CoinStoreResource::::struct_tag(), + ) + .map(|c| c.coin()) + .unwrap_or(0) + self .read_resource_from_resource_group::( - &aptos_types::account_config::fungible_store::primary_store(addr), + &aptos_types::account_config::fungible_store::primary_apt_store(*addr), ObjectGroupResource::struct_tag(), FungibleStoreResource::struct_tag(), ) @@ -877,10 +882,14 @@ impl MoveHarness { self.override_one_gas_param("txn.max_transaction_size_in_bytes", 1000 * 1024); } - pub fn sequence_number(&self, addr: &AccountAddress) -> u64 { + pub fn sequence_number_opt(&self, addr: &AccountAddress) -> Option { self.read_resource::(addr, AccountResource::struct_tag()) - .unwrap() - .sequence_number() + .as_ref() + .map(AccountResource::sequence_number) + } + + pub fn sequence_number(&self, addr: &AccountAddress) -> u64 { + self.sequence_number_opt(addr).unwrap() } fn chain_id_is_mainnet(&self, addr: &AccountAddress) -> bool { diff --git a/aptos-move/e2e-move-tests/src/tests/federated_keyless_init_config.data/pack/Move.toml b/aptos-move/e2e-move-tests/src/tests/federated_keyless_init_config.data/pack/Move.toml new file mode 100644 index 00000000000..fd1578801ec --- /dev/null +++ 
b/aptos-move/e2e-move-tests/src/tests/federated_keyless_init_config.data/pack/Move.toml @@ -0,0 +1,6 @@ +[package] +name = 'FederatedKeylessInitConfig' +version = "0.0.0" + +[dependencies] +AptosFramework = { local = "../../../../../framework/aptos-framework" } diff --git a/aptos-move/e2e-move-tests/src/tests/federated_keyless_init_config.data/pack/sources/main.move b/aptos-move/e2e-move-tests/src/tests/federated_keyless_init_config.data/pack/sources/main.move new file mode 100644 index 00000000000..51225875bc6 --- /dev/null +++ b/aptos-move/e2e-move-tests/src/tests/federated_keyless_init_config.data/pack/sources/main.move @@ -0,0 +1,20 @@ +script { + use aptos_framework::aptos_governance; + use aptos_framework::jwks; + use aptos_framework::keyless_account; + + fun main(core_resources: &signer, max_exp_horizon_secs: u64) { + let fx = aptos_governance::get_signer_testnet_only(core_resources, @aptos_framework); + + keyless_account::update_max_exp_horizon_for_next_epoch(&fx, max_exp_horizon_secs); + + // remove all the JWKs in 0x1 (since we will be reusing the iss as a federated one; and we don't want the 0x1 JWKs to take priority over our federated JWKs) + let patches = vector[ + jwks::new_patch_remove_all(), + ]; + jwks::set_patches(&fx, patches); + + // sets the pending Configuration change to the max expiration horizon from above + aptos_governance::force_end_epoch_test_only(core_resources); + } +} diff --git a/aptos-move/e2e-move-tests/src/tests/fee_payer.rs b/aptos-move/e2e-move-tests/src/tests/fee_payer.rs index 8342c94ad47..d9586b1a8ed 100644 --- a/aptos-move/e2e-move-tests/src/tests/fee_payer.rs +++ b/aptos-move/e2e-move-tests/src/tests/fee_payer.rs @@ -13,6 +13,7 @@ use aptos_types::{ move_utils::MemberId, on_chain_config::FeatureFlag, transaction::{EntryFunction, ExecutionStatus, Script, TransactionPayload, TransactionStatus}, + AptosCoinType, }; use aptos_vm_types::storage::StorageGasParameters; use move_core_types::{move_resource::MoveStructType, 
vm_status::StatusCode}; @@ -114,8 +115,10 @@ fn test_account_not_exist_with_fee_payer() { let alice = Account::new(); let bob = h.new_account_at(AccountAddress::from_hex_literal("0xb0b").unwrap()); - let alice_start = - h.read_resource::(alice.address(), CoinStoreResource::struct_tag()); + let alice_start = h.read_resource::>( + alice.address(), + CoinStoreResource::::struct_tag(), + ); assert!(alice_start.is_none()); let bob_start = h.read_aptos_balance(bob.address()); @@ -131,8 +134,10 @@ fn test_account_not_exist_with_fee_payer() { let output = h.run_raw(transaction); assert_success!(*output.status()); - let alice_after = - h.read_resource::(alice.address(), CoinStoreResource::struct_tag()); + let alice_after = h.read_resource::>( + alice.address(), + CoinStoreResource::::struct_tag(), + ); assert!(alice_after.is_none()); let bob_after = h.read_aptos_balance(bob.address()); @@ -152,8 +157,10 @@ fn test_account_not_exist_with_fee_payer_insufficient_gas() { let alice = Account::new(); let bob = h.new_account_at(AccountAddress::from_hex_literal("0xb0b").unwrap()); - let alice_start = - h.read_resource::(alice.address(), CoinStoreResource::struct_tag()); + let alice_start = h.read_resource::>( + alice.address(), + CoinStoreResource::::struct_tag(), + ); assert!(alice_start.is_none()); let bob_start = h.read_aptos_balance(bob.address()); @@ -172,8 +179,10 @@ fn test_account_not_exist_with_fee_payer_insufficient_gas() { &TransactionStatus::Discard(StatusCode::MAX_GAS_UNITS_BELOW_MIN_TRANSACTION_GAS_UNITS), )); - let alice_after = - h.read_resource::(alice.address(), CoinStoreResource::struct_tag()); + let alice_after = h.read_resource::>( + alice.address(), + CoinStoreResource::::struct_tag(), + ); assert!(alice_after.is_none()); let bob_after = h.read_aptos_balance(bob.address()); assert_eq!(bob_start, bob_after); @@ -192,8 +201,10 @@ fn test_account_not_exist_and_move_abort_with_fee_payer_create_account() { let alice = Account::new(); let bob = 
h.new_account_at(AccountAddress::from_hex_literal("0xb0b").unwrap()); - let alice_start = - h.read_resource::(alice.address(), CoinStoreResource::struct_tag()); + let alice_start = h.read_resource::>( + alice.address(), + CoinStoreResource::::struct_tag(), + ); assert!(alice_start.is_none()); let bob_start = h.read_aptos_balance(bob.address()); @@ -227,8 +238,10 @@ fn test_account_not_exist_and_move_abort_with_fee_payer_create_account() { assert!(output.gas_used() <= PRICING.new_account_upfront(GAS_UNIT_PRICE)); assert!(output.gas_used() > PRICING.new_account_min_abort(GAS_UNIT_PRICE)); - let alice_after = - h.read_resource::(alice.address(), CoinStoreResource::struct_tag()); + let alice_after = h.read_resource::>( + alice.address(), + CoinStoreResource::::struct_tag(), + ); assert!(alice_after.is_none()); let bob_after = h.read_aptos_balance(bob.address()); @@ -335,8 +348,10 @@ fn test_account_not_exist_with_fee_payer_without_create_account() { let alice = Account::new(); let bob = h.new_account_at(AccountAddress::from_hex_literal("0xb0b").unwrap()); - let alice_start = - h.read_resource::(alice.address(), CoinStoreResource::struct_tag()); + let alice_start = h.read_resource::>( + alice.address(), + CoinStoreResource::::struct_tag(), + ); assert!(alice_start.is_none()); let payload = aptos_stdlib::aptos_account_set_allow_direct_coin_transfers(true); diff --git a/aptos-move/e2e-move-tests/src/tests/fungible_asset.rs b/aptos-move/e2e-move-tests/src/tests/fungible_asset.rs index 7c8be22e3d7..2a26fa5d216 100644 --- a/aptos-move/e2e-move-tests/src/tests/fungible_asset.rs +++ b/aptos-move/e2e-move-tests/src/tests/fungible_asset.rs @@ -1,8 +1,16 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 -use crate::{assert_success, tests::common, MoveHarness}; -use aptos_types::account_address::{self, AccountAddress}; +use crate::{assert_success, tests::common, BlockSplit, MoveHarness, SUCCESS}; +use 
aptos_cached_packages::aptos_stdlib::{aptos_account_batch_transfer, aptos_account_transfer}; +use aptos_language_e2e_tests::{ + account::Account, + executor::{ExecutorMode, FakeExecutor}, +}; +use aptos_types::{ + account_address::{self, AccountAddress}, + on_chain_config::FeatureFlag, +}; use move_core_types::{ identifier::Identifier, language_storage::{StructTag, TypeTag}, @@ -223,3 +231,58 @@ fn test_coin_to_fungible_asset_migration() { ) .is_some()); } + +/// Trigger speculative error in prologue, from accessing delayed field that was created later than +/// last committed index (so that read_last_commited_value fails speculatively) +/// +/// We do that by having an expensive transaction first (to make sure committed index isn't moved), +/// and then create some new aggregators (concurrent balances for new accounts), and then have them issue +/// transactions - so their balance is checked in prologue. +#[test] +fn test_prologue_speculation() { + let executor = FakeExecutor::from_head_genesis().set_executor_mode(ExecutorMode::ParallelOnly); + + let mut harness = MoveHarness::new_with_executor(executor); + harness.enable_features( + vec![ + FeatureFlag::NEW_ACCOUNTS_DEFAULT_TO_FA_APT_STORE, + FeatureFlag::OPERATIONS_DEFAULT_TO_FA_APT_STORE, + FeatureFlag::DEFAULT_TO_CONCURRENT_FUNGIBLE_BALANCE, + ], + vec![], + ); + let independent_account = harness.new_account_at(AccountAddress::random()); + + let sink_txn = harness.create_transaction_payload( + &independent_account, + aptos_account_batch_transfer(vec![AccountAddress::random(); 50], vec![10_000_000_000; 50]), + ); + + let account = harness.new_account_at(AccountAddress::ONE); + let dst_1 = Account::new(); + let dst_2 = Account::new(); + let dst_3 = Account::new(); + + let fund_txn = harness.create_transaction_payload( + &account, + aptos_account_batch_transfer( + vec![*dst_1.address(), *dst_2.address(), *dst_3.address()], + vec![10_000_000_000, 10_000_000_000, 10_000_000_000], + ), + ); + + let transfer_1_txn = 
+ harness.create_transaction_payload(&dst_1, aptos_account_transfer(*dst_2.address(), 1)); + let transfer_2_txn = + harness.create_transaction_payload(&dst_2, aptos_account_transfer(*dst_3.address(), 1)); + let transfer_3_txn = + harness.create_transaction_payload(&dst_3, aptos_account_transfer(*dst_1.address(), 1)); + + harness.run_block_in_parts_and_check(BlockSplit::Whole, vec![ + (SUCCESS, sink_txn), + (SUCCESS, fund_txn), + (SUCCESS, transfer_1_txn), + (SUCCESS, transfer_2_txn), + (SUCCESS, transfer_3_txn), + ]); +} diff --git a/aptos-move/e2e-move-tests/src/tests/keyless_feature_gating.rs b/aptos-move/e2e-move-tests/src/tests/keyless_feature_gating.rs index 58fdd63b758..aac8a492508 100644 --- a/aptos-move/e2e-move-tests/src/tests/keyless_feature_gating.rs +++ b/aptos-move/e2e-move-tests/src/tests/keyless_feature_gating.rs @@ -6,26 +6,35 @@ use aptos_cached_packages::aptos_stdlib; use aptos_crypto::{hash::CryptoHash, SigningKey}; use aptos_language_e2e_tests::account::{Account, AccountPublicKey, TransactionBuilder}; use aptos_types::{ + account_config::CORE_CODE_ADDRESS, + jwks::{rsa::RSA_JWK, secure_test_rsa_jwk}, keyless::{ test_utils::{ get_groth16_sig_and_pk_for_upgraded_vk, get_sample_esk, get_sample_groth16_sig_and_pk, get_sample_iss, get_sample_jwk, get_sample_openid_sig_and_pk, get_upgraded_vk, }, - Configuration, EphemeralCertificate, Groth16VerificationKey, KeylessPublicKey, - KeylessSignature, TransactionAndProof, + AnyKeylessPublicKey, Configuration, EphemeralCertificate, FederatedKeylessPublicKey, + Groth16VerificationKey, KeylessPublicKey, KeylessSignature, TransactionAndProof, }, on_chain_config::FeatureFlag, transaction::{ authenticator::{AnyPublicKey, AuthenticationKey, EphemeralSignature}, - Script, SignedTransaction, Transaction, TransactionStatus, + EntryFunction, Script, SignedTransaction, Transaction, TransactionStatus, }, }; use move_core_types::{ account_address::AccountAddress, + ident_str, + language_storage::ModuleId, 
transaction_argument::TransactionArgument, - vm_status::{StatusCode, StatusCode::FEATURE_UNDER_GATING}, + value::{serialize_values, MoveValue}, + vm_status::{ + StatusCode, + StatusCode::{FEATURE_UNDER_GATING, INVALID_SIGNATURE}, + }, }; +/// Initializes an Aptos VM and sets the keyless configuration via script (the VK is already set in genesis). fn init_feature_gating( enabled_features: Vec, disabled_features: Vec, @@ -89,7 +98,7 @@ fn test_rotate_vk() { // Old proof for old VK let (old_sig, pk) = get_sample_groth16_sig_and_pk(); - let account = create_keyless_account(&mut h, pk.clone()); + let account = create_keyless_account(&mut h, pk); let transaction = spend_keyless_account(&mut h, old_sig.clone(), &account, *recipient.address()); let output = h.run_raw(transaction); @@ -203,11 +212,117 @@ fn test_feature_gating_with_zk_off() { test_feature_gating(&mut h, &recipient, get_sample_openid_sig_and_pk, false); } +/// Creates a federated keyless account associated with a JWK addr. First, ensures TXN validation +/// for this account fails because the JWKs are not installed at that JWK addr. Second, installs the +/// JWK at this address and ensures TXN validation now succeeds. +#[test] +fn test_federated_keyless_at_jwk_addr() { + let mut h = MoveHarness::new_with_features( + vec![ + FeatureFlag::CRYPTOGRAPHY_ALGEBRA_NATIVES, + FeatureFlag::BN254_STRUCTURES, + FeatureFlag::KEYLESS_ACCOUNTS, + FeatureFlag::FEDERATED_KEYLESS, + ], + vec![], + ); + + let jwk_addr = AccountAddress::from_hex_literal("0xadd").unwrap(); + + // Step 1: Make sure TXN validation fails if JWKs are not installed at jwk_addr. 
+ let (sig, pk) = get_sample_groth16_sig_and_pk(); + let sender = create_federated_keyless_account(&mut h, jwk_addr, pk); + let recipient = h.new_account_at(AccountAddress::from_hex_literal("0xb0b").unwrap()); + let txn = spend_keyless_account(&mut h, sig.clone(), &sender, *recipient.address()); + let output = h.run_raw(txn); + + match output.status() { + TransactionStatus::Discard(status) => { + assert_eq!( + *status, INVALID_SIGNATURE, + "Expected TransactionStatus::Discard to be INVALID_SIGNATURE, but got: {:?}", + status + ) + }, + _ => panic!( + "Expected TransactionStatus::Discard, got: {:?}", + output.status() + ), + } + + // Step 1: Make sure TXN validation succeeds once JWKs are installed at jwk_addr. + let iss = get_sample_iss(); + let jwk = get_sample_jwk(); + let _core_resources = install_federated_jwks_and_set_keyless_config(&mut h, jwk_addr, iss, jwk); + + let txn = spend_keyless_account(&mut h, sig, &sender, *recipient.address()); + let output = h.run_raw(txn); + + assert_success!( + output.status().clone(), + "Expected TransactionStatus::Keep(ExecutionStatus::Success), but got: {:?}", + output.status() + ); +} + +/// Tests that the default JWKs at 0x1 work as an override when the JWKs at jwk_addr don't work. 
+#[test] +fn test_federated_keyless_override_at_0x1() { + let mut h = MoveHarness::new_with_features( + vec![ + FeatureFlag::CRYPTOGRAPHY_ALGEBRA_NATIVES, + FeatureFlag::BN254_STRUCTURES, + FeatureFlag::KEYLESS_ACCOUNTS, + FeatureFlag::FEDERATED_KEYLESS, + ], + vec![], + ); + + let jwk_addr = AccountAddress::from_hex_literal("0xadd").unwrap(); + let iss = get_sample_iss(); + let jwk = secure_test_rsa_jwk(); // this will be the wrong JWK + let _core_resources = install_federated_jwks_and_set_keyless_config(&mut h, jwk_addr, iss, jwk); + + // Step 1: Make sure the TXN does not validate, since the wrong JWK is installed at JWK addr + let (sig, pk) = get_sample_groth16_sig_and_pk(); + let sender = create_federated_keyless_account(&mut h, jwk_addr, pk); + let recipient = h.new_account_at(AccountAddress::from_hex_literal("0xb0b").unwrap()); + let txn = spend_keyless_account(&mut h, sig.clone(), &sender, *recipient.address()); + let output = h.run_raw(txn); + + match output.status() { + TransactionStatus::Discard(status) => { + assert_eq!( + *status, INVALID_SIGNATURE, + "Expected TransactionStatus::Discard to be INVALID_SIGNATURE, but got: {:?}", + status + ) + }, + _ => panic!( + "Expected TransactionStatus::Discard, got: {:?}", + output.status() + ), + } + + // Step 2: Install the correct JWK at 0x1 and resubmit the TXN; it should now validate + run_jwk_and_config_script(&mut h); + let txn = spend_keyless_account(&mut h, sig, &sender, *recipient.address()); + let output = h.run_raw(txn); + + assert_success!( + output.status().clone(), + "Expected TransactionStatus::Keep(ExecutionStatus::Success), but got: {:?}", + output.status() + ); +} + fn create_keyless_account(h: &mut MoveHarness, pk: KeylessPublicKey) -> Account { - let apk = AnyPublicKey::keyless(pk.clone()); - let addr = AuthenticationKey::any_key(apk.clone()).account_address(); + let addr = AuthenticationKey::any_key(AnyPublicKey::keyless(pk.clone())).account_address(); let account = h.store_and_fund_account( 
- &Account::new_from_addr(addr, AccountPublicKey::Keyless(pk)), + &Account::new_from_addr( + addr, + AccountPublicKey::AnyPublicKey(AnyPublicKey::Keyless { public_key: pk }), + ), 100000000, 0, ); @@ -251,8 +366,12 @@ fn spend_keyless_account( } sig.ephemeral_signature = EphemeralSignature::ed25519(esk.sign(&txn_and_zkp).unwrap()); - let transaction = - SignedTransaction::new_keyless(raw_txn, account.pubkey.as_keyless().unwrap(), sig); + let transaction = match account.pubkey.as_keyless().unwrap() { + AnyKeylessPublicKey::Normal(pk) => SignedTransaction::new_keyless(raw_txn, pk, sig), + AnyKeylessPublicKey::Federated(pk) => { + SignedTransaction::new_federated_keyless(raw_txn, pk, sig) + }, + }; println!( "Submitted TXN hash: {}", Transaction::UserTransaction(transaction.clone()).hash() @@ -260,6 +379,29 @@ fn spend_keyless_account( transaction } +fn create_federated_keyless_account( + h: &mut MoveHarness, + jwk_addr: AccountAddress, + pk: KeylessPublicKey, +) -> Account { + let fed_pk = FederatedKeylessPublicKey { jwk_addr, pk }; + let addr = AuthenticationKey::any_key(AnyPublicKey::federated_keyless(fed_pk.clone())) + .account_address(); + let account = h.store_and_fund_account( + &Account::new_from_addr( + addr, + AccountPublicKey::AnyPublicKey(AnyPublicKey::FederatedKeyless { public_key: fed_pk }), + ), + 100000000, + 0, + ); + + println!("Actual address: {}", addr.to_hex()); + println!("Account address: {}", account.address().to_hex()); + + account +} + /// Creates and funds a new account at `pk` and sends coins to `recipient`. fn create_and_spend_keyless_account( h: &mut MoveHarness, @@ -267,11 +409,12 @@ fn create_and_spend_keyless_account( pk: KeylessPublicKey, recipient: AccountAddress, ) -> SignedTransaction { - let account = create_keyless_account(h, pk.clone()); + let account = create_keyless_account(h, pk); spend_keyless_account(h, sig, &account, recipient) } +/// Sets the keyless configuration (Note: the VK is already set in genesis.) 
fn run_jwk_and_config_script(h: &mut MoveHarness) -> Account { let core_resources = h.new_account_at(AccountAddress::from_hex_literal("0xA550C18").unwrap()); @@ -305,13 +448,94 @@ fn run_jwk_and_config_script(h: &mut MoveHarness) -> Account { .sign(); // NOTE: We cannot write the Configuration and Groth16Verification key via MoveHarness::set_resource - // because it does not (yet) work with resource groups. + // because it does not (yet) work with resource groups. This is okay, because the VK will be + // there from genesis. assert_success!(h.run(txn)); core_resources } +/// Sets the keyless configuration and installs the sample RSA JWK as a federated JWK +/// (Note: the VK is already set in genesis.) +fn install_federated_jwks_and_set_keyless_config( + h: &mut MoveHarness, + jwk_owner: AccountAddress, + iss: String, + jwk: RSA_JWK, +) -> Account { + let core_resources = h.new_account_at(AccountAddress::from_hex_literal("0xA550C18").unwrap()); + + federated_keyless_init_config(h, core_resources.clone()); + + federated_keyless_install_jwk(h, jwk_owner, iss, jwk); + + core_resources +} + +fn federated_keyless_init_config(h: &mut MoveHarness, core_resources: Account) { + let package = build_package( + common::test_dir_path("federated_keyless_init_config.data/pack"), + aptos_framework::BuildOptions::default(), + ) + .expect("building package must succeed"); + + let txn = h.create_publish_built_package(&core_resources, &package, |_| {}); + assert_success!(h.run(txn)); + + let script = package.extract_script_code()[0].clone(); + + let config = Configuration::new_for_testing(); + + let txn = TransactionBuilder::new(core_resources.clone()) + .script(Script::new(script, vec![], vec![TransactionArgument::U64( + config.max_exp_horizon_secs, + )])) + .sequence_number(h.sequence_number(core_resources.address())) + .max_gas_amount(1_000_000) + .gas_unit_price(1) + .sign(); + + // NOTE: We cannot write the Configuration and Groth16Verification key via MoveHarness::set_resource + 
// because it does not (yet) work with resource groups. This is okay, because the VK will be + // there from genesis. + + assert_success!(h.run(txn)); +} + +fn federated_keyless_install_jwk( + h: &mut MoveHarness, + jwk_owner: AccountAddress, + iss: String, + jwk: RSA_JWK, +) { + let jwk_owner_account = h.new_account_at(jwk_owner); + + let txn = TransactionBuilder::new(jwk_owner_account.clone()) + .entry_function(EntryFunction::new( + ModuleId::new(CORE_CODE_ADDRESS, ident_str!("jwks").to_owned()), + ident_str!("update_federated_jwk_set").to_owned(), + vec![], + serialize_values(&vec![ + MoveValue::vector_u8(iss.into_bytes()), + MoveValue::Vector(vec![MoveValue::vector_u8(jwk.kid.into_bytes())]), + MoveValue::Vector(vec![MoveValue::vector_u8(jwk.alg.into_bytes())]), + MoveValue::Vector(vec![MoveValue::vector_u8(jwk.e.into_bytes())]), + MoveValue::Vector(vec![MoveValue::vector_u8(jwk.n.into_bytes())]), + ]), + )) + .sequence_number(h.sequence_number(jwk_owner_account.address())) + .max_gas_amount(1_000_000) + .gas_unit_price(1) + .sign(); + + // NOTE: We cannot write the Configuration and Groth16Verification key via MoveHarness::set_resource + // because it does not (yet) work with resource groups. This is okay, because the VK will be + // there from genesis. 
+ + assert_success!(h.run(txn)); +} + fn run_upgrade_vk_script(h: &mut MoveHarness, core_resources: Account, vk: Groth16VerificationKey) { let package = build_package( common::test_dir_path("keyless_new_vk.data/pack"), diff --git a/aptos-move/e2e-move-tests/src/tests/large_package_publishing.data/large_pack_upgrade_incompat/Move.toml b/aptos-move/e2e-move-tests/src/tests/large_package_publishing.data/large_pack_upgrade_incompat/Move.toml new file mode 100644 index 00000000000..114818601d2 --- /dev/null +++ b/aptos-move/e2e-move-tests/src/tests/large_package_publishing.data/large_pack_upgrade_incompat/Move.toml @@ -0,0 +1,9 @@ +[package] +name = "LargePackageExample" +version = "0.0.0" +upgrade_policy = "compatible" +[dependencies] +AptosFramework = { local = "../../../../../framework/aptos-framework" } + +[addresses] +large_package_example = "_" diff --git a/aptos-move/e2e-move-tests/src/tests/large_package_publishing.data/large_pack_upgrade_incompat/sources/eight.move b/aptos-move/e2e-move-tests/src/tests/large_package_publishing.data/large_pack_upgrade_incompat/sources/eight.move new file mode 100644 index 00000000000..e3decbd981f --- /dev/null +++ b/aptos-move/e2e-move-tests/src/tests/large_package_publishing.data/large_pack_upgrade_incompat/sources/eight.move @@ -0,0 +1 @@ +module large_package_example::eight {} diff --git a/aptos-move/e2e-move-tests/src/tests/large_package_publishing.data/large_pack_upgrade_incompat/sources/five.move b/aptos-move/e2e-move-tests/src/tests/large_package_publishing.data/large_pack_upgrade_incompat/sources/five.move new file mode 100644 index 00000000000..caf0559b7a4 --- /dev/null +++ b/aptos-move/e2e-move-tests/src/tests/large_package_publishing.data/large_pack_upgrade_incompat/sources/five.move @@ -0,0 +1 @@ +module large_package_example::five {} diff --git a/aptos-move/e2e-move-tests/src/tests/large_package_publishing.data/large_pack_upgrade_incompat/sources/four.move 
b/aptos-move/e2e-move-tests/src/tests/large_package_publishing.data/large_pack_upgrade_incompat/sources/four.move new file mode 100644 index 00000000000..ca2b7e66c72 --- /dev/null +++ b/aptos-move/e2e-move-tests/src/tests/large_package_publishing.data/large_pack_upgrade_incompat/sources/four.move @@ -0,0 +1 @@ +module large_package_example::four {} diff --git a/aptos-move/e2e-move-tests/src/tests/large_package_publishing.data/large_pack_upgrade_incompat/sources/one.move b/aptos-move/e2e-move-tests/src/tests/large_package_publishing.data/large_pack_upgrade_incompat/sources/one.move new file mode 100644 index 00000000000..faf2db53b59 --- /dev/null +++ b/aptos-move/e2e-move-tests/src/tests/large_package_publishing.data/large_pack_upgrade_incompat/sources/one.move @@ -0,0 +1 @@ +module large_package_example::one {} diff --git a/aptos-move/e2e-move-tests/src/tests/large_package_publishing.data/large_pack_upgrade_incompat/sources/seven.move b/aptos-move/e2e-move-tests/src/tests/large_package_publishing.data/large_pack_upgrade_incompat/sources/seven.move new file mode 100644 index 00000000000..d38cd0b0bf7 --- /dev/null +++ b/aptos-move/e2e-move-tests/src/tests/large_package_publishing.data/large_pack_upgrade_incompat/sources/seven.move @@ -0,0 +1 @@ +module large_package_example::seven {} diff --git a/aptos-move/e2e-move-tests/src/tests/large_package_publishing.data/large_pack_upgrade_incompat/sources/six.move b/aptos-move/e2e-move-tests/src/tests/large_package_publishing.data/large_pack_upgrade_incompat/sources/six.move new file mode 100644 index 00000000000..5373b207c4f --- /dev/null +++ b/aptos-move/e2e-move-tests/src/tests/large_package_publishing.data/large_pack_upgrade_incompat/sources/six.move @@ -0,0 +1 @@ +module large_package_example::six {} diff --git a/aptos-move/e2e-move-tests/src/tests/large_package_publishing.data/large_pack_upgrade_incompat/sources/three.move 
b/aptos-move/e2e-move-tests/src/tests/large_package_publishing.data/large_pack_upgrade_incompat/sources/three.move new file mode 100644 index 00000000000..4aa308268d4 --- /dev/null +++ b/aptos-move/e2e-move-tests/src/tests/large_package_publishing.data/large_pack_upgrade_incompat/sources/three.move @@ -0,0 +1 @@ +module large_package_example::three {} diff --git a/aptos-move/e2e-move-tests/src/tests/large_package_publishing.data/large_pack_upgrade_incompat/sources/two.move b/aptos-move/e2e-move-tests/src/tests/large_package_publishing.data/large_pack_upgrade_incompat/sources/two.move new file mode 100644 index 00000000000..4197c8ac592 --- /dev/null +++ b/aptos-move/e2e-move-tests/src/tests/large_package_publishing.data/large_pack_upgrade_incompat/sources/two.move @@ -0,0 +1 @@ +module large_package_example::two {} diff --git a/aptos-move/e2e-move-tests/src/tests/large_package_publishing.data/large_pack_upgrade_incompat/sources/zero.move b/aptos-move/e2e-move-tests/src/tests/large_package_publishing.data/large_pack_upgrade_incompat/sources/zero.move new file mode 100644 index 00000000000..bc516399bb3 --- /dev/null +++ b/aptos-move/e2e-move-tests/src/tests/large_package_publishing.data/large_pack_upgrade_incompat/sources/zero.move @@ -0,0 +1 @@ +module large_package_example::zero {} diff --git a/aptos-move/e2e-move-tests/src/tests/large_package_publishing.rs b/aptos-move/e2e-move-tests/src/tests/large_package_publishing.rs new file mode 100644 index 00000000000..2476ff308c8 --- /dev/null +++ b/aptos-move/e2e-move-tests/src/tests/large_package_publishing.rs @@ -0,0 +1,418 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::{assert_move_abort, assert_success, assert_vm_status, tests::common, MoveHarness}; +use aptos_framework::{ + chunked_publish::{ + chunk_package_and_create_payloads, PublishType, LARGE_PACKAGES_MODULE_ADDRESS, + }, + natives::{ + code::{PackageMetadata, PackageRegistry, UpgradePolicy}, + 
object_code_deployment::ManagingRefs, + }, + BuildOptions, BuiltPackage, +}; +use aptos_language_e2e_tests::account::Account; +use aptos_types::{ + object_address::create_object_code_deployment_address, + transaction::{AbortInfo, TransactionPayload, TransactionStatus}, +}; +use move_core_types::{ + account_address::AccountAddress, parser::parse_struct_tag, vm_status::StatusCode, +}; +use serde::{Deserialize, Serialize}; +use std::{collections::BTreeMap, option::Option, path::Path}; + +/// Number of transactions needed for staging code chunks before publishing to accounts or objects +/// This is used to derive object address for testing object code deployment feature +const NUMBER_OF_TRANSACTIONS_FOR_STAGING: u64 = 2; + +/// Mimics `0xcafe::eight::State` +#[derive(Serialize, Deserialize)] +struct State { + value: u64, +} + +struct LargePackageTestContext { + harness: MoveHarness, + account: Account, // used for testing account code deployment for large packages + object_address: AccountAddress, // used for testing object code deployment for large packages +} + +impl LargePackageTestContext { + /// Create a new test context with initialized accounts and published `large_packages.move` module. 
+ fn new() -> Self { + let mut harness = MoveHarness::new(); + let admin_account = harness.new_account_at( + AccountAddress::from_hex_literal(LARGE_PACKAGES_MODULE_ADDRESS).unwrap(), + ); + let account = harness.new_account_at(AccountAddress::from_hex_literal("0xcafe").unwrap()); + let sequence_number = harness.sequence_number(account.address()); + let object_address = create_object_code_deployment_address( + *account.address(), + sequence_number + NUMBER_OF_TRANSACTIONS_FOR_STAGING + 1, + ); + + // publish `large_packages.move` module + let build_option = Self::get_named_addresses_build_options(vec![( + String::from("large_packages"), + AccountAddress::from_hex_literal(LARGE_PACKAGES_MODULE_ADDRESS).unwrap(), + )]); + + let txn = harness.create_publish_package( + &admin_account, + &common::test_dir_path("../../../move-examples/large_packages"), + Some(build_option), + |_| {}, + ); + assert_success!(harness.run(txn)); + + LargePackageTestContext { + harness, + account, + object_address, + } + } + + fn get_named_addresses_build_options( + named_addresses: Vec<(String, AccountAddress)>, + ) -> BuildOptions { + let mut build_options = BuildOptions::default(); + let mut map = BTreeMap::new(); + for (k, v) in named_addresses { + map.insert(k, v); + } + build_options.named_addresses = map; + + build_options + } + + /// Publish a large package by creating and running the necessary transactions. 
+ fn publish_large_package( + &mut self, + account: &Account, + path: &Path, + patch_metadata: impl FnMut(&mut PackageMetadata), + publish_type: PublishType, + ) -> Vec { + let deploy_address = match publish_type { + PublishType::AccountDeploy => AccountAddress::from_hex_literal("0xcafe").unwrap(), + PublishType::ObjectDeploy | PublishType::ObjectUpgrade => self.object_address, + }; + + let build_options = Self::get_named_addresses_build_options(vec![( + String::from("large_package_example"), + deploy_address, + )]); + let payloads = self.create_publish_large_package_from_path( + path, + Some(build_options), + patch_metadata, + publish_type, + ); + payloads + .into_iter() + .map(|payload| { + let signed_tx = self + .harness + .create_transaction_without_sign(account, payload) + .sign(); + self.harness.run(signed_tx) + }) + .collect() + } + + /// Create transactions for publishing a large package. + fn create_publish_large_package_from_path( + &mut self, + path: &Path, + options: Option, + mut patch_metadata: impl FnMut(&mut PackageMetadata), + publish_type: PublishType, + ) -> Vec { + let package = BuiltPackage::build(path.to_owned(), options.unwrap()) + .expect("package build must succeed"); + let package_code = package.extract_code(); + let mut metadata = package + .extract_metadata() + .expect("extracting package metadata must succeed"); + patch_metadata(&mut metadata); + let metadata_serialized = bcs::to_bytes(&metadata).expect("Failed deserializing metadata"); + chunk_package_and_create_payloads( + metadata_serialized, + package_code, + publish_type, + Some(self.object_address), + ) + } +} + +#[test] +fn large_package_publishing_basic() { + let mut context = LargePackageTestContext::new(); + let acc = context.account.clone(); + + // Test transactions for publishing the large package are successful + let tx_statuses = context.publish_large_package( + &acc, + &common::test_dir_path("../../../move-examples/large_packages/large_package_example"), + |_| {}, + 
PublishType::AccountDeploy, + ); + for tx_status in tx_statuses.into_iter() { + assert_success!(tx_status); + } + + // Validate metadata + let registry = context + .harness + .read_resource::( + acc.address(), + parse_struct_tag("0x1::code::PackageRegistry").unwrap(), + ) + .unwrap(); + assert_eq!(registry.packages.len(), 1); + assert_eq!(registry.packages[0].name, "LargePackageExample"); + assert_eq!(registry.packages[0].modules.len(), 9); // `LargePackageExample` package includes 9 modules + + // Validate code loaded as expected. + assert_success!(context.harness.run_entry_function( + &acc, + str::parse("0xcafe::eight::hello").unwrap(), + vec![], + vec![bcs::to_bytes::(&42).unwrap()] + )); + let state = context + .harness + .read_resource::( + acc.address(), + parse_struct_tag("0xcafe::eight::State").unwrap(), + ) + .unwrap(); + assert_eq!(state.value, 42); +} + +#[test] +fn large_package_upgrade_success_compat() { + let mut context = LargePackageTestContext::new(); + let acc = context.account.clone(); + + // Initial version + let tx_statuses = context.publish_large_package( + &acc, + &common::test_dir_path("../../../move-examples/large_packages/large_package_example"), + |_| {}, + PublishType::AccountDeploy, + ); + for tx_status in tx_statuses.into_iter() { + assert_success!(tx_status); + } + + // Upgrade to compatible version + let tx_statuses = context.publish_large_package( + &acc, + &common::test_dir_path("../../../move-examples/large_packages/large_package_example"), // upgrade with the same package + |_| {}, + PublishType::AccountDeploy, + ); + for tx_status in tx_statuses.into_iter() { + assert_success!(tx_status); + } +} + +#[test] +fn large_package_upgrade_fail_compat() { + let mut context = LargePackageTestContext::new(); + let acc = context.account.clone(); + + // Initial version + let tx_statuses = context.publish_large_package( + &acc, + &common::test_dir_path("../../../move-examples/large_packages/large_package_example"), + |_| {}, + 
PublishType::AccountDeploy, + ); + for tx_status in tx_statuses.into_iter() { + assert_success!(tx_status); + } + + // Upgrade to incompatible version should fail + // Staging metadata and code should pass, and the final publishing transaction should fail + let mut tx_statuses = context.publish_large_package( + &acc, + &common::test_dir_path("large_package_publishing.data/large_pack_upgrade_incompat"), + |_| {}, + PublishType::AccountDeploy, + ); + + let last_tx_status = tx_statuses.pop().unwrap(); // transaction for publishing + + for tx_status in tx_statuses.into_iter() { + assert_success!(tx_status); + } + assert_vm_status!( + last_tx_status, + StatusCode::BACKWARD_INCOMPATIBLE_MODULE_UPDATE + ); +} + +#[test] +fn large_package_upgrade_fail_immutable() { + let mut context = LargePackageTestContext::new(); + let acc = context.account.clone(); + + // Initial version (immutable package) + let tx_statuses = context.publish_large_package( + &acc, + &common::test_dir_path("../../../move-examples/large_packages/large_package_example"), + |metadata| metadata.upgrade_policy = UpgradePolicy::immutable(), + PublishType::AccountDeploy, + ); + + for tx_status in tx_statuses.into_iter() { + assert_success!(tx_status); + } + + // Upgrading immutable package should fail + // Staging metadata and code should pass, and the final publishing transaction should fail + let mut tx_statuses = context.publish_large_package( + &acc, + &common::test_dir_path("../../../move-examples/large_packages/large_package_example"), + |_| {}, + PublishType::AccountDeploy, + ); + let last_tx_status = tx_statuses.pop().unwrap(); // transaction for publishing + for tx_status in tx_statuses.into_iter() { + assert_success!(tx_status); + } + let abort_info = Some(AbortInfo { + reason_name: "EUPGRADE_IMMUTABLE".to_string(), + description: "Cannot upgrade an immutable package".to_string(), + }); + assert_move_abort!(last_tx_status, abort_info); +} + +#[test] +fn 
large_package_upgrade_fail_overlapping_module() { + let mut context = LargePackageTestContext::new(); + let acc = context.account.clone(); + + // Initial version + let tx_statuses = context.publish_large_package( + &acc, + &common::test_dir_path("../../../move-examples/large_packages/large_package_example"), + |_| {}, + PublishType::AccountDeploy, + ); + for tx_status in tx_statuses.into_iter() { + assert_success!(tx_status); + } + + // Publishing the same package with different name should fail + // Staging metadata and code should pass, and the final publishing transaction should fail + let mut tx_statuses = context.publish_large_package( + &acc, + &common::test_dir_path("../../../move-examples/large_packages/large_package_example"), + |metadata| metadata.name = "other_large_pack".to_string(), + PublishType::AccountDeploy, + ); + + let last_tx_status = tx_statuses.pop().unwrap(); // transaction for publishing + + for tx_status in tx_statuses.into_iter() { + assert_success!(tx_status); + } + let abort_info = Some(AbortInfo { + reason_name: "EMODULE_NAME_CLASH".to_string(), + description: "Package contains duplicate module names with existing modules publised in other packages on this address".to_string(), + }); + assert_move_abort!(last_tx_status, abort_info); +} + +#[test] +fn large_package_object_code_deployment_basic() { + let mut context = LargePackageTestContext::new(); + let acc = context.account.clone(); + + // Test transactions for publishing the large package are successful + let tx_statuses = context.publish_large_package( + &acc, + &common::test_dir_path("../../../move-examples/large_packages/large_package_example"), + |_| {}, + PublishType::ObjectDeploy, + ); + for tx_status in tx_statuses.into_iter() { + assert_success!(tx_status); + } + + // Validate metadata + let registry = context + .harness + .read_resource::( + &context.object_address, + parse_struct_tag("0x1::code::PackageRegistry").unwrap(), + ) + .unwrap(); + 
assert_eq!(registry.packages.len(), 1); + assert_eq!(registry.packages[0].name, "LargePackageExample"); + assert_eq!(registry.packages[0].modules.len(), 9); + + let code_object: ManagingRefs = context + .harness + .read_resource_from_resource_group( + &context.object_address, + parse_struct_tag("0x1::object::ObjectGroup").unwrap(), + parse_struct_tag("0x1::object_code_deployment::ManagingRefs").unwrap(), + ) + .unwrap(); + // Verify the object created owns the `ManagingRefs` + assert_eq!(code_object, ManagingRefs::new(context.object_address)); + + let module_address = context.object_address.to_string(); + + // Validate code loaded as expected. + assert_success!(context.harness.run_entry_function( + &acc, + str::parse(&format!("{}::eight::hello", module_address)).unwrap(), + vec![], + vec![bcs::to_bytes::(&42).unwrap()] + )); + + let state = context + .harness + .read_resource::( + acc.address(), + parse_struct_tag(&format!("{}::eight::State", module_address)).unwrap(), + ) + .unwrap(); + + assert_eq!(state.value, 42); +} + +#[test] +fn large_package_object_code_deployment_upgrade_success_compat() { + let mut context = LargePackageTestContext::new(); + let acc = context.account.clone(); + + // Initial version + let tx_statuses = context.publish_large_package( + &acc, + &common::test_dir_path("../../../move-examples/large_packages/large_package_example"), + |_| {}, + PublishType::ObjectDeploy, + ); + for tx_status in tx_statuses.into_iter() { + assert_success!(tx_status); + } + + // Upgrade to compatible version + let tx_statuses = context.publish_large_package( + &acc, + &common::test_dir_path("../../../move-examples/large_packages/large_package_example"), // upgrade with the same package + |_| {}, + PublishType::ObjectUpgrade, + ); + for tx_status in tx_statuses.into_iter() { + assert_success!(tx_status); + } +} diff --git a/aptos-move/e2e-move-tests/src/tests/mod.rs b/aptos-move/e2e-move-tests/src/tests/mod.rs index efcafc20fd1..33c8ac97e7e 100644 --- 
a/aptos-move/e2e-move-tests/src/tests/mod.rs +++ b/aptos-move/e2e-move-tests/src/tests/mod.rs @@ -24,6 +24,7 @@ mod governance_updates; mod infinite_loop; mod init_module; mod keyless_feature_gating; +mod large_package_publishing; mod lazy_natives; mod max_loop_depth; mod memory_quota; diff --git a/aptos-move/e2e-tests/src/account.rs b/aptos-move/e2e-tests/src/account.rs index 0a3c5d8ad9c..468c4859d14 100644 --- a/aptos-move/e2e-tests/src/account.rs +++ b/aptos-move/e2e-tests/src/account.rs @@ -10,16 +10,21 @@ use aptos_keygen::KeyGen; use aptos_types::{ access_path::AccessPath, account_address::AccountAddress, - account_config::{self, AccountResource, CoinStoreResource}, + account_config::{ + self, primary_apt_store, AccountResource, CoinStoreResource, + ConcurrentFungibleBalanceResource, FungibleStoreResource, MigrationFlag, + ObjectCoreResource, ObjectGroupResource, + }, chain_id::ChainId, event::{EventHandle, EventKey}, - keyless::KeylessPublicKey, + keyless::AnyKeylessPublicKey, state_store::state_key::StateKey, transaction::{ authenticator::{AnyPublicKey, AuthenticationKey}, EntryFunction, RawTransaction, Script, SignedTransaction, TransactionPayload, }, write_set::{WriteOp, WriteSet, WriteSetMut}, + AptosCoinType, }; use aptos_vm_genesis::GENESIS_KEYPAIR; use move_core_types::move_resource::MoveStructType; @@ -30,27 +35,38 @@ pub const DEFAULT_EXPIRATION_TIME: u64 = 4_000_000; #[derive(Debug, Clone, Eq, PartialEq)] pub enum AccountPublicKey { Ed25519(Ed25519PublicKey), - Keyless(KeylessPublicKey), + AnyPublicKey(AnyPublicKey), } impl AccountPublicKey { pub fn to_bytes(&self) -> Vec { match self { AccountPublicKey::Ed25519(pk) => pk.to_bytes().to_vec(), - AccountPublicKey::Keyless(pk) => pk.to_bytes(), + AccountPublicKey::AnyPublicKey(pk) => pk.to_bytes().to_vec(), } } pub fn as_ed25519(&self) -> Option { match self { AccountPublicKey::Ed25519(pk) => Some(pk.clone()), - AccountPublicKey::Keyless(_) => None, + AccountPublicKey::AnyPublicKey(pk) => match pk { + 
AnyPublicKey::Ed25519 { public_key } => Some(public_key.clone()), + _ => None, + }, } } - pub fn as_keyless(&self) -> Option { + pub fn as_keyless(&self) -> Option { match self { - AccountPublicKey::Keyless(pk) => Some(pk.clone()), + AccountPublicKey::AnyPublicKey(pk) => match pk { + AnyPublicKey::Keyless { public_key } => { + Some(AnyKeylessPublicKey::Normal(public_key.clone())) + }, + AnyPublicKey::FederatedKeyless { public_key } => { + Some(AnyKeylessPublicKey::Federated(public_key.clone())) + }, + _ => None, + }, AccountPublicKey::Ed25519(_) => None, } } @@ -66,7 +82,8 @@ impl AccountPublicKey { pub struct Account { addr: AccountAddress, /// The current private key for this account. - /// TODO: When `pubkey` is of type `AccountPublicKey::Keyless`, this will be undefined. + /// TODO: Refactor appropriately since, for example, when `pubkey` is of type + /// `AccountPublicKey::AnyPublicKey::Keyless`, this `privkey` field will be undefined. pub privkey: Ed25519PrivateKey, /// The current public key for this account. pub pubkey: AccountPublicKey, @@ -171,8 +188,11 @@ impl Account { /// /// Use this to retrieve or publish the Account CoinStore blob. pub fn make_coin_store_access_path(&self) -> AccessPath { - AccessPath::resource_access_path(self.addr, CoinStoreResource::struct_tag()) - .expect("access path in test") + AccessPath::resource_access_path( + self.addr, + CoinStoreResource::::struct_tag(), + ) + .expect("access path in test") } /// Changes the keys for this account to the provided ones. 
@@ -187,9 +207,7 @@ impl Account { pub fn auth_key(&self) -> Vec { match &self.pubkey { AccountPublicKey::Ed25519(pk) => AuthenticationKey::ed25519(pk), - AccountPublicKey::Keyless(pk) => { - AuthenticationKey::any_key(AnyPublicKey::keyless(pk.clone())) - }, + AccountPublicKey::AnyPublicKey(pk) => AuthenticationKey::any_key(pk.clone()), } .to_vec() } @@ -381,7 +399,7 @@ impl CoinStore { /// Returns the Move Value for the account's CoinStore pub fn to_bytes(&self) -> Vec { - let coin_store = CoinStoreResource::new( + let coin_store = CoinStoreResource::::new( self.coin, self.frozen, self.deposit_events.clone(), @@ -391,6 +409,77 @@ impl CoinStore { } } +/// Struct that represents an account FungibleStore resource for tests. +#[derive(Clone, Debug, Eq, PartialEq)] +pub struct FungibleStore { + pub owner: AccountAddress, + pub metadata: AccountAddress, + pub balance: u64, + pub frozen: bool, + pub concurrent_balance: bool, +} + +impl FungibleStore { + pub fn new( + owner: AccountAddress, + metadata: AccountAddress, + balance: u64, + frozen: bool, + concurrent_balance: bool, + ) -> Self { + Self { + owner, + metadata, + balance, + frozen, + concurrent_balance, + } + } + + /// Retrieve the balance inside of this + pub fn balance(&self) -> u64 { + self.balance + } + + pub fn to_bytes(&self) -> Vec { + let primary_store_object_address = primary_apt_store(self.owner); + let mut object_group = ObjectGroupResource::default(); + object_group.insert( + ObjectCoreResource::struct_tag(), + bcs::to_bytes(&ObjectCoreResource::new( + self.owner, + false, + new_event_handle(0, primary_store_object_address), + )) + .unwrap(), + ); + object_group.insert( + FungibleStoreResource::struct_tag(), + bcs::to_bytes(&FungibleStoreResource::new( + self.metadata, + if self.concurrent_balance { + 0 + } else { + self.balance + }, + self.frozen, + )) + .unwrap(), + ); + if self.concurrent_balance { + object_group.insert( + ConcurrentFungibleBalanceResource::struct_tag(), + 
bcs::to_bytes(&ConcurrentFungibleBalanceResource::new(self.balance)).unwrap(), + ); + } + object_group.insert( + MigrationFlag::struct_tag(), + bcs::to_bytes(&MigrationFlag::default()).unwrap(), + ); + bcs::to_bytes(&object_group).unwrap() + } +} + //--------------------------------------------------------------------------- // Account resource representation //--------------------------------------------------------------------------- @@ -404,7 +493,8 @@ pub struct AccountData { sequence_number: u64, coin_register_events: EventHandle, key_rotation_events: EventHandle, - coin_store: CoinStore, + coin_store: Option, + fungible_store: Option, } fn new_event_handle(count: u64, address: AccountAddress) -> EventHandle { @@ -416,7 +506,7 @@ impl AccountData { /// /// This constructor is non-deterministic and should not be used against golden file. pub fn new(balance: u64, sequence_number: u64) -> Self { - Self::with_account(Account::new(), balance, sequence_number) + Self::with_account(Account::new(), balance, sequence_number, false, false) } pub fn increment_sequence_number(&mut self) { @@ -427,12 +517,33 @@ impl AccountData { /// /// Most tests will want to use this constructor. pub fn new_from_seed(seed: &mut KeyGen, balance: u64, sequence_number: u64) -> Self { - Self::with_account(Account::new_from_seed(seed), balance, sequence_number) + Self::with_account( + Account::new_from_seed(seed), + balance, + sequence_number, + false, + false, + ) } /// Creates a new `AccountData` with the provided account. 
- pub fn with_account(account: Account, balance: u64, sequence_number: u64) -> Self { - Self::with_account_and_event_counts(account, balance, sequence_number, 0, 0) + pub fn with_account( + account: Account, + balance: u64, + sequence_number: u64, + use_fa_apt: bool, + use_concurrent_balance: bool, + ) -> Self { + if use_fa_apt { + Self::with_account_and_fungible_store( + account, + balance, + sequence_number, + use_concurrent_balance, + ) + } else { + Self::with_account_and_event_counts(account, balance, sequence_number, 0, 0) + } } /// Creates a new `AccountData` with the provided account. @@ -443,7 +554,7 @@ impl AccountData { sequence_number: u64, ) -> Self { let account = Account::with_keypair(privkey, pubkey); - Self::with_account(account, balance, sequence_number) + Self::with_account(account, balance, sequence_number, false, false) } /// Creates a new `AccountData` with custom parameters. @@ -457,11 +568,36 @@ impl AccountData { let addr = *account.address(); Self { account, - coin_store: CoinStore::new( + coin_store: Some(CoinStore::new( balance, new_event_handle(received_events_count, addr), new_event_handle(sent_events_count, addr), - ), + )), + fungible_store: None, + sequence_number, + coin_register_events: new_event_handle(0, addr), + key_rotation_events: new_event_handle(1, addr), + } + } + + /// Creates a new `AccountData` with custom parameters. 
+ pub fn with_account_and_fungible_store( + account: Account, + fungible_balance: u64, + sequence_number: u64, + use_concurrent_balance: bool, + ) -> Self { + let addr = *account.address(); + Self { + account, + coin_store: None, + fungible_store: Some(FungibleStore::new( + addr, + AccountAddress::TEN, + fungible_balance, + false, + use_concurrent_balance, + )), sequence_number, coin_register_events: new_event_handle(0, addr), key_rotation_events: new_event_handle(1, addr), @@ -501,16 +637,30 @@ impl AccountData { /// Creates a writeset that contains the account data and can be patched to the storage /// directly. pub fn to_writeset(&self) -> WriteSet { - let write_set = vec![ - ( - StateKey::resource_typed::(self.address()).unwrap(), - WriteOp::legacy_modification(self.to_bytes().into()), - ), - ( - StateKey::resource_typed::(self.address()).unwrap(), - WriteOp::legacy_modification(self.coin_store.to_bytes().into()), - ), - ]; + let mut write_set = vec![( + StateKey::resource_typed::(self.address()).unwrap(), + WriteOp::legacy_modification(self.to_bytes().into()), + )]; + + if let Some(coin_store) = &self.coin_store { + write_set.push(( + StateKey::resource_typed::>(self.address()) + .unwrap(), + WriteOp::legacy_modification(coin_store.to_bytes().into()), + )); + } + + if let Some(fungible_store) = &self.fungible_store { + let primary_store_object_address = primary_apt_store(*self.address()); + + write_set.push(( + StateKey::resource_group( + &primary_store_object_address, + &ObjectGroupResource::struct_tag(), + ), + WriteOp::legacy_modification(fungible_store.to_bytes().into()), + )); + } WriteSetMut::new(write_set).freeze().unwrap() } @@ -534,8 +684,12 @@ impl AccountData { } /// Returns the initial balance. 
- pub fn balance(&self) -> u64 { - self.coin_store.coin() + pub fn coin_balance(&self) -> Option { + self.coin_store.as_ref().map(CoinStore::coin) + } + + pub fn fungible_balance(&self) -> Option { + self.fungible_store.as_ref().map(FungibleStore::balance) } /// Returns the initial sequence number. @@ -545,21 +699,21 @@ impl AccountData { /// Returns the unique key for this sent events stream. pub fn sent_events_key(&self) -> &EventKey { - self.coin_store.withdraw_events.key() + self.coin_store.as_ref().unwrap().withdraw_events.key() } /// Returns the initial sent events count. pub fn sent_events_count(&self) -> u64 { - self.coin_store.withdraw_events.count() + self.coin_store.as_ref().unwrap().withdraw_events.count() } /// Returns the unique key for this received events stream. pub fn received_events_key(&self) -> &EventKey { - self.coin_store.deposit_events.key() + self.coin_store.as_ref().unwrap().deposit_events.key() } /// Returns the initial received events count. pub fn received_events_count(&self) -> u64 { - self.coin_store.deposit_events.count() + self.coin_store.as_ref().unwrap().deposit_events.count() } } diff --git a/aptos-move/e2e-tests/src/account_universe.rs b/aptos-move/e2e-tests/src/account_universe.rs index 2283c7392c8..a2ec9a1ab78 100644 --- a/aptos-move/e2e-tests/src/account_universe.rs +++ b/aptos-move/e2e-tests/src/account_universe.rs @@ -128,7 +128,7 @@ pub struct AccountCurrent { impl AccountCurrent { fn new(initial_data: AccountData) -> Self { - let balance = initial_data.balance(); + let balance = initial_data.coin_balance().unwrap(); let sequence_number = initial_data.sequence_number(); let sent_events_count = initial_data.sent_events_count(); let received_events_count = initial_data.received_events_count(); @@ -401,7 +401,7 @@ pub fn assert_accounts_match( .read_account_resource(account.account()) .expect("account resource must exist"); let coin_store_resource = executor - .read_coin_store_resource(account.account()) + 
.read_apt_coin_store_resource(account.account()) .expect("account balance resource must exist"); let auth_key = account.account().auth_key(); prop_assert_eq!( diff --git a/aptos-move/e2e-tests/src/account_universe/create_account.rs b/aptos-move/e2e-tests/src/account_universe/create_account.rs index c5a760fe056..db2b1f57228 100644 --- a/aptos-move/e2e-tests/src/account_universe/create_account.rs +++ b/aptos-move/e2e-tests/src/account_universe/create_account.rs @@ -59,6 +59,8 @@ impl AUTransactionGen for CreateAccountGen { self.new_account.clone(), self.amount, 0, + false, + false, )); } else { gas_used = 0; diff --git a/aptos-move/e2e-tests/src/data_store.rs b/aptos-move/e2e-tests/src/data_store.rs index 8013243ca36..b17d54afcf2 100644 --- a/aptos-move/e2e-tests/src/data_store.rs +++ b/aptos-move/e2e-tests/src/data_store.rs @@ -14,6 +14,7 @@ use aptos_types::{ }, transaction::ChangeSet, write_set::{TransactionWrite, WriteSet}, + AptosCoinType, }; use aptos_vm_genesis::{ generate_genesis_change_set_for_mainnet, generate_genesis_change_set_for_testing, @@ -105,7 +106,7 @@ impl FakeDataStore { /// Adds CoinInfo to this data store. 
pub fn add_coin_info(&mut self) { - let coin_info = CoinInfoResource::random(u128::MAX); + let coin_info = CoinInfoResource::::random(u128::MAX); let write_set = coin_info.to_writeset(0).expect("access path in test"); self.add_write_set(&write_set) } diff --git a/aptos-move/e2e-tests/src/executor.rs b/aptos-move/e2e-tests/src/executor.rs index 3ef32daeba3..018472fe9a9 100644 --- a/aptos-move/e2e-tests/src/executor.rs +++ b/aptos-move/e2e-tests/src/executor.rs @@ -24,8 +24,8 @@ use aptos_gas_schedule::{AptosGasParameters, InitialGasSchedule, LATEST_GAS_FEAT use aptos_keygen::KeyGen; use aptos_types::{ account_config::{ - new_block_event_key, AccountResource, CoinInfoResource, CoinStoreResource, NewBlockEvent, - CORE_CODE_ADDRESS, + new_block_event_key, AccountResource, CoinInfoResource, CoinStoreResource, + ConcurrentSupply, NewBlockEvent, ObjectGroupResource, CORE_CODE_ADDRESS, }, block_executor::config::{ BlockExecutorConfig, BlockExecutorConfigFromOnchain, BlockExecutorLocalConfig, @@ -34,7 +34,7 @@ use aptos_types::{ chain_id::ChainId, contract_event::ContractEvent, move_utils::MemberId, - on_chain_config::{AptosVersion, OnChainConfig, ValidatorSet}, + on_chain_config::{AptosVersion, FeatureFlag, Features, OnChainConfig, ValidatorSet}, state_store::{state_key::StateKey, state_value::StateValue, StateView, TStateView}, transaction::{ signature_verified_transaction::{ @@ -44,7 +44,8 @@ use aptos_types::{ TransactionPayload, TransactionStatus, VMValidatorResult, ViewFunctionOutput, }, vm_status::VMStatus, - write_set::WriteSet, + write_set::{WriteOp, WriteSet, WriteSetMut}, + AptosCoinType, CoinType, }; use aptos_vm::{ block_executor::{AptosTransactionOutput, BlockAptosVM}, @@ -63,14 +64,14 @@ use bytes::Bytes; use move_core_types::{ account_address::AccountAddress, identifier::Identifier, - language_storage::{ModuleId, TypeTag}, - move_resource::MoveResource, + language_storage::{ModuleId, StructTag, TypeTag}, + move_resource::{MoveResource, MoveStructType}, }; 
use move_vm_runtime::module_traversal::{TraversalContext, TraversalStorage}; use move_vm_types::gas::UnmeteredGasMeter; use serde::Serialize; use std::{ - collections::BTreeSet, + collections::{BTreeMap, BTreeSet}, env, fs::{self, OpenOptions}, io::Write, @@ -367,8 +368,30 @@ impl FakeExecutor { pub fn new_account_data_at(&mut self, addr: AccountAddress) -> AccountData { // The below will use the genesis keypair but that should be fine. let acc = Account::new_genesis_account(addr); + // Mint the account 10M Aptos coins (with 8 decimals). - let data = AccountData::with_account(acc, 1_000_000_000_000_000, 0); + self.store_and_fund_account(acc, 1_000_000_000_000_000, 0) + } + + pub fn store_and_fund_account( + &mut self, + account: Account, + balance: u64, + seq_num: u64, + ) -> AccountData { + let features = Features::fetch_config(&self.data_store).unwrap_or_default(); + let use_fa_balance = features.is_enabled(FeatureFlag::NEW_ACCOUNTS_DEFAULT_TO_FA_APT_STORE); + let use_concurrent_balance = + features.is_enabled(FeatureFlag::DEFAULT_TO_CONCURRENT_FUNGIBLE_BALANCE); + + // Mint the account 10M Aptos coins (with 8 decimals). + let data = AccountData::with_account( + account, + balance, + seq_num, + use_fa_balance, + use_concurrent_balance, + ); self.add_account_data(&data); data } @@ -385,20 +408,60 @@ impl FakeExecutor { /// Adds an account to this executor's data store. pub fn add_account_data(&mut self, account_data: &AccountData) { self.data_store.add_account_data(account_data); - let new_added_supply = account_data.balance(); // When a new account data with balance is initialized. The total_supply should be updated // correspondingly to be consistent with the global state. // if new_added_supply = 0, it is a noop. 
- if new_added_supply != 0 { - let coin_info_resource = self - .read_coin_info_resource() - .expect("coin info must exist in data store"); - let old_supply = self.read_coin_supply().unwrap(); - self.data_store.add_write_set( - &coin_info_resource - .to_writeset(old_supply + (new_added_supply as u128)) + + if let Some(new_added_supply) = account_data.coin_balance() { + if new_added_supply != 0 { + let coin_info_resource = self + .read_apt_coin_info_resource() + .expect("coin info must exist in data store"); + let old_supply = self.read_coin_supply().unwrap(); + self.data_store.add_write_set( + &coin_info_resource + .to_writeset(old_supply + (new_added_supply as u128)) + .unwrap(), + ) + } + } + + if let Some(new_added_supply) = account_data.fungible_balance() { + if new_added_supply != 0 { + let mut fa_resource_group = self + .read_resource_group::(&AccountAddress::TEN) + .expect("resource group must exist in data store"); + let mut supply = bcs::from_bytes::( + fa_resource_group + .group + .get(&ConcurrentSupply::struct_tag()) + .unwrap(), + ) + .unwrap(); + supply + .current + .set(supply.current.get() + new_added_supply as u128); + fa_resource_group + .group + .insert( + ConcurrentSupply::struct_tag(), + bcs::to_bytes(&supply).unwrap(), + ) + .unwrap(); + self.data_store.add_write_set( + &WriteSetMut::new(vec![( + StateKey::resource_group( + &AccountAddress::TEN, + &ObjectGroupResource::struct_tag(), + ), + WriteOp::legacy_modification( + bcs::to_bytes(&fa_resource_group).unwrap().into(), + ), + )]) + .freeze() .unwrap(), - ) + ) + } } } @@ -429,6 +492,37 @@ impl FakeExecutor { bcs::from_bytes(&data_blob).ok() } + pub fn read_resource_group(&self, addr: &AccountAddress) -> Option { + let data_blob = TStateView::get_state_value_bytes( + &self.data_store, + &StateKey::resource_group(addr, &T::struct_tag()), + ) + .expect("account must exist in data store") + .unwrap_or_else(|| panic!("Can't fetch {} resource group for {}", T::STRUCT_NAME, addr)); + 
bcs::from_bytes(&data_blob).ok() + } + + pub fn read_resource_from_group( + &self, + addr: &AccountAddress, + resource_group_tag: &StructTag, + ) -> Option { + let bytes_opt = TStateView::get_state_value_bytes( + &self.data_store, + &StateKey::resource_group(addr, resource_group_tag), + ) + .expect("account must exist in data store"); + + let group: Option>> = bytes_opt + .map(|bytes| bcs::from_bytes(&bytes)) + .transpose() + .unwrap(); + group + .and_then(|g| g.get(&T::struct_tag()).map(|b| bcs::from_bytes(b))) + .transpose() + .unwrap() + } + /// Reads the resource `Value` for an account under the given address from /// this executor's data store. pub fn read_account_resource_at_address( @@ -439,8 +533,11 @@ impl FakeExecutor { } /// Reads the CoinStore resource value for an account from this executor's data store. - pub fn read_coin_store_resource(&self, account: &Account) -> Option { - self.read_coin_store_resource_at_address(account.address()) + pub fn read_apt_coin_store_resource( + &self, + account: &Account, + ) -> Option> { + self.read_apt_coin_store_resource_at_address(account.address()) } /// Reads supply from CoinInfo resource value from this executor's data store. @@ -462,16 +559,16 @@ impl FakeExecutor { } /// Reads the CoinInfo resource value from this executor's data store. - pub fn read_coin_info_resource(&self) -> Option { - self.read_resource(&AccountAddress::ONE) + pub fn read_apt_coin_info_resource(&self) -> Option> { + self.read_resource(&AptosCoinType::coin_info_address()) } /// Reads the CoinStore resource value for an account under the given address from this executor's /// data store. 
- pub fn read_coin_store_resource_at_address( + pub fn read_apt_coin_store_resource_at_address( &self, addr: &AccountAddress, - ) -> Option { + ) -> Option> { self.read_resource(addr) } diff --git a/aptos-move/e2e-testsuite/src/tests/create_account.rs b/aptos-move/e2e-testsuite/src/tests/create_account.rs index 0a16e65d97c..0d2a48056c5 100644 --- a/aptos-move/e2e-testsuite/src/tests/create_account.rs +++ b/aptos-move/e2e-testsuite/src/tests/create_account.rs @@ -35,7 +35,7 @@ fn create_account() { .expect("sender must exist"); let updated_receiver_balance = executor - .read_coin_store_resource(&new_account) + .read_apt_coin_store_resource(&new_account) .expect("receiver balance must exist"); assert_eq!(initial_amount, updated_receiver_balance.coin()); assert_eq!(1, updated_sender.sequence_number()); diff --git a/aptos-move/e2e-testsuite/src/tests/peer_to_peer.rs b/aptos-move/e2e-testsuite/src/tests/peer_to_peer.rs index dfb5884450d..bee9a68986d 100644 --- a/aptos-move/e2e-testsuite/src/tests/peer_to_peer.rs +++ b/aptos-move/e2e-testsuite/src/tests/peer_to_peer.rs @@ -40,10 +40,10 @@ fn single_peer_to_peer_with_event() { .read_account_resource(sender.account()) .expect("sender must exist"); let updated_sender_balance = executor - .read_coin_store_resource(sender.account()) + .read_apt_coin_store_resource(sender.account()) .expect("sender balance must exist"); let updated_receiver_balance = executor - .read_coin_store_resource(receiver.account()) + .read_apt_coin_store_resource(receiver.account()) .expect("receiver balance must exist"); assert_eq!(receiver_balance, updated_receiver_balance.coin()); assert_eq!(sender_balance, updated_sender_balance.coin()); @@ -102,10 +102,10 @@ fn few_peer_to_peer_with_event() { } let original_sender_balance = executor - .read_coin_store_resource(sender.account()) + .read_apt_coin_store_resource(sender.account()) .expect("sender balance must exist"); let original_receiver_balance = executor - 
.read_coin_store_resource(receiver.account()) + .read_apt_coin_store_resource(receiver.account()) .expect("receiver balcne must exist"); executor.apply_write_set(txn_output.write_set()); @@ -116,10 +116,10 @@ fn few_peer_to_peer_with_event() { .read_account_resource(sender.account()) .expect("sender must exist"); let updated_sender_balance = executor - .read_coin_store_resource(sender.account()) + .read_apt_coin_store_resource(sender.account()) .expect("sender balance must exist"); let updated_receiver_balance = executor - .read_coin_store_resource(receiver.account()) + .read_apt_coin_store_resource(receiver.account()) .expect("receiver balance must exist"); assert_eq!(receiver_balance, updated_receiver_balance.coin()); assert_eq!(sender_balance, updated_sender_balance.coin()); @@ -249,12 +249,12 @@ pub(crate) fn check_and_apply_transfer_output( .read_account_resource(sender) .expect("sender must exist"); let sender_balance = executor - .read_coin_store_resource(sender) + .read_apt_coin_store_resource(sender) .expect("sender balance must exist"); let sender_initial_balance = sender_balance.coin(); let sender_seq_num = sender_resource.sequence_number(); let receiver_initial_balance = executor - .read_coin_store_resource(receiver) + .read_apt_coin_store_resource(receiver) .expect("receiver balance must exist") .coin(); @@ -269,10 +269,10 @@ pub(crate) fn check_and_apply_transfer_output( .read_account_resource(sender) .expect("sender must exist"); let updated_sender_balance = executor - .read_coin_store_resource(sender) + .read_apt_coin_store_resource(sender) .expect("sender balance must exist"); let updated_receiver_balance = executor - .read_coin_store_resource(receiver) + .read_apt_coin_store_resource(receiver) .expect("receiver balance must exist"); assert_eq!(receiver_balance, updated_receiver_balance.coin()); assert_eq!(sender_balance, updated_sender_balance.coin()); diff --git a/aptos-move/e2e-testsuite/src/tests/scripts.rs 
b/aptos-move/e2e-testsuite/src/tests/scripts.rs index 881f35dd674..413b7c845fb 100644 --- a/aptos-move/e2e-testsuite/src/tests/scripts.rs +++ b/aptos-move/e2e-testsuite/src/tests/scripts.rs @@ -63,7 +63,7 @@ fn script_code_unverifiable() { .read_account_resource(sender.account()) .expect("sender must exist"); let updated_sender_balance = executor - .read_coin_store_resource(sender.account()) + .read_apt_coin_store_resource(sender.account()) .expect("sender balance must exist"); assert_eq!(balance, updated_sender_balance.coin()); assert_eq!(11, updated_sender.sequence_number()); @@ -142,7 +142,7 @@ fn script_none_existing_module_dep() { .read_account_resource(sender.account()) .expect("sender must exist"); let updated_sender_balance = executor - .read_coin_store_resource(sender.account()) + .read_apt_coin_store_resource(sender.account()) .expect("sender balance must exist"); assert_eq!(balance, updated_sender_balance.coin()); assert_eq!(11, updated_sender.sequence_number()); @@ -221,7 +221,7 @@ fn script_non_existing_function_dep() { .read_account_resource(sender.account()) .expect("sender must exist"); let updated_sender_balance = executor - .read_coin_store_resource(sender.account()) + .read_apt_coin_store_resource(sender.account()) .expect("sender balance must exist"); assert_eq!(balance, updated_sender_balance.coin()); assert_eq!(11, updated_sender.sequence_number()); @@ -301,7 +301,7 @@ fn script_bad_sig_function_dep() { .read_account_resource(sender.account()) .expect("sender must exist"); let updated_sender_balance = executor - .read_coin_store_resource(sender.account()) + .read_apt_coin_store_resource(sender.account()) .expect("sender balance must exist"); assert_eq!(balance, updated_sender_balance.coin()); assert_eq!(11, updated_sender.sequence_number()); @@ -367,7 +367,7 @@ fn script_type_argument_module_does_not_exist() { .read_account_resource(sender.account()) .expect("sender must exist"); let updated_sender_balance = executor - 
.read_coin_store_resource(sender.account()) + .read_apt_coin_store_resource(sender.account()) .expect("sender balance must exist"); assert_eq!(balance, updated_sender_balance.coin()); assert_eq!(11, updated_sender.sequence_number()); @@ -435,7 +435,7 @@ fn script_nested_type_argument_module_does_not_exist() { .read_account_resource(sender.account()) .expect("sender must exist"); let updated_sender_balance = executor - .read_coin_store_resource(sender.account()) + .read_apt_coin_store_resource(sender.account()) .expect("sender balance must exist"); assert_eq!(balance, updated_sender_balance.coin()); assert_eq!(11, updated_sender.sequence_number()); @@ -516,7 +516,7 @@ fn forbid_script_emitting_events() { .read_account_resource(sender.account()) .expect("sender must exist"); let updated_sender_balance = executor - .read_coin_store_resource(sender.account()) + .read_apt_coin_store_resource(sender.account()) .expect("sender balance must exist"); assert_eq!(balance, updated_sender_balance.coin()); assert_eq!(11, updated_sender.sequence_number()); diff --git a/aptos-move/framework/README.md b/aptos-move/framework/README.md index d308719bb50..31f6e61e81b 100644 --- a/aptos-move/framework/README.md +++ b/aptos-move/framework/README.md @@ -79,7 +79,7 @@ The overall structure of the Aptos Framework is as follows: ├── aptos-token # Sources, testing and generated documentation for Aptos token component ├── aptos-stdlib # Sources, testing and generated documentation for Aptos stdlib component ├── move-stdlib # Sources, testing and generated documentation for Move stdlib component -├── cached-packages # Tooling to generate SDK from mvoe sources. +├── cached-packages # Tooling to generate SDK from move sources. ├── src # Compilation and generation of information from Move source files in the Aptos Framework. 
Not designed to be used as a Rust library ├── releases # Move release bundles └── tests diff --git a/aptos-move/framework/aptos-framework/doc/account.md b/aptos-move/framework/aptos-framework/doc/account.md index 276d6bd47b6..ce49d6226cc 100644 --- a/aptos-move/framework/aptos-framework/doc/account.md +++ b/aptos-move/framework/aptos-framework/doc/account.md @@ -1542,7 +1542,7 @@ Revoke the rotation capability offer given to to_be_revoked_recipient_addr
public entry fun revoke_rotation_capability(account: &signer, to_be_revoked_address: address) acquires Account {
     assert!(exists_at(to_be_revoked_address), error::not_found(EACCOUNT_DOES_NOT_EXIST));
     let addr = signer::address_of(account);
-    let account_resource = borrow_global_mut<Account>(addr);
+    let account_resource = borrow_global<Account>(addr);
     assert!(
         option::contains(&account_resource.rotation_capability_offer.for, &to_be_revoked_address),
         error::not_found(ENO_SUCH_ROTATION_CAPABILITY_OFFER)
@@ -1712,7 +1712,7 @@ has a signer capability offer from accoun
 
public entry fun revoke_signer_capability(account: &signer, to_be_revoked_address: address) acquires Account {
     assert!(exists_at(to_be_revoked_address), error::not_found(EACCOUNT_DOES_NOT_EXIST));
     let addr = signer::address_of(account);
-    let account_resource = borrow_global_mut<Account>(addr);
+    let account_resource = borrow_global<Account>(addr);
     assert!(
         option::contains(&account_resource.signer_capability_offer.for, &to_be_revoked_address),
         error::not_found(ENO_SUCH_SIGNER_CAPABILITY)
@@ -2194,7 +2194,7 @@ Capability based functions for efficient use.
     signed_message_bytes: vector<u8>,
     message: T,
 ) acquires Account {
-    let account_resource = borrow_global_mut<Account>(account);
+    let account_resource = borrow_global<Account>(account);
     // Verify that the `SignerCapabilityOfferProofChallengeV2` has the right information and is signed by the account owner's key
     if (account_scheme == ED25519_SCHEME) {
         let pubkey = ed25519::new_unvalidated_public_key_from_bytes(account_public_key);
@@ -3151,6 +3151,7 @@ The value of signer_capability_offer.for of Account resource under the signer is
 pragma aborts_if_is_strict = false;
 aborts_if [abstract] false;
 ensures [abstract] result == spec_create_resource_address(source, seed);
+ensures [abstract] source != result;
 
diff --git a/aptos-move/framework/aptos-framework/doc/aptos_account.md b/aptos-move/framework/aptos-framework/doc/aptos_account.md index 33cd0ff87e5..82fd7df7b7f 100644 --- a/aptos-move/framework/aptos-framework/doc/aptos_account.md +++ b/aptos-move/framework/aptos-framework/doc/aptos_account.md @@ -879,10 +879,6 @@ Limit the address of auth_key is not @vm_reserved / @aptos_framework / @aptos_to let account_addr_source = signer::address_of(source); let coin_store_source = global<coin::CoinStore<AptosCoin>>(account_addr_source); let balance_source = coin_store_source.coin.value; -requires forall i in 0..len(recipients): - recipients[i] != account_addr_source; -requires exists i in 0..len(recipients): - amounts[i] > 0; aborts_if len(recipients) != len(amounts); aborts_if exists i in 0..len(recipients): !account::exists_at(recipients[i]) && length_judgment(recipients[i]); @@ -920,7 +916,6 @@ Limit the address of auth_key is not @vm_reserved / @aptos_framework / @aptos_to
pragma verify = false;
 let account_addr_source = signer::address_of(source);
-requires account_addr_source != to;
 include CreateAccountTransferAbortsIf;
 include GuidAbortsIf<AptosCoin>;
 include WithdrawAbortsIf<AptosCoin>{from: source};
@@ -948,10 +943,6 @@ Limit the address of auth_key is not @vm_reserved / @aptos_framework / @aptos_to
 let account_addr_source = signer::address_of(from);
 let coin_store_source = global<coin::CoinStore<CoinType>>(account_addr_source);
 let balance_source = coin_store_source.coin.value;
-requires forall i in 0..len(recipients):
-    recipients[i] != account_addr_source;
-requires exists i in 0..len(recipients):
-    amounts[i] > 0;
 // This enforces high-level requirement 7:
 aborts_if len(recipients) != len(amounts);
 aborts_if exists i in 0..len(recipients):
@@ -992,7 +983,6 @@ Limit the address of auth_key is not @vm_reserved / @aptos_framework / @aptos_to
 
 
pragma verify = false;
 let account_addr_source = signer::address_of(from);
-requires account_addr_source != to;
 include CreateAccountTransferAbortsIf;
 include WithdrawAbortsIf<CoinType>;
 include GuidAbortsIf<CoinType>;
diff --git a/aptos-move/framework/aptos-framework/doc/coin.md b/aptos-move/framework/aptos-framework/doc/coin.md
index 07e97171f99..f43569cd078 100644
--- a/aptos-move/framework/aptos-framework/doc/coin.md
+++ b/aptos-move/framework/aptos-framework/doc/coin.md
@@ -1963,7 +1963,7 @@ Return the BurnRef with the hot potato receipt.
     let metadata = assert_paired_metadata_exists<CoinType>();
     let metadata_addr = object_address(&metadata);
     assert!(exists<PairedFungibleAssetRefs>(metadata_addr), error::internal(EPAIRED_FUNGIBLE_ASSET_REFS_NOT_FOUND));
-    let burn_ref_opt = &mut borrow_global_mut<PairedFungibleAssetRefs>(metadata_addr).burn_ref_opt;
+    let burn_ref_opt = &borrow_global<PairedFungibleAssetRefs>(metadata_addr).burn_ref_opt;
     assert!(option::is_some(burn_ref_opt), error::not_found(EBURN_REF_NOT_FOUND));
     option::borrow(burn_ref_opt)
 }
@@ -3100,7 +3100,7 @@ available.
 
     // Can only succeed once on-chain governance agreed on the upgrade.
     assert!(
-        borrow_global_mut<SupplyConfig>(@aptos_framework).allow_upgrades,
+        borrow_global<SupplyConfig>(@aptos_framework).allow_upgrades,
         error::permission_denied(ECOIN_SUPPLY_UPGRADE_NOT_SUPPORTED)
     );
 
diff --git a/aptos-move/framework/aptos-framework/doc/fungible_asset.md b/aptos-move/framework/aptos-framework/doc/fungible_asset.md
index f12ee14942b..17ee3056667 100644
--- a/aptos-move/framework/aptos-framework/doc/fungible_asset.md
+++ b/aptos-move/framework/aptos-framework/doc/fungible_asset.md
@@ -3170,19 +3170,29 @@ Mutate specified fields of the fungible asset's Metadata>(metadata_address);
 
     if (option::is_some(&name)){
-        mutable_metadata.name = option::extract(&mut name);
+        let name = option::extract(&mut name);
+        assert!(string::length(&name) <= MAX_NAME_LENGTH, error::out_of_range(ENAME_TOO_LONG));
+        mutable_metadata.name = name;
     };
     if (option::is_some(&symbol)){
-        mutable_metadata.symbol = option::extract(&mut symbol);
+        let symbol = option::extract(&mut symbol);
+        assert!(string::length(&symbol) <= MAX_SYMBOL_LENGTH, error::out_of_range(ESYMBOL_TOO_LONG));
+        mutable_metadata.symbol = symbol;
     };
     if (option::is_some(&decimals)){
-        mutable_metadata.decimals = option::extract(&mut decimals);
+        let decimals = option::extract(&mut decimals);
+        assert!(decimals <= MAX_DECIMALS, error::out_of_range(EDECIMALS_TOO_LARGE));
+        mutable_metadata.decimals = decimals;
     };
     if (option::is_some(&icon_uri)){
-        mutable_metadata.icon_uri = option::extract(&mut icon_uri);
+        let icon_uri = option::extract(&mut icon_uri);
+        assert!(string::length(&icon_uri) <= MAX_URI_LENGTH, error::out_of_range(EURI_TOO_LONG));
+        mutable_metadata.icon_uri = icon_uri;
     };
     if (option::is_some(&project_uri)){
-        mutable_metadata.project_uri = option::extract(&mut project_uri);
+        let project_uri = option::extract(&mut project_uri);
+        assert!(string::length(&project_uri) <= MAX_URI_LENGTH, error::out_of_range(EURI_TOO_LONG));
+        mutable_metadata.project_uri = project_uri;
     };
 }
 
diff --git a/aptos-move/framework/aptos-framework/doc/jwks.md b/aptos-move/framework/aptos-framework/doc/jwks.md index 2f3e0f34e30..5a0b02e22ff 100644 --- a/aptos-move/framework/aptos-framework/doc/jwks.md +++ b/aptos-move/framework/aptos-framework/doc/jwks.md @@ -30,6 +30,7 @@ have a simple layout which is easily accessible in Rust. - [Resource `FederatedJWKs`](#0x1_jwks_FederatedJWKs) - [Constants](#@Constants_0) - [Function `patch_federated_jwks`](#0x1_jwks_patch_federated_jwks) +- [Function `update_federated_jwk_set`](#0x1_jwks_update_federated_jwk_set) - [Function `get_patched_jwk`](#0x1_jwks_get_patched_jwk) - [Function `try_get_patched_jwk`](#0x1_jwks_try_get_patched_jwk) - [Function `upsert_oidc_provider`](#0x1_jwks_upsert_oidc_provider) @@ -660,6 +661,15 @@ JWKs for federated keyless accounts are stored in this resource. + + + + +
const EINVALID_FEDERATED_JWK_SET: u64 = 9;
+
+ + + @@ -775,11 +785,8 @@ Note: If too large, validators waste work reading it for invalid TXN signatures. ## Function `patch_federated_jwks` Called by a federated keyless dapp owner to install the JWKs for the federated OIDC provider (e.g., Auth0, AWS -Cognito, etc). - -For type-safety, we explicitly use a struct FederatedJWKs { jwks: AllProviderJWKs } instead of -reusing PatchedJWKs { jwks: AllProviderJWKs }, which is a JWK-consensus-specific struct. We'd -need to be careful how we read it in Rust (but BCS serialization should be the same). +Cognito, etc). For type-safety, we explicitly use a struct FederatedJWKs { jwks: AllProviderJWKs } instead of +reusing PatchedJWKs { jwks: AllProviderJWKs }, which is a JWK-consensus-specific struct.
public fun patch_federated_jwks(jwk_owner: &signer, patches: vector<jwks::Patch>)
@@ -816,6 +823,100 @@ need to be careful how we read it in Rust (but BCS serialization should be the s
 
 
 
+
+
+
+
+## Function `update_federated_jwk_set`
+
+This can be called to install or update a set of JWKs for a federated OIDC provider.  This function should
+be invoked to initially install a set of JWKs or to update a set of JWKs when a keypair is rotated.
+
+The iss parameter is the value of the iss claim on the JWTs that are to be verified by the JWK set.
+kid_vec, alg_vec, e_vec, n_vec are String vectors of the JWK attributes kid, alg, e and n respectively.
+See https://datatracker.ietf.org/doc/html/rfc7517#section-4 for more details about the JWK attributes aforementioned.
+
+For the example JWK set snapshot below containing 2 keys for Google found at https://www.googleapis.com/oauth2/v3/certs -
+```json
+{
+"keys": [
+{
+"alg": "RS256",
+"use": "sig",
+"kty": "RSA",
+"n": "wNHgGSG5B5xOEQNFPW2p_6ZxZbfPoAU5VceBUuNwQWLop0ohW0vpoZLU1tAsq_S9s5iwy27rJw4EZAOGBR9oTRq1Y6Li5pDVJfmzyRNtmWCWndR-bPqhs_dkJU7MbGwcvfLsN9FSHESFrS9sfGtUX-lZfLoGux23TKdYV9EE-H-NDASxrVFUk2GWc3rL6UEMWrMnOqV9-tghybDU3fcRdNTDuXUr9qDYmhmNegYjYu4REGjqeSyIG1tuQxYpOBH-tohtcfGY-oRTS09kgsSS9Q5BRM4qqCkGP28WhlSf4ui0-norS0gKMMI1P_ZAGEsLn9p2TlYMpewvIuhjJs1thw",
+"kid": "d7b939771a7800c413f90051012d975981916d71",
+"e": "AQAB"
+},
+{
+"kty": "RSA",
+"kid": "b2620d5e7f132b52afe8875cdf3776c064249d04",
+"alg": "RS256",
+"n": "pi22xDdK2fz5gclIbDIGghLDYiRO56eW2GUcboeVlhbAuhuT5mlEYIevkxdPOg5n6qICePZiQSxkwcYMIZyLkZhSJ2d2M6Szx2gDtnAmee6o_tWdroKu0DjqwG8pZU693oLaIjLku3IK20lTs6-2TeH-pUYMjEqiFMhn-hb7wnvH_FuPTjgz9i0rEdw_Hf3Wk6CMypaUHi31y6twrMWq1jEbdQNl50EwH-RQmQ9bs3Wm9V9t-2-_Jzg3AT0Ny4zEDU7WXgN2DevM8_FVje4IgztNy29XUkeUctHsr-431_Iu23JIy6U4Kxn36X3RlVUKEkOMpkDD3kd81JPW4Ger_w",
+"e": "AQAB",
+"use": "sig"
+}
+]
+}
+```
+
+We can call update_federated_jwk_set for Google's iss - "https://accounts.google.com" and for each vector
+argument kid_vec, alg_vec, e_vec, n_vec, we set in index 0 the corresponding attribute in the first JWK and we set in index 1 the
+corresponding attribute in the second JWK as shown below.
+
+```move
+use std::string::utf8;
+aptos_framework::jwks::update_federated_jwk_set(
+jwk_owner,
+b"https://accounts.google.com",
+vector[utf8(b"d7b939771a7800c413f90051012d975981916d71"), utf8(b"b2620d5e7f132b52afe8875cdf3776c064249d04")],
+vector[utf8(b"RS256"), utf8(b"RS256")],
+vector[utf8(b"AQAB"), utf8(b"AQAB")],
+vector[
+utf8(b"wNHgGSG5B5xOEQNFPW2p_6ZxZbfPoAU5VceBUuNwQWLop0ohW0vpoZLU1tAsq_S9s5iwy27rJw4EZAOGBR9oTRq1Y6Li5pDVJfmzyRNtmWCWndR-bPqhs_dkJU7MbGwcvfLsN9FSHESFrS9sfGtUX-lZfLoGux23TKdYV9EE-H-NDASxrVFUk2GWc3rL6UEMWrMnOqV9-tghybDU3fcRdNTDuXUr9qDYmhmNegYjYu4REGjqeSyIG1tuQxYpOBH-tohtcfGY-oRTS09kgsSS9Q5BRM4qqCkGP28WhlSf4ui0-norS0gKMMI1P_ZAGEsLn9p2TlYMpewvIuhjJs1thw"),
+utf8(b"pi22xDdK2fz5gclIbDIGghLDYiRO56eW2GUcboeVlhbAuhuT5mlEYIevkxdPOg5n6qICePZiQSxkwcYMIZyLkZhSJ2d2M6Szx2gDtnAmee6o_tWdroKu0DjqwG8pZU693oLaIjLku3IK20lTs6-2TeH-pUYMjEqiFMhn-hb7wnvH_FuPTjgz9i0rEdw_Hf3Wk6CMypaUHi31y6twrMWq1jEbdQNl50EwH-RQmQ9bs3Wm9V9t-2-_Jzg3AT0Ny4zEDU7WXgN2DevM8_FVje4IgztNy29XUkeUctHsr-431_Iu23JIy6U4Kxn36X3RlVUKEkOMpkDD3kd81JPW4Ger_w")
+]
+)
+```
+
+See AIP-96 for more details about federated keyless - https://github.com/aptos-foundation/AIPs/blob/main/aips/aip-96.md
+
+NOTE: Currently only RSA keys are supported.
+
+
+
public entry fun update_federated_jwk_set(jwk_owner: &signer, iss: vector<u8>, kid_vec: vector<string::String>, alg_vec: vector<string::String>, e_vec: vector<string::String>, n_vec: vector<string::String>)
+
+ + + +
+Implementation + + +
public entry fun update_federated_jwk_set(jwk_owner: &signer, iss: vector<u8>, kid_vec: vector<String>, alg_vec: vector<String>, e_vec: vector<String>, n_vec: vector<String>) acquires FederatedJWKs {
+    assert!(!vector::is_empty(&kid_vec), error::invalid_argument(EINVALID_FEDERATED_JWK_SET));
+    let num_jwk = vector::length<String>(&kid_vec);
+    assert!(vector::length(&alg_vec) == num_jwk , error::invalid_argument(EINVALID_FEDERATED_JWK_SET));
+    assert!(vector::length(&e_vec) == num_jwk, error::invalid_argument(EINVALID_FEDERATED_JWK_SET));
+    assert!(vector::length(&n_vec) == num_jwk, error::invalid_argument(EINVALID_FEDERATED_JWK_SET));
+
+    let remove_all_patch = new_patch_remove_all();
+    let patches = vector[remove_all_patch];
+    while (!vector::is_empty(&kid_vec)) {
+        let kid = vector::pop_back(&mut kid_vec);
+        let alg = vector::pop_back(&mut alg_vec);
+        let e = vector::pop_back(&mut e_vec);
+        let n = vector::pop_back(&mut n_vec);
+        let jwk = new_rsa_jwk(kid, alg, e, n);
+        let patch = new_patch_upsert_jwk(iss, jwk);
+        vector::push_back(&mut patches, patch)
+    };
+    patch_federated_jwks(jwk_owner, patches);
+}
+
+ + +
@@ -937,7 +1038,7 @@ aptos_framework::aptos_governance::reconfigure(&framework_signer); let provider_set = if (config_buffer::does_exist<SupportedOIDCProviders>()) { config_buffer::extract<SupportedOIDCProviders>() } else { - *borrow_global_mut<SupportedOIDCProviders>(@aptos_framework) + *borrow_global<SupportedOIDCProviders>(@aptos_framework) }; let old_config_url = remove_oidc_provider_internal(&mut provider_set, name); @@ -1012,7 +1113,7 @@ aptos_framework::aptos_governance::reconfigure(&framework_signer); let provider_set = if (config_buffer::does_exist<SupportedOIDCProviders>()) { config_buffer::extract<SupportedOIDCProviders>() } else { - *borrow_global_mut<SupportedOIDCProviders>(@aptos_framework) + *borrow_global<SupportedOIDCProviders>(@aptos_framework) }; let ret = remove_oidc_provider_internal(&mut provider_set, name); config_buffer::upsert(provider_set); diff --git a/aptos-move/framework/aptos-framework/doc/multisig_account.md b/aptos-move/framework/aptos-framework/doc/multisig_account.md index a5fcc8ab9b8..74dca99c66d 100644 --- a/aptos-move/framework/aptos-framework/doc/multisig_account.md +++ b/aptos-move/framework/aptos-framework/doc/multisig_account.md @@ -82,7 +82,9 @@ and implement the governance voting logic on top. 
- [Function `next_sequence_number`](#0x1_multisig_account_next_sequence_number) - [Function `vote`](#0x1_multisig_account_vote) - [Function `available_transaction_queue_capacity`](#0x1_multisig_account_available_transaction_queue_capacity) +- [Function `create_with_existing_account_call`](#0x1_multisig_account_create_with_existing_account_call) - [Function `create_with_existing_account`](#0x1_multisig_account_create_with_existing_account) +- [Function `create_with_existing_account_and_revoke_auth_key_call`](#0x1_multisig_account_create_with_existing_account_and_revoke_auth_key_call) - [Function `create_with_existing_account_and_revoke_auth_key`](#0x1_multisig_account_create_with_existing_account_and_revoke_auth_key) - [Function `create`](#0x1_multisig_account_create) - [Function `create_with_owners`](#0x1_multisig_account_create_with_owners) @@ -1831,7 +1833,7 @@ Return the id of the last transaction that was executed (successful or failed) o
public fun last_resolved_sequence_number(multisig_account: address): u64 acquires MultisigAccount {
-    let multisig_account_resource = borrow_global_mut<MultisigAccount>(multisig_account);
+    let multisig_account_resource = borrow_global<MultisigAccount>(multisig_account);
     multisig_account_resource.last_executed_sequence_number
 }
 
@@ -1858,7 +1860,7 @@ Return the id of the next transaction created.
public fun next_sequence_number(multisig_account: address): u64 acquires MultisigAccount {
-    let multisig_account_resource = borrow_global_mut<MultisigAccount>(multisig_account);
+    let multisig_account_resource = borrow_global<MultisigAccount>(multisig_account);
     multisig_account_resource.next_sequence_number
 }
 
@@ -1886,7 +1888,7 @@ Return a bool tuple indicating whether an owner has voted and if so, whether the
public fun vote(
     multisig_account: address, sequence_number: u64, owner: address): (bool, bool) acquires MultisigAccount {
-    let multisig_account_resource = borrow_global_mut<MultisigAccount>(multisig_account);
+    let multisig_account_resource = borrow_global<MultisigAccount>(multisig_account);
     assert!(
         sequence_number > 0 && sequence_number < multisig_account_resource.next_sequence_number,
         error::invalid_argument(EINVALID_SEQUENCE_NUMBER),
@@ -1920,7 +1922,7 @@ Return a bool tuple indicating whether an owner has voted and if so, whether the
 
 
 
public fun available_transaction_queue_capacity(multisig_account: address): u64 acquires MultisigAccount {
-    let multisig_account_resource = borrow_global_mut<MultisigAccount>(multisig_account);
+    let multisig_account_resource = borrow_global<MultisigAccount>(multisig_account);
     let num_pending_transactions = multisig_account_resource.next_sequence_number - multisig_account_resource.last_executed_sequence_number - 1;
     if (num_pending_transactions > MAX_PENDING_TRANSACTIONS) {
         0
@@ -1932,6 +1934,50 @@ Return a bool tuple indicating whether an owner has voted and if so, whether the
 
 
 
+
+
+
+
+## Function `create_with_existing_account_call`
+
+Private entry function that creates a new multisig account on top of an existing account.
+
+This offers a migration path for an existing account with any type of auth key.
+
+Note that this does not revoke auth key-based control over the account. Owners should separately rotate the auth
+key after they are fully migrated to the new multisig account. Alternatively, they can call
+create_with_existing_account_and_revoke_auth_key_call instead.
+
+
+
entry fun create_with_existing_account_call(multisig_account: &signer, owners: vector<address>, num_signatures_required: u64, metadata_keys: vector<string::String>, metadata_values: vector<vector<u8>>)
+
+ + + +
+Implementation + + +
entry fun create_with_existing_account_call(
+    multisig_account: &signer,
+    owners: vector<address>,
+    num_signatures_required: u64,
+    metadata_keys: vector<String>,
+    metadata_values: vector<vector<u8>>,
+) acquires MultisigAccount {
+    create_with_owners_internal(
+        multisig_account,
+        owners,
+        num_signatures_required,
+        option::none<SignerCapability>(),
+        metadata_keys,
+        metadata_values,
+    );
+}
+
+ + +
@@ -2002,6 +2048,61 @@ create_with_existing_account_and_revoke_auth_key instead. + + + + +## Function `create_with_existing_account_and_revoke_auth_key_call` + +Private entry function that creates a new multisig account on top of an existing account and immediately rotate +the origin auth key to 0x0. + +Note: If the original account is a resource account, this does not revoke all control over it as if any +SignerCapability of the resource account still exists, it can still be used to generate the signer for the +account. + + +
entry fun create_with_existing_account_and_revoke_auth_key_call(multisig_account: &signer, owners: vector<address>, num_signatures_required: u64, metadata_keys: vector<string::String>, metadata_values: vector<vector<u8>>)
+
+ + + +
+Implementation + + +
entry fun create_with_existing_account_and_revoke_auth_key_call(
+    multisig_account: &signer,
+    owners: vector<address>,
+    num_signatures_required: u64,
+    metadata_keys: vector<String>,
+    metadata_values:vector<vector<u8>>,
+) acquires MultisigAccount {
+    create_with_owners_internal(
+        multisig_account,
+        owners,
+        num_signatures_required,
+        option::none<SignerCapability>(),
+        metadata_keys,
+        metadata_values,
+    );
+
+    // Rotate the account's auth key to 0x0, which effectively revokes control via auth key.
+    let multisig_address = address_of(multisig_account);
+    account::rotate_authentication_key_internal(multisig_account, ZERO_AUTH_KEY);
+    // This also needs to revoke any signer capability or rotation capability that exists for the account to
+    // completely remove all access to the account.
+    if (account::is_signer_capability_offered(multisig_address)) {
+        account::revoke_any_signer_capability(multisig_account);
+    };
+    if (account::is_rotation_capability_offered(multisig_address)) {
+        account::revoke_any_rotation_capability(multisig_account);
+    };
+}
+
+ + +
diff --git a/aptos-move/framework/aptos-framework/doc/object.md b/aptos-move/framework/aptos-framework/doc/object.md index bba128592ff..f3dae60d94a 100644 --- a/aptos-move/framework/aptos-framework/doc/object.md +++ b/aptos-move/framework/aptos-framework/doc/object.md @@ -604,6 +604,16 @@ generate_unique_address uses this for domain separation within its native implem + + +Objects cannot be burnt + + +
const EBURN_NOT_ALLOWED: u64 = 10;
+
+ + + The object does not allow for deletion @@ -2130,12 +2140,13 @@ objects may have cyclic dependencies. ## Function `burn` -Forcefully transfer an unwanted object to BURN_ADDRESS, ignoring whether ungated_transfer is allowed. -This only works for objects directly owned and for simplicity does not apply to indirectly owned objects. -Original owners can reclaim burnt objects any time in the future by calling unburn. +Previously allowed to burn objects, has now been disabled. Objects can still be unburnt. +Please use the test only [object::burn_object] for testing with previously burned objects. -
public entry fun burn<T: key>(owner: &signer, object: object::Object<T>)
+
+
#[deprecated]
+public entry fun burn<T: key>(_owner: &signer, _object: object::Object<T>)
 
@@ -2144,12 +2155,8 @@ Original owners can reclaim burnt objects any time in the future by calling unbu Implementation -
public entry fun burn<T: key>(owner: &signer, object: Object<T>) acquires ObjectCore {
-    let original_owner = signer::address_of(owner);
-    assert!(is_owner(object, original_owner), error::permission_denied(ENOT_OBJECT_OWNER));
-    let object_addr = object.inner;
-    move_to(&create_signer(object_addr), TombStone { original_owner });
-    transfer_raw_inner(object_addr, BURN_ADDRESS);
+
public entry fun burn<T: key>(_owner: &signer, _object: Object<T>) {
+    abort error::permission_denied(EBURN_NOT_ALLOWED)
 }
 
@@ -2441,6 +2448,33 @@ to determine the identity of the starting point of ownership. + + + + +
fun spec_create_object_address(source: address, seed: vector<u8>): address;
+
+ + + + + + + +
fun spec_create_user_derived_object_address(source: address, derive_from: address): address;
+
+ + + + + + + +
fun spec_create_guid_object_address(source: address, creation_num: u64): address;
+
+ + + ### Function `address_to_object` @@ -3245,17 +3279,14 @@ to determine the identity of the starting point of ownership. ### Function `burn` -
public entry fun burn<T: key>(owner: &signer, object: object::Object<T>)
+
#[deprecated]
+public entry fun burn<T: key>(_owner: &signer, _object: object::Object<T>)
 
-
pragma aborts_if_is_partial;
-let object_address = object.inner;
-aborts_if !exists<ObjectCore>(object_address);
-aborts_if owner(object) != signer::address_of(owner);
-aborts_if is_burnt(object);
+
aborts_if true;
 
@@ -3368,31 +3399,4 @@ to determine the identity of the starting point of ownership.
- - - - - -
fun spec_create_object_address(source: address, seed: vector<u8>): address;
-
- - - - - - - -
fun spec_create_user_derived_object_address(source: address, derive_from: address): address;
-
- - - - - - - -
fun spec_create_guid_object_address(source: address, creation_num: u64): address;
-
- - [move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-framework/doc/resource_account.md b/aptos-move/framework/aptos-framework/doc/resource_account.md index fa171c0b03c..318d15a785d 100644 --- a/aptos-move/framework/aptos-framework/doc/resource_account.md +++ b/aptos-move/framework/aptos-framework/doc/resource_account.md @@ -593,7 +593,6 @@ the SignerCapability. let container = global<Container>(source_addr); let get = len(optional_auth_key) == 0; let account = global<account::Account>(source_addr); - requires source_addr != resource_addr; aborts_if len(ZERO_AUTH_KEY) != 32; include account::exists_at(resource_addr) ==> account::CreateResourceAccountAbortsIf; include !account::exists_at(resource_addr) ==> account::CreateAccountAbortsIf {addr: resource_addr}; diff --git a/aptos-move/framework/aptos-framework/doc/stake.md b/aptos-move/framework/aptos-framework/doc/stake.md index 5a199ef91f6..017d83723ad 100644 --- a/aptos-move/framework/aptos-framework/doc/stake.md +++ b/aptos-move/framework/aptos-framework/doc/stake.md @@ -2501,7 +2501,7 @@ Initialize the validator account and give ownership to the signing account. fullnode_addresses: vector<u8>, ) acquires AllowedValidators { // Checks the public key has a valid proof-of-possession to prevent rogue-key attacks. - let pubkey_from_pop = &mut bls12381::public_key_from_bytes_with_pop( + let pubkey_from_pop = &bls12381::public_key_from_bytes_with_pop( consensus_pubkey, &proof_of_possession_from_bytes(proof_of_possession) ); @@ -2843,7 +2843,7 @@ Add coins into pool_address. this requires the corresp // Only track and validate voting power increase for active and pending_active validator. // Pending_inactive validator will be removed from the validator set in the next epoch. // Inactive validator's total stake will be tracked when they join the validator set. 
- let validator_set = borrow_global_mut<ValidatorSet>(@aptos_framework); + let validator_set = borrow_global<ValidatorSet>(@aptos_framework); // Search directly rather using get_validator_state to save on unnecessary loops. if (option::is_some(&find_validator(&validator_set.active_validators, pool_address)) || option::is_some(&find_validator(&validator_set.pending_active, pool_address))) { @@ -2999,7 +2999,7 @@ Rotate the consensus key of the validator, it'll take effect in next epoch. let validator_info = borrow_global_mut<ValidatorConfig>(pool_address); let old_consensus_pubkey = validator_info.consensus_pubkey; // Checks the public key has a valid proof-of-possession to prevent rogue-key attacks. - let pubkey_from_pop = &mut bls12381::public_key_from_bytes_with_pop( + let pubkey_from_pop = &bls12381::public_key_from_bytes_with_pop( new_consensus_pubkey, &proof_of_possession_from_bytes(proof_of_possession) ); @@ -3249,7 +3249,7 @@ This internal version can only be called by the Genesis module during Genesis. update_voting_power_increase(voting_power); // Add validator to pending_active, to be activated in the next epoch. - let validator_config = borrow_global_mut<ValidatorConfig>(pool_address); + let validator_config = borrow_global<ValidatorConfig>(pool_address); assert!(!vector::is_empty(&validator_config.consensus_pubkey), error::invalid_argument(EINVALID_PUBLIC_KEY)); // Validate the current validator set size has not exceeded the limit. @@ -3706,8 +3706,8 @@ power. 
}) { let old_validator_info = vector::borrow_mut(&mut validator_set.active_validators, i); let pool_address = old_validator_info.addr; - let validator_config = borrow_global_mut<ValidatorConfig>(pool_address); - let stake_pool = borrow_global_mut<StakePool>(pool_address); + let validator_config = borrow_global<ValidatorConfig>(pool_address); + let stake_pool = borrow_global<StakePool>(pool_address); let new_validator_info = generate_validator_info(pool_address, stake_pool, *validator_config); // A validator needs at least the min stake required to join the validator set. diff --git a/aptos-move/framework/aptos-framework/doc/staking_contract.md b/aptos-move/framework/aptos-framework/doc/staking_contract.md index 5ce4bab9733..6376f194023 100644 --- a/aptos-move/framework/aptos-framework/doc/staking_contract.md +++ b/aptos-move/framework/aptos-framework/doc/staking_contract.md @@ -91,6 +91,7 @@ pool. - [Specification](#@Specification_1) - [High-level Requirements](#high-level-req) - [Module-level Specification](#module-level-spec) + - [Struct `StakingContract`](#@Specification_1_StakingContract) - [Function `stake_pool_address`](#@Specification_1_stake_pool_address) - [Function `last_recorded_principal`](#@Specification_1_last_recorded_principal) - [Function `commission_percentage`](#@Specification_1_commission_percentage) @@ -2360,7 +2361,7 @@ Distribute all unlocked (inactive) funds according to distribution shares. // Buy all recipients out of the distribution pool. while (pool_u64::shareholders_count(distribution_pool) > 0) { let recipients = pool_u64::shareholders(distribution_pool); - let recipient = *vector::borrow(&mut recipients, 0); + let recipient = *vector::borrow(&recipients, 0); let current_shares = pool_u64::shares(distribution_pool, recipient); let amount_to_distribute = pool_u64::redeem_shares(distribution_pool, recipient, current_shares); // If the recipient is the operator, send the commission to the beneficiary instead. 
@@ -2410,7 +2411,7 @@ Assert that a staking_contract exists for the staker/operator pair.
fun assert_staking_contract_exists(staker: address, operator: address) acquires Store {
     assert!(exists<Store>(staker), error::not_found(ENO_STAKING_CONTRACT_FOUND_FOR_STAKER));
-    let staking_contracts = &mut borrow_global_mut<Store>(staker).staking_contracts;
+    let staking_contracts = &borrow_global<Store>(staker).staking_contracts;
     assert!(
         simple_map::contains_key(staking_contracts, &operator),
         error::not_found(ENO_STAKING_CONTRACT_FOUND_FOR_OPERATOR),
@@ -2763,6 +2764,62 @@ Create a new staking_contracts resource.
 
 
 
+
+
+### Struct `StakingContract`
+
+
+
struct StakingContract has store
+
+ + + +
+
+principal: u64 +
+
+ +
+
+pool_address: address +
+
+ +
+
+owner_cap: stake::OwnerCapability +
+
+ +
+
+commission_percentage: u64 +
+
+ +
+
+distribution_pool: pool_u64::Pool +
+
+ +
+
+signer_cap: account::SignerCapability +
+
+ +
+
+ + + +
invariant commission_percentage >= 0 && commission_percentage <= 100;
+
+ + + ### Function `stake_pool_address` @@ -2836,7 +2893,6 @@ Staking_contract exists the stacker/operator pair.
pragma verify_duration_estimate = 120;
-requires staking_contract.commission_percentage >= 0 && staking_contract.commission_percentage <= 100;
 let staking_contracts = global<Store>(staker).staking_contracts;
 let staking_contract = simple_map::spec_get(staking_contracts, operator);
 include ContractExistsAbortsIf;
@@ -3023,6 +3079,7 @@ Staking_contract exists the stacker/operator pair.
 let post staking_contract = simple_map::spec_get(store.staking_contracts, operator);
 let post pool_address = staking_contract.owner_cap.pool_address;
 let post new_delegated_voter = global<stake::StakePool>(pool_address).delegated_voter;
+// This enforces high-level requirement 4:
 ensures new_delegated_voter == new_voter;
 
@@ -3117,7 +3174,6 @@ Only staker or operator can call this.
pragma verify = false;
-requires amount > 0;
 let staker_address = signer::address_of(staker);
 include ContractExistsAbortsIf { staker: staker_address };
 
@@ -3137,8 +3193,6 @@ Staking_contract exists the stacker/operator pair.
pragma verify = false;
-// This enforces high-level requirement 4:
-requires staking_contract.commission_percentage >= 0 && staking_contract.commission_percentage <= 100;
 let staker_address = signer::address_of(staker);
 let staking_contracts = global<Store>(staker_address).staking_contracts;
 let staking_contract = simple_map::spec_get(staking_contracts, operator);
diff --git a/aptos-move/framework/aptos-framework/doc/vesting.md b/aptos-move/framework/aptos-framework/doc/vesting.md
index d2b59512b63..6f2d17e23f7 100644
--- a/aptos-move/framework/aptos-framework/doc/vesting.md
+++ b/aptos-move/framework/aptos-framework/doc/vesting.md
@@ -3011,7 +3011,7 @@ account.
     role: String,
     role_holder: address,
 ) acquires VestingAccountManagement, VestingContract {
-    let vesting_contract = borrow_global_mut<VestingContract>(contract_address);
+    let vesting_contract = borrow_global<VestingContract>(contract_address);
     verify_admin(admin, vesting_contract);
 
     if (!exists<VestingAccountManagement>(contract_address)) {
@@ -3135,7 +3135,7 @@ staking_contract and stake modules.
 
 
 
public fun get_vesting_account_signer(admin: &signer, contract_address: address): signer acquires VestingContract {
-    let vesting_contract = borrow_global_mut<VestingContract>(contract_address);
+    let vesting_contract = borrow_global<VestingContract>(contract_address);
     verify_admin(admin, vesting_contract);
     get_vesting_account_signer_internal(vesting_contract)
 }
@@ -3686,7 +3686,6 @@ This address should be deterministic for the same admin and vesting contract cre
 
 
schema TotalAccumulatedRewardsAbortsIf {
     vesting_contract_address: address;
-    requires staking_contract.commission_percentage >= 0 && staking_contract.commission_percentage <= 100;
     include ActiveVestingContractAbortsIf<VestingContract>{contract_address: vesting_contract_address};
     let vesting_contract = global<VestingContract>(vesting_contract_address);
     let staker = vesting_contract_address;
@@ -3892,7 +3891,6 @@ This address should be deterministic for the same admin and vesting contract cre
 
 
pragma verify = false;
 aborts_if len(contract_addresses) == 0;
-include PreconditionAbortsIf;
 
@@ -3927,7 +3925,6 @@ This address should be deterministic for the same admin and vesting contract cre
pragma verify = false;
 aborts_if len(contract_addresses) == 0;
-include PreconditionAbortsIf;
 
diff --git a/aptos-move/framework/aptos-framework/doc/voting.md b/aptos-move/framework/aptos-framework/doc/voting.md index dfea6bb54d7..946e707b6e2 100644 --- a/aptos-move/framework/aptos-framework/doc/voting.md +++ b/aptos-move/framework/aptos-framework/doc/voting.md @@ -952,7 +952,7 @@ resolve this proposal. simple_map::add(&mut metadata, is_multi_step_in_execution_key, to_bytes(&false)); // If the proposal is a single-step proposal, we check if the metadata passed by the client has the IS_MULTI_STEP_PROPOSAL_IN_EXECUTION_KEY key. // If they have the key, we will remove it, because a single-step proposal that doesn't need this key. - } else if (simple_map::contains_key(&mut metadata, &is_multi_step_in_execution_key)) { + } else if (simple_map::contains_key(&metadata, &is_multi_step_in_execution_key)) { simple_map::remove(&mut metadata, &is_multi_step_in_execution_key); }; diff --git a/aptos-move/framework/aptos-framework/sources/account.move b/aptos-move/framework/aptos-framework/sources/account.move index a249fbb2d3d..db1fed32ab6 100644 --- a/aptos-move/framework/aptos-framework/sources/account.move +++ b/aptos-move/framework/aptos-framework/sources/account.move @@ -517,7 +517,7 @@ module aptos_framework::account { public entry fun revoke_rotation_capability(account: &signer, to_be_revoked_address: address) acquires Account { assert!(exists_at(to_be_revoked_address), error::not_found(EACCOUNT_DOES_NOT_EXIST)); let addr = signer::address_of(account); - let account_resource = borrow_global_mut(addr); + let account_resource = borrow_global(addr); assert!( option::contains(&account_resource.rotation_capability_offer.for, &to_be_revoked_address), error::not_found(ENO_SUCH_ROTATION_CAPABILITY_OFFER) @@ -587,7 +587,7 @@ module aptos_framework::account { public entry fun revoke_signer_capability(account: &signer, to_be_revoked_address: address) acquires Account { assert!(exists_at(to_be_revoked_address), error::not_found(EACCOUNT_DOES_NOT_EXIST)); let addr = 
signer::address_of(account); - let account_resource = borrow_global_mut(addr); + let account_resource = borrow_global(addr); assert!( option::contains(&account_resource.signer_capability_offer.for, &to_be_revoked_address), error::not_found(ENO_SUCH_SIGNER_CAPABILITY) @@ -842,7 +842,7 @@ module aptos_framework::account { signed_message_bytes: vector, message: T, ) acquires Account { - let account_resource = borrow_global_mut(account); + let account_resource = borrow_global(account); // Verify that the `SignerCapabilityOfferProofChallengeV2` has the right information and is signed by the account owner's key if (account_scheme == ED25519_SCHEME) { let pubkey = ed25519::new_unvalidated_public_key_from_bytes(account_public_key); diff --git a/aptos-move/framework/aptos-framework/sources/account.spec.move b/aptos-move/framework/aptos-framework/sources/account.spec.move index 1b97f1fc599..83d155ea822 100644 --- a/aptos-move/framework/aptos-framework/sources/account.spec.move +++ b/aptos-move/framework/aptos-framework/sources/account.spec.move @@ -575,6 +575,7 @@ spec aptos_framework::account { // This function should not abort assuming the result of `sha3_256` is deserializable into an address. 
aborts_if [abstract] false; ensures [abstract] result == spec_create_resource_address(source, seed); + ensures [abstract] source != result; // We can assume that the derived resource account does not equal to `source` } spec fun spec_create_resource_address(source: address, seed: vector): address; diff --git a/aptos-move/framework/aptos-framework/sources/aptos_account.spec.move b/aptos-move/framework/aptos-framework/sources/aptos_account.spec.move index ff6faa22ac6..dc55f00112b 100644 --- a/aptos-move/framework/aptos-framework/sources/aptos_account.spec.move +++ b/aptos-move/framework/aptos-framework/sources/aptos_account.spec.move @@ -90,9 +90,6 @@ spec aptos_framework::aptos_account { pragma verify = false; let account_addr_source = signer::address_of(source); - // The 'from' addr is implictly not equal to 'to' addr - requires account_addr_source != to; - include CreateAccountTransferAbortsIf; include GuidAbortsIf; include WithdrawAbortsIf{from: source}; @@ -131,10 +128,10 @@ spec aptos_framework::aptos_account { let coin_store_source = global>(account_addr_source); let balance_source = coin_store_source.coin.value; - requires forall i in 0..len(recipients): - recipients[i] != account_addr_source; - requires exists i in 0..len(recipients): - amounts[i] > 0; + // requires forall i in 0..len(recipients): + // recipients[i] != account_addr_source; + // requires exists i in 0..len(recipients): + // amounts[i] > 0; // create account properties aborts_if len(recipients) != len(amounts); @@ -182,11 +179,11 @@ spec aptos_framework::aptos_account { let coin_store_source = global>(account_addr_source); let balance_source = coin_store_source.coin.value; - requires forall i in 0..len(recipients): - recipients[i] != account_addr_source; - - requires exists i in 0..len(recipients): - amounts[i] > 0; + // requires forall i in 0..len(recipients): + // recipients[i] != account_addr_source; + // + // requires exists i in 0..len(recipients): + // amounts[i] > 0; /// 
[high-level-req-7] aborts_if len(recipients) != len(amounts); @@ -246,8 +243,6 @@ spec aptos_framework::aptos_account { pragma verify = false; let account_addr_source = signer::address_of(from); - //The 'from' addr is implictly not equal to 'to' addr - requires account_addr_source != to; include CreateAccountTransferAbortsIf; include WithdrawAbortsIf; diff --git a/aptos-move/framework/aptos-framework/sources/coin.move b/aptos-move/framework/aptos-framework/sources/coin.move index 91a54edb7fd..5f6598024d4 100644 --- a/aptos-move/framework/aptos-framework/sources/coin.move +++ b/aptos-move/framework/aptos-framework/sources/coin.move @@ -533,7 +533,7 @@ module aptos_framework::coin { let metadata = assert_paired_metadata_exists(); let metadata_addr = object_address(&metadata); assert!(exists(metadata_addr), error::internal(EPAIRED_FUNGIBLE_ASSET_REFS_NOT_FOUND)); - let burn_ref_opt = &mut borrow_global_mut(metadata_addr).burn_ref_opt; + let burn_ref_opt = &borrow_global(metadata_addr).burn_ref_opt; assert!(option::is_some(burn_ref_opt), error::not_found(EBURN_REF_NOT_FOUND)); option::borrow(burn_ref_opt) } @@ -1025,7 +1025,7 @@ module aptos_framework::coin { // Can only succeed once on-chain governance agreed on the upgrade. 
assert!( - borrow_global_mut(@aptos_framework).allow_upgrades, + borrow_global(@aptos_framework).allow_upgrades, error::permission_denied(ECOIN_SUPPLY_UPGRADE_NOT_SUPPORTED) ); diff --git a/aptos-move/framework/aptos-framework/sources/fungible_asset.move b/aptos-move/framework/aptos-framework/sources/fungible_asset.move index 946d7b05eb4..50a77348c63 100644 --- a/aptos-move/framework/aptos-framework/sources/fungible_asset.move +++ b/aptos-move/framework/aptos-framework/sources/fungible_asset.move @@ -960,19 +960,29 @@ module aptos_framework::fungible_asset { let mutable_metadata = borrow_global_mut(metadata_address); if (option::is_some(&name)){ - mutable_metadata.name = option::extract(&mut name); + let name = option::extract(&mut name); + assert!(string::length(&name) <= MAX_NAME_LENGTH, error::out_of_range(ENAME_TOO_LONG)); + mutable_metadata.name = name; }; if (option::is_some(&symbol)){ - mutable_metadata.symbol = option::extract(&mut symbol); + let symbol = option::extract(&mut symbol); + assert!(string::length(&symbol) <= MAX_SYMBOL_LENGTH, error::out_of_range(ESYMBOL_TOO_LONG)); + mutable_metadata.symbol = symbol; }; if (option::is_some(&decimals)){ - mutable_metadata.decimals = option::extract(&mut decimals); + let decimals = option::extract(&mut decimals); + assert!(decimals <= MAX_DECIMALS, error::out_of_range(EDECIMALS_TOO_LARGE)); + mutable_metadata.decimals = decimals; }; if (option::is_some(&icon_uri)){ - mutable_metadata.icon_uri = option::extract(&mut icon_uri); + let icon_uri = option::extract(&mut icon_uri); + assert!(string::length(&icon_uri) <= MAX_URI_LENGTH, error::out_of_range(EURI_TOO_LONG)); + mutable_metadata.icon_uri = icon_uri; }; if (option::is_some(&project_uri)){ - mutable_metadata.project_uri = option::extract(&mut project_uri); + let project_uri = option::extract(&mut project_uri); + assert!(string::length(&project_uri) <= MAX_URI_LENGTH, error::out_of_range(EURI_TOO_LONG)); + mutable_metadata.project_uri = project_uri; }; } @@ 
-1311,13 +1321,13 @@ module aptos_framework::fungible_asset { mutate_metadata( &mutate_metadata_ref, option::some(string::utf8(b"mutated_name")), - option::some(string::utf8(b"mutated_symbol")), + option::some(string::utf8(b"m_symbol")), option::none(), option::none(), option::none() ); assert!(name(metadata) == string::utf8(b"mutated_name"), 8); - assert!(symbol(metadata) == string::utf8(b"mutated_symbol"), 9); + assert!(symbol(metadata) == string::utf8(b"m_symbol"), 9); assert!(decimals(metadata) == 0, 10); assert!(icon_uri(metadata) == string::utf8(b"http://www.example.com/favicon.ico"), 11); assert!(project_uri(metadata) == string::utf8(b"http://www.example.com"), 12); @@ -1392,13 +1402,13 @@ module aptos_framework::fungible_asset { mutate_metadata( &mutate_metadata_ref, option::some(string::utf8(b"mutated_name")), - option::some(string::utf8(b"mutated_symbol")), + option::some(string::utf8(b"m_symbol")), option::some(10), option::some(string::utf8(b"http://www.mutated-example.com/favicon.ico")), option::some(string::utf8(b"http://www.mutated-example.com")) ); assert!(name(metadata) == string::utf8(b"mutated_name"), 1); - assert!(symbol(metadata) == string::utf8(b"mutated_symbol"), 2); + assert!(symbol(metadata) == string::utf8(b"m_symbol"), 2); assert!(decimals(metadata) == 10, 3); assert!(icon_uri(metadata) == string::utf8(b"http://www.mutated-example.com/favicon.ico"), 4); assert!(project_uri(metadata) == string::utf8(b"http://www.mutated-example.com"), 5); @@ -1414,18 +1424,115 @@ module aptos_framework::fungible_asset { mutate_metadata( &mutate_metadata_ref, option::some(string::utf8(b"mutated_name")), - option::some(string::utf8(b"mutated_symbol")), + option::some(string::utf8(b"m_symbol")), option::none(), option::none(), option::none() ); assert!(name(metadata) == string::utf8(b"mutated_name"), 8); - assert!(symbol(metadata) == string::utf8(b"mutated_symbol"), 9); + assert!(symbol(metadata) == string::utf8(b"m_symbol"), 9); assert!(decimals(metadata) == 
0, 10); assert!(icon_uri(metadata) == string::utf8(b"http://www.example.com/favicon.ico"), 11); assert!(project_uri(metadata) == string::utf8(b"http://www.example.com"), 12); } + #[test(creator = @0xcafe)] + #[expected_failure(abort_code = 0x2000f, location = Self)] + fun test_mutate_metadata_name_over_maximum_length( + creator: &signer + ) acquires Metadata { + let (_mint_ref, _transfer_ref, _burn_ref, mutate_metadata_ref, _) = create_fungible_asset(creator); + + mutate_metadata( + &mutate_metadata_ref, + option::some(string::utf8(b"mutated_name_will_be_too_long_for_the_maximum_length_check")), + option::none(), + option::none(), + option::none(), + option::none() + ); + } + + #[test(creator = @0xcafe)] + #[expected_failure(abort_code = 0x20010, location = Self)] + fun test_mutate_metadata_symbol_over_maximum_length( + creator: &signer + ) acquires Metadata { + let (_mint_ref, _transfer_ref, _burn_ref, mutate_metadata_ref, _) = create_fungible_asset(creator); + + mutate_metadata( + &mutate_metadata_ref, + option::none(), + option::some(string::utf8(b"mutated_symbol_will_be_too_long_for_the_maximum_length_check")), + option::none(), + option::none(), + option::none() + ); + } + + #[test(creator = @0xcafe)] + #[expected_failure(abort_code = 0x20011, location = Self)] + fun test_mutate_metadata_decimals_over_maximum_amount( + creator: &signer + ) acquires Metadata { + let (_mint_ref, _transfer_ref, _burn_ref, mutate_metadata_ref, _) = create_fungible_asset(creator); + + mutate_metadata( + &mutate_metadata_ref, + option::none(), + option::none(), + option::some(50), + option::none(), + option::none() + ); + } + + #[test_only] + fun create_exceedingly_long_uri(): vector { + use std::vector; + + let too_long_of_uri = b"mutated_uri_will_be_too_long_for_the_maximum_length_check.com/"; + for (i in 0..50) { + vector::append(&mut too_long_of_uri, b"too_long_of_uri"); + }; + + too_long_of_uri + } + + #[test(creator = @0xcafe)] + #[expected_failure(abort_code = 0x20013, 
location = Self)] + fun test_mutate_metadata_icon_uri_over_maximum_length( + creator: &signer + ) acquires Metadata { + let (_mint_ref, _transfer_ref, _burn_ref, mutate_metadata_ref, _) = create_fungible_asset(creator); + let too_long_of_uri = create_exceedingly_long_uri(); + mutate_metadata( + &mutate_metadata_ref, + option::none(), + option::none(), + option::none(), + option::some(string::utf8(too_long_of_uri)), + option::none() + ); + } + + #[test(creator = @0xcafe)] + #[expected_failure(abort_code = 0x20013, location = Self)] + fun test_mutate_metadata_project_uri_over_maximum_length( + creator: &signer + ) acquires Metadata { + let (_mint_ref, _transfer_ref, _burn_ref, mutate_metadata_ref, _) = create_fungible_asset(creator); + let too_long_of_uri = create_exceedingly_long_uri(); + mutate_metadata( + &mutate_metadata_ref, + option::none(), + option::none(), + option::none(), + option::none(), + option::some(string::utf8(too_long_of_uri)) + ); + } + #[test(creator = @0xcafe)] fun test_merge_and_exact(creator: &signer) acquires Supply, ConcurrentSupply { let (mint_ref, _transfer_ref, burn_ref, _mutate_metadata_ref, _) = create_fungible_asset(creator); diff --git a/aptos-move/framework/aptos-framework/sources/jwks.move b/aptos-move/framework/aptos-framework/sources/jwks.move index ac144676adb..5c9b75147a9 100644 --- a/aptos-move/framework/aptos-framework/sources/jwks.move +++ b/aptos-move/framework/aptos-framework/sources/jwks.move @@ -39,6 +39,7 @@ module aptos_framework::jwks { const EJWK_ID_NOT_FOUND: u64 = 6; const EINSTALL_FEDERATED_JWKS_AT_APTOS_FRAMEWORK: u64 = 7; const EFEDERATED_JWKS_TOO_LARGE: u64 = 8; + const EINVALID_FEDERATED_JWK_SET: u64 = 9; const ENATIVE_MISSING_RESOURCE_VALIDATOR_SET: u64 = 0x0101; const ENATIVE_MISSING_RESOURCE_OBSERVED_JWKS: u64 = 0x0102; @@ -174,11 +175,8 @@ module aptos_framework::jwks { // /// Called by a federated keyless dapp owner to install the JWKs for the federated OIDC provider (e.g., Auth0, AWS - /// Cognito, etc). 
- /// - /// For type-safety, we explicitly use a `struct FederatedJWKs { jwks: AllProviderJWKs }` instead of - /// reusing `PatchedJWKs { jwks: AllProviderJWKs }`, which is a JWK-consensus-specific struct. We'd - /// need to be careful how we read it in Rust (but BCS serialization should be the same). + /// Cognito, etc). For type-safety, we explicitly use a `struct FederatedJWKs { jwks: AllProviderJWKs }` instead of + /// reusing `PatchedJWKs { jwks: AllProviderJWKs }`, which is a JWK-consensus-specific struct. public fun patch_federated_jwks(jwk_owner: &signer, patches: vector) acquires FederatedJWKs { // Prevents accidental calls in 0x1::jwks that install federated JWKs at the Aptos framework address. assert!(!system_addresses::is_aptos_framework_address(signer::address_of(jwk_owner)), @@ -201,6 +199,80 @@ module aptos_framework::jwks { assert!(num_bytes < MAX_FEDERATED_JWKS_SIZE_BYTES, error::invalid_argument(EFEDERATED_JWKS_TOO_LARGE)); } + /// This can be called to install or update a set of JWKs for a federated OIDC provider. This function should + /// be invoked to intially install a set of JWKs or to update a set of JWKs when a keypair is rotated. + /// + /// The `iss` parameter is the value of the `iss` claim on the JWTs that are to be verified by the JWK set. + /// `kid_vec`, `alg_vec`, `e_vec`, `n_vec` are String vectors of the JWK attributes `kid`, `alg`, `e` and `n` respectively. + /// See https://datatracker.ietf.org/doc/html/rfc7517#section-4 for more details about the JWK attributes aforementioned. 
+ /// + /// For the example JWK set snapshot below containing 2 keys for Google found at https://www.googleapis.com/oauth2/v3/certs - + /// ```json + /// { + /// "keys": [ + /// { + /// "alg": "RS256", + /// "use": "sig", + /// "kty": "RSA", + /// "n": "wNHgGSG5B5xOEQNFPW2p_6ZxZbfPoAU5VceBUuNwQWLop0ohW0vpoZLU1tAsq_S9s5iwy27rJw4EZAOGBR9oTRq1Y6Li5pDVJfmzyRNtmWCWndR-bPqhs_dkJU7MbGwcvfLsN9FSHESFrS9sfGtUX-lZfLoGux23TKdYV9EE-H-NDASxrVFUk2GWc3rL6UEMWrMnOqV9-tghybDU3fcRdNTDuXUr9qDYmhmNegYjYu4REGjqeSyIG1tuQxYpOBH-tohtcfGY-oRTS09kgsSS9Q5BRM4qqCkGP28WhlSf4ui0-norS0gKMMI1P_ZAGEsLn9p2TlYMpewvIuhjJs1thw", + /// "kid": "d7b939771a7800c413f90051012d975981916d71", + /// "e": "AQAB" + /// }, + /// { + /// "kty": "RSA", + /// "kid": "b2620d5e7f132b52afe8875cdf3776c064249d04", + /// "alg": "RS256", + /// "n": "pi22xDdK2fz5gclIbDIGghLDYiRO56eW2GUcboeVlhbAuhuT5mlEYIevkxdPOg5n6qICePZiQSxkwcYMIZyLkZhSJ2d2M6Szx2gDtnAmee6o_tWdroKu0DjqwG8pZU693oLaIjLku3IK20lTs6-2TeH-pUYMjEqiFMhn-hb7wnvH_FuPTjgz9i0rEdw_Hf3Wk6CMypaUHi31y6twrMWq1jEbdQNl50EwH-RQmQ9bs3Wm9V9t-2-_Jzg3AT0Ny4zEDU7WXgN2DevM8_FVje4IgztNy29XUkeUctHsr-431_Iu23JIy6U4Kxn36X3RlVUKEkOMpkDD3kd81JPW4Ger_w", + /// "e": "AQAB", + /// "use": "sig" + /// } + /// ] + /// } + /// ``` + /// + /// We can call update_federated_jwk_set for Google's `iss` - "https://accounts.google.com" and for each vector + /// argument `kid_vec`, `alg_vec`, `e_vec`, `n_vec`, we set in index 0 the corresponding attribute in the first JWK and we set in index 1 the + /// the corresponding attribute in the second JWK as shown below. 
+ /// + /// ```move + /// use std::string::utf8; + /// aptos_framework::jwks::update_federated_jwk_set( + /// jwk_owner, + /// b"https://accounts.google.com", + /// vector[utf8(b"d7b939771a7800c413f90051012d975981916d71"), utf8(b"b2620d5e7f132b52afe8875cdf3776c064249d04")], + /// vector[utf8(b"RS256"), utf8(b"RS256")], + /// vector[utf8(b"AQAB"), utf8(b"AQAB")], + /// vector[ + /// utf8(b"wNHgGSG5B5xOEQNFPW2p_6ZxZbfPoAU5VceBUuNwQWLop0ohW0vpoZLU1tAsq_S9s5iwy27rJw4EZAOGBR9oTRq1Y6Li5pDVJfmzyRNtmWCWndR-bPqhs_dkJU7MbGwcvfLsN9FSHESFrS9sfGtUX-lZfLoGux23TKdYV9EE-H-NDASxrVFUk2GWc3rL6UEMWrMnOqV9-tghybDU3fcRdNTDuXUr9qDYmhmNegYjYu4REGjqeSyIG1tuQxYpOBH-tohtcfGY-oRTS09kgsSS9Q5BRM4qqCkGP28WhlSf4ui0-norS0gKMMI1P_ZAGEsLn9p2TlYMpewvIuhjJs1thw"), + /// utf8(b"pi22xDdK2fz5gclIbDIGghLDYiRO56eW2GUcboeVlhbAuhuT5mlEYIevkxdPOg5n6qICePZiQSxkwcYMIZyLkZhSJ2d2M6Szx2gDtnAmee6o_tWdroKu0DjqwG8pZU693oLaIjLku3IK20lTs6-2TeH-pUYMjEqiFMhn-hb7wnvH_FuPTjgz9i0rEdw_Hf3Wk6CMypaUHi31y6twrMWq1jEbdQNl50EwH-RQmQ9bs3Wm9V9t-2-_Jzg3AT0Ny4zEDU7WXgN2DevM8_FVje4IgztNy29XUkeUctHsr-431_Iu23JIy6U4Kxn36X3RlVUKEkOMpkDD3kd81JPW4Ger_w") + /// ] + /// ) + /// ``` + /// + /// See AIP-96 for more details about federated keyless - https://github.com/aptos-foundation/AIPs/blob/main/aips/aip-96.md + /// + /// NOTE: Currently only RSA keys are supported. 
+ public entry fun update_federated_jwk_set(jwk_owner: &signer, iss: vector, kid_vec: vector, alg_vec: vector, e_vec: vector, n_vec: vector) acquires FederatedJWKs { + assert!(!vector::is_empty(&kid_vec), error::invalid_argument(EINVALID_FEDERATED_JWK_SET)); + let num_jwk = vector::length(&kid_vec); + assert!(vector::length(&alg_vec) == num_jwk , error::invalid_argument(EINVALID_FEDERATED_JWK_SET)); + assert!(vector::length(&e_vec) == num_jwk, error::invalid_argument(EINVALID_FEDERATED_JWK_SET)); + assert!(vector::length(&n_vec) == num_jwk, error::invalid_argument(EINVALID_FEDERATED_JWK_SET)); + + let remove_all_patch = new_patch_remove_all(); + let patches = vector[remove_all_patch]; + while (!vector::is_empty(&kid_vec)) { + let kid = vector::pop_back(&mut kid_vec); + let alg = vector::pop_back(&mut alg_vec); + let e = vector::pop_back(&mut e_vec); + let n = vector::pop_back(&mut n_vec); + let jwk = new_rsa_jwk(kid, alg, e, n); + let patch = new_patch_upsert_jwk(iss, jwk); + vector::push_back(&mut patches, patch) + }; + patch_federated_jwks(jwk_owner, patches); + } + /// Get a JWK by issuer and key ID from the `PatchedJWKs`. /// Abort if such a JWK does not exist. /// More convenient to call from Rust, since it does not wrap the JWK in an `Option`. 
@@ -245,7 +317,7 @@ module aptos_framework::jwks { let provider_set = if (config_buffer::does_exist()) { config_buffer::extract() } else { - *borrow_global_mut(@aptos_framework) + *borrow_global(@aptos_framework) }; let old_config_url = remove_oidc_provider_internal(&mut provider_set, name); @@ -280,7 +352,7 @@ module aptos_framework::jwks { let provider_set = if (config_buffer::does_exist()) { config_buffer::extract() } else { - *borrow_global_mut(@aptos_framework) + *borrow_global(@aptos_framework) }; let ret = remove_oidc_provider_internal(&mut provider_set, name); config_buffer::upsert(provider_set); diff --git a/aptos-move/framework/aptos-framework/sources/multisig_account.move b/aptos-move/framework/aptos-framework/sources/multisig_account.move index 6ea72d7e0ed..80fffd93ecd 100644 --- a/aptos-move/framework/aptos-framework/sources/multisig_account.move +++ b/aptos-move/framework/aptos-framework/sources/multisig_account.move @@ -457,14 +457,14 @@ module aptos_framework::multisig_account { #[view] /// Return the id of the last transaction that was executed (successful or failed) or removed. public fun last_resolved_sequence_number(multisig_account: address): u64 acquires MultisigAccount { - let multisig_account_resource = borrow_global_mut(multisig_account); + let multisig_account_resource = borrow_global(multisig_account); multisig_account_resource.last_executed_sequence_number } #[view] /// Return the id of the next transaction created. public fun next_sequence_number(multisig_account: address): u64 acquires MultisigAccount { - let multisig_account_resource = borrow_global_mut(multisig_account); + let multisig_account_resource = borrow_global(multisig_account); multisig_account_resource.next_sequence_number } @@ -472,7 +472,7 @@ module aptos_framework::multisig_account { /// Return a bool tuple indicating whether an owner has voted and if so, whether they voted yes or no. 
public fun vote( multisig_account: address, sequence_number: u64, owner: address): (bool, bool) acquires MultisigAccount { - let multisig_account_resource = borrow_global_mut(multisig_account); + let multisig_account_resource = borrow_global(multisig_account); assert!( sequence_number > 0 && sequence_number < multisig_account_resource.next_sequence_number, error::invalid_argument(EINVALID_SEQUENCE_NUMBER), @@ -486,7 +486,7 @@ module aptos_framework::multisig_account { #[view] public fun available_transaction_queue_capacity(multisig_account: address): u64 acquires MultisigAccount { - let multisig_account_resource = borrow_global_mut(multisig_account); + let multisig_account_resource = borrow_global(multisig_account); let num_pending_transactions = multisig_account_resource.next_sequence_number - multisig_account_resource.last_executed_sequence_number - 1; if (num_pending_transactions > MAX_PENDING_TRANSACTIONS) { 0 @@ -497,6 +497,30 @@ module aptos_framework::multisig_account { ////////////////////////// Multisig account creation functions /////////////////////////////// + /// Private entry function that creates a new multisig account on top of an existing account. + /// + /// This offers a migration path for an existing account with any type of auth key. + /// + /// Note that this does not revoke auth key-based control over the account. Owners should separately rotate the auth + /// key after they are fully migrated to the new multisig account. Alternatively, they can call + /// create_with_existing_account_and_revoke_auth_key_call instead. + entry fun create_with_existing_account_call( + multisig_account: &signer, + owners: vector
, + num_signatures_required: u64, + metadata_keys: vector, + metadata_values: vector>, + ) acquires MultisigAccount { + create_with_owners_internal( + multisig_account, + owners, + num_signatures_required, + option::none(), + metadata_keys, + metadata_values, + ); + } + /// Creates a new multisig account on top of an existing account. /// /// This offers a migration path for an existing account with a multi-ed25519 auth key (native multisig account). @@ -547,6 +571,41 @@ module aptos_framework::multisig_account { ); } + /// Private entry function that creates a new multisig account on top of an existing account and immediately rotate + /// the origin auth key to 0x0. + /// + /// Note: If the original account is a resource account, this does not revoke all control over it as if any + /// SignerCapability of the resource account still exists, it can still be used to generate the signer for the + /// account. + entry fun create_with_existing_account_and_revoke_auth_key_call( + multisig_account: &signer, + owners: vector
, + num_signatures_required: u64, + metadata_keys: vector, + metadata_values:vector>, + ) acquires MultisigAccount { + create_with_owners_internal( + multisig_account, + owners, + num_signatures_required, + option::none(), + metadata_keys, + metadata_values, + ); + + // Rotate the account's auth key to 0x0, which effectively revokes control via auth key. + let multisig_address = address_of(multisig_account); + account::rotate_authentication_key_internal(multisig_account, ZERO_AUTH_KEY); + // This also needs to revoke any signer capability or rotation capability that exists for the account to + // completely remove all access to the account. + if (account::is_signer_capability_offered(multisig_address)) { + account::revoke_any_signer_capability(multisig_account); + }; + if (account::is_rotation_capability_offered(multisig_address)) { + account::revoke_any_rotation_capability(multisig_account); + }; + } + /// Creates a new multisig account on top of an existing account and immediately rotate the origin auth key to 0x0. 
/// /// Note: If the original account is a resource account, this does not revoke all control over it as if any @@ -1688,6 +1747,26 @@ module aptos_framework::multisig_account { ); } + #[test] + public entry fun test_create_multisig_account_on_top_of_existing_with_signer() + acquires MultisigAccount { + setup(); + + let multisig_address = @0xabc; + create_account(multisig_address); + + let expected_owners = vector[@0x123, @0x124, @0x125]; + create_with_existing_account_call( + &create_signer(multisig_address), + expected_owners, + 2, + vector[], + vector[], + ); + assert_multisig_account_exists(multisig_address); + assert!(owners(multisig_address) == expected_owners, 0); + } + #[test] public entry fun test_create_multisig_account_on_top_of_existing_multi_ed25519_account() acquires MultisigAccount { @@ -1721,6 +1800,34 @@ module aptos_framework::multisig_account { assert!(owners(multisig_address) == expected_owners, 0); } + #[test] + public entry fun test_create_multisig_account_on_top_of_existing_and_revoke_auth_key_with_signer() + acquires MultisigAccount { + setup(); + + let multisig_address = @0xabc; + create_account(multisig_address); + + // Create both a signer capability and rotation capability offers + account::set_rotation_capability_offer(multisig_address, @0x123); + account::set_signer_capability_offer(multisig_address, @0x123); + + let expected_owners = vector[@0x123, @0x124, @0x125]; + create_with_existing_account_and_revoke_auth_key_call( + &create_signer(multisig_address), + expected_owners, + 2, + vector[], + vector[], + ); + assert_multisig_account_exists(multisig_address); + assert!(owners(multisig_address) == expected_owners, 0); + assert!(account::get_authentication_key(multisig_address) == ZERO_AUTH_KEY, 1); + // Verify that all capability offers have been wiped. 
+ assert!(!account::is_rotation_capability_offered(multisig_address), 2); + assert!(!account::is_signer_capability_offered(multisig_address), 3); + } + #[test] public entry fun test_create_multisig_account_on_top_of_existing_multi_ed25519_account_and_revoke_auth_key() acquires MultisigAccount { diff --git a/aptos-move/framework/aptos-framework/sources/object.move b/aptos-move/framework/aptos-framework/sources/object.move index 6e809e87e87..c03914fb767 100644 --- a/aptos-move/framework/aptos-framework/sources/object.move +++ b/aptos-move/framework/aptos-framework/sources/object.move @@ -50,6 +50,8 @@ module aptos_framework::object { const EOBJECT_NOT_BURNT: u64 = 8; /// Object is untransferable any operations that might result in a transfer are disallowed. const EOBJECT_NOT_TRANSFERRABLE: u64 = 9; + /// Objects cannot be burnt + const EBURN_NOT_ALLOWED: u64 = 10; /// Explicitly separate the GUID space between Object and Account to prevent accidental overlap. const INIT_GUID_CREATION_NUM: u64 = 0x4000000000000; @@ -610,15 +612,12 @@ module aptos_framework::object { }; } - /// Forcefully transfer an unwanted object to BURN_ADDRESS, ignoring whether ungated_transfer is allowed. - /// This only works for objects directly owned and for simplicity does not apply to indirectly owned objects. - /// Original owners can reclaim burnt objects any time in the future by calling unburn. - public entry fun burn(owner: &signer, object: Object) acquires ObjectCore { - let original_owner = signer::address_of(owner); - assert!(is_owner(object, original_owner), error::permission_denied(ENOT_OBJECT_OWNER)); - let object_addr = object.inner; - move_to(&create_signer(object_addr), TombStone { original_owner }); - transfer_raw_inner(object_addr, BURN_ADDRESS); + #[deprecated] + /// Previously allowed to burn objects, has now been disabled. Objects can still be unburnt. + /// + /// Please use the test only [`object::burn_object`] for testing with previously burned objects. 
+ public entry fun burn(_owner: &signer, _object: Object) { + abort error::permission_denied(EBURN_NOT_ALLOWED) } /// Allow origin owners to reclaim any objects they previous burnt. @@ -705,6 +704,20 @@ module aptos_framework::object { #[test_only] const EWEAPON_DOES_NOT_EXIST: u64 = 0x101; + #[test_only] + /// For testing the previous behavior of `object::burn()` + /// + /// Forcefully transfer an unwanted object to BURN_ADDRESS, ignoring whether ungated_transfer is allowed. + /// This only works for objects directly owned and for simplicity does not apply to indirectly owned objects. + /// Original owners can reclaim burnt objects any time in the future by calling unburn. + public fun burn_object(owner: &signer, object: Object) acquires ObjectCore { + let original_owner = signer::address_of(owner); + assert!(is_owner(object, original_owner), error::permission_denied(ENOT_OBJECT_OWNER)); + let object_addr = object.inner; + move_to(&create_signer(object_addr), TombStone { original_owner }); + transfer_raw_inner(object_addr, BURN_ADDRESS); + } + #[test_only] struct HeroEquipEvent has drop, store { weapon_id: Option>, @@ -820,7 +833,7 @@ module aptos_framework::object { #[expected_failure(abort_code = 0x10008, location = Self)] fun test_cannot_unburn_after_transfer_with_ref(creator: &signer) acquires ObjectCore, TombStone { let (hero_constructor, hero) = create_hero(creator); - burn(creator, hero); + burn_object(creator, hero); let transfer_ref = generate_transfer_ref(&hero_constructor); transfer_with_ref(generate_linear_transfer_ref(&transfer_ref), @0x456); unburn(creator, hero); @@ -876,7 +889,7 @@ module aptos_framework::object { disable_ungated_transfer(&transfer_ref); // Owner should be able to burn, despite ungated transfer disallowed. 
- burn(creator, hero); + burn_object(creator, hero); assert!(owner(hero) == BURN_ADDRESS, 0); assert!(!ungated_transfer_allowed(hero), 0); @@ -897,7 +910,7 @@ module aptos_framework::object { // Owner should be not be able to burn weapon directly. assert!(owner(weapon) == object_address(&hero), 0); assert!(owns(weapon, signer::address_of(creator)), 0); - burn(creator, weapon); + burn_object(creator, weapon); } #[test(creator = @0x123)] @@ -907,6 +920,13 @@ module aptos_framework::object { unburn(creator, hero); } + #[test(creator = @0x123)] + #[expected_failure(abort_code = 0x5000A, location = Self)] + fun test_burn_should_fail(creator: &signer) acquires ObjectCore { + let (_, hero) = create_hero(creator); + burn(creator, hero); + } + #[test_only] fun create_simple_object(creator: &signer, seed: vector): Object { object_from_constructor_ref(&create_named_object(creator, seed)) diff --git a/aptos-move/framework/aptos-framework/sources/object.spec.move b/aptos-move/framework/aptos-framework/sources/object.spec.move index d2627d649fd..51ae05b5683 100644 --- a/aptos-move/framework/aptos-framework/sources/object.spec.move +++ b/aptos-move/framework/aptos-framework/sources/object.spec.move @@ -475,7 +475,11 @@ spec aptos_framework::object { aborts_if !global(object_address).allow_ungated_transfer; } - spec burn(owner: &signer, object: Object) { + spec burn(_owner: &signer, _object: Object) { + aborts_if true; + } + + spec burn_object(owner: &signer, object: Object) { pragma aborts_if_is_partial; let object_address = object.inner; aborts_if !exists(object_address); diff --git a/aptos-move/framework/aptos-framework/sources/primary_fungible_store.move b/aptos-move/framework/aptos-framework/sources/primary_fungible_store.move index fc20e1cf311..9e39b97fa28 100644 --- a/aptos-move/framework/aptos-framework/sources/primary_fungible_store.move +++ b/aptos-move/framework/aptos-framework/sources/primary_fungible_store.move @@ -372,7 +372,7 @@ module 
aptos_framework::primary_fungible_store { // User 2 burns their primary store but should still be able to transfer afterward. let user_2_primary_store = primary_store(user_2_address, metadata); - object::burn(user_2, user_2_primary_store); + object::burn_object(user_2, user_2_primary_store); assert!(object::is_burnt(user_2_primary_store), 0); // Balance still works assert!(balance(user_2_address, metadata) == 80, 0); @@ -396,7 +396,7 @@ module aptos_framework::primary_fungible_store { // User 2 burns their primary store but should still be able to withdraw afterward. let user_2_primary_store = primary_store(user_2_address, metadata); - object::burn(user_2, user_2_primary_store); + object::burn_object(user_2, user_2_primary_store); assert!(object::is_burnt(user_2_primary_store), 0); let coins = withdraw(user_2, metadata, 70); assert!(balance(user_2_address, metadata) == 10, 0); diff --git a/aptos-move/framework/aptos-framework/sources/resource_account.spec.move b/aptos-move/framework/aptos-framework/sources/resource_account.spec.move index a6f4d6848a5..847e77853bd 100644 --- a/aptos-move/framework/aptos-framework/sources/resource_account.spec.move +++ b/aptos-move/framework/aptos-framework/sources/resource_account.spec.move @@ -155,8 +155,6 @@ spec aptos_framework::resource_account { let get = len(optional_auth_key) == 0; let account = global(source_addr); - requires source_addr != resource_addr; - aborts_if len(ZERO_AUTH_KEY) != 32; include account::exists_at(resource_addr) ==> account::CreateResourceAccountAbortsIf; include !account::exists_at(resource_addr) ==> account::CreateAccountAbortsIf {addr: resource_addr}; diff --git a/aptos-move/framework/aptos-framework/sources/stake.move b/aptos-move/framework/aptos-framework/sources/stake.move index 9639ffa8ff0..0b65fa5f4eb 100644 --- a/aptos-move/framework/aptos-framework/sources/stake.move +++ b/aptos-move/framework/aptos-framework/sources/stake.move @@ -588,7 +588,7 @@ module aptos_framework::stake { 
fullnode_addresses: vector, ) acquires AllowedValidators { // Checks the public key has a valid proof-of-possession to prevent rogue-key attacks. - let pubkey_from_pop = &mut bls12381::public_key_from_bytes_with_pop( + let pubkey_from_pop = &bls12381::public_key_from_bytes_with_pop( consensus_pubkey, &proof_of_possession_from_bytes(proof_of_possession) ); @@ -730,7 +730,7 @@ module aptos_framework::stake { // Only track and validate voting power increase for active and pending_active validator. // Pending_inactive validator will be removed from the validator set in the next epoch. // Inactive validator's total stake will be tracked when they join the validator set. - let validator_set = borrow_global_mut(@aptos_framework); + let validator_set = borrow_global(@aptos_framework); // Search directly rather using get_validator_state to save on unnecessary loops. if (option::is_some(&find_validator(&validator_set.active_validators, pool_address)) || option::is_some(&find_validator(&validator_set.pending_active, pool_address))) { @@ -826,7 +826,7 @@ module aptos_framework::stake { let validator_info = borrow_global_mut(pool_address); let old_consensus_pubkey = validator_info.consensus_pubkey; // Checks the public key has a valid proof-of-possession to prevent rogue-key attacks. - let pubkey_from_pop = &mut bls12381::public_key_from_bytes_with_pop( + let pubkey_from_pop = &bls12381::public_key_from_bytes_with_pop( new_consensus_pubkey, &proof_of_possession_from_bytes(proof_of_possession) ); @@ -976,7 +976,7 @@ module aptos_framework::stake { update_voting_power_increase(voting_power); // Add validator to pending_active, to be activated in the next epoch. - let validator_config = borrow_global_mut(pool_address); + let validator_config = borrow_global(pool_address); assert!(!vector::is_empty(&validator_config.consensus_pubkey), error::invalid_argument(EINVALID_PUBLIC_KEY)); // Validate the current validator set size has not exceeded the limit. 
@@ -1273,8 +1273,8 @@ module aptos_framework::stake { }) { let old_validator_info = vector::borrow_mut(&mut validator_set.active_validators, i); let pool_address = old_validator_info.addr; - let validator_config = borrow_global_mut(pool_address); - let stake_pool = borrow_global_mut(pool_address); + let validator_config = borrow_global(pool_address); + let stake_pool = borrow_global(pool_address); let new_validator_info = generate_validator_info(pool_address, stake_pool, *validator_config); // A validator needs at least the min stake required to join the validator set. diff --git a/aptos-move/framework/aptos-framework/sources/staking_contract.move b/aptos-move/framework/aptos-framework/sources/staking_contract.move index 6120b770f23..be247f59bd2 100644 --- a/aptos-move/framework/aptos-framework/sources/staking_contract.move +++ b/aptos-move/framework/aptos-framework/sources/staking_contract.move @@ -785,7 +785,7 @@ module aptos_framework::staking_contract { // Buy all recipients out of the distribution pool. while (pool_u64::shareholders_count(distribution_pool) > 0) { let recipients = pool_u64::shareholders(distribution_pool); - let recipient = *vector::borrow(&mut recipients, 0); + let recipient = *vector::borrow(&recipients, 0); let current_shares = pool_u64::shares(distribution_pool, recipient); let amount_to_distribute = pool_u64::redeem_shares(distribution_pool, recipient, current_shares); // If the recipient is the operator, send the commission to the beneficiary instead. @@ -815,7 +815,7 @@ module aptos_framework::staking_contract { /// Assert that a staking_contract exists for the staker/operator pair. 
fun assert_staking_contract_exists(staker: address, operator: address) acquires Store { assert!(exists(staker), error::not_found(ENO_STAKING_CONTRACT_FOUND_FOR_STAKER)); - let staking_contracts = &mut borrow_global_mut(staker).staking_contracts; + let staking_contracts = &borrow_global(staker).staking_contracts; assert!( simple_map::contains_key(staking_contracts, &operator), error::not_found(ENO_STAKING_CONTRACT_FOUND_FOR_OPERATOR), diff --git a/aptos-move/framework/aptos-framework/sources/staking_contract.spec.move b/aptos-move/framework/aptos-framework/sources/staking_contract.spec.move index d4a6d49f677..e18616ff623 100644 --- a/aptos-move/framework/aptos-framework/sources/staking_contract.spec.move +++ b/aptos-move/framework/aptos-framework/sources/staking_contract.spec.move @@ -72,6 +72,10 @@ spec aptos_framework::staking_contract { pragma aborts_if_is_strict; } + spec StakingContract { + invariant commission_percentage >= 0 && commission_percentage <= 100; + } + spec stake_pool_address(staker: address, operator: address): address { include ContractExistsAbortsIf; let staking_contracts = global(staker).staking_contracts; @@ -97,7 +101,6 @@ spec aptos_framework::staking_contract { spec staking_contract_amounts(staker: address, operator: address): (u64, u64, u64) { // TODO: set because of timeout (property proved). pragma verify_duration_estimate = 120; - requires staking_contract.commission_percentage >= 0 && staking_contract.commission_percentage <= 100; let staking_contracts = global(staker).staking_contracts; let staking_contract = simple_map::spec_get(staking_contracts, operator); @@ -228,6 +231,7 @@ spec aptos_framework::staking_contract { let post new_delegated_voter = global(pool_address).delegated_voter; // property 4: The staker may update the voter of a staking contract, enabling them // to modify the assigned voter address and ensure it accurately reflects their desired choice. 
+ /// [high-level-req-4] ensures new_delegated_voter == new_voter; } @@ -275,8 +279,6 @@ spec aptos_framework::staking_contract { // TODO: Call `update_distribution_pool` and could not verify `update_distribution_pool`. // TODO: Set because of timeout (estimate unknown). pragma verify = false; - /// [high-level-req-4] - requires staking_contract.commission_percentage >= 0 && staking_contract.commission_percentage <= 100; let staker_address = signer::address_of(staker); let staking_contracts = global(staker_address).staking_contracts; let staking_contract = simple_map::spec_get(staking_contracts, operator); @@ -287,7 +289,6 @@ spec aptos_framework::staking_contract { // TODO: Call `update_distribution_pool` and could not verify `update_distribution_pool`. // TODO: Set because of timeout (estimate unknown). pragma verify = false; - requires amount > 0; let staker_address = signer::address_of(staker); include ContractExistsAbortsIf { staker: staker_address }; } diff --git a/aptos-move/framework/aptos-framework/sources/vesting.move b/aptos-move/framework/aptos-framework/sources/vesting.move index 527b4726ffb..9ede3acfa3d 100644 --- a/aptos-move/framework/aptos-framework/sources/vesting.move +++ b/aptos-move/framework/aptos-framework/sources/vesting.move @@ -1064,7 +1064,7 @@ module aptos_framework::vesting { role: String, role_holder: address, ) acquires VestingAccountManagement, VestingContract { - let vesting_contract = borrow_global_mut(contract_address); + let vesting_contract = borrow_global(contract_address); verify_admin(admin, vesting_contract); if (!exists(contract_address)) { @@ -1108,7 +1108,7 @@ module aptos_framework::vesting { /// This doesn't give the admin total power as the admin would still need to follow the rules set by /// staking_contract and stake modules. 
public fun get_vesting_account_signer(admin: &signer, contract_address: address): signer acquires VestingContract { - let vesting_contract = borrow_global_mut(contract_address); + let vesting_contract = borrow_global(contract_address); verify_admin(admin, vesting_contract); get_vesting_account_signer_internal(vesting_contract) } diff --git a/aptos-move/framework/aptos-framework/sources/vesting.spec.move b/aptos-move/framework/aptos-framework/sources/vesting.spec.move index dfb445b95ab..3bcbcbb11c0 100644 --- a/aptos-move/framework/aptos-framework/sources/vesting.spec.move +++ b/aptos-move/framework/aptos-framework/sources/vesting.spec.move @@ -163,10 +163,6 @@ spec aptos_framework::vesting { spec schema TotalAccumulatedRewardsAbortsIf { vesting_contract_address: address; - // Note: commission percentage should not be under 0 or higher than 100, cause it's a percentage number - // This requirement will solve the timeout issue of total_accumulated_rewards - // However, accumulated_rewards is still timeout - requires staking_contract.commission_percentage >= 0 && staking_contract.commission_percentage <= 100; include ActiveVestingContractAbortsIf{contract_address: vesting_contract_address}; let vesting_contract = global(vesting_contract_address); @@ -294,7 +290,6 @@ spec aptos_framework::vesting { // TODO: Calls `unlock_rewards` in loop. pragma verify = false; aborts_if len(contract_addresses) == 0; - include PreconditionAbortsIf; } spec vest(contract_address: address) { @@ -307,14 +302,6 @@ spec aptos_framework::vesting { // TODO: Calls `vest` in loop. pragma verify = false; aborts_if len(contract_addresses) == 0; - include PreconditionAbortsIf; - } - - spec schema PreconditionAbortsIf { - contract_addresses: vector
; - - requires forall i in 0..len(contract_addresses): simple_map::spec_get(global(contract_addresses[i]).staking_contracts, global(contract_addresses[i]).staking.operator).commission_percentage >= 0 - && simple_map::spec_get(global(contract_addresses[i]).staking_contracts, global(contract_addresses[i]).staking.operator).commission_percentage <= 100; } spec distribute(contract_address: address) { diff --git a/aptos-move/framework/aptos-framework/sources/voting.move b/aptos-move/framework/aptos-framework/sources/voting.move index 3bc26528ba7..a10e795b736 100644 --- a/aptos-move/framework/aptos-framework/sources/voting.move +++ b/aptos-move/framework/aptos-framework/sources/voting.move @@ -305,7 +305,7 @@ module aptos_framework::voting { simple_map::add(&mut metadata, is_multi_step_in_execution_key, to_bytes(&false)); // If the proposal is a single-step proposal, we check if the metadata passed by the client has the IS_MULTI_STEP_PROPOSAL_IN_EXECUTION_KEY key. // If they have the key, we will remove it, because a single-step proposal that doesn't need this key. - } else if (simple_map::contains_key(&mut metadata, &is_multi_step_in_execution_key)) { + } else if (simple_map::contains_key(&metadata, &is_multi_step_in_execution_key)) { simple_map::remove(&mut metadata, &is_multi_step_in_execution_key); }; diff --git a/aptos-move/framework/aptos-framework/tests/compiler-v2-doc/account.md b/aptos-move/framework/aptos-framework/tests/compiler-v2-doc/account.md index e869c0dfaba..76710e0da3f 100644 --- a/aptos-move/framework/aptos-framework/tests/compiler-v2-doc/account.md +++ b/aptos-move/framework/aptos-framework/tests/compiler-v2-doc/account.md @@ -3151,6 +3151,7 @@ The value of signer_capability_offer.for of Account resource under the signer is pragma aborts_if_is_strict = false; aborts_if [abstract] false; ensures [abstract] result == spec_create_resource_address(source, seed); +ensures [abstract] source != result;
diff --git a/aptos-move/framework/aptos-framework/tests/compiler-v2-doc/aptos_account.md b/aptos-move/framework/aptos-framework/tests/compiler-v2-doc/aptos_account.md index 31f613dfe2e..dbc4f7b687c 100644 --- a/aptos-move/framework/aptos-framework/tests/compiler-v2-doc/aptos_account.md +++ b/aptos-move/framework/aptos-framework/tests/compiler-v2-doc/aptos_account.md @@ -879,10 +879,6 @@ Limit the address of auth_key is not @vm_reserved / @aptos_framework / @aptos_to let account_addr_source = signer::address_of(source); let coin_store_source = global<coin::CoinStore<AptosCoin>>(account_addr_source); let balance_source = coin_store_source.coin.value; -requires forall i in 0..len(recipients): - recipients[i] != account_addr_source; -requires exists i in 0..len(recipients): - amounts[i] > 0; aborts_if len(recipients) != len(amounts); aborts_if exists i in 0..len(recipients): !account::exists_at(recipients[i]) && length_judgment(recipients[i]); @@ -920,7 +916,6 @@ Limit the address of auth_key is not @vm_reserved / @aptos_framework / @aptos_to
pragma verify = false;
 let account_addr_source = signer::address_of(source);
-requires account_addr_source != to;
 include CreateAccountTransferAbortsIf;
 include GuidAbortsIf<AptosCoin>;
 include WithdrawAbortsIf<AptosCoin>{from: source};
@@ -948,10 +943,6 @@ Limit the address of auth_key is not @vm_reserved / @aptos_framework / @aptos_to
 let account_addr_source = signer::address_of(from);
 let coin_store_source = global<coin::CoinStore<CoinType>>(account_addr_source);
 let balance_source = coin_store_source.coin.value;
-requires forall i in 0..len(recipients):
-    recipients[i] != account_addr_source;
-requires exists i in 0..len(recipients):
-    amounts[i] > 0;
 // This enforces high-level requirement 7:
 aborts_if len(recipients) != len(amounts);
 aborts_if exists i in 0..len(recipients):
@@ -992,7 +983,6 @@ Limit the address of auth_key is not @vm_reserved / @aptos_framework / @aptos_to
 
 
pragma verify = false;
 let account_addr_source = signer::address_of(from);
-requires account_addr_source != to;
 include CreateAccountTransferAbortsIf;
 include WithdrawAbortsIf<CoinType>;
 include GuidAbortsIf<CoinType>;
diff --git a/aptos-move/framework/aptos-framework/tests/compiler-v2-doc/fungible_asset.md b/aptos-move/framework/aptos-framework/tests/compiler-v2-doc/fungible_asset.md
index 0cac9507ce2..eb352c025f1 100644
--- a/aptos-move/framework/aptos-framework/tests/compiler-v2-doc/fungible_asset.md
+++ b/aptos-move/framework/aptos-framework/tests/compiler-v2-doc/fungible_asset.md
@@ -3170,19 +3170,29 @@ Mutate specified fields of the fungible asset's Metadata>(metadata_address);
 
     if (option::is_some(&name)){
-        mutable_metadata.name = option::extract(&mut name);
+        let name = option::extract(&mut name);
+        assert!(string::length(&name) <= MAX_NAME_LENGTH, error::out_of_range(ENAME_TOO_LONG));
+        mutable_metadata.name = name;
     };
     if (option::is_some(&symbol)){
-        mutable_metadata.symbol = option::extract(&mut symbol);
+        let symbol = option::extract(&mut symbol);
+        assert!(string::length(&symbol) <= MAX_SYMBOL_LENGTH, error::out_of_range(ESYMBOL_TOO_LONG));
+        mutable_metadata.symbol = symbol;
     };
     if (option::is_some(&decimals)){
-        mutable_metadata.decimals = option::extract(&mut decimals);
+        let decimals = option::extract(&mut decimals);
+        assert!(decimals <= MAX_DECIMALS, error::out_of_range(EDECIMALS_TOO_LARGE));
+        mutable_metadata.decimals = decimals;
     };
     if (option::is_some(&icon_uri)){
-        mutable_metadata.icon_uri = option::extract(&mut icon_uri);
+        let icon_uri = option::extract(&mut icon_uri);
+        assert!(string::length(&icon_uri) <= MAX_URI_LENGTH, error::out_of_range(EURI_TOO_LONG));
+        mutable_metadata.icon_uri = icon_uri;
     };
     if (option::is_some(&project_uri)){
-        mutable_metadata.project_uri = option::extract(&mut project_uri);
+        let project_uri = option::extract(&mut project_uri);
+        assert!(string::length(&project_uri) <= MAX_URI_LENGTH, error::out_of_range(EURI_TOO_LONG));
+        mutable_metadata.project_uri = project_uri;
     };
 }
 
diff --git a/aptos-move/framework/aptos-framework/tests/compiler-v2-doc/resource_account.md b/aptos-move/framework/aptos-framework/tests/compiler-v2-doc/resource_account.md index 4df4d4691ce..5922cc99737 100644 --- a/aptos-move/framework/aptos-framework/tests/compiler-v2-doc/resource_account.md +++ b/aptos-move/framework/aptos-framework/tests/compiler-v2-doc/resource_account.md @@ -593,7 +593,6 @@ the SignerCapability. let container = global<Container>(source_addr); let get = len(optional_auth_key) == 0; let account = global<account::Account>(source_addr); - requires source_addr != resource_addr; aborts_if len(ZERO_AUTH_KEY) != 32; include account::exists_at(resource_addr) ==> account::CreateResourceAccountAbortsIf; include !account::exists_at(resource_addr) ==> account::CreateAccountAbortsIf {addr: resource_addr}; diff --git a/aptos-move/framework/aptos-framework/tests/compiler-v2-doc/staking_contract.md b/aptos-move/framework/aptos-framework/tests/compiler-v2-doc/staking_contract.md index a8a6417a5b8..015101fcddb 100644 --- a/aptos-move/framework/aptos-framework/tests/compiler-v2-doc/staking_contract.md +++ b/aptos-move/framework/aptos-framework/tests/compiler-v2-doc/staking_contract.md @@ -91,6 +91,7 @@ pool. - [Specification](#@Specification_1) - [High-level Requirements](#high-level-req) - [Module-level Specification](#module-level-spec) + - [Struct `StakingContract`](#@Specification_1_StakingContract) - [Function `stake_pool_address`](#@Specification_1_stake_pool_address) - [Function `last_recorded_principal`](#@Specification_1_last_recorded_principal) - [Function `commission_percentage`](#@Specification_1_commission_percentage) @@ -2763,6 +2764,62 @@ Create a new staking_contracts resource. + + +### Struct `StakingContract` + + +
struct StakingContract has store
+
+ + + +
+
+principal: u64 +
+
+ +
+
+pool_address: address +
+
+ +
+
+owner_cap: stake::OwnerCapability +
+
+ +
+
+commission_percentage: u64 +
+
+ +
+
+distribution_pool: pool_u64::Pool +
+
+ +
+
+signer_cap: account::SignerCapability +
+
+ +
+
+ + + +
invariant commission_percentage >= 0 && commission_percentage <= 100;
+
+ + + ### Function `stake_pool_address` @@ -2836,7 +2893,6 @@ Staking_contract exists the stacker/operator pair.
pragma verify_duration_estimate = 120;
-requires staking_contract.commission_percentage >= 0 && staking_contract.commission_percentage <= 100;
 let staking_contracts = global<Store>(staker).staking_contracts;
 let staking_contract = simple_map::spec_get(staking_contracts, operator);
 include ContractExistsAbortsIf;
@@ -3023,6 +3079,7 @@ Staking_contract exists for the staker/operator pair.
 let post staking_contract = simple_map::spec_get(store.staking_contracts, operator);
 let post pool_address = staking_contract.owner_cap.pool_address;
 let post new_delegated_voter = global<stake::StakePool>(pool_address).delegated_voter;
+// This enforces high-level requirement 4:
 ensures new_delegated_voter == new_voter;
 
@@ -3117,7 +3174,6 @@ Only staker or operator can call this.
pragma verify = false;
-requires amount > 0;
 let staker_address = signer::address_of(staker);
 include ContractExistsAbortsIf { staker: staker_address };
 
@@ -3137,8 +3193,6 @@ Staking_contract exists for the staker/operator pair.
pragma verify = false;
-// This enforces high-level requirement 4:
-requires staking_contract.commission_percentage >= 0 && staking_contract.commission_percentage <= 100;
 let staker_address = signer::address_of(staker);
 let staking_contracts = global<Store>(staker_address).staking_contracts;
 let staking_contract = simple_map::spec_get(staking_contracts, operator);
diff --git a/aptos-move/framework/aptos-framework/tests/compiler-v2-doc/vesting.md b/aptos-move/framework/aptos-framework/tests/compiler-v2-doc/vesting.md
index f10b63becec..49260e36409 100644
--- a/aptos-move/framework/aptos-framework/tests/compiler-v2-doc/vesting.md
+++ b/aptos-move/framework/aptos-framework/tests/compiler-v2-doc/vesting.md
@@ -3686,7 +3686,6 @@ This address should be deterministic for the same admin and vesting contract cre
 
 
schema TotalAccumulatedRewardsAbortsIf {
     vesting_contract_address: address;
-    requires staking_contract.commission_percentage >= 0 && staking_contract.commission_percentage <= 100;
     include ActiveVestingContractAbortsIf<VestingContract>{contract_address: vesting_contract_address};
     let vesting_contract = global<VestingContract>(vesting_contract_address);
     let staker = vesting_contract_address;
@@ -3892,7 +3891,6 @@ This address should be deterministic for the same admin and vesting contract cre
 
 
pragma verify = false;
 aborts_if len(contract_addresses) == 0;
-include PreconditionAbortsIf;
 
@@ -3927,7 +3925,6 @@ This address should be deterministic for the same admin and vesting contract cre
pragma verify = false;
 aborts_if len(contract_addresses) == 0;
-include PreconditionAbortsIf;
 
diff --git a/aptos-move/framework/aptos-framework/tests/simple_dispatchable_token_pfs_tests.move b/aptos-move/framework/aptos-framework/tests/simple_dispatchable_token_pfs_tests.move index 1b80c489024..d069923a5f8 100644 --- a/aptos-move/framework/aptos-framework/tests/simple_dispatchable_token_pfs_tests.move +++ b/aptos-move/framework/aptos-framework/tests/simple_dispatchable_token_pfs_tests.move @@ -28,7 +28,7 @@ module aptos_framework::simple_token_pfs_tests { // User 2 burns their primary store but should still be able to transfer afterward. let user_2_primary_store = primary_store(user_2_address, metadata); - object::burn(user_2, user_2_primary_store); + object::burn_object(user_2, user_2_primary_store); assert!(object::is_burnt(user_2_primary_store), 0); // Balance still works assert!(balance(user_2_address, metadata) == 80, 0); @@ -54,7 +54,7 @@ module aptos_framework::simple_token_pfs_tests { // User 2 burns their primary store but should still be able to withdraw afterward. let user_2_primary_store = primary_store(user_2_address, metadata); - object::burn(user_2, user_2_primary_store); + object::burn_object(user_2, user_2_primary_store); assert!(object::is_burnt(user_2_primary_store), 0); let coins = withdraw(user_2, metadata, 70); assert!(balance(user_2_address, metadata) == 10, 0); diff --git a/aptos-move/framework/cached-packages/src/aptos_framework_sdk_builder.rs b/aptos-move/framework/cached-packages/src/aptos_framework_sdk_builder.rs index 83d3b4e0247..71987dc605f 100644 --- a/aptos-move/framework/cached-packages/src/aptos_framework_sdk_builder.rs +++ b/aptos-move/framework/cached-packages/src/aptos_framework_sdk_builder.rs @@ -443,6 +443,67 @@ pub enum EntryFunctionCall { amount: u64, }, + /// This can be called to install or update a set of JWKs for a federated OIDC provider. This function should + /// be invoked to intially install a set of JWKs or to update a set of JWKs when a keypair is rotated. 
+ /// + /// The `iss` parameter is the value of the `iss` claim on the JWTs that are to be verified by the JWK set. + /// `kid_vec`, `alg_vec`, `e_vec`, `n_vec` are String vectors of the JWK attributes `kid`, `alg`, `e` and `n` respectively. + /// See https://datatracker.ietf.org/doc/html/rfc7517#section-4 for more details about the JWK attributes aforementioned. + /// + /// For the example JWK set snapshot below containing 2 keys for Google found at https://www.googleapis.com/oauth2/v3/certs - + /// ```json + /// { + /// "keys": [ + /// { + /// "alg": "RS256", + /// "use": "sig", + /// "kty": "RSA", + /// "n": "wNHgGSG5B5xOEQNFPW2p_6ZxZbfPoAU5VceBUuNwQWLop0ohW0vpoZLU1tAsq_S9s5iwy27rJw4EZAOGBR9oTRq1Y6Li5pDVJfmzyRNtmWCWndR-bPqhs_dkJU7MbGwcvfLsN9FSHESFrS9sfGtUX-lZfLoGux23TKdYV9EE-H-NDASxrVFUk2GWc3rL6UEMWrMnOqV9-tghybDU3fcRdNTDuXUr9qDYmhmNegYjYu4REGjqeSyIG1tuQxYpOBH-tohtcfGY-oRTS09kgsSS9Q5BRM4qqCkGP28WhlSf4ui0-norS0gKMMI1P_ZAGEsLn9p2TlYMpewvIuhjJs1thw", + /// "kid": "d7b939771a7800c413f90051012d975981916d71", + /// "e": "AQAB" + /// }, + /// { + /// "kty": "RSA", + /// "kid": "b2620d5e7f132b52afe8875cdf3776c064249d04", + /// "alg": "RS256", + /// "n": "pi22xDdK2fz5gclIbDIGghLDYiRO56eW2GUcboeVlhbAuhuT5mlEYIevkxdPOg5n6qICePZiQSxkwcYMIZyLkZhSJ2d2M6Szx2gDtnAmee6o_tWdroKu0DjqwG8pZU693oLaIjLku3IK20lTs6-2TeH-pUYMjEqiFMhn-hb7wnvH_FuPTjgz9i0rEdw_Hf3Wk6CMypaUHi31y6twrMWq1jEbdQNl50EwH-RQmQ9bs3Wm9V9t-2-_Jzg3AT0Ny4zEDU7WXgN2DevM8_FVje4IgztNy29XUkeUctHsr-431_Iu23JIy6U4Kxn36X3RlVUKEkOMpkDD3kd81JPW4Ger_w", + /// "e": "AQAB", + /// "use": "sig" + /// } + /// ] + /// } + /// ``` + /// + /// We can call update_federated_jwk_set for Google's `iss` - "https://accounts.google.com" and for each vector + /// argument `kid_vec`, `alg_vec`, `e_vec`, `n_vec`, we set in index 0 the corresponding attribute in the first JWK and we set in index 1 the + /// the corresponding attribute in the second JWK as shown below. 
+ /// + /// ```move + /// use std::string::utf8; + /// aptos_framework::jwks::update_federated_jwk_set( + /// jwk_owner, + /// b"https://accounts.google.com", + /// vector[utf8(b"d7b939771a7800c413f90051012d975981916d71"), utf8(b"b2620d5e7f132b52afe8875cdf3776c064249d04")], + /// vector[utf8(b"RS256"), utf8(b"RS256")], + /// vector[utf8(b"AQAB"), utf8(b"AQAB")], + /// vector[ + /// utf8(b"wNHgGSG5B5xOEQNFPW2p_6ZxZbfPoAU5VceBUuNwQWLop0ohW0vpoZLU1tAsq_S9s5iwy27rJw4EZAOGBR9oTRq1Y6Li5pDVJfmzyRNtmWCWndR-bPqhs_dkJU7MbGwcvfLsN9FSHESFrS9sfGtUX-lZfLoGux23TKdYV9EE-H-NDASxrVFUk2GWc3rL6UEMWrMnOqV9-tghybDU3fcRdNTDuXUr9qDYmhmNegYjYu4REGjqeSyIG1tuQxYpOBH-tohtcfGY-oRTS09kgsSS9Q5BRM4qqCkGP28WhlSf4ui0-norS0gKMMI1P_ZAGEsLn9p2TlYMpewvIuhjJs1thw"), + /// utf8(b"pi22xDdK2fz5gclIbDIGghLDYiRO56eW2GUcboeVlhbAuhuT5mlEYIevkxdPOg5n6qICePZiQSxkwcYMIZyLkZhSJ2d2M6Szx2gDtnAmee6o_tWdroKu0DjqwG8pZU693oLaIjLku3IK20lTs6-2TeH-pUYMjEqiFMhn-hb7wnvH_FuPTjgz9i0rEdw_Hf3Wk6CMypaUHi31y6twrMWq1jEbdQNl50EwH-RQmQ9bs3Wm9V9t-2-_Jzg3AT0Ny4zEDU7WXgN2DevM8_FVje4IgztNy29XUkeUctHsr-431_Iu23JIy6U4Kxn36X3RlVUKEkOMpkDD3kd81JPW4Ger_w") + /// ] + /// ) + /// ``` + /// + /// See AIP-96 for more details about federated keyless - https://github.com/aptos-foundation/AIPs/blob/main/aips/aip-96.md + /// + /// NOTE: Currently only RSA keys are supported. + JwksUpdateFederatedJwkSet { + iss: Vec, + kid_vec: Vec>, + alg_vec: Vec>, + e_vec: Vec>, + n_vec: Vec>, + }, + /// Withdraw an `amount` of coin `CoinType` from `account` and burn it. ManagedCoinBurn { coin_type: TypeTag, @@ -556,6 +617,33 @@ pub enum EntryFunctionCall { metadata_values: Vec>, }, + /// Private entry function that creates a new multisig account on top of an existing account and immediately rotate + /// the origin auth key to 0x0. 
+ /// + /// Note: If the original account is a resource account, this does not revoke all control over it as if any + /// SignerCapability of the resource account still exists, it can still be used to generate the signer for the + /// account. + MultisigAccountCreateWithExistingAccountAndRevokeAuthKeyCall { + owners: Vec, + num_signatures_required: u64, + metadata_keys: Vec>, + metadata_values: Vec>, + }, + + /// Private entry function that creates a new multisig account on top of an existing account. + /// + /// This offers a migration path for an existing account with any type of auth key. + /// + /// Note that this does not revoke auth key-based control over the account. Owners should separately rotate the auth + /// key after they are fully migrated to the new multisig account. Alternatively, they can call + /// create_with_existing_account_and_revoke_auth_key_call instead. + MultisigAccountCreateWithExistingAccountCall { + owners: Vec, + num_signatures_required: u64, + metadata_keys: Vec>, + metadata_values: Vec>, + }, + /// Creates a new multisig account with the specified additional owner list and signatures required. 
/// /// @param additional_owners The owner account who calls this function cannot be in the additional_owners and there @@ -1272,6 +1360,13 @@ impl EntryFunctionCall { pool_address, amount, } => delegation_pool_withdraw(pool_address, amount), + JwksUpdateFederatedJwkSet { + iss, + kid_vec, + alg_vec, + e_vec, + n_vec, + } => jwks_update_federated_jwk_set(iss, kid_vec, alg_vec, e_vec, n_vec), ManagedCoinBurn { coin_type, amount } => managed_coin_burn(coin_type, amount), ManagedCoinInitialize { coin_type, @@ -1350,6 +1445,28 @@ impl EntryFunctionCall { metadata_keys, metadata_values, ), + MultisigAccountCreateWithExistingAccountAndRevokeAuthKeyCall { + owners, + num_signatures_required, + metadata_keys, + metadata_values, + } => multisig_account_create_with_existing_account_and_revoke_auth_key_call( + owners, + num_signatures_required, + metadata_keys, + metadata_values, + ), + MultisigAccountCreateWithExistingAccountCall { + owners, + num_signatures_required, + metadata_keys, + metadata_values, + } => multisig_account_create_with_existing_account_call( + owners, + num_signatures_required, + metadata_keys, + metadata_values, + ), MultisigAccountCreateWithOwners { additional_owners, num_signatures_required, @@ -2819,6 +2936,86 @@ pub fn delegation_pool_withdraw(pool_address: AccountAddress, amount: u64) -> Tr )) } +/// This can be called to install or update a set of JWKs for a federated OIDC provider. This function should +/// be invoked to intially install a set of JWKs or to update a set of JWKs when a keypair is rotated. +/// +/// The `iss` parameter is the value of the `iss` claim on the JWTs that are to be verified by the JWK set. +/// `kid_vec`, `alg_vec`, `e_vec`, `n_vec` are String vectors of the JWK attributes `kid`, `alg`, `e` and `n` respectively. +/// See https://datatracker.ietf.org/doc/html/rfc7517#section-4 for more details about the JWK attributes aforementioned. 
+/// +/// For the example JWK set snapshot below containing 2 keys for Google found at https://www.googleapis.com/oauth2/v3/certs - +/// ```json +/// { +/// "keys": [ +/// { +/// "alg": "RS256", +/// "use": "sig", +/// "kty": "RSA", +/// "n": "wNHgGSG5B5xOEQNFPW2p_6ZxZbfPoAU5VceBUuNwQWLop0ohW0vpoZLU1tAsq_S9s5iwy27rJw4EZAOGBR9oTRq1Y6Li5pDVJfmzyRNtmWCWndR-bPqhs_dkJU7MbGwcvfLsN9FSHESFrS9sfGtUX-lZfLoGux23TKdYV9EE-H-NDASxrVFUk2GWc3rL6UEMWrMnOqV9-tghybDU3fcRdNTDuXUr9qDYmhmNegYjYu4REGjqeSyIG1tuQxYpOBH-tohtcfGY-oRTS09kgsSS9Q5BRM4qqCkGP28WhlSf4ui0-norS0gKMMI1P_ZAGEsLn9p2TlYMpewvIuhjJs1thw", +/// "kid": "d7b939771a7800c413f90051012d975981916d71", +/// "e": "AQAB" +/// }, +/// { +/// "kty": "RSA", +/// "kid": "b2620d5e7f132b52afe8875cdf3776c064249d04", +/// "alg": "RS256", +/// "n": "pi22xDdK2fz5gclIbDIGghLDYiRO56eW2GUcboeVlhbAuhuT5mlEYIevkxdPOg5n6qICePZiQSxkwcYMIZyLkZhSJ2d2M6Szx2gDtnAmee6o_tWdroKu0DjqwG8pZU693oLaIjLku3IK20lTs6-2TeH-pUYMjEqiFMhn-hb7wnvH_FuPTjgz9i0rEdw_Hf3Wk6CMypaUHi31y6twrMWq1jEbdQNl50EwH-RQmQ9bs3Wm9V9t-2-_Jzg3AT0Ny4zEDU7WXgN2DevM8_FVje4IgztNy29XUkeUctHsr-431_Iu23JIy6U4Kxn36X3RlVUKEkOMpkDD3kd81JPW4Ger_w", +/// "e": "AQAB", +/// "use": "sig" +/// } +/// ] +/// } +/// ``` +/// +/// We can call update_federated_jwk_set for Google's `iss` - "https://accounts.google.com" and for each vector +/// argument `kid_vec`, `alg_vec`, `e_vec`, `n_vec`, we set in index 0 the corresponding attribute in the first JWK and we set in index 1 the +/// the corresponding attribute in the second JWK as shown below. 
+/// +/// ```move +/// use std::string::utf8; +/// aptos_framework::jwks::update_federated_jwk_set( +/// jwk_owner, +/// b"https://accounts.google.com", +/// vector[utf8(b"d7b939771a7800c413f90051012d975981916d71"), utf8(b"b2620d5e7f132b52afe8875cdf3776c064249d04")], +/// vector[utf8(b"RS256"), utf8(b"RS256")], +/// vector[utf8(b"AQAB"), utf8(b"AQAB")], +/// vector[ +/// utf8(b"wNHgGSG5B5xOEQNFPW2p_6ZxZbfPoAU5VceBUuNwQWLop0ohW0vpoZLU1tAsq_S9s5iwy27rJw4EZAOGBR9oTRq1Y6Li5pDVJfmzyRNtmWCWndR-bPqhs_dkJU7MbGwcvfLsN9FSHESFrS9sfGtUX-lZfLoGux23TKdYV9EE-H-NDASxrVFUk2GWc3rL6UEMWrMnOqV9-tghybDU3fcRdNTDuXUr9qDYmhmNegYjYu4REGjqeSyIG1tuQxYpOBH-tohtcfGY-oRTS09kgsSS9Q5BRM4qqCkGP28WhlSf4ui0-norS0gKMMI1P_ZAGEsLn9p2TlYMpewvIuhjJs1thw"), +/// utf8(b"pi22xDdK2fz5gclIbDIGghLDYiRO56eW2GUcboeVlhbAuhuT5mlEYIevkxdPOg5n6qICePZiQSxkwcYMIZyLkZhSJ2d2M6Szx2gDtnAmee6o_tWdroKu0DjqwG8pZU693oLaIjLku3IK20lTs6-2TeH-pUYMjEqiFMhn-hb7wnvH_FuPTjgz9i0rEdw_Hf3Wk6CMypaUHi31y6twrMWq1jEbdQNl50EwH-RQmQ9bs3Wm9V9t-2-_Jzg3AT0Ny4zEDU7WXgN2DevM8_FVje4IgztNy29XUkeUctHsr-431_Iu23JIy6U4Kxn36X3RlVUKEkOMpkDD3kd81JPW4Ger_w") +/// ] +/// ) +/// ``` +/// +/// See AIP-96 for more details about federated keyless - https://github.com/aptos-foundation/AIPs/blob/main/aips/aip-96.md +/// +/// NOTE: Currently only RSA keys are supported. 
+pub fn jwks_update_federated_jwk_set( + iss: Vec, + kid_vec: Vec>, + alg_vec: Vec>, + e_vec: Vec>, + n_vec: Vec>, +) -> TransactionPayload { + TransactionPayload::EntryFunction(EntryFunction::new( + ModuleId::new( + AccountAddress::new([ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 1, + ]), + ident_str!("jwks").to_owned(), + ), + ident_str!("update_federated_jwk_set").to_owned(), + vec![], + vec![ + bcs::to_bytes(&iss).unwrap(), + bcs::to_bytes(&kid_vec).unwrap(), + bcs::to_bytes(&alg_vec).unwrap(), + bcs::to_bytes(&e_vec).unwrap(), + bcs::to_bytes(&n_vec).unwrap(), + ], + )) +} + /// Withdraw an `amount` of coin `CoinType` from `account` and burn it. pub fn managed_coin_burn(coin_type: TypeTag, amount: u64) -> TransactionPayload { TransactionPayload::EntryFunction(EntryFunction::new( @@ -3134,6 +3331,69 @@ pub fn multisig_account_create_with_existing_account_and_revoke_auth_key( )) } +/// Private entry function that creates a new multisig account on top of an existing account and immediately rotate +/// the origin auth key to 0x0. +/// +/// Note: If the original account is a resource account, this does not revoke all control over it as if any +/// SignerCapability of the resource account still exists, it can still be used to generate the signer for the +/// account. 
+pub fn multisig_account_create_with_existing_account_and_revoke_auth_key_call( + owners: Vec, + num_signatures_required: u64, + metadata_keys: Vec>, + metadata_values: Vec>, +) -> TransactionPayload { + TransactionPayload::EntryFunction(EntryFunction::new( + ModuleId::new( + AccountAddress::new([ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 1, + ]), + ident_str!("multisig_account").to_owned(), + ), + ident_str!("create_with_existing_account_and_revoke_auth_key_call").to_owned(), + vec![], + vec![ + bcs::to_bytes(&owners).unwrap(), + bcs::to_bytes(&num_signatures_required).unwrap(), + bcs::to_bytes(&metadata_keys).unwrap(), + bcs::to_bytes(&metadata_values).unwrap(), + ], + )) +} + +/// Private entry function that creates a new multisig account on top of an existing account. +/// +/// This offers a migration path for an existing account with any type of auth key. +/// +/// Note that this does not revoke auth key-based control over the account. Owners should separately rotate the auth +/// key after they are fully migrated to the new multisig account. Alternatively, they can call +/// create_with_existing_account_and_revoke_auth_key_call instead. 
+pub fn multisig_account_create_with_existing_account_call( + owners: Vec, + num_signatures_required: u64, + metadata_keys: Vec>, + metadata_values: Vec>, +) -> TransactionPayload { + TransactionPayload::EntryFunction(EntryFunction::new( + ModuleId::new( + AccountAddress::new([ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 1, + ]), + ident_str!("multisig_account").to_owned(), + ), + ident_str!("create_with_existing_account_call").to_owned(), + vec![], + vec![ + bcs::to_bytes(&owners).unwrap(), + bcs::to_bytes(&num_signatures_required).unwrap(), + bcs::to_bytes(&metadata_keys).unwrap(), + bcs::to_bytes(&metadata_values).unwrap(), + ], + )) +} + /// Creates a new multisig account with the specified additional owner list and signatures required. /// /// @param additional_owners The owner account who calls this function cannot be in the additional_owners and there @@ -5323,6 +5583,22 @@ mod decoder { } } + pub fn jwks_update_federated_jwk_set( + payload: &TransactionPayload, + ) -> Option { + if let TransactionPayload::EntryFunction(script) = payload { + Some(EntryFunctionCall::JwksUpdateFederatedJwkSet { + iss: bcs::from_bytes(script.args().get(0)?).ok()?, + kid_vec: bcs::from_bytes(script.args().get(1)?).ok()?, + alg_vec: bcs::from_bytes(script.args().get(2)?).ok()?, + e_vec: bcs::from_bytes(script.args().get(3)?).ok()?, + n_vec: bcs::from_bytes(script.args().get(4)?).ok()?, + }) + } else { + None + } + } + pub fn managed_coin_burn(payload: &TransactionPayload) -> Option { if let TransactionPayload::EntryFunction(script) = payload { Some(EntryFunctionCall::ManagedCoinBurn { @@ -5502,6 +5778,40 @@ mod decoder { } } + pub fn multisig_account_create_with_existing_account_and_revoke_auth_key_call( + payload: &TransactionPayload, + ) -> Option { + if let TransactionPayload::EntryFunction(script) = payload { + Some( + EntryFunctionCall::MultisigAccountCreateWithExistingAccountAndRevokeAuthKeyCall { + owners: 
bcs::from_bytes(script.args().get(0)?).ok()?, + num_signatures_required: bcs::from_bytes(script.args().get(1)?).ok()?, + metadata_keys: bcs::from_bytes(script.args().get(2)?).ok()?, + metadata_values: bcs::from_bytes(script.args().get(3)?).ok()?, + }, + ) + } else { + None + } + } + + pub fn multisig_account_create_with_existing_account_call( + payload: &TransactionPayload, + ) -> Option { + if let TransactionPayload::EntryFunction(script) = payload { + Some( + EntryFunctionCall::MultisigAccountCreateWithExistingAccountCall { + owners: bcs::from_bytes(script.args().get(0)?).ok()?, + num_signatures_required: bcs::from_bytes(script.args().get(1)?).ok()?, + metadata_keys: bcs::from_bytes(script.args().get(2)?).ok()?, + metadata_values: bcs::from_bytes(script.args().get(3)?).ok()?, + }, + ) + } else { + None + } + } + pub fn multisig_account_create_with_owners( payload: &TransactionPayload, ) -> Option { @@ -6629,6 +6939,10 @@ static SCRIPT_FUNCTION_DECODER_MAP: once_cell::sync::Lazy TransactionPayload { - coin_transfer( - aptos_types::utility_coin::APTOS_COIN_TYPE.clone(), - to, - amount, - ) + coin_transfer(AptosCoinType::type_tag(), to, amount) } pub fn publish_module_source(module_name: &str, module_src: &str) -> TransactionPayload { diff --git a/aptos-move/framework/src/chunked_publish.rs b/aptos-move/framework/src/chunked_publish.rs new file mode 100644 index 00000000000..fcf14bbb08c --- /dev/null +++ b/aptos-move/framework/src/chunked_publish.rs @@ -0,0 +1,191 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use aptos_types::transaction::{EntryFunction, TransactionPayload}; +use move_core_types::{account_address::AccountAddress, ident_str, language_storage::ModuleId}; + +pub const LARGE_PACKAGES_MODULE_ADDRESS: &str = + "0xa29df848eebfe5d981f708c2a5b06d31af2be53bbd8ddc94c8523f4b903f7adb"; // mainnet and testnet + +/// Maximum code & metadata chunk size to be included in a transaction +pub const MAX_CHUNK_SIZE_IN_BYTES: usize = 
60_000; + +pub enum PublishType { + AccountDeploy, + ObjectDeploy, + ObjectUpgrade, +} + +pub fn chunk_package_and_create_payloads( + metadata: Vec, + package_code: Vec>, + publish_type: PublishType, + object_address: Option, +) -> Vec { + // Chunk the metadata + let mut metadata_chunks = create_chunks(metadata); + // Separate last chunk for special handling + let mut metadata_chunk = metadata_chunks.pop().expect("Metadata is required"); + + let mut taken_size = metadata_chunk.len(); + let mut payloads = metadata_chunks + .into_iter() + .map(|chunk| large_packages_stage_code_chunk(chunk, vec![], vec![])) + .collect::>(); + + let mut code_indices: Vec = vec![]; + let mut code_chunks: Vec> = vec![]; + + for (idx, module_code) in package_code.into_iter().enumerate() { + let chunked_module = create_chunks(module_code); + for chunk in chunked_module { + if taken_size + chunk.len() > MAX_CHUNK_SIZE_IN_BYTES { + // Create a payload and reset accumulators + let payload = large_packages_stage_code_chunk( + metadata_chunk, + code_indices.clone(), + code_chunks.clone(), + ); + payloads.push(payload); + + metadata_chunk = vec![]; + code_indices.clear(); + code_chunks.clear(); + taken_size = 0; + } + + code_indices.push(idx as u16); + taken_size += chunk.len(); + code_chunks.push(chunk); + } + } + + // The final call includes staging the last metadata and code chunk, and then publishing or upgrading the package on-chain. 
+ let payload = match publish_type { + PublishType::AccountDeploy => large_packages_stage_code_chunk_and_publish_to_account( + metadata_chunk, + code_indices, + code_chunks, + ), + PublishType::ObjectDeploy => large_packages_stage_code_chunk_and_publish_to_object( + metadata_chunk, + code_indices, + code_chunks, + ), + PublishType::ObjectUpgrade => large_packages_stage_code_chunk_and_upgrade_object_code( + metadata_chunk, + code_indices, + code_chunks, + object_address, + ), + }; + payloads.push(payload); + + payloads +} + +// Create chunks of data based on the defined maximum chunk size. +fn create_chunks(data: Vec) -> Vec> { + data.chunks(MAX_CHUNK_SIZE_IN_BYTES) + .map(|chunk| chunk.to_vec()) + .collect() +} + +// Create a transaction payload for staging chunked data to the staging area. +fn large_packages_stage_code_chunk( + metadata_chunk: Vec, + code_indices: Vec, + code_chunks: Vec>, +) -> TransactionPayload { + TransactionPayload::EntryFunction(EntryFunction::new( + ModuleId::new( + AccountAddress::from_hex_literal(LARGE_PACKAGES_MODULE_ADDRESS).unwrap(), + ident_str!("large_packages").to_owned(), + ), + ident_str!("stage_code_chunk").to_owned(), + vec![], + vec![ + bcs::to_bytes(&metadata_chunk).unwrap(), + bcs::to_bytes(&code_indices).unwrap(), + bcs::to_bytes(&code_chunks).unwrap(), + ], + )) +} + +// Create a transaction payload for staging chunked data and finally publishing the package to an account. 
+fn large_packages_stage_code_chunk_and_publish_to_account( + metadata_chunk: Vec, + code_indices: Vec, + code_chunks: Vec>, +) -> TransactionPayload { + TransactionPayload::EntryFunction(EntryFunction::new( + ModuleId::new( + AccountAddress::from_hex_literal(LARGE_PACKAGES_MODULE_ADDRESS).unwrap(), + ident_str!("large_packages").to_owned(), + ), + ident_str!("stage_code_chunk_and_publish_to_account").to_owned(), + vec![], + vec![ + bcs::to_bytes(&metadata_chunk).unwrap(), + bcs::to_bytes(&code_indices).unwrap(), + bcs::to_bytes(&code_chunks).unwrap(), + ], + )) +} + +// Create a transaction payload for staging chunked data and finally publishing the package to an object. +fn large_packages_stage_code_chunk_and_publish_to_object( + metadata_chunk: Vec, + code_indices: Vec, + code_chunks: Vec>, +) -> TransactionPayload { + TransactionPayload::EntryFunction(EntryFunction::new( + ModuleId::new( + AccountAddress::from_hex_literal(LARGE_PACKAGES_MODULE_ADDRESS).unwrap(), + ident_str!("large_packages").to_owned(), + ), + ident_str!("stage_code_chunk_and_publish_to_object").to_owned(), + vec![], + vec![ + bcs::to_bytes(&metadata_chunk).unwrap(), + bcs::to_bytes(&code_indices).unwrap(), + bcs::to_bytes(&code_chunks).unwrap(), + ], + )) +} + +// Create a transaction payload for staging chunked data and finally upgrading the object package. 
+fn large_packages_stage_code_chunk_and_upgrade_object_code( + metadata_chunk: Vec, + code_indices: Vec, + code_chunks: Vec>, + code_object: Option, +) -> TransactionPayload { + TransactionPayload::EntryFunction(EntryFunction::new( + ModuleId::new( + AccountAddress::from_hex_literal(LARGE_PACKAGES_MODULE_ADDRESS).unwrap(), + ident_str!("large_packages").to_owned(), + ), + ident_str!("stage_code_chunk_and_upgrade_object_code").to_owned(), + vec![], + vec![ + bcs::to_bytes(&metadata_chunk).unwrap(), + bcs::to_bytes(&code_indices).unwrap(), + bcs::to_bytes(&code_chunks).unwrap(), + bcs::to_bytes(&code_object).unwrap(), + ], + )) +} + +// Cleanup account's `StagingArea` resource. +pub fn large_packages_cleanup_staging_area() -> TransactionPayload { + TransactionPayload::EntryFunction(EntryFunction::new( + ModuleId::new( + AccountAddress::from_hex_literal(LARGE_PACKAGES_MODULE_ADDRESS).unwrap(), + ident_str!("large_packages").to_owned(), + ), + ident_str!("cleanup_staging_area").to_owned(), + vec![], + vec![], + )) +} diff --git a/aptos-move/framework/src/lib.rs b/aptos-move/framework/src/lib.rs index d9c7a338e48..5103790707c 100644 --- a/aptos-move/framework/src/lib.rs +++ b/aptos-move/framework/src/lib.rs @@ -21,6 +21,7 @@ pub use release_builder::*; pub mod docgen; pub mod extended_checks; pub use extended_checks::ResourceGroupScope; +pub mod chunked_publish; pub mod prover; mod release_bundle; mod released_framework; diff --git a/aptos-move/framework/src/module_metadata.rs b/aptos-move/framework/src/module_metadata.rs index e0dc1d36b4f..18a6178e23b 100644 --- a/aptos-move/framework/src/module_metadata.rs +++ b/aptos-move/framework/src/module_metadata.rs @@ -624,11 +624,23 @@ fn check_module_complexity(module: &CompiledModule) -> Result<(), MetaDataValida check_ident_complexity(module, &mut meter, handle.name)?; } for def in module.struct_defs() { - if let StructFieldInformation::Declared(fields) = &def.field_information { - for field in fields { - 
check_ident_complexity(module, &mut meter, field.name)?; - check_sigtok_complexity(module, &mut meter, &field.signature.0)? - } + match &def.field_information { + StructFieldInformation::Native => {}, + StructFieldInformation::Declared(fields) => { + for field in fields { + check_ident_complexity(module, &mut meter, field.name)?; + check_sigtok_complexity(module, &mut meter, &field.signature.0)? + } + }, + StructFieldInformation::DeclaredVariants(variants) => { + for variant in variants { + check_ident_complexity(module, &mut meter, variant.name)?; + for field in &variant.fields { + check_ident_complexity(module, &mut meter, field.name)?; + check_sigtok_complexity(module, &mut meter, &field.signature.0)? + } + } + }, } } for def in module.function_defs() { diff --git a/aptos-move/move-examples/large_packages/README.md b/aptos-move/move-examples/large_packages/README.md index 2b0dc07b17f..d4a922aa421 100644 --- a/aptos-move/move-examples/large_packages/README.md +++ b/aptos-move/move-examples/large_packages/README.md @@ -1,14 +1,43 @@ -This package provides an experimental service for uploading very large modules to the Aptos network. To publish using this API, you must divide your metadata and modules across multiple calls into `large_packages::stage_code`. Specifically: +# Aptos Large Packages Framework -* Make sure LargePackages is deployed to your network of choice, you can currently find it on testnet at `0xd20f305e3090a24c00524604dc2a42925a75c67aa6020d33033d516cf0878c4a` -* Compile your package -* Chunk up the metadata and modules and call `large_packages::stage_code` -* In your last call to `large_packages::stage_code` set `publish` to `true` +This module provides a framework for uploading large packages to the Aptos network, under standard +accounts or objects. +To publish using this API, you must divide your metadata and modules across multiple calls +into `large_packages::stage_code_chunk`. 
+In each pass, the caller pushes more code by calling `stage_code_chunk`. +In the final call, the caller can use `stage_code_chunk_and_publish_to_account`, `stage_code_chunk_and_publish_to_object`, or +`stage_code_chunk_and_upgrade_object_code` to upload the final data chunk and publish or upgrade the package on-chain. -The above logic is currently implemented in the Python SDK: `aptos-core/ecosystem/python/sdk/aptos_sdk/package_publisher.py` +The above logic is currently implemented in the Python +SDK: [`aptos-python-sdk`](https://github.com/aptos-labs/aptos-python-sdk/blob/main/aptos_sdk/package_publisher.py). -For validation purposes, this contains a package, `large_package_example` that exceeds the requirements for publishing in a single transaction. +Aptos CLI supports this as well with `--chunked-publish` flag: +- `aptos move publish [OPTIONS] --chunked-publish` +- `aptos move create-object-and-publish-package [OPTIONS] --address-name --chunked-publish` +- `aptos move upgrade-object-package [OPTIONS] --address-name --chunked-publish` -This framework has some limitations: -* There is no consistency checking until the publishing attempt -* Module code is not split across chunks, so if a single module is too big, it won't work +# Usage + +1. **Stage Code Chunks**: + - Call `stage_code_chunk` with the appropriate metadata and code chunks. + - Ensure that `code_indices` are provided from `0` to `last_module_idx`, without any + gaps. + + +2. **Publish or Upgrade**: + - In order to upload the last data chunk and publish the package, call `stage_code_chunk_and_publish_to_account` or `stage_code_chunk_and_publish_to_object`. + + - For object code upgrades, call `stage_code_chunk_and_upgrade_object_code` with the argument `code_object` provided. + +3. **Cleanup**: + - In order to remove `StagingArea` resource from an account, call `cleanup_staging_area`. 
+ +# Notes + +* Make sure LargePackages is deployed to your network of choice, you can currently find it both on + mainnet and testnet at `0xa29df848eebfe5d981f708c2a5b06d31af2be53bbd8ddc94c8523f4b903f7adb` +* Ensure that `code_indices` have no gaps. For example, if code_indices are + provided as [0, 1, 3] (skipping index 2), the inline function `assemble_module_code` will abort + since `StagingArea.last_module_idx` is set as the max value of the provided index + from `code_indices`, and `assemble_module_code` will lookup the `StagingArea.code` SmartTable from + 0 to `StagingArea.last_module_idx` in turn. diff --git a/aptos-move/move-examples/large_packages/large_package_example/sources/eight.move b/aptos-move/move-examples/large_packages/large_package_example/sources/eight.move index 8db4e0bc8da..f10fbe510af 100644 --- a/aptos-move/move-examples/large_packages/large_package_example/sources/eight.move +++ b/aptos-move/move-examples/large_packages/large_package_example/sources/eight.move @@ -9,7 +9,22 @@ /// Long winded text that goes on and on and on /// Long winded text that goes on and on and on module large_package_example::eight { - public fun long_function(_a_very_long_name: u8, _b_very_long_name: u8, _c_very_long_name: u8, _d_very_long_name: u8, _e_very_long_name: u8): address { + + struct State has key { + value: u64 + } + + public entry fun hello(s: &signer, value: u64) { + move_to(s, State { value }) + } + + public fun long_function( + _a_very_long_name: u8, + _b_very_long_name: u8, + _c_very_long_name: u8, + _d_very_long_name: u8, + _e_very_long_name: u8 + ): address { @0x1 } diff --git a/aptos-move/move-examples/large_packages/sources/large_packages.move b/aptos-move/move-examples/large_packages/sources/large_packages.move index 84ba2d49eec..cf3f4314cc8 100644 --- a/aptos-move/move-examples/large_packages/sources/large_packages.move +++ b/aptos-move/move-examples/large_packages/sources/large_packages.move @@ -1,30 +1,84 @@ -/// This provides a framework 
for uploading large packages. In each pass, the caller pushes more -/// code by calling `stage_code`. In the last call, the caller can set the optoinal `publish` and -/// the package will be published inline, saving an extra transaction and additional storage costs. -/// Currently this module does not support modules that are larger than 63KB as that is the maximum -/// that can fit within a transaction and this framework does not split up individual modules. +/// This provides a framework for uploading large packages to standard accounts or objects. +/// In each pass, the caller pushes more code by calling `stage_code_chunk`. +/// In the final call, the caller can use `stage_code_chunk_and_publish_to_account`, `stage_code_chunk_and_publish_to_object`, or +/// `stage_code_chunk_and_upgrade_object_code` to upload the final data chunk and publish or upgrade the package on-chain. +/// +/// Note that `code_indices` must not have gaps. For example, if `code_indices` are provided as [0, 1, 3] +/// (skipping index 2), the inline function `assemble_module_code` will abort. This is because `StagingArea.last_module_idx` +/// is set to the maximum value from `code_indices`. When `assemble_module_code` iterates over the range from 0 to +/// `StagingArea.last_module_idx`, it expects each index to be present in the `StagingArea.code` SmartTable. +/// Any missing index in this range will cause the function to fail. module large_packages::large_packages { use std::error; + use std::option::{Self, Option}; use std::signer; use std::vector; + use aptos_std::smart_table::{Self, SmartTable}; - use aptos_framework::code; + use aptos_framework::code::{Self, PackageRegistry}; + use aptos_framework::object::{Object}; + use aptos_framework::object_code_deployment; /// code_indices and code_chunks should be the same length. const ECODE_MISMATCH: u64 = 1; + /// Object reference should be provided when upgrading object code. 
+ const EMISSING_OBJECT_REFERENCE: u64 = 2; - struct StagingArea has drop, key { + struct StagingArea has key { metadata_serialized: vector, - code: vector>, + code: SmartTable>, + last_module_idx: u64, } - public entry fun stage_code( + public entry fun stage_code_chunk( owner: &signer, - metadata_serialized: vector, + metadata_chunk: vector, + code_indices: vector, + code_chunks: vector>, + ) acquires StagingArea { + stage_code_chunk_internal(owner, metadata_chunk, code_indices, code_chunks); + } + + public entry fun stage_code_chunk_and_publish_to_account( + owner: &signer, + metadata_chunk: vector, + code_indices: vector, + code_chunks: vector>, + ) acquires StagingArea { + let staging_area = stage_code_chunk_internal(owner, metadata_chunk, code_indices, code_chunks); + publish_to_account(owner, staging_area); + cleanup_staging_area(owner); + } + + public entry fun stage_code_chunk_and_publish_to_object( + owner: &signer, + metadata_chunk: vector, + code_indices: vector, + code_chunks: vector>, + ) acquires StagingArea { + let staging_area = stage_code_chunk_internal(owner, metadata_chunk, code_indices, code_chunks); + publish_to_object(owner, staging_area); + cleanup_staging_area(owner); + } + + public entry fun stage_code_chunk_and_upgrade_object_code( + owner: &signer, + metadata_chunk: vector, code_indices: vector, code_chunks: vector>, - publish: bool, + code_object: Option>, ) acquires StagingArea { + let staging_area = stage_code_chunk_internal(owner, metadata_chunk, code_indices, code_chunks); + upgrade_object_code(owner, staging_area, option::extract(&mut code_object)); + cleanup_staging_area(owner); + } + + inline fun stage_code_chunk_internal( + owner: &signer, + metadata_chunk: vector, + code_indices: vector, + code_chunks: vector>, + ): &mut StagingArea acquires StagingArea { assert!( vector::length(&code_indices) == vector::length(&code_chunks), error::invalid_argument(ECODE_MISMATCH), @@ -34,42 +88,84 @@ module large_packages::large_packages { if 
(!exists(owner_address)) { move_to(owner, StagingArea { - metadata_serialized: vector::empty(), - code: vector::empty(), + metadata_serialized: vector[], + code: smart_table::new(), + last_module_idx: 0, }); }; let staging_area = borrow_global_mut(owner_address); - vector::append(&mut staging_area.metadata_serialized, metadata_serialized); - while (!vector::is_empty(&code_chunks)) { - let inner_code = vector::pop_back(&mut code_chunks); - let idx = (vector::pop_back(&mut code_indices) as u64); - while (vector::length(&staging_area.code) <= idx) { - vector::push_back(&mut staging_area.code, vector::empty()); + if (!vector::is_empty(&metadata_chunk)) { + vector::append(&mut staging_area.metadata_serialized, metadata_chunk); + }; + + let i = 0; + while (i < vector::length(&code_chunks)) { + let inner_code = *vector::borrow(&code_chunks, i); + let idx = (*vector::borrow(&code_indices, i) as u64); + + if (smart_table::contains(&staging_area.code, idx)) { + vector::append(smart_table::borrow_mut(&mut staging_area.code, idx), inner_code); + } else { + smart_table::add(&mut staging_area.code, idx, inner_code); + if (idx > staging_area.last_module_idx) { + staging_area.last_module_idx = idx; + } }; - let source_code = vector::borrow_mut(&mut staging_area.code, idx); - vector::append(source_code, inner_code) + i = i + 1; }; - let _ = staging_area; + staging_area + } - if (publish) { - publish_staged_code(owner, owner_address); - move_from(owner_address); - } + inline fun publish_to_account( + publisher: &signer, + staging_area: &mut StagingArea, + ) { + let code = assemble_module_code(staging_area); + code::publish_package_txn(publisher, staging_area.metadata_serialized, code); } - public entry fun cleanup(owner: &signer) acquires StagingArea { - move_from(signer::address_of(owner)); + inline fun publish_to_object( + publisher: &signer, + staging_area: &mut StagingArea, + ) { + let code = assemble_module_code(staging_area); + object_code_deployment::publish(publisher, 
staging_area.metadata_serialized, code); } - /// Publish code from staging area. - public entry fun publish_staged_code( + inline fun upgrade_object_code( publisher: &signer, - staging_area_address: address, - ) acquires StagingArea { - let staging_area = borrow_global_mut(staging_area_address); - code::publish_package_txn(publisher, staging_area.metadata_serialized, staging_area.code); + staging_area: &mut StagingArea, + code_object: Object, + ) { + let code = assemble_module_code(staging_area); + object_code_deployment::upgrade(publisher, staging_area.metadata_serialized, code, code_object); + } + + inline fun assemble_module_code( + staging_area: &mut StagingArea, + ): vector> { + let last_module_idx = staging_area.last_module_idx; + let code: vector> = vector[]; + let i: u64 = 0; + while (i <= last_module_idx) { + vector::push_back( + &mut code, + *smart_table::borrow(&staging_area.code, i) + ); + i = i + 1; + }; + code + } + + public entry fun cleanup_staging_area(owner: &signer) acquires StagingArea { + let StagingArea { + metadata_serialized: _, + code, + last_module_idx: _, + } = move_from(signer::address_of(owner)); + smart_table::destroy(code); } } diff --git a/aptos-node/src/lib.rs b/aptos-node/src/lib.rs index 4634c22c3ef..93acbfd7e7e 100644 --- a/aptos-node/src/lib.rs +++ b/aptos-node/src/lib.rs @@ -605,7 +605,7 @@ pub fn setup_environment_and_start_node( let mut admin_service = services::start_admin_service(&node_config); // Set up the storage database and any RocksDB checkpoints - let (db_rw, backup_service, genesis_waypoint, indexer_db_opt) = + let (db_rw, backup_service, genesis_waypoint, indexer_db_opt, update_receiver) = storage::initialize_database_and_checkpoints(&mut node_config)?; admin_service.set_aptos_db(db_rw.clone().into()); @@ -687,7 +687,13 @@ pub fn setup_environment_and_start_node( indexer_runtime, indexer_grpc_runtime, internal_indexer_db_runtime, - ) = services::bootstrap_api_and_indexer(&node_config, db_rw.clone(), chain_id, 
indexer_db_opt)?; + ) = services::bootstrap_api_and_indexer( + &node_config, + db_rw.clone(), + chain_id, + indexer_db_opt, + update_receiver, + )?; // Create mempool and get the consensus to mempool sender let (mempool_runtime, consensus_to_mempool_sender) = diff --git a/aptos-node/src/services.rs b/aptos-node/src/services.rs index a6b94bde33b..2a686806ae3 100644 --- a/aptos-node/src/services.rs +++ b/aptos-node/src/services.rs @@ -34,7 +34,10 @@ use aptos_types::{chain_id::ChainId, indexer::indexer_db_reader::IndexerReader}; use aptos_validator_transaction_pool::VTxnPoolState; use futures::channel::{mpsc, mpsc::Sender}; use std::{sync::Arc, time::Instant}; -use tokio::runtime::{Handle, Runtime}; +use tokio::{ + runtime::{Handle, Runtime}, + sync::watch::Receiver as WatchReceiver, +}; const AC_SMP_CHANNEL_BUFFER_SIZE: usize = 1_024; const INTRA_NODE_CHANNEL_BUFFER_SIZE: usize = 1; @@ -46,6 +49,7 @@ pub fn bootstrap_api_and_indexer( db_rw: DbReaderWriter, chain_id: ChainId, internal_indexer_db: Option, + update_receiver: Option>, ) -> anyhow::Result<( Receiver, Option, @@ -68,11 +72,15 @@ pub fn bootstrap_api_and_indexer( None => (None, None), }; - let (db_indexer_runtime, txn_event_reader) = - match bootstrap_internal_indexer_db(node_config, db_rw.clone(), internal_indexer_db) { - Some((runtime, db_indexer)) => (Some(runtime), Some(db_indexer)), - None => (None, None), - }; + let (db_indexer_runtime, txn_event_reader) = match bootstrap_internal_indexer_db( + node_config, + db_rw.clone(), + internal_indexer_db, + update_receiver, + ) { + Some((runtime, db_indexer)) => (Some(runtime), Some(db_indexer)), + None => (None, None), + }; let indexer_readers = IndexerReaders::new(indexer_async_v2, txn_event_reader); diff --git a/aptos-node/src/storage.rs b/aptos-node/src/storage.rs index 0089a7961b2..8ee67228fe7 100644 --- a/aptos-node/src/storage.rs +++ b/aptos-node/src/storage.rs @@ -10,11 +10,16 @@ use aptos_executor::db_bootstrapper::maybe_bootstrap; use 
aptos_indexer_grpc_table_info::internal_indexer_db_service::InternalIndexerDBService; use aptos_logger::{debug, info}; use aptos_storage_interface::{DbReader, DbReaderWriter}; -use aptos_types::{ledger_info::LedgerInfoWithSignatures, waypoint::Waypoint}; +use aptos_types::{ + ledger_info::LedgerInfoWithSignatures, transaction::Version, waypoint::Waypoint, +}; use aptos_vm::AptosVM; use either::Either; use std::{fs, path::Path, sync::Arc, time::Instant}; -use tokio::runtime::Runtime; +use tokio::{ + runtime::Runtime, + sync::watch::{channel, Receiver as WatchReceiver}, +}; pub(crate) fn maybe_apply_genesis( db_rw: &DbReaderWriter, @@ -45,46 +50,60 @@ pub(crate) fn bootstrap_db( DbReaderWriter, Option, Option, + Option>, )> { let internal_indexer_db = InternalIndexerDBService::get_indexer_db(node_config); - let (aptos_db_reader, db_rw, backup_service) = - match FastSyncStorageWrapper::initialize_dbs(node_config, internal_indexer_db.clone())? { - Either::Left(db) => { - let (db_arc, db_rw) = DbReaderWriter::wrap(db); - let db_backup_service = start_backup_service( - node_config.storage.backup_service_address, - db_arc.clone(), - ); - maybe_apply_genesis(&db_rw, node_config)?; - (db_arc as Arc, db_rw, Some(db_backup_service)) - }, - Either::Right(fast_sync_db_wrapper) => { - let temp_db = fast_sync_db_wrapper.get_temporary_db_with_genesis(); - maybe_apply_genesis(&DbReaderWriter::from_arc(temp_db), node_config)?; - let (db_arc, db_rw) = DbReaderWriter::wrap(fast_sync_db_wrapper); - let fast_sync_db = db_arc.get_fast_sync_db(); - // FastSyncDB requires ledger info at epoch 0 to establish provenance to genesis - let ledger_info = db_arc - .get_temporary_db_with_genesis() - .get_epoch_ending_ledger_info(0) - .expect("Genesis ledger info must exist"); - - if fast_sync_db - .get_latest_ledger_info_option() - .expect("should returns Ok results") - .is_none() - { - // it means the DB is empty and we need to - // commit the genesis ledger info to the DB. 
- fast_sync_db.commit_genesis_ledger_info(&ledger_info)?; - } - - let db_backup_service = - start_backup_service(node_config.storage.backup_service_address, fast_sync_db); - (db_arc as Arc, db_rw, Some(db_backup_service)) - }, - }; - Ok((aptos_db_reader, db_rw, backup_service, internal_indexer_db)) + let (update_sender, update_receiver) = if internal_indexer_db.is_some() { + let (sender, receiver) = channel::(0); + (Some(sender), Some(receiver)) + } else { + (None, None) + }; + + let (aptos_db_reader, db_rw, backup_service) = match FastSyncStorageWrapper::initialize_dbs( + node_config, + internal_indexer_db.clone(), + update_sender, + )? { + Either::Left(db) => { + let (db_arc, db_rw) = DbReaderWriter::wrap(db); + let db_backup_service = + start_backup_service(node_config.storage.backup_service_address, db_arc.clone()); + maybe_apply_genesis(&db_rw, node_config)?; + (db_arc as Arc, db_rw, Some(db_backup_service)) + }, + Either::Right(fast_sync_db_wrapper) => { + let temp_db = fast_sync_db_wrapper.get_temporary_db_with_genesis(); + maybe_apply_genesis(&DbReaderWriter::from_arc(temp_db), node_config)?; + let (db_arc, db_rw) = DbReaderWriter::wrap(fast_sync_db_wrapper); + let fast_sync_db = db_arc.get_fast_sync_db(); + // FastSyncDB requires ledger info at epoch 0 to establish provenance to genesis + let ledger_info = db_arc + .get_temporary_db_with_genesis() + .get_epoch_ending_ledger_info(0) + .expect("Genesis ledger info must exist"); + + if fast_sync_db + .get_latest_ledger_info_option() + .expect("should returns Ok results") + .is_none() + { + // it means the DB is empty and we need to + // commit the genesis ledger info to the DB. 
+ fast_sync_db.commit_genesis_ledger_info(&ledger_info)?; + } + let db_backup_service = + start_backup_service(node_config.storage.backup_service_address, fast_sync_db); + (db_arc as Arc, db_rw, Some(db_backup_service)) + }, + }; + Ok(( + aptos_db_reader, + db_rw, + backup_service, + internal_indexer_db, + update_receiver, + )) } /// In consensus-only mode, return a in-memory based [FakeAptosDB] and @@ -157,6 +176,7 @@ pub fn initialize_database_and_checkpoints( Option, Waypoint, Option, + Option>, )> { // If required, create RocksDB checkpoints and change the working directory. // This is test-only. @@ -166,7 +186,8 @@ pub fn initialize_database_and_checkpoints( // Open the database let instant = Instant::now(); - let (_aptos_db, db_rw, backup_service, indexer_db_opt) = bootstrap_db(node_config)?; + let (_aptos_db, db_rw, backup_service, indexer_db_opt, update_receiver) = + bootstrap_db(node_config)?; // Log the duration to open storage debug!( @@ -179,5 +200,6 @@ pub fn initialize_database_and_checkpoints( backup_service, node_config.base.waypoint.genesis_waypoint(), indexer_db_opt, + update_receiver, )) } diff --git a/buildtools/packer/aws-ubuntu.pkr.hcl b/buildtools/packer/aws-ubuntu.pkr.hcl index 2a7b64c17a6..ca98235c1f9 100644 --- a/buildtools/packer/aws-ubuntu.pkr.hcl +++ b/buildtools/packer/aws-ubuntu.pkr.hcl @@ -59,6 +59,16 @@ build { "whoami", "chmod +x /tmp/scripts/dev_setup.sh", "sudo -u runner /tmp/scripts/dev_setup.sh -b -r -y -P -J -t -k", + + // Install GCloud SDK and kubectl + "sudo apt-get install apt-transport-https ca-certificates gnupg -y", + "echo 'deb [signed-by=/usr/share/keyrings/cloud.google.gpg] https://packages.cloud.google.com/apt cloud-sdk main' | sudo tee -a /etc/apt/sources.list.d/google-cloud-sdk.list", + "curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key --keyring /usr/share/keyrings/cloud.google.gpg add -", + "sudo apt-get update && sudo apt-get install google-cloud-sdk kubectl 
google-cloud-sdk-gke-gcloud-auth-plugin -y", + // Verify installations + "gcloud --version", + "kubectl version --client", + "gke-gcloud-auth-plugin --version" ] } } diff --git a/config/src/config/consensus_config.rs b/config/src/config/consensus_config.rs index bc45494f2de..021edf0b365 100644 --- a/config/src/config/consensus_config.rs +++ b/config/src/config/consensus_config.rs @@ -89,50 +89,15 @@ pub struct ConsensusConfig { pub rand_rb_config: ReliableBroadcastConfig, pub num_bounded_executor_tasks: u64, pub enable_pre_commit: bool, + + pub max_pending_rounds_in_commit_vote_cache: u64, } +/// Deprecated #[derive(Clone, Debug, Default, Deserialize, Eq, PartialEq, Serialize)] pub enum QcAggregatorType { #[default] NoDelay, - Delayed(DelayedQcAggregatorConfig), -} - -impl QcAggregatorType { - pub fn default_delayed() -> Self { - // TODO: Enable the delayed aggregation by default once we have tested it more. - Self::Delayed(DelayedQcAggregatorConfig::default()) - } -} - -#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)] -pub struct DelayedQcAggregatorConfig { - // Maximum Delay for a QC to be aggregated after round start (in milliseconds). This assumes that - // we have enough voting power to form a QC. If we don't have enough voting power, we will wait - // until we have enough voting power to form a QC. - pub max_delay_after_round_start_ms: u64, - // Percentage of aggregated voting power to wait for before aggregating a QC. For example, if this - // is set to 95% then, a QC is formed as soon as we have 95% of the voting power aggregated without - // any additional waiting. - pub aggregated_voting_power_pct_to_wait: usize, - // This knob control what is the % of the time (as compared to time between round start and time when we - // have enough voting power to form a QC) we wait after we have enough voting power to form a QC. In a sense, - // this knobs controls how much slower we are willing to make consensus to wait for more votes. 
- pub pct_delay_after_qc_aggregated: usize, - // In summary, let's denote the time we have enough voting power (2f + 1) to form a QC as T1 and - // the time we have aggregated `aggregated_voting_power_pct_to_wait` as T2. Then, we wait for - // min((T1 + `pct_delay_after_qc_aggregated` * T1 / 100), `max_delay_after_round_start_ms`, T2) - // before forming a QC. -} - -impl Default for DelayedQcAggregatorConfig { - fn default() -> Self { - Self { - max_delay_after_round_start_ms: 700, - aggregated_voting_power_pct_to_wait: 90, - pct_delay_after_qc_aggregated: 30, - } - } } /// Execution backpressure which handles gas/s variance, @@ -354,6 +319,7 @@ impl Default for ConsensusConfig { }, num_bounded_executor_tasks: 16, enable_pre_commit: true, + max_pending_rounds_in_commit_vote_cache: 100, } } } diff --git a/config/src/config/consensus_observer_config.rs b/config/src/config/consensus_observer_config.rs index 8d930cf17c8..02d85721349 100644 --- a/config/src/config/consensus_observer_config.rs +++ b/config/src/config/consensus_observer_config.rs @@ -9,8 +9,8 @@ use serde::{Deserialize, Serialize}; use serde_yaml::Value; // Useful constants for enabling consensus observer on different node types -const ENABLE_ON_VALIDATORS: bool = false; -const ENABLE_ON_VALIDATOR_FULLNODES: bool = false; +const ENABLE_ON_VALIDATORS: bool = true; +const ENABLE_ON_VALIDATOR_FULLNODES: bool = true; const ENABLE_ON_PUBLIC_FULLNODES: bool = false; #[derive(Clone, Copy, Debug, Deserialize, PartialEq, Serialize)] @@ -30,6 +30,8 @@ pub struct ConsensusObserverConfig { /// Interval (in milliseconds) to garbage collect peer state pub garbage_collection_interval_ms: u64, + /// The maximum number of concurrent subscriptions + pub max_concurrent_subscriptions: u64, /// Maximum number of blocks to keep in memory (e.g., pending blocks, ordered blocks, etc.) 
pub max_num_pending_blocks: u64, /// Maximum timeout (in milliseconds) for active subscriptions @@ -52,8 +54,9 @@ impl Default for ConsensusObserverConfig { publisher_enabled: false, max_network_channel_size: 1000, max_parallel_serialization_tasks: num_cpus::get(), // Default to the number of CPUs - network_request_timeout_ms: 10_000, // 10 seconds + network_request_timeout_ms: 5_000, // 5 seconds garbage_collection_interval_ms: 60_000, // 60 seconds + max_concurrent_subscriptions: 2, // 2 streams should be sufficient max_num_pending_blocks: 100, // 100 blocks max_subscription_timeout_ms: 30_000, // 30 seconds max_synced_version_timeout_ms: 60_000, // 60 seconds diff --git a/config/src/config/gas_estimation_config.rs b/config/src/config/gas_estimation_config.rs index 763ac1b9474..00d943e9c16 100644 --- a/config/src/config/gas_estimation_config.rs +++ b/config/src/config/gas_estimation_config.rs @@ -32,6 +32,8 @@ pub struct GasEstimationConfig { pub aggressive_block_history: usize, /// Time after write when previous value is returned without recomputing pub cache_expiration_ms: u64, + /// Whether to account which TransactionShufflerType is used onchain, and how it affects gas estimation + pub incorporate_reordering_effects: bool, } impl Default for GasEstimationConfig { @@ -44,6 +46,7 @@ impl Default for GasEstimationConfig { market_block_history: 30, aggressive_block_history: 120, cache_expiration_ms: 500, + incorporate_reordering_effects: true, } } } diff --git a/config/src/config/indexer_grpc_config.rs b/config/src/config/indexer_grpc_config.rs index ed086009ce6..6dcb566194f 100644 --- a/config/src/config/indexer_grpc_config.rs +++ b/config/src/config/indexer_grpc_config.rs @@ -98,7 +98,7 @@ impl ConfigSanitizer for IndexerGrpcConfig { { return Err(Error::ConfigSanitizerFailed( sanitizer_name, - "storage.enable_indexer or indexer_table_info.enabled must be true if indexer_grpc.enabled is true".to_string(), + "storage.enable_indexer must be true or 
indexer_table_info.table_info_service_mode must be IndexingOnly if indexer_grpc.enabled is true".to_string(), )); } Ok(()) diff --git a/consensus/consensus-types/src/block_test.rs b/consensus/consensus-types/src/block_test.rs index bc33ddec8bc..54ece0539e2 100644 --- a/consensus/consensus-types/src/block_test.rs +++ b/consensus/consensus-types/src/block_test.rs @@ -17,7 +17,7 @@ use aptos_types::{ account_address::AccountAddress, aggregate_signature::PartialSignatures, block_info::{BlockInfo, Round}, - ledger_info::{LedgerInfo, LedgerInfoWithPartialSignatures}, + ledger_info::{LedgerInfo, LedgerInfoWithVerifiedSignatures}, on_chain_config::ValidatorSet, validator_signer::ValidatorSigner, validator_verifier::{random_validator_verifier, ValidatorVerifier}, @@ -131,7 +131,7 @@ fn test_same_qc_different_authors() { .unwrap(); let signature = signer.sign(genesis_qc.ledger_info().ledger_info()).unwrap(); - let mut ledger_info_altered = LedgerInfoWithPartialSignatures::new( + let mut ledger_info_altered = LedgerInfoWithVerifiedSignatures::new( genesis_qc.ledger_info().ledger_info().clone(), PartialSignatures::empty(), ); @@ -201,7 +201,7 @@ fn test_block_metadata_bitvec() { ); let mut ledger_info_1 = - LedgerInfoWithPartialSignatures::new(ledger_info.clone(), PartialSignatures::empty()); + LedgerInfoWithVerifiedSignatures::new(ledger_info.clone(), PartialSignatures::empty()); let votes_1 = vec![true, false, true, true]; votes_1 .iter() diff --git a/consensus/consensus-types/src/delayed_qc_msg.rs b/consensus/consensus-types/src/delayed_qc_msg.rs deleted file mode 100644 index 75d9752c2ea..00000000000 --- a/consensus/consensus-types/src/delayed_qc_msg.rs +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright © Aptos Foundation -// Parts of the project are originally copyright © Meta Platforms, Inc. 
-// SPDX-License-Identifier: Apache-2.0 - -use crate::vote::Vote; -use serde::{Deserialize, Serialize}; -use std::fmt::{Display, Formatter}; - -/// DelayedQCMsg is the struct that is sent by the proposer to self when it receives enough votes -/// for a QC but it still delays the creation of the QC to ensure that slow nodes are given enough -/// time to catch up to the chain and cast their votes. -#[derive(Deserialize, Serialize, Clone, Debug, PartialEq, Eq)] -pub struct DelayedQcMsg { - /// Vote data for the QC that is being delayed. - pub vote: Vote, -} - -impl Display for DelayedQcMsg { - fn fmt(&self, f: &mut Formatter) -> std::fmt::Result { - write!(f, "DelayedQcMsg: vote [{}]", self.vote,) - } -} - -impl DelayedQcMsg { - pub fn new(vote: Vote) -> Self { - Self { vote } - } - - pub fn vote(&self) -> &Vote { - &self.vote - } -} diff --git a/consensus/consensus-types/src/lib.rs b/consensus/consensus-types/src/lib.rs index c9e555da27a..bc70a1ad942 100644 --- a/consensus/consensus-types/src/lib.rs +++ b/consensus/consensus-types/src/lib.rs @@ -8,7 +8,6 @@ pub mod block; pub mod block_data; pub mod block_retrieval; pub mod common; -pub mod delayed_qc_msg; pub mod epoch_retrieval; pub mod order_vote; pub mod order_vote_msg; diff --git a/consensus/consensus-types/src/timeout_2chain.rs b/consensus/consensus-types/src/timeout_2chain.rs index c0d62edc6ff..7c4e9e0993e 100644 --- a/consensus/consensus-types/src/timeout_2chain.rs +++ b/consensus/consensus-types/src/timeout_2chain.rs @@ -266,7 +266,7 @@ impl TwoChainTimeoutWithPartialSignatures { let (partial_sig, ordered_rounds) = self .signatures .get_partial_sig_with_rounds(verifier.address_to_validator_index()); - let aggregated_sig = verifier.aggregate_signatures(&partial_sig)?; + let aggregated_sig = verifier.aggregate_signatures(partial_sig.signatures_iter())?; Ok(TwoChainTimeoutCertificate { timeout: self.timeout.clone(), signatures_with_rounds: AggregateSignatureWithRounds::new( @@ -406,7 +406,7 @@ mod tests { use 
aptos_types::{ aggregate_signature::PartialSignatures, block_info::BlockInfo, - ledger_info::{LedgerInfo, LedgerInfoWithPartialSignatures}, + ledger_info::{LedgerInfo, LedgerInfoWithVerifiedSignatures}, validator_verifier::random_validator_verifier, }; @@ -415,7 +415,7 @@ mod tests { let quorum_size = validators.quorum_voting_power() as usize; let generate_quorum = |round, num_of_signature| { let vote_data = VoteData::new(BlockInfo::random(round), BlockInfo::random(0)); - let mut ledger_info = LedgerInfoWithPartialSignatures::new( + let mut ledger_info = LedgerInfoWithVerifiedSignatures::new( LedgerInfo::new(BlockInfo::empty(), vote_data.hash()), PartialSignatures::empty(), ); diff --git a/consensus/consensus-types/src/wrapped_ledger_info.rs b/consensus/consensus-types/src/wrapped_ledger_info.rs index 6125f85ca2c..ee254af1730 100644 --- a/consensus/consensus-types/src/wrapped_ledger_info.rs +++ b/consensus/consensus-types/src/wrapped_ledger_info.rs @@ -77,6 +77,10 @@ impl WrappedLedgerInfo { &self.signed_ledger_info } + pub fn epoch(&self) -> u64 { + self.ledger_info().ledger_info().epoch() + } + pub fn commit_info(&self) -> &BlockInfo { self.ledger_info().ledger_info().commit_info() } diff --git a/consensus/safety-rules/src/test_utils.rs b/consensus/safety-rules/src/test_utils.rs index 07b9159c66a..ce161c0a5fb 100644 --- a/consensus/safety-rules/src/test_utils.rs +++ b/consensus/safety-rules/src/test_utils.rs @@ -24,7 +24,7 @@ use aptos_types::{ block_info::BlockInfo, epoch_change::EpochChangeProof, epoch_state::EpochState, - ledger_info::{LedgerInfo, LedgerInfoWithPartialSignatures, LedgerInfoWithSignatures}, + ledger_info::{LedgerInfo, LedgerInfoWithSignatures, LedgerInfoWithVerifiedSignatures}, on_chain_config::ValidatorSet, proof::AccumulatorExtensionProof, validator_info::ValidatorInfo, @@ -168,7 +168,7 @@ pub fn make_proposal_with_parent_and_overrides( ) .unwrap(); - let mut ledger_info_with_signatures = LedgerInfoWithPartialSignatures::new( + let mut 
ledger_info_with_signatures = LedgerInfoWithVerifiedSignatures::new( vote.ledger_info().clone(), PartialSignatures::empty(), ); diff --git a/consensus/src/block_storage/block_store_test.rs b/consensus/src/block_storage/block_store_test.rs index 7328688f2f4..41def8f1c32 100644 --- a/consensus/src/block_storage/block_store_test.rs +++ b/consensus/src/block_storage/block_store_test.rs @@ -8,9 +8,7 @@ use crate::{ test_utils::{ build_empty_tree, build_simple_tree, consensus_runtime, timed_block_on, TreeInserter, }, - util::mock_time_service::SimulatedTimeService, }; -use aptos_config::config::QcAggregatorType; use aptos_consensus_types::{ block::{ block_test_utils::{ @@ -27,9 +25,8 @@ use aptos_crypto::{HashValue, PrivateKey}; use aptos_types::{ validator_signer::ValidatorSigner, validator_verifier::random_validator_verifier, }; -use futures_channel::mpsc::unbounded; use proptest::prelude::*; -use std::{cmp::min, collections::HashSet, sync::Arc}; +use std::{cmp::min, collections::HashSet}; #[tokio::test] async fn test_highest_block_and_quorum_cert() { @@ -284,11 +281,8 @@ async fn test_insert_vote() { let block = inserter .insert_block_with_qc(certificate_for_genesis(), &genesis, 1) .await; - let time_service = Arc::new(SimulatedTimeService::new()); - let (delayed_qc_tx, _) = unbounded(); - let mut pending_votes = - PendingVotes::new(time_service, delayed_qc_tx, QcAggregatorType::NoDelay); + let mut pending_votes = PendingVotes::new(); assert!(block_store.get_quorum_cert_for_block(block.id()).is_none()); for (i, voter) in signers.iter().enumerate().take(10).skip(1) { diff --git a/consensus/src/consensus_observer/common/error.rs b/consensus/src/consensus_observer/common/error.rs index 37a516d1011..7fc6a78785a 100644 --- a/consensus/src/consensus_observer/common/error.rs +++ b/consensus/src/consensus_observer/common/error.rs @@ -21,6 +21,9 @@ pub enum Error { #[error("Subscription progress stopped: {0}")] SubscriptionProgressStopped(String), + #[error("Subscriptions 
reset: {0}")] + SubscriptionsReset(String), + #[error("Subscription suboptimal: {0}")] SubscriptionSuboptimal(String), @@ -40,6 +43,7 @@ impl Error { Self::RpcError(_) => "rpc_error", Self::SubscriptionDisconnected(_) => "subscription_disconnected", Self::SubscriptionProgressStopped(_) => "subscription_progress_stopped", + Self::SubscriptionsReset(_) => "subscriptions_reset", Self::SubscriptionSuboptimal(_) => "subscription_suboptimal", Self::SubscriptionTimeout(_) => "subscription_timeout", Self::UnexpectedError(_) => "unexpected_error", diff --git a/consensus/src/consensus_observer/common/metrics.rs b/consensus/src/consensus_observer/common/metrics.rs index 8cf8144d25a..5888bbfcaca 100644 --- a/consensus/src/consensus_observer/common/metrics.rs +++ b/consensus/src/consensus_observer/common/metrics.rs @@ -5,17 +5,18 @@ use aptos_config::network_id::{NetworkId, PeerNetworkId}; use aptos_metrics_core::{ - register_histogram_vec, register_int_counter_vec, register_int_gauge_vec, HistogramVec, - IntCounterVec, IntGaugeVec, + register_histogram_vec, register_int_counter, register_int_counter_vec, register_int_gauge_vec, + HistogramVec, IntCounter, IntCounterVec, IntGaugeVec, }; use once_cell::sync::Lazy; // Useful metric labels pub const BLOCK_PAYLOAD_LABEL: &str = "block_payload"; pub const COMMIT_DECISION_LABEL: &str = "commit_decision"; +pub const COMMITTED_BLOCKS_LABEL: &str = "committed_blocks"; pub const CREATED_SUBSCRIPTION_LABEL: &str = "created_subscription"; pub const ORDERED_BLOCK_ENTRIES_LABEL: &str = "ordered_block_entries"; -pub const ORDERED_BLOCKS_LABEL: &str = "ordered_blocks"; +pub const ORDERED_BLOCK_LABEL: &str = "ordered_block"; pub const PENDING_BLOCK_ENTRIES_LABEL: &str = "pending_block_entries"; pub const PENDING_BLOCKS_LABEL: &str = "pending_blocks"; pub const STORED_PAYLOADS_LABEL: &str = "stored_payloads"; @@ -30,6 +31,34 @@ pub static OBSERVER_CREATED_SUBSCRIPTIONS: Lazy = Lazy::new(|| { .unwrap() }); +/// Counter for tracking the number of 
times the block state was cleared by the consensus observer +pub static OBSERVER_CLEARED_BLOCK_STATE: Lazy = Lazy::new(|| { + register_int_counter!( + "consensus_observer_cleared_block_state", + "Counter for tracking the number of times the block state was cleared by the consensus observer", + ).unwrap() +}); + +/// Counter for tracking dropped (direct send) messages by the consensus observer +pub static OBSERVER_DROPPED_MESSAGES: Lazy = Lazy::new(|| { + register_int_counter_vec!( + "consensus_observer_dropped_messages", + "Counters related to dropped (direct send) messages by the consensus observer", + &["message_type", "network_id"] + ) + .unwrap() +}); + +/// Counter for tracking rejected (direct send) messages by the consensus observer +pub static OBSERVER_REJECTED_MESSAGES: Lazy = Lazy::new(|| { + register_int_counter_vec!( + "consensus_observer_rejected_messages", + "Counters related to rejected (direct send) messages by the consensus observer", + &["message_type", "network_id"] + ) + .unwrap() +}); + /// Gauge for tracking the number of active subscriptions for the consensus observer pub static OBSERVER_NUM_ACTIVE_SUBSCRIPTIONS: Lazy = Lazy::new(|| { register_int_gauge_vec!( @@ -180,8 +209,8 @@ pub static PUBLISHER_SENT_MESSAGES: Lazy = Lazy::new(|| { .unwrap() }); -/// Increments the given request counter with the provided values -pub fn increment_request_counter( +/// Increments the given counter with the provided values +pub fn increment_counter( counter: &Lazy, label: &str, peer_network_id: &PeerNetworkId, @@ -192,6 +221,11 @@ pub fn increment_request_counter( .inc(); } +/// Increments the given counter without labels +pub fn increment_counter_without_labels(counter: &Lazy) { + counter.inc(); +} + /// Observes the value for the provided histogram and label pub fn observe_value_with_label( histogram: &Lazy, diff --git a/consensus/src/consensus_observer/network/network_handler.rs b/consensus/src/consensus_observer/network/network_handler.rs index 
d8aa1447312..bbaeca0dc48 100644 --- a/consensus/src/consensus_observer/network/network_handler.rs +++ b/consensus/src/consensus_observer/network/network_handler.rs @@ -208,7 +208,7 @@ impl ConsensusObserverNetworkHandler { None => { error!( LogSchema::new(LogEntry::ConsensusObserver).message(&format!( - "Missing response sender for RCP request: {:?}", + "Missing response sender for the RPC request: {:?}", request )) ); diff --git a/consensus/src/consensus_observer/network/observer_client.rs b/consensus/src/consensus_observer/network/observer_client.rs index a2f94ff4452..33c4ce902af 100644 --- a/consensus/src/consensus_observer/network/observer_client.rs +++ b/consensus/src/consensus_observer/network/observer_client.rs @@ -46,7 +46,7 @@ impl> message_label: &str, ) -> Result<(), Error> { // Increment the message counter - metrics::increment_request_counter( + metrics::increment_counter( &metrics::PUBLISHER_SENT_MESSAGES, message_label, peer_network_id, @@ -74,7 +74,7 @@ impl> .message(&format!("Failed to send message: {:?}", error))); // Update the direct send error metrics - metrics::increment_request_counter( + metrics::increment_counter( &metrics::PUBLISHER_SENT_MESSAGE_ERRORS, error.get_label(), peer_network_id, @@ -125,7 +125,7 @@ impl> .message(&format!("Failed to serialize message: {:?}", error))); // Update the direct send error metrics - metrics::increment_request_counter( + metrics::increment_counter( &metrics::PUBLISHER_SENT_MESSAGE_ERRORS, error.get_label(), peer_network_id, @@ -147,7 +147,7 @@ impl> let request_id = rand::thread_rng().gen(); // Increment the request counter - metrics::increment_request_counter( + metrics::increment_counter( &metrics::OBSERVER_SENT_REQUESTS, request.get_label(), peer_network_id, @@ -174,7 +174,7 @@ impl> match result { Ok(consensus_observer_response) => { // Update the RPC success metrics - metrics::increment_request_counter( + metrics::increment_counter( &metrics::OBSERVER_RECEIVED_MESSAGE_RESPONSES, request_label, 
peer_network_id, @@ -192,7 +192,7 @@ impl> .error(&error)); // Update the RPC error metrics - metrics::increment_request_counter( + metrics::increment_counter( &metrics::OBSERVER_SENT_MESSAGE_ERRORS, error.get_label(), peer_network_id, diff --git a/consensus/src/consensus_observer/network/observer_message.rs b/consensus/src/consensus_observer/network/observer_message.rs index 6c68384cda3..1905d162b03 100644 --- a/consensus/src/consensus_observer/network/observer_message.rs +++ b/consensus/src/consensus_observer/network/observer_message.rs @@ -312,8 +312,8 @@ impl CommitDecision { /// The transaction payload and proof of each block #[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)] pub struct PayloadWithProof { - pub transactions: Vec, - pub proofs: Vec, + transactions: Vec, + proofs: Vec, } impl PayloadWithProof { @@ -337,8 +337,8 @@ impl PayloadWithProof { /// The transaction payload and proof of each block with a transaction limit #[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)] pub struct PayloadWithProofAndLimit { - pub payload_with_proof: PayloadWithProof, - pub transaction_limit: Option, + payload_with_proof: PayloadWithProof, + transaction_limit: Option, } impl PayloadWithProofAndLimit { @@ -629,8 +629,8 @@ impl BlockTransactionPayload { /// Payload message contains the block and transaction payload #[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)] pub struct BlockPayload { - pub block: BlockInfo, - pub transaction_payload: BlockTransactionPayload, + block: BlockInfo, + transaction_payload: BlockTransactionPayload, } impl BlockPayload { @@ -641,25 +641,73 @@ impl BlockPayload { } } + /// Returns a reference to the block info + pub fn block(&self) -> &BlockInfo { + &self.block + } + + /// Returns the epoch of the block info + pub fn epoch(&self) -> u64 { + self.block.epoch() + } + + /// Returns the round of the block info + pub fn round(&self) -> Round { + self.block.round() + } + + /// Returns a reference to the block 
transaction payload + pub fn transaction_payload(&self) -> &BlockTransactionPayload { + &self.transaction_payload + } + /// Verifies the block payload digests and returns an error if the data is invalid pub fn verify_payload_digests(&self) -> Result<(), Error> { - // Verify the proof of store digests against the transaction + // Get the block info, transactions, payload proofs and inline batches + let block_info = self.block.clone(); let transactions = self.transaction_payload.transactions(); + let payload_proofs = self.transaction_payload.payload_proofs(); + let inline_batches = self.transaction_payload.inline_batches(); + + // Get the number of transactions, payload proofs and inline batches + let num_transactions = transactions.len(); + let num_payload_proofs = payload_proofs.len(); + let num_inline_batches = inline_batches.len(); + + // Verify the payload proof digests using the transactions let mut transactions_iter = transactions.iter(); - for proof_of_store in &self.transaction_payload.payload_proofs() { - reconstruct_and_verify_batch(&mut transactions_iter, proof_of_store.info())?; + for proof_of_store in &payload_proofs { + reconstruct_and_verify_batch(&block_info, &mut transactions_iter, proof_of_store.info(), true).map_err( + |error| { + Error::InvalidMessageError(format!( + "Failed to verify payload proof digests! 
Num transactions: {:?}, \ + num batches: {:?}, num inline batches: {:?}, failed batch: {:?}, Error: {:?}", + num_transactions, num_payload_proofs, num_inline_batches, proof_of_store.info(), error + )) + }, + )?; } - // Verify the inline batch digests against the inline batches - for batch_info in self.transaction_payload.inline_batches() { - reconstruct_and_verify_batch(&mut transactions_iter, batch_info)?; + // Verify the inline batch digests using the transactions + for batch_info in inline_batches.into_iter() { + reconstruct_and_verify_batch(&block_info, &mut transactions_iter, batch_info, false).map_err( + |error| { + Error::InvalidMessageError(format!( + "Failed to verify inline batch digests! Num transactions: {:?}, \ + num batches: {:?}, num inline batches: {:?}, failed batch: {:?}, Error: {:?}", + num_transactions, num_payload_proofs, num_inline_batches, batch_info, error + )) + }, + )?; } - // Verify that there are no transactions remaining + // Verify that there are no transactions remaining (all transactions should be consumed) let remaining_transactions = transactions_iter.as_slice(); if !remaining_transactions.is_empty() { return Err(Error::InvalidMessageError(format!( - "Failed to verify payload transactions! Transactions remaining: {:?}. Expected: 0", + "Failed to verify payload transactions! Num transactions: {:?}, \ + transactions remaining: {:?}. Expected: 0", + num_transactions, remaining_transactions.len() ))); } @@ -691,12 +739,22 @@ impl BlockPayload { } } -/// Reconstructs and verifies the batch using the -/// given transactions and the expected batch info. +/// Reconstructs and verifies the batch using the given transactions +/// and the expected batch info. If `skip_expired_batches` is true +/// then verification will be skipped if the batch is expired. 
fn reconstruct_and_verify_batch( + block_info: &BlockInfo, transactions_iter: &mut Iter, expected_batch_info: &BatchInfo, + skip_expired_batches: bool, ) -> Result<(), Error> { + // If the batch is expired we should skip verification (as the + // transactions for the expired batch won't be sent in the payload). + // Note: this should only be required for QS batches (not inline batches). + if skip_expired_batches && block_info.timestamp_usecs() > expected_batch_info.expiration() { + return Ok(()); + } + // Gather the transactions for the batch let mut batch_transactions = vec![]; for i in 0..expected_batch_info.num_txns() { @@ -720,7 +778,7 @@ fn reconstruct_and_verify_batch( let expected_digest = expected_batch_info.digest(); if batch_digest != *expected_digest { return Err(Error::InvalidMessageError(format!( - "The reconstructed batch digest does not match the expected digest!\ + "The reconstructed batch digest does not match the expected digest! \ Batch: {:?}, Expected digest: {:?}, Reconstructed digest: {:?}", expected_batch_info, expected_digest, batch_digest ))); @@ -750,7 +808,7 @@ mod test { validator_verifier::{ValidatorConsensusInfo, ValidatorVerifier}, PeerId, }; - use claims::assert_matches; + use claims::{assert_matches, assert_ok}; use move_core_types::account_address::AccountAddress; #[test] @@ -1091,17 +1149,18 @@ mod test { let num_batches = num_signed_transactions - 1; let mut proofs = vec![]; for _ in 0..num_batches { - let batch_info = create_batch_info_with_digest(HashValue::random(), 1); + let batch_info = create_batch_info_with_digest(HashValue::random(), 1, 1000); let proof = ProofOfStore::new(batch_info, AggregateSignature::empty()); proofs.push(proof); } // Create a single inline batch with a random digest - let inline_batch = create_batch_info_with_digest(HashValue::random(), 1); + let inline_batch = create_batch_info_with_digest(HashValue::random(), 1, 1000); let inline_batches = vec![inline_batch]; // Create a block payload (with the 
transactions, proofs and inline batches) - let block_payload = create_block_payload(&signed_transactions, &proofs, &inline_batches); + let block_payload = + create_block_payload(None, &signed_transactions, &proofs, &inline_batches); // Verify the block payload digests and ensure it fails (the batch digests don't match) let error = block_payload.verify_payload_digests().unwrap_err(); @@ -1111,13 +1170,14 @@ mod test { let mut proofs = vec![]; for transaction in &signed_transactions[0..num_batches] { let batch_payload = BatchPayload::new(PeerId::ZERO, vec![transaction.clone()]); - let batch_info = create_batch_info_with_digest(batch_payload.hash(), 1); + let batch_info = create_batch_info_with_digest(batch_payload.hash(), 1, 1000); let proof = ProofOfStore::new(batch_info, AggregateSignature::empty()); proofs.push(proof); } // Create a block payload (with the transactions, correct proofs and inline batches) - let block_payload = create_block_payload(&signed_transactions, &proofs, &inline_batches); + let block_payload = + create_block_payload(None, &signed_transactions, &proofs, &inline_batches); // Verify the block payload digests and ensure it fails (the inline batch digests don't match) let error = block_payload.verify_payload_digests().unwrap_err(); @@ -1128,18 +1188,20 @@ mod test { .last() .unwrap() .clone()]); - let inline_batch_info = create_batch_info_with_digest(inline_batch_payload.hash(), 1); + let inline_batch_info = create_batch_info_with_digest(inline_batch_payload.hash(), 1, 1000); let inline_batches = vec![inline_batch_info]; // Create a block payload (with the transactions, correct proofs and correct inline batches) - let block_payload = create_block_payload(&signed_transactions, &proofs, &inline_batches); + let block_payload = + create_block_payload(None, &signed_transactions, &proofs, &inline_batches); // Verify the block payload digests and ensure it passes block_payload.verify_payload_digests().unwrap(); // Create a block payload (with too many 
transactions) signed_transactions.append(&mut create_signed_transactions(1)); - let block_payload = create_block_payload(&signed_transactions, &proofs, &inline_batches); + let block_payload = + create_block_payload(None, &signed_transactions, &proofs, &inline_batches); // Verify the block payload digests and ensure it fails (there are too many transactions) let error = block_payload.verify_payload_digests().unwrap_err(); @@ -1149,13 +1211,101 @@ mod test { for _ in 0..3 { signed_transactions.pop(); } - let block_payload = create_block_payload(&signed_transactions, &proofs, &inline_batches); + let block_payload = + create_block_payload(None, &signed_transactions, &proofs, &inline_batches); // Verify the block payload digests and ensure it fails (there are too few transactions) let error = block_payload.verify_payload_digests().unwrap_err(); assert_matches!(error, Error::InvalidMessageError(_)); } + #[test] + fn test_verify_payload_digests_expired() { + // Create a new block info with the specified timestamp + let block_timestamp = 1000; + let block_info = BlockInfo::new( + 0, + 0, + HashValue::random(), + HashValue::random(), + 0, + block_timestamp, + None, + ); + + // Create multiple signed transactions + let num_signed_transactions = 100; + let signed_transactions = create_signed_transactions(num_signed_transactions); + + // Create multiple batch proofs (where some batches are expired) + let (proofs, non_expired_transactions) = + create_mixed_expiration_proofs(block_timestamp, &signed_transactions); + + // Create a block payload (with non-expired transactions, all proofs and no inline batches) + let block_payload = create_block_payload( + Some(block_info.clone()), + &non_expired_transactions, + &proofs, + &[], + ); + + // Verify the block payload digests and ensure it passes + assert_ok!(block_payload.verify_payload_digests()); + + // Create multiple inline transactions + let num_inline_transactions = 25; + let inline_transactions = 
create_signed_transactions(num_inline_transactions); + + // Create multiple inline batches (where some batches are expired) + let (inline_batches, non_expired_inline_transactions) = + create_mixed_expiration_proofs(block_timestamp, &inline_transactions); + + // Create a block payload (with all non-expired inline transactions, no proofs and inline batches) + let inline_batches: Vec<_> = inline_batches + .iter() + .map(|proof| proof.info().clone()) + .collect(); + let block_payload = create_block_payload( + Some(block_info.clone()), + &non_expired_inline_transactions, + &[], + &inline_batches, + ); + + // Verify the block payload digests and ensure it fails (expired inline batches are still checked) + let error = block_payload.verify_payload_digests().unwrap_err(); + assert_matches!(error, Error::InvalidMessageError(_)); + + // Create a block payload (with all inline transactions, no proofs and inline batches) + let block_payload = create_block_payload( + Some(block_info.clone()), + &inline_transactions, + &[], + &inline_batches, + ); + + // Verify the block payload digests and ensure it now passes + assert_ok!(block_payload.verify_payload_digests()); + + // Gather all transactions (from both QS and inline batches) + let all_transactions: Vec<_> = non_expired_transactions + .iter() + .chain(inline_transactions.iter()) + .cloned() + .collect(); + + // Create a block payload (with all transactions, all proofs and inline batches) + let block_payload = create_block_payload( + Some(block_info), + &all_transactions, + &proofs, + &inline_batches, + ); + + // Verify the block payload digests and ensure it passes + assert_ok!(block_payload.verify_payload_digests()); + } + #[test] fn test_verify_payload_signatures() { // Create multiple batch info proofs (with empty signatures) @@ -1206,16 +1356,20 @@ mod test { /// Creates and returns a new batch info with random data fn create_batch_info() -> BatchInfo { - create_batch_info_with_digest(HashValue::random(), 0) + 
create_batch_info_with_digest(HashValue::random(), 0, 0) } - /// Creates and returns a new batch info with the specified digest - fn create_batch_info_with_digest(digest: HashValue, num_transactions: u64) -> BatchInfo { + /// Creates and returns a new batch info with the specified digest and properties + fn create_batch_info_with_digest( + digest: HashValue, + num_transactions: u64, + batch_expiration: u64, + ) -> BatchInfo { BatchInfo::new( PeerId::ZERO, BatchId::new(0), 10, - 1, + batch_expiration, digest, num_transactions, 1, @@ -1230,6 +1384,7 @@ mod test { /// Creates and returns a hybrid quorum store payload using the given data fn create_block_payload( + block_info: Option, signed_transactions: &[SignedTransaction], proofs: &[ProofOfStore], inline_batches: &[BatchInfo], @@ -1242,11 +1397,11 @@ mod test { inline_batches.to_vec(), ); + // Determine the block info to use + let block_info = block_info.unwrap_or_else(|| create_block_info(0, HashValue::random())); + // Create the block payload - BlockPayload::new( - create_block_info(0, HashValue::random()), - transaction_payload, - ) + BlockPayload::new(block_info, transaction_payload) } /// Creates and returns a new ledger info with an empty signature set @@ -1257,6 +1412,43 @@ mod test { ) } + /// Creates and returns a set of batch proofs using the given block + /// timestamp and transactions. Note: some batches will be expired. 
+ fn create_mixed_expiration_proofs( + block_timestamp: u64, + signed_transactions: &[SignedTransaction], + ) -> (Vec, Vec) { + let mut proofs = vec![]; + let mut non_expired_transactions = vec![]; + + // Create multiple batch proofs (each batch has 1 transaction, and some batches are expired) + for (i, transaction) in signed_transactions.iter().enumerate() { + // Expire every other (odd) batch and transaction + let is_batch_expired = i % 2 != 0; + + // Determine the expiration time for the batch + let batch_expiration = if is_batch_expired { + block_timestamp - 1 // Older than the block timestamp + } else { + block_timestamp + 1 // Newer than the block timestamp + }; + + // Create and store the batch proof + let batch_payload = BatchPayload::new(PeerId::ZERO, vec![transaction.clone()]); + let batch_info = + create_batch_info_with_digest(batch_payload.hash(), 1, batch_expiration); + let proof = ProofOfStore::new(batch_info, AggregateSignature::empty()); + proofs.push(proof); + + // Save the non-expired transactions + if !is_batch_expired { + non_expired_transactions.push(transaction.clone()); + } + } + + (proofs, non_expired_transactions) + } + /// Creates and returns a new pipelined block with the given block info fn create_pipelined_block(block_info: BlockInfo) -> Arc { let block_data = BlockData::new_for_testing( diff --git a/consensus/src/consensus_observer/observer/active_state.rs b/consensus/src/consensus_observer/observer/active_state.rs index 73c03af670e..f162fab553e 100644 --- a/consensus/src/consensus_observer/observer/active_state.rs +++ b/consensus/src/consensus_observer/observer/active_state.rs @@ -101,8 +101,8 @@ impl ActiveObserverState { /// root ledger info and remove the blocks from the given stores. 
pub fn create_commit_callback( &self, - pending_ordered_blocks: OrderedBlockStore, - block_payload_store: BlockPayloadStore, + pending_ordered_blocks: Arc>, + block_payload_store: Arc>, ) -> StateComputerCommitCallBackType { // Clone the root pointer let root = self.root.clone(); @@ -243,7 +243,7 @@ async fn extract_on_chain_configs( let onchain_randomness_config_seq_num: anyhow::Result = on_chain_configs.get(); if let Err(error) = &onchain_randomness_config_seq_num { - error!( + warn!( LogSchema::new(LogEntry::ConsensusObserver).message(&format!( "Failed to read on-chain randomness config seq num! Error: {:?}", error @@ -282,15 +282,17 @@ async fn extract_on_chain_configs( /// A simple helper function that handles the committed blocks /// (as part of the commit callback). fn handle_committed_blocks( - pending_ordered_blocks: OrderedBlockStore, - block_payload_store: BlockPayloadStore, + pending_ordered_blocks: Arc>, + block_payload_store: Arc>, root: Arc>, blocks: &[Arc], ledger_info: LedgerInfoWithSignatures, ) { // Remove the committed blocks from the payload and pending stores - block_payload_store.remove_committed_blocks(blocks); - pending_ordered_blocks.remove_blocks_for_commit(&ledger_info); + block_payload_store.lock().remove_committed_blocks(blocks); + pending_ordered_blocks + .lock() + .remove_blocks_for_commit(&ledger_info); // Verify the ledger info is for the same epoch let mut root = root.lock(); @@ -407,8 +409,12 @@ mod test { let root = Arc::new(Mutex::new(create_ledger_info(epoch, round))); // Create the ordered block store and block payload store - let ordered_block_store = OrderedBlockStore::new(node_config.consensus_observer); - let mut block_payload_store = BlockPayloadStore::new(node_config.consensus_observer); + let ordered_block_store = Arc::new(Mutex::new(OrderedBlockStore::new( + node_config.consensus_observer, + ))); + let block_payload_store = Arc::new(Mutex::new(BlockPayloadStore::new( + node_config.consensus_observer, + ))); // Handle 
the committed blocks at the wrong epoch and verify the root is not updated handle_committed_blocks( @@ -432,12 +438,16 @@ mod test { // Add pending ordered blocks let num_ordered_blocks = 10; - let ordered_blocks = - create_and_add_ordered_blocks(&ordered_block_store, num_ordered_blocks, epoch, round); + let ordered_blocks = create_and_add_ordered_blocks( + ordered_block_store.clone(), + num_ordered_blocks, + epoch, + round, + ); // Add block payloads for the ordered blocks for ordered_block in &ordered_blocks { - create_and_add_payloads_for_ordered_block(&mut block_payload_store, ordered_block); + create_and_add_payloads_for_ordered_block(block_payload_store.clone(), ordered_block); } // Create the commit ledger info (for the second to last block) @@ -461,8 +471,11 @@ mod test { ); // Verify the committed blocks are removed from the stores - assert_eq!(ordered_block_store.get_all_ordered_blocks().len(), 1); - assert_eq!(block_payload_store.get_block_payloads().lock().len(), 1); + assert_eq!(ordered_block_store.lock().get_all_ordered_blocks().len(), 1); + assert_eq!( + block_payload_store.lock().get_block_payloads().lock().len(), + 1 + ); // Verify the root is updated assert_eq!(root.lock().clone(), committed_ledger_info); @@ -495,7 +508,7 @@ mod test { /// Creates and adds the specified number of ordered blocks to the ordered blocks fn create_and_add_ordered_blocks( - ordered_block_store: &OrderedBlockStore, + ordered_block_store: Arc>, num_ordered_blocks: usize, epoch: u64, starting_round: Round, @@ -532,7 +545,9 @@ mod test { let ordered_block = OrderedBlock::new(blocks, ordered_proof); // Insert the block into the ordered block store - ordered_block_store.insert_ordered_block(ordered_block.clone()); + ordered_block_store + .lock() + .insert_ordered_block(ordered_block.clone()); // Add the block to the ordered blocks ordered_blocks.push(ordered_block); @@ -543,13 +558,15 @@ mod test { /// Creates and adds payloads for the ordered block fn 
create_and_add_payloads_for_ordered_block( - block_payload_store: &mut BlockPayloadStore, + block_payload_store: Arc>, ordered_block: &OrderedBlock, ) { for block in ordered_block.blocks() { let block_payload = BlockPayload::new(block.block_info(), BlockTransactionPayload::empty()); - block_payload_store.insert_block_payload(block_payload, true); + block_payload_store + .lock() + .insert_block_payload(block_payload, true); } } diff --git a/consensus/src/consensus_observer/observer/consensus_observer.rs b/consensus/src/consensus_observer/observer/consensus_observer.rs index 250b338d233..032a3fa38f8 100644 --- a/consensus/src/consensus_observer/observer/consensus_observer.rs +++ b/consensus/src/consensus_observer/observer/consensus_observer.rs @@ -28,10 +28,14 @@ use crate::{ pipeline::execution_client::TExecutionClient, }; use aptos_channels::{aptos_channel, aptos_channel::Receiver, message_queues::QueueStyle}; -use aptos_config::config::{ConsensusObserverConfig, NodeConfig}; +use aptos_config::{ + config::{ConsensusObserverConfig, NodeConfig}, + network_id::PeerNetworkId, +}; use aptos_consensus_types::{pipeline, pipelined_block::PipelinedBlock}; use aptos_crypto::{bls12381, Genesis}; use aptos_event_notifications::{DbBackedOnChainConfig, ReconfigNotificationListener}; +use aptos_infallible::Mutex; use aptos_logger::{debug, error, info, warn}; use aptos_network::{ application::interface::NetworkClient, protocols::wire::handshake::v1::ProtocolId, @@ -63,13 +67,13 @@ pub struct ConsensusObserver { active_observer_state: ActiveObserverState, // The block payload store (containing the block transaction payloads) - block_payload_store: BlockPayloadStore, + block_payload_store: Arc>, // The ordered block store (containing ordered blocks that are ready for execution) - ordered_block_store: OrderedBlockStore, + ordered_block_store: Arc>, // The pending block store (containing pending blocks that are without payloads) - pending_block_store: PendingBlockStore, + 
pending_block_store: Arc>, // The execution client to the buffer manager execution_client: Arc, @@ -81,7 +85,7 @@ pub struct ConsensusObserver { // The flag indicates if we're waiting to transition to a new epoch. sync_handle: Option<(DropGuard, bool)>, - // The subscription manager + // The consensus observer subscription manager subscription_manager: SubscriptionManager, } @@ -116,12 +120,17 @@ impl ConsensusObserver { let active_observer_state = ActiveObserverState::new(node_config, db_reader, reconfig_events, consensus_publisher); + // Create the block and payload stores + let ordered_block_store = OrderedBlockStore::new(consensus_observer_config); + let block_payload_store = BlockPayloadStore::new(consensus_observer_config); + let pending_block_store = PendingBlockStore::new(consensus_observer_config); + // Create the consensus observer Self { active_observer_state, - ordered_block_store: OrderedBlockStore::new(consensus_observer_config), - block_payload_store: BlockPayloadStore::new(consensus_observer_config), - pending_block_store: PendingBlockStore::new(consensus_observer_config), + ordered_block_store: Arc::new(Mutex::new(ordered_block_store)), + block_payload_store: Arc::new(Mutex::new(block_payload_store)), + pending_block_store: Arc::new(Mutex::new(pending_block_store)), execution_client, sync_notification_sender, sync_handle: None, @@ -137,7 +146,7 @@ impl ConsensusObserver { } // Otherwise, check if all the payloads exist in the payload store - self.block_payload_store.all_payloads_exist(blocks) + self.block_payload_store.lock().all_payloads_exist(blocks) } /// Checks the progress of the consensus observer @@ -156,13 +165,15 @@ impl ConsensusObserver { return; } - // Otherwise, check the health of the active subscription - let new_subscription_created = self + // Otherwise, check the health of the active subscriptions + if let Err(error) = self .subscription_manager .check_and_manage_subscriptions() - .await; - if new_subscription_created { - // Clear 
the pending block state (a new subscription was created) + .await + { + // Log the failure and clear the pending block state + warn!(LogSchema::new(LogEntry::ConsensusObserver) + .message(&format!("Subscription checks failed! Error: {:?}", error))); self.clear_pending_block_state().await; } } @@ -171,13 +182,13 @@ impl ConsensusObserver { /// subscriptions, where we want to wipe all state and restart). async fn clear_pending_block_state(&self) { // Clear the payload store - self.block_payload_store.clear_all_payloads(); + self.block_payload_store.lock().clear_all_payloads(); // Clear the pending blocks - self.pending_block_store.clear_missing_blocks(); + self.pending_block_store.lock().clear_missing_blocks(); // Clear the ordered blocks - self.ordered_block_store.clear_all_ordered_blocks(); + self.ordered_block_store.lock().clear_all_ordered_blocks(); // Reset the execution pipeline for the root let root = self.active_observer_state.root(); @@ -189,6 +200,9 @@ impl ConsensusObserver { )) ); } + + // Increment the cleared block state counter + metrics::increment_counter_without_labels(&metrics::OBSERVER_CLEARED_BLOCK_STATE); } /// Finalizes the ordered block by sending it to the execution pipeline @@ -256,10 +270,25 @@ impl ConsensusObserver { self.active_observer_state.epoch_state() } - /// Returns the last known block - fn get_last_block(&self) -> BlockInfo { - if let Some(last_pending_block) = self.ordered_block_store.get_last_ordered_block() { - last_pending_block + /// Returns the highest committed block epoch and round + fn get_highest_committed_epoch_round(&self) -> (u64, Round) { + if let Some(epoch_round) = self + .ordered_block_store + .lock() + .get_highest_committed_epoch_round() + { + epoch_round + } else { + // Return the root epoch and round + let root_block_info = self.active_observer_state.root().commit_info().clone(); + (root_block_info.epoch(), root_block_info.round()) + } + } + + /// Returns the last ordered block + fn 
get_last_ordered_block(&self) -> BlockInfo { + if let Some(last_ordered_block) = self.ordered_block_store.lock().get_last_ordered_block() { + last_ordered_block } else { // Return the root ledger info self.active_observer_state.root().commit_info().clone() @@ -278,34 +307,55 @@ impl ConsensusObserver { /// Orders any ready pending blocks for the given epoch and round async fn order_ready_pending_block(&mut self, block_epoch: u64, block_round: Round) { - if let Some(ordered_block) = self.pending_block_store.remove_ready_block( + // Get any ready ordered block + let ready_ordered_block = self.pending_block_store.lock().remove_ready_block( block_epoch, block_round, - &self.block_payload_store, - ) { - self.process_ordered_block(ordered_block).await; + self.block_payload_store.clone(), + ); + + // Process the ready ordered block (if it exists) + if let Some(ready_ordered_block) = ready_ordered_block { + self.process_ordered_block(ready_ordered_block).await; } } /// Processes the block payload message - async fn process_block_payload_message(&mut self, block_payload: BlockPayload) { + async fn process_block_payload_message( + &mut self, + peer_network_id: PeerNetworkId, + block_payload: BlockPayload, + ) { // Get the epoch and round for the block - let block_epoch = block_payload.block.epoch(); - let block_round = block_payload.block.round(); + let block_epoch = block_payload.epoch(); + let block_round = block_payload.round(); + + // Determine if the payload is behind the last ordered block, or if it already exists + let last_ordered_block = self.get_last_ordered_block(); + let payload_out_of_date = + (block_epoch, block_round) <= (last_ordered_block.epoch(), last_ordered_block.round()); + let payload_exists = self + .block_payload_store + .lock() + .existing_payload_entry(&block_payload); + + // If the payload is out of date or already exists, ignore it + if payload_out_of_date || payload_exists { + // Update the metrics for the dropped block payload + 
update_metrics_for_dropped_block_payload_message(peer_network_id, &block_payload); + return; + } // Update the metrics for the received block payload - metrics::set_gauge_with_label( - &metrics::OBSERVER_RECEIVED_MESSAGE_ROUNDS, - metrics::BLOCK_PAYLOAD_LABEL, - block_round, - ); + update_metrics_for_block_payload_message(peer_network_id, &block_payload); // Verify the block payload digests if let Err(error) = block_payload.verify_payload_digests() { error!( LogSchema::new(LogEntry::ConsensusObserver).message(&format!( "Failed to verify block payload digests! Ignoring block: {:?}. Error: {:?}", - block_payload.block, error + block_payload.block(), + error )) ); return; @@ -319,7 +369,7 @@ impl ConsensusObserver { error!( LogSchema::new(LogEntry::ConsensusObserver).message(&format!( "Failed to verify block payload signatures! Ignoring block: {:?}. Error: {:?}", - block_payload.block, error + block_payload.block(), error )) ); return; @@ -332,6 +382,7 @@ impl ConsensusObserver { // Update the payload store with the payload self.block_payload_store + .lock() .insert_block_payload(block_payload, verified_payload); // Check if there are blocks that were missing payloads but are @@ -344,18 +395,28 @@ impl ConsensusObserver { } /// Processes the commit decision message - fn process_commit_decision_message(&mut self, commit_decision: CommitDecision) { + fn process_commit_decision_message( + &mut self, + peer_network_id: PeerNetworkId, + commit_decision: CommitDecision, + ) { + // Get the commit decision epoch and round + let commit_epoch = commit_decision.epoch(); + let commit_round = commit_decision.round(); + + // If the commit message is behind our highest committed block, ignore it + if (commit_epoch, commit_round) <= self.get_highest_committed_epoch_round() { + // Update the metrics for the dropped commit decision + update_metrics_for_dropped_commit_decision_message(peer_network_id, &commit_decision); + return; + } + // Update the metrics for the received commit 
decision - metrics::set_gauge_with_label( - &metrics::OBSERVER_RECEIVED_MESSAGE_ROUNDS, - metrics::COMMIT_DECISION_LABEL, - commit_decision.round(), - ); + update_metrics_for_commit_decision_message(peer_network_id, &commit_decision); // If the commit decision is for the current epoch, verify and process it let epoch_state = self.get_epoch_state(); - let commit_decision_epoch = commit_decision.epoch(); - if commit_decision_epoch == epoch_state.epoch { + if commit_epoch == epoch_state.epoch { // Verify the commit decision if let Err(error) = commit_decision.verify_commit_proof(&epoch_state) { error!( @@ -379,10 +440,9 @@ impl ConsensusObserver { // Otherwise, we failed to process the commit decision. If the commit // is for a future epoch or round, we need to state sync. - let last_block = self.get_last_block(); - let commit_decision_round = commit_decision.round(); - let epoch_changed = commit_decision_epoch > last_block.epoch(); - if epoch_changed || commit_decision_round > last_block.round() { + let last_block = self.get_last_ordered_block(); + let epoch_changed = commit_epoch > last_block.epoch(); + if epoch_changed || commit_round > last_block.round() { // If we're waiting for state sync to transition into a new epoch, // we should just wait and not issue a new state sync request. 
if self.in_state_sync_epoch_change() { @@ -408,15 +468,17 @@ impl ConsensusObserver { self.active_observer_state .update_root(commit_decision.commit_proof().clone()); self.block_payload_store - .remove_blocks_for_epoch_round(commit_decision_epoch, commit_decision_round); + .lock() + .remove_blocks_for_epoch_round(commit_epoch, commit_round); self.ordered_block_store + .lock() .remove_blocks_for_commit(commit_decision.commit_proof()); // Start the state sync process let abort_handle = sync_to_commit_decision( commit_decision, - commit_decision_epoch, - commit_decision_round, + commit_epoch, + commit_round, self.execution_client.clone(), self.sync_notification_sender.clone(), ); @@ -431,6 +493,7 @@ impl ConsensusObserver { // Get the pending block for the commit decision let pending_block = self .ordered_block_store + .lock() .get_ordered_block(commit_decision.epoch(), commit_decision.round()); // Process the pending block @@ -444,6 +507,7 @@ impl ConsensusObserver { )) ); self.ordered_block_store + .lock() .update_commit_decision(commit_decision); // If we are not in sync mode, forward the commit decision to the execution pipeline @@ -469,23 +533,30 @@ impl ConsensusObserver { // Unpack the network message let (peer_network_id, message) = network_message.into_parts(); - // Verify the message is from the peer we've subscribed to + // Verify the message is from the peers we've subscribed to if let Err(error) = self .subscription_manager - .verify_message_sender(peer_network_id) + .verify_message_for_subscription(peer_network_id) { + // Increment the rejected message counter + metrics::increment_counter( + &metrics::OBSERVER_REJECTED_MESSAGES, + message.get_label(), + &peer_network_id, + ); + + // Log the error and return warn!( LogSchema::new(LogEntry::ConsensusObserver).message(&format!( - "Message failed subscription sender verification! Error: {:?}", + "Received message that was not from an active subscription! 
Error: {:?}", error, )) ); - return; } // Increment the received message counter - metrics::increment_request_counter( + metrics::increment_counter( &metrics::OBSERVER_RECEIVED_MESSAGES, message.get_label(), &peer_network_id, @@ -494,39 +565,15 @@ impl ConsensusObserver { // Process the message based on the type match message { ConsensusObserverDirectSend::OrderedBlock(ordered_block) => { - // Log the received ordered block message - let log_message = format!( - "Received ordered block: {}, from peer: {}!", - ordered_block.proof_block_info(), - peer_network_id - ); - log_received_message(log_message); - - // Process the ordered block message - self.process_ordered_block_message(ordered_block).await; + self.process_ordered_block_message(peer_network_id, ordered_block) + .await; }, ConsensusObserverDirectSend::CommitDecision(commit_decision) => { - // Log the received commit decision message - let log_message = format!( - "Received commit decision: {}, from peer: {}!", - commit_decision.proof_block_info(), - peer_network_id - ); - log_received_message(log_message); - - // Process the commit decision message - self.process_commit_decision_message(commit_decision); + self.process_commit_decision_message(peer_network_id, commit_decision); }, ConsensusObserverDirectSend::BlockPayload(block_payload) => { - // Log the received block payload message - let log_message = format!( - "Received block payload: {}, from peer: {}!", - block_payload.block, peer_network_id - ); - log_received_message(log_message); - - // Process the block payload message - self.process_block_payload_message(block_payload).await; + self.process_block_payload_message(peer_network_id, block_payload) + .await; }, } @@ -535,7 +582,11 @@ impl ConsensusObserver { } /// Processes the ordered block - async fn process_ordered_block_message(&mut self, ordered_block: OrderedBlock) { + async fn process_ordered_block_message( + &mut self, + peer_network_id: PeerNetworkId, + ordered_block: OrderedBlock, + ) { // 
Verify the ordered blocks before processing if let Err(error) = ordered_block.verify_ordered_blocks() { error!( @@ -548,12 +599,37 @@ impl ConsensusObserver { return; }; + // Get the epoch and round of the first block + let first_block = ordered_block.first_block(); + let first_block_epoch_round = (first_block.epoch(), first_block.round()); + + // Determine if the block is behind the last ordered block, or if it is already pending + let last_ordered_block = self.get_last_ordered_block(); + let block_out_of_date = + first_block_epoch_round <= (last_ordered_block.epoch(), last_ordered_block.round()); + let block_pending = self + .pending_block_store + .lock() + .existing_pending_block(&ordered_block); + + // If the block is out of date or already pending, ignore it + if block_out_of_date || block_pending { + // Update the metrics for the dropped ordered block + update_metrics_for_dropped_ordered_block_message(peer_network_id, &ordered_block); + return; + } + + // Update the metrics for the received ordered block + update_metrics_for_ordered_block_message(peer_network_id, &ordered_block); + // If all payloads exist, process the block. Otherwise, store it // in the pending block store and wait for the payloads to arrive. if self.all_payloads_exist(ordered_block.blocks()) { self.process_ordered_block(ordered_block).await; } else { - self.pending_block_store.insert_pending_block(ordered_block); + self.pending_block_store + .lock() + .insert_pending_block(ordered_block); } } @@ -587,6 +663,7 @@ impl ConsensusObserver { // Verify the block payloads against the ordered block if let Err(error) = self .block_payload_store + .lock() .verify_payloads_against_ordered_block(&ordered_block) { error!( @@ -601,9 +678,10 @@ impl ConsensusObserver { // The block was verified correctly. If the block is a child of our // last block, we can insert it into the ordered block store. 
- if self.get_last_block().id() == ordered_block.first_block().parent_id() { + if self.get_last_ordered_block().id() == ordered_block.first_block().parent_id() { // Insert the ordered block into the pending blocks self.ordered_block_store + .lock() .insert_ordered_block(ordered_block.clone()); // If we're not in sync mode, finalize the ordered blocks @@ -655,6 +733,7 @@ impl ConsensusObserver { let new_epoch_state = self.get_epoch_state(); let verified_payload_rounds = self .block_payload_store + .lock() .verify_payload_signatures(&new_epoch_state); // Order all the pending blocks that are now ready (these were buffered during state sync) @@ -668,9 +747,8 @@ impl ConsensusObserver { self.sync_handle = None; // Process all the newly ordered blocks - for (_, (ordered_block, commit_decision)) in - self.ordered_block_store.get_all_ordered_blocks() - { + let all_ordered_blocks = self.ordered_block_store.lock().get_all_ordered_blocks(); + for (_, (ordered_block, commit_decision)) in all_ordered_blocks { // Finalize the ordered block self.finalize_ordered_block(ordered_block).await; @@ -684,19 +762,25 @@ impl ConsensusObserver { /// Updates the metrics for the processed blocks fn update_processed_blocks_metrics(&self) { // Update the payload store metrics - self.block_payload_store.update_payload_store_metrics(); + self.block_payload_store + .lock() + .update_payload_store_metrics(); // Update the pending block metrics - self.pending_block_store.update_pending_blocks_metrics(); + self.pending_block_store + .lock() + .update_pending_blocks_metrics(); // Update the pending block metrics - self.ordered_block_store.update_ordered_blocks_metrics(); + self.ordered_block_store + .lock() + .update_ordered_blocks_metrics(); } /// Waits for a new epoch to start async fn wait_for_epoch_start(&mut self) { // Wait for the active state epoch to update - let block_payloads = self.block_payload_store.get_block_payloads(); + let block_payloads = 
self.block_payload_store.lock().get_block_payloads(); let (payload_manager, consensus_config, execution_config, randomness_config) = self .active_observer_state .wait_for_epoch_start(block_payloads) @@ -822,3 +906,135 @@ fn sync_to_commit_decision( )); abort_handle } + +/// Updates the metrics for the received block payload message +fn update_metrics_for_block_payload_message( + peer_network_id: PeerNetworkId, + block_payload: &BlockPayload, +) { + // Log the received block payload message + let log_message = format!( + "Received block payload: {}, from peer: {}!", + block_payload.block(), + peer_network_id + ); + log_received_message(log_message); + + // Update the metrics for the received block payload + metrics::set_gauge_with_label( + &metrics::OBSERVER_RECEIVED_MESSAGE_ROUNDS, + metrics::BLOCK_PAYLOAD_LABEL, + block_payload.round(), + ); +} + +/// Updates the metrics for the received commit decision message +fn update_metrics_for_commit_decision_message( + peer_network_id: PeerNetworkId, + commit_decision: &CommitDecision, +) { + // Log the received commit decision message + let log_message = format!( + "Received commit decision: {}, from peer: {}!", + commit_decision.proof_block_info(), + peer_network_id + ); + log_received_message(log_message); + + // Update the metrics for the received commit decision + metrics::set_gauge_with_label( + &metrics::OBSERVER_RECEIVED_MESSAGE_ROUNDS, + metrics::COMMIT_DECISION_LABEL, + commit_decision.round(), + ); +} + +/// Updates the metrics for the dropped block payload message +fn update_metrics_for_dropped_block_payload_message( + peer_network_id: PeerNetworkId, + block_payload: &BlockPayload, +) { + // Increment the dropped message counter + metrics::increment_counter( + &metrics::OBSERVER_DROPPED_MESSAGES, + metrics::BLOCK_PAYLOAD_LABEL, + &peer_network_id, + ); + + // Log the dropped block payload message + debug!( + LogSchema::new(LogEntry::ConsensusObserver).message(&format!( + "Ignoring block payload message from 
peer: {:?}! Block epoch and round: ({}, {})", + peer_network_id, + block_payload.epoch(), + block_payload.round() + )) + ); +} + +/// Updates the metrics for the dropped commit decision message +fn update_metrics_for_dropped_commit_decision_message( + peer_network_id: PeerNetworkId, + commit_decision: &CommitDecision, +) { + // Increment the dropped message counter + metrics::increment_counter( + &metrics::OBSERVER_DROPPED_MESSAGES, + metrics::COMMITTED_BLOCKS_LABEL, + &peer_network_id, + ); + + // Log the dropped commit decision message + debug!( + LogSchema::new(LogEntry::ConsensusObserver).message(&format!( + "Ignoring commit decision message from peer: {:?}! Commit epoch and round: ({}, {})", + peer_network_id, + commit_decision.epoch(), + commit_decision.round() + )) + ); +} + +/// Updates the metrics for the dropped ordered block message +fn update_metrics_for_dropped_ordered_block_message( + peer_network_id: PeerNetworkId, + ordered_block: &OrderedBlock, +) { + // Increment the dropped message counter + metrics::increment_counter( + &metrics::OBSERVER_DROPPED_MESSAGES, + metrics::ORDERED_BLOCK_LABEL, + &peer_network_id, + ); + + // Log the dropped ordered block message + debug!( + LogSchema::new(LogEntry::ConsensusObserver).message(&format!( + "Ignoring ordered block message from peer: {:?}! 
Block epoch and round: ({}, {})", + peer_network_id, + ordered_block.proof_block_info().epoch(), + ordered_block.proof_block_info().round() + )) + ); +} + +/// Updates the metrics for the received ordered block message +fn update_metrics_for_ordered_block_message( + peer_network_id: PeerNetworkId, + ordered_block: &OrderedBlock, +) { + // Log the received ordered block message + let log_message = format!( + "Received ordered block: {}, from peer: {}!", + ordered_block.proof_block_info(), + peer_network_id + ); + log_received_message(log_message); + + // Update the metrics for the received ordered block + metrics::set_gauge_with_label( + &metrics::OBSERVER_RECEIVED_MESSAGE_ROUNDS, + metrics::ORDERED_BLOCK_LABEL, + ordered_block.proof_block_info().round(), + ); +} diff --git a/consensus/src/consensus_observer/observer/mod.rs b/consensus/src/consensus_observer/observer/mod.rs index 35dd0ea2ec7..4a4e5d42881 100644 --- a/consensus/src/consensus_observer/observer/mod.rs +++ b/consensus/src/consensus_observer/observer/mod.rs @@ -8,3 +8,4 @@ pub mod payload_store; pub mod pending_blocks; pub mod subscription; pub mod subscription_manager; +pub mod subscription_utils; diff --git a/consensus/src/consensus_observer/observer/ordered_blocks.rs b/consensus/src/consensus_observer/observer/ordered_blocks.rs index edfde50a4ed..a2408b3a4b2 100644 --- a/consensus/src/consensus_observer/observer/ordered_blocks.rs +++ b/consensus/src/consensus_observer/observer/ordered_blocks.rs @@ -10,46 +10,52 @@ use crate::consensus_observer::{ }; use aptos_config::config::ConsensusObserverConfig; use aptos_consensus_types::common::Round; -use aptos_infallible::Mutex; use aptos_logger::{debug, warn}; use aptos_types::{block_info::BlockInfo, ledger_info::LedgerInfoWithSignatures}; -use std::{collections::BTreeMap, sync::Arc}; +use std::collections::BTreeMap; /// A simple struct to store ordered blocks -#[derive(Clone)] pub struct OrderedBlockStore { // The configuration of the consensus observer 
consensus_observer_config: ConsensusObserverConfig, + // The highest committed block (epoch and round) + highest_committed_epoch_round: Option<(u64, Round)>, + // Ordered blocks. The key is the epoch and round of the last block in the // ordered block. Each entry contains the block and the commit decision (if any). - ordered_blocks: Arc)>>>, + ordered_blocks: BTreeMap<(u64, Round), (OrderedBlock, Option)>, } impl OrderedBlockStore { pub fn new(consensus_observer_config: ConsensusObserverConfig) -> Self { Self { consensus_observer_config, - ordered_blocks: Arc::new(Mutex::new(BTreeMap::new())), + highest_committed_epoch_round: None, + ordered_blocks: BTreeMap::new(), } } /// Clears all ordered blocks - pub fn clear_all_ordered_blocks(&self) { - self.ordered_blocks.lock().clear(); + pub fn clear_all_ordered_blocks(&mut self) { + self.ordered_blocks.clear(); } /// Returns a copy of the ordered blocks pub fn get_all_ordered_blocks( &self, ) -> BTreeMap<(u64, Round), (OrderedBlock, Option)> { - self.ordered_blocks.lock().clone() + self.ordered_blocks.clone() + } + + /// Returns the highest committed epoch and round (if any) + pub fn get_highest_committed_epoch_round(&self) -> Option<(u64, Round)> { + self.highest_committed_epoch_round } /// Returns the last ordered block (if any) pub fn get_last_ordered_block(&self) -> Option { self.ordered_blocks - .lock() .last_key_value() .map(|(_, (ordered_block, _))| ordered_block.last_block().block_info()) } @@ -57,7 +63,6 @@ impl OrderedBlockStore { /// Returns the ordered block for the given epoch and round (if any) pub fn get_ordered_block(&self, epoch: u64, round: Round) -> Option { self.ordered_blocks - .lock() .get(&(epoch, round)) .map(|(ordered_block, _)| ordered_block.clone()) } @@ -65,10 +70,10 @@ impl OrderedBlockStore { /// Inserts the given ordered block into the ordered blocks. 
This function /// assumes the block has already been checked to extend the current ordered /// blocks, and that the ordered proof has been verified. - pub fn insert_ordered_block(&self, ordered_block: OrderedBlock) { + pub fn insert_ordered_block(&mut self, ordered_block: OrderedBlock) { // Verify that the number of ordered blocks doesn't exceed the maximum let max_num_ordered_blocks = self.consensus_observer_config.max_num_pending_blocks as usize; - if self.ordered_blocks.lock().len() >= max_num_ordered_blocks { + if self.ordered_blocks.len() >= max_num_ordered_blocks { warn!( LogSchema::new(LogEntry::ConsensusObserver).message(&format!( "Exceeded the maximum number of ordered blocks: {:?}. Dropping block: {:?}.", @@ -94,42 +99,70 @@ impl OrderedBlockStore { // Insert the ordered block self.ordered_blocks - .lock() .insert((last_block_epoch, last_block_round), (ordered_block, None)); } /// Removes the ordered blocks for the given commit ledger info. This will /// remove all blocks up to (and including) the epoch and round of the commit. 
- pub fn remove_blocks_for_commit(&self, commit_ledger_info: &LedgerInfoWithSignatures) { + pub fn remove_blocks_for_commit(&mut self, commit_ledger_info: &LedgerInfoWithSignatures) { // Determine the epoch and round to split off let split_off_epoch = commit_ledger_info.ledger_info().epoch(); let split_off_round = commit_ledger_info.commit_info().round().saturating_add(1); // Remove the blocks from the ordered blocks - let mut ordered_blocks = self.ordered_blocks.lock(); - *ordered_blocks = ordered_blocks.split_off(&(split_off_epoch, split_off_round)); + self.ordered_blocks = self + .ordered_blocks + .split_off(&(split_off_epoch, split_off_round)); + + // Update the highest committed epoch and round + self.update_highest_committed_epoch_round(commit_ledger_info); } /// Updates the commit decision of the ordered block (if found) - pub fn update_commit_decision(&self, commit_decision: &CommitDecision) { + pub fn update_commit_decision(&mut self, commit_decision: &CommitDecision) { // Get the epoch and round of the commit decision let commit_decision_epoch = commit_decision.epoch(); let commit_decision_round = commit_decision.round(); // Update the commit decision for the ordered blocks - let mut ordered_blocks = self.ordered_blocks.lock(); - if let Some((_, existing_commit_decision)) = - ordered_blocks.get_mut(&(commit_decision_epoch, commit_decision_round)) + if let Some((_, existing_commit_decision)) = self + .ordered_blocks + .get_mut(&(commit_decision_epoch, commit_decision_round)) { *existing_commit_decision = Some(commit_decision.clone()); } + + // Update the highest committed epoch and round + self.update_highest_committed_epoch_round(commit_decision.commit_proof()); + } + + /// Updates the highest committed epoch and round based on the commit ledger info + fn update_highest_committed_epoch_round( + &mut self, + commit_ledger_info: &LedgerInfoWithSignatures, + ) { + // Get the epoch and round of the commit ledger info + let commit_epoch = 
commit_ledger_info.ledger_info().epoch(); + let commit_round = commit_ledger_info.commit_info().round(); + let commit_epoch_round = (commit_epoch, commit_round); + + // Update the highest committed epoch and round (if appropriate) + match self.highest_committed_epoch_round { + Some(highest_committed_epoch_round) => { + if commit_epoch_round > highest_committed_epoch_round { + self.highest_committed_epoch_round = Some(commit_epoch_round); + } + }, + None => { + self.highest_committed_epoch_round = Some(commit_epoch_round); + }, + } } /// Updates the metrics for the ordered blocks pub fn update_ordered_blocks_metrics(&self) { // Update the number of ordered block entries - let ordered_blocks = self.ordered_blocks.lock(); - let num_entries = ordered_blocks.len() as u64; + let num_entries = self.ordered_blocks.len() as u64; metrics::set_gauge_with_label( &metrics::OBSERVER_NUM_PROCESSED_BLOCKS, metrics::ORDERED_BLOCK_ENTRIES_LABEL, @@ -137,26 +170,39 @@ impl OrderedBlockStore { ); // Update the total number of ordered blocks - let num_ordered_blocks = ordered_blocks + let num_ordered_blocks = self + .ordered_blocks .values() .map(|(ordered_block, _)| ordered_block.blocks().len() as u64) .sum(); metrics::set_gauge_with_label( &metrics::OBSERVER_NUM_PROCESSED_BLOCKS, - metrics::ORDERED_BLOCKS_LABEL, + metrics::ORDERED_BLOCK_LABEL, num_ordered_blocks, ); // Update the highest round for the ordered blocks - let highest_ordered_round = ordered_blocks + let highest_ordered_round = self + .ordered_blocks .last_key_value() .map(|(_, (ordered_block, _))| ordered_block.last_block().round()) .unwrap_or(0); metrics::set_gauge_with_label( &metrics::OBSERVER_PROCESSED_BLOCK_ROUNDS, - metrics::ORDERED_BLOCKS_LABEL, + metrics::ORDERED_BLOCK_LABEL, highest_ordered_round, ); + + // Update the highest round for the committed blocks + let highest_committed_round = self + .highest_committed_epoch_round + .map(|(_, round)| round) + .unwrap_or(0); + metrics::set_gauge_with_label( + 
&metrics::OBSERVER_PROCESSED_BLOCK_ROUNDS, + metrics::COMMITTED_BLOCKS_LABEL, + highest_committed_round, + ); } } @@ -173,28 +219,128 @@ mod test { use aptos_types::{ aggregate_signature::AggregateSignature, ledger_info::LedgerInfo, transaction::Version, }; + use std::sync::Arc; #[test] fn test_clear_all_ordered_blocks() { // Create a new ordered block store - let ordered_block_store = OrderedBlockStore::new(ConsensusObserverConfig::default()); + let mut ordered_block_store = OrderedBlockStore::new(ConsensusObserverConfig::default()); // Insert several ordered blocks for the current epoch let current_epoch = 0; let num_ordered_blocks = 10; - create_and_add_ordered_blocks(&ordered_block_store, num_ordered_blocks, current_epoch); + create_and_add_ordered_blocks(&mut ordered_block_store, num_ordered_blocks, current_epoch); // Clear all ordered blocks ordered_block_store.clear_all_ordered_blocks(); // Check that all the ordered blocks were removed - assert!(ordered_block_store.ordered_blocks.lock().is_empty()); + assert!(ordered_block_store.ordered_blocks.is_empty()); + } + + #[test] + fn test_get_highest_committed_epoch_round() { + // Create a new ordered block store + let mut ordered_block_store = OrderedBlockStore::new(ConsensusObserverConfig::default()); + + // Verify that we have no highest committed epoch and round + assert!(ordered_block_store + .get_highest_committed_epoch_round() + .is_none()); + + // Insert several ordered blocks for the current epoch + let current_epoch = 10; + let num_ordered_blocks = 50; + let ordered_blocks = create_and_add_ordered_blocks( + &mut ordered_block_store, + num_ordered_blocks, + current_epoch, + ); + + // Create a commit decision for the first ordered block + let first_ordered_block = ordered_blocks.first().unwrap(); + let first_ordered_block_info = first_ordered_block.last_block().block_info(); + let commit_decision = CommitDecision::new(LedgerInfoWithSignatures::new( + LedgerInfo::new(first_ordered_block_info.clone(), 
HashValue::random()), + AggregateSignature::empty(), + )); + + // Update the commit decision for the first ordered block + ordered_block_store.update_commit_decision(&commit_decision); + + // Verify the highest committed epoch and round is the first ordered block + verify_highest_committed_epoch_round(&ordered_block_store, &first_ordered_block_info); + + // Create a commit decision for the last ordered block + let last_ordered_block = ordered_blocks.last().unwrap(); + let last_ordered_block_info = last_ordered_block.last_block().block_info(); + let commit_decision = CommitDecision::new(LedgerInfoWithSignatures::new( + LedgerInfo::new(last_ordered_block_info.clone(), HashValue::random()), + AggregateSignature::empty(), + )); + + // Update the commit decision for the last ordered block + ordered_block_store.update_commit_decision(&commit_decision); + + // Verify the highest committed epoch and round is the last ordered block + verify_highest_committed_epoch_round(&ordered_block_store, &last_ordered_block_info); + + // Insert several ordered blocks for the next epoch + let next_epoch = current_epoch + 1; + let num_ordered_blocks = 10; + let ordered_blocks = + create_and_add_ordered_blocks(&mut ordered_block_store, num_ordered_blocks, next_epoch); + + // Verify the highest committed epoch and round is still the last ordered block + verify_highest_committed_epoch_round(&ordered_block_store, &last_ordered_block_info); + + // Create a commit decision for the first ordered block (in the next epoch) + let first_ordered_block = ordered_blocks.first().unwrap(); + let first_ordered_block_info = first_ordered_block.last_block().block_info(); + let commit_decision = CommitDecision::new(LedgerInfoWithSignatures::new( + LedgerInfo::new(first_ordered_block_info.clone(), HashValue::random()), + AggregateSignature::empty(), + )); + + // Update the commit decision for the first ordered block + ordered_block_store.update_commit_decision(&commit_decision); + + // Verify the highest 
committed epoch and round is the first ordered block (in the next epoch) + verify_highest_committed_epoch_round(&ordered_block_store, &first_ordered_block_info); + + // Create a commit decision for the last ordered block (in the next epoch) + let last_ordered_block = ordered_blocks.last().unwrap(); + let last_ordered_block_info = last_ordered_block.last_block().block_info(); + let commit_decision = CommitDecision::new(LedgerInfoWithSignatures::new( + LedgerInfo::new(last_ordered_block_info.clone(), HashValue::random()), + AggregateSignature::empty(), + )); + + // Remove the ordered blocks for the commit decision + ordered_block_store.remove_blocks_for_commit(commit_decision.commit_proof()); + + // Verify the highest committed epoch and round is the last ordered block (in the next epoch) + verify_highest_committed_epoch_round(&ordered_block_store, &last_ordered_block_info); + + // Create a commit decision for an out-of-date ordered block + let out_of_date_ordered_block = ordered_blocks.first().unwrap(); + let out_of_date_ordered_block_info = out_of_date_ordered_block.last_block().block_info(); + let commit_decision = CommitDecision::new(LedgerInfoWithSignatures::new( + LedgerInfo::new(out_of_date_ordered_block_info.clone(), HashValue::random()), + AggregateSignature::empty(), + )); + + // Update the commit decision for the out-of-date ordered block + ordered_block_store.update_commit_decision(&commit_decision); + + // Verify the highest committed epoch and round is still the last ordered block (in the next epoch) + verify_highest_committed_epoch_round(&ordered_block_store, &last_ordered_block_info); } #[test] fn test_get_last_ordered_block() { // Create a new ordered block store - let ordered_block_store = OrderedBlockStore::new(ConsensusObserverConfig::default()); + let mut ordered_block_store = OrderedBlockStore::new(ConsensusObserverConfig::default()); // Verify that we have no last ordered block assert!(ordered_block_store.get_last_ordered_block().is_none()); @@ 
-202,8 +348,11 @@ mod test { // Insert several ordered blocks for the current epoch let current_epoch = 0; let num_ordered_blocks = 50; - let ordered_blocks = - create_and_add_ordered_blocks(&ordered_block_store, num_ordered_blocks, current_epoch); + let ordered_blocks = create_and_add_ordered_blocks( + &mut ordered_block_store, + num_ordered_blocks, + current_epoch, + ); // Verify the last ordered block is the block with the highest round let last_ordered_block = ordered_blocks.last().unwrap(); @@ -217,7 +366,7 @@ mod test { let next_epoch = current_epoch + 1; let num_ordered_blocks = 50; let ordered_blocks = - create_and_add_ordered_blocks(&ordered_block_store, num_ordered_blocks, next_epoch); + create_and_add_ordered_blocks(&mut ordered_block_store, num_ordered_blocks, next_epoch); // Verify the last ordered block is the block with the highest epoch and round let last_ordered_block = ordered_blocks.last().unwrap(); @@ -231,13 +380,16 @@ mod test { #[test] fn test_get_ordered_block() { // Create a new ordered block store - let ordered_block_store = OrderedBlockStore::new(ConsensusObserverConfig::default()); + let mut ordered_block_store = OrderedBlockStore::new(ConsensusObserverConfig::default()); // Insert several ordered blocks for the current epoch let current_epoch = 0; let num_ordered_blocks = 50; - let ordered_blocks = - create_and_add_ordered_blocks(&ordered_block_store, num_ordered_blocks, current_epoch); + let ordered_blocks = create_and_add_ordered_blocks( + &mut ordered_block_store, + num_ordered_blocks, + current_epoch, + ); // Ensure the ordered blocks were all inserted let all_ordered_blocks = ordered_block_store.get_all_ordered_blocks(); @@ -272,12 +424,12 @@ mod test { }; // Create a new ordered block store - let ordered_block_store = OrderedBlockStore::new(consensus_observer_config); + let mut ordered_block_store = OrderedBlockStore::new(consensus_observer_config); // Insert several ordered blocks for the current epoch let current_epoch = 0; let 
num_ordered_blocks = max_num_pending_blocks * 2; // Insert more than the maximum - create_and_add_ordered_blocks(&ordered_block_store, num_ordered_blocks, current_epoch); + create_and_add_ordered_blocks(&mut ordered_block_store, num_ordered_blocks, current_epoch); // Verify the ordered blocks were inserted up to the maximum let all_ordered_blocks = ordered_block_store.get_all_ordered_blocks(); @@ -287,7 +439,7 @@ mod test { let next_epoch = current_epoch + 1; let num_ordered_blocks = max_num_pending_blocks - 1; // Insert one less than the maximum let ordered_blocks = - create_and_add_ordered_blocks(&ordered_block_store, num_ordered_blocks, next_epoch); + create_and_add_ordered_blocks(&mut ordered_block_store, num_ordered_blocks, next_epoch); // Verify the ordered blocks were not inserted (they should have just been dropped) for ordered_block in &ordered_blocks { @@ -305,19 +457,22 @@ mod test { #[test] fn test_remove_blocks_for_commit() { // Create a new ordered block store - let ordered_block_store = OrderedBlockStore::new(ConsensusObserverConfig::default()); + let mut ordered_block_store = OrderedBlockStore::new(ConsensusObserverConfig::default()); // Insert several ordered blocks for the current epoch let current_epoch = 10; let num_ordered_blocks = 10; - let ordered_blocks = - create_and_add_ordered_blocks(&ordered_block_store, num_ordered_blocks, current_epoch); + let ordered_blocks = create_and_add_ordered_blocks( + &mut ordered_block_store, + num_ordered_blocks, + current_epoch, + ); // Insert several ordered blocks for the next epoch let next_epoch = current_epoch + 1; let num_ordered_blocks_next_epoch = 20; let ordered_blocks_next_epoch = create_and_add_ordered_blocks( - &ordered_block_store, + &mut ordered_block_store, num_ordered_blocks_next_epoch, next_epoch, ); @@ -326,7 +481,7 @@ mod test { let future_epoch = next_epoch + 1; let num_ordered_blocks_future_epoch = 30; create_and_add_ordered_blocks( - &ordered_block_store, + &mut ordered_block_store, 
num_ordered_blocks_future_epoch, future_epoch, ); @@ -399,19 +554,22 @@ mod test { #[test] fn test_update_commit_decision() { // Create a new ordered block store - let ordered_block_store = OrderedBlockStore::new(ConsensusObserverConfig::default()); + let mut ordered_block_store = OrderedBlockStore::new(ConsensusObserverConfig::default()); // Insert several ordered blocks for the current epoch let current_epoch = 0; let num_ordered_blocks = 10; - let ordered_blocks = - create_and_add_ordered_blocks(&ordered_block_store, num_ordered_blocks, current_epoch); + let ordered_blocks = create_and_add_ordered_blocks( + &mut ordered_block_store, + num_ordered_blocks, + current_epoch, + ); // Insert several ordered blocks for the next epoch let next_epoch = current_epoch + 1; let num_ordered_blocks_next_epoch = 20; let ordered_blocks_next_epoch = create_and_add_ordered_blocks( - &ordered_block_store, + &mut ordered_block_store, num_ordered_blocks_next_epoch, next_epoch, ); @@ -499,7 +657,7 @@ mod test { /// Creates and adds the specified number of ordered blocks to the ordered blocks fn create_and_add_ordered_blocks( - ordered_block_store: &OrderedBlockStore, + ordered_block_store: &mut OrderedBlockStore, num_ordered_blocks: usize, epoch: u64, ) -> Vec { @@ -571,4 +729,19 @@ mod test { updated_commit_decision.as_ref().unwrap().clone() ); } + + /// Verifies the highest committed epoch and round matches the given block info + fn verify_highest_committed_epoch_round( + ordered_block_store: &OrderedBlockStore, + block_info: &BlockInfo, + ) { + // Verify the highest committed epoch and round is the block info + let highest_committed_epoch_round = ordered_block_store + .get_highest_committed_epoch_round() + .unwrap(); + assert_eq!( + highest_committed_epoch_round, + (block_info.epoch(), block_info.round()) + ); + } } diff --git a/consensus/src/consensus_observer/observer/payload_store.rs b/consensus/src/consensus_observer/observer/payload_store.rs index bae1225c581..59859ec0b82 
100644 --- a/consensus/src/consensus_observer/observer/payload_store.rs +++ b/consensus/src/consensus_observer/observer/payload_store.rs @@ -26,12 +26,12 @@ pub enum BlockPayloadStatus { } /// A simple struct to store the block payloads of ordered and committed blocks -#[derive(Clone)] pub struct BlockPayloadStore { // The configuration of the consensus observer consensus_observer_config: ConsensusObserverConfig, - // Block transaction payloads (indexed by epoch and round) + // Block transaction payloads (indexed by epoch and round). + // This is directly accessed by the payload manager. block_payloads: Arc>>, } @@ -61,6 +61,15 @@ impl BlockPayloadStore { self.block_payloads.lock().clear(); } + /// Returns true iff we already have a payload entry for the given block + pub fn existing_payload_entry(&self, block_payload: &BlockPayload) -> bool { + // Get the epoch and round of the payload + let epoch_and_round = (block_payload.epoch(), block_payload.round()); + + // Check if a payload already exists in the store + self.block_payloads.lock().contains_key(&epoch_and_round) + } + /// Returns a reference to the block payloads pub fn get_block_payloads(&self) -> Arc>> { self.block_payloads.clone() @@ -78,14 +87,15 @@ impl BlockPayloadStore { warn!( LogSchema::new(LogEntry::ConsensusObserver).message(&format!( "Exceeded the maximum number of payloads: {:?}. 
Dropping block: {:?}!", - max_num_pending_blocks, block_payload.block, + max_num_pending_blocks, + block_payload.block(), )) ); return; // Drop the block if we've exceeded the maximum } // Create the new payload status - let epoch_and_round = (block_payload.block.epoch(), block_payload.block.round()); + let epoch_and_round = (block_payload.epoch(), block_payload.round()); let payload_status = if verified_payload_signatures { BlockPayloadStatus::AvailableAndVerified(block_payload) } else { @@ -161,7 +171,7 @@ impl BlockPayloadStore { // Get the block transaction payload let transaction_payload = match entry.get() { BlockPayloadStatus::AvailableAndVerified(block_payload) => { - &block_payload.transaction_payload + block_payload.transaction_payload() }, BlockPayloadStatus::AvailableAndUnverified(_) => { // The payload should have already been verified @@ -251,7 +261,7 @@ impl BlockPayloadStore { // Collect the rounds of all newly verified blocks let verified_payload_rounds: Vec = verified_payloads_to_update .iter() - .map(|block_payload| block_payload.block.round()) + .map(|block_payload| block_payload.round()) .collect(); // Update the verified block payloads. 
Note: this will cause @@ -299,16 +309,12 @@ mod test { }; // Create a new block payload store - let block_payload_store = BlockPayloadStore::new(consensus_observer_config); + let mut block_payload_store = BlockPayloadStore::new(consensus_observer_config); // Add some unverified blocks to the payload store let num_blocks_in_store = 100; - let unverified_blocks = create_and_add_blocks_to_store( - block_payload_store.clone(), - num_blocks_in_store, - 1, - false, - ); + let unverified_blocks = + create_and_add_blocks_to_store(&mut block_payload_store, num_blocks_in_store, 1, false); // Verify the payloads don't exist in the block payload store assert!(!block_payload_store.all_payloads_exist(&unverified_blocks)); @@ -320,12 +326,8 @@ mod test { // Add some verified blocks to the payload store let num_blocks_in_store = 100; - let verified_blocks = create_and_add_blocks_to_store( - block_payload_store.clone(), - num_blocks_in_store, - 0, - true, - ); + let verified_blocks = + create_and_add_blocks_to_store(&mut block_payload_store, num_blocks_in_store, 0, true); // Check that all the payloads exist in the block payload store assert!(block_payload_store.all_payloads_exist(&verified_blocks)); @@ -355,22 +357,18 @@ mod test { fn test_all_payloads_exist_unverified() { // Create a new block payload store let consensus_observer_config = ConsensusObserverConfig::default(); - let block_payload_store = BlockPayloadStore::new(consensus_observer_config); + let mut block_payload_store = BlockPayloadStore::new(consensus_observer_config); // Add several verified blocks to the payload store let num_blocks_in_store = 10; - let verified_blocks = create_and_add_blocks_to_store( - block_payload_store.clone(), - num_blocks_in_store, - 0, - true, - ); + let verified_blocks = + create_and_add_blocks_to_store(&mut block_payload_store, num_blocks_in_store, 0, true); // Check that the payloads exists in the block payload store assert!(block_payload_store.all_payloads_exist(&verified_blocks)); // 
Mark the payload of the first block as unverified - mark_payload_as_unverified(block_payload_store.clone(), &verified_blocks[0]); + mark_payload_as_unverified(&block_payload_store, &verified_blocks[0]); // Check that the payload no longer exists in the block payload store assert!(!block_payload_store.all_payloads_exist(&verified_blocks)); @@ -383,19 +381,15 @@ mod test { fn test_clear_all_payloads() { // Create a new block payload store let consensus_observer_config = ConsensusObserverConfig::default(); - let block_payload_store = BlockPayloadStore::new(consensus_observer_config); + let mut block_payload_store = BlockPayloadStore::new(consensus_observer_config); // Add some unverified blocks to the payload store let num_blocks_in_store = 30; - create_and_add_blocks_to_store(block_payload_store.clone(), num_blocks_in_store, 1, false); + create_and_add_blocks_to_store(&mut block_payload_store, num_blocks_in_store, 1, false); // Add some verified blocks to the payload store - let verified_blocks = create_and_add_blocks_to_store( - block_payload_store.clone(), - num_blocks_in_store, - 0, - true, - ); + let verified_blocks = + create_and_add_blocks_to_store(&mut block_payload_store, num_blocks_in_store, 0, true); // Check that the payloads exist in the block payload store assert!(block_payload_store.all_payloads_exist(&verified_blocks)); @@ -415,6 +409,41 @@ mod test { check_num_verified_payloads(&block_payload_store, 0); } + #[test] + fn test_existing_payload_entry() { + // Create a new block payload store + let consensus_observer_config = ConsensusObserverConfig::default(); + let mut block_payload_store = BlockPayloadStore::new(consensus_observer_config); + + // Create a new block payload + let epoch = 10; + let round = 100; + let block_payload = create_block_payload(epoch, round); + + // Check that the payload doesn't exist in the block payload store + assert!(!block_payload_store.existing_payload_entry(&block_payload)); + + // Insert the verified block payload into 
the block payload store + block_payload_store.insert_block_payload(block_payload.clone(), true); + + // Check that the payload now exists in the block payload store + assert!(block_payload_store.existing_payload_entry(&block_payload)); + + // Create another block payload + let epoch = 5; + let round = 101; + let block_payload = create_block_payload(epoch, round); + + // Check that the payload doesn't exist in the block payload store + assert!(!block_payload_store.existing_payload_entry(&block_payload)); + + // Insert the unverified block payload into the block payload store + block_payload_store.insert_block_payload(block_payload.clone(), false); + + // Check that the payload now exists in the block payload store + assert!(block_payload_store.existing_payload_entry(&block_payload)); + } + #[test] fn test_insert_block_payload() { // Create a new block payload store @@ -423,12 +452,8 @@ mod test { // Add some verified blocks to the payload store let num_blocks_in_store = 20; - let verified_blocks = create_and_add_blocks_to_store( - block_payload_store.clone(), - num_blocks_in_store, - 0, - true, - ); + let verified_blocks = + create_and_add_blocks_to_store(&mut block_payload_store, num_blocks_in_store, 0, true); // Check that the block payload store contains the new block payloads assert!(block_payload_store.all_payloads_exist(&verified_blocks)); @@ -438,7 +463,7 @@ mod test { check_num_verified_payloads(&block_payload_store, num_blocks_in_store); // Mark the payload of the first block as unverified - mark_payload_as_unverified(block_payload_store.clone(), &verified_blocks[0]); + mark_payload_as_unverified(&block_payload_store, &verified_blocks[0]); // Check that the payload no longer exists in the block payload store assert!(!block_payload_store.all_payloads_exist(&verified_blocks)); @@ -465,11 +490,11 @@ mod test { }; // Create a new block payload store - let block_payload_store = BlockPayloadStore::new(consensus_observer_config); + let mut block_payload_store = 
BlockPayloadStore::new(consensus_observer_config); // Add the maximum number of verified blocks to the payload store let num_blocks_in_store = max_num_pending_blocks as usize; - create_and_add_blocks_to_store(block_payload_store.clone(), num_blocks_in_store, 0, true); + create_and_add_blocks_to_store(&mut block_payload_store, num_blocks_in_store, 0, true); // Verify the number of blocks in the block payload store check_num_verified_payloads(&block_payload_store, num_blocks_in_store); @@ -477,7 +502,7 @@ mod test { // Add more blocks to the payload store let num_blocks_to_add = 5; - create_and_add_blocks_to_store(block_payload_store.clone(), num_blocks_to_add, 0, true); + create_and_add_blocks_to_store(&mut block_payload_store, num_blocks_to_add, 0, true); // Verify the number of blocks in the block payload store check_num_verified_payloads(&block_payload_store, max_num_pending_blocks as usize); @@ -485,7 +510,7 @@ mod test { // Add a large number of blocks to the payload store let num_blocks_to_add = 100; - create_and_add_blocks_to_store(block_payload_store.clone(), num_blocks_to_add, 0, true); + create_and_add_blocks_to_store(&mut block_payload_store, num_blocks_to_add, 0, true); // Verify the number of blocks in the block payload store check_num_verified_payloads(&block_payload_store, max_num_pending_blocks as usize); @@ -502,11 +527,11 @@ mod test { }; // Create a new block payload store - let block_payload_store = BlockPayloadStore::new(consensus_observer_config); + let mut block_payload_store = BlockPayloadStore::new(consensus_observer_config); // Add the maximum number of unverified blocks to the payload store let num_blocks_in_store = max_num_pending_blocks as usize; - create_and_add_blocks_to_store(block_payload_store.clone(), num_blocks_in_store, 0, false); + create_and_add_blocks_to_store(&mut block_payload_store, num_blocks_in_store, 0, false); // Verify the number of blocks in the block payload store check_num_unverified_payloads(&block_payload_store, 
num_blocks_in_store); @@ -514,7 +539,7 @@ mod test { // Add more blocks to the payload store let num_blocks_to_add = 5; - create_and_add_blocks_to_store(block_payload_store.clone(), num_blocks_to_add, 0, false); + create_and_add_blocks_to_store(&mut block_payload_store, num_blocks_to_add, 0, false); // Verify the number of blocks in the block payload store check_num_unverified_payloads(&block_payload_store, max_num_pending_blocks as usize); @@ -522,7 +547,7 @@ mod test { // Add a large number of blocks to the payload store let num_blocks_to_add = 100; - create_and_add_blocks_to_store(block_payload_store.clone(), num_blocks_to_add, 0, false); + create_and_add_blocks_to_store(&mut block_payload_store, num_blocks_to_add, 0, false); // Verify the number of blocks in the block payload store check_num_unverified_payloads(&block_payload_store, max_num_pending_blocks as usize); @@ -533,13 +558,13 @@ mod test { fn test_remove_blocks_for_epoch_round_verified() { // Create a new block payload store let consensus_observer_config = ConsensusObserverConfig::default(); - let block_payload_store = BlockPayloadStore::new(consensus_observer_config); + let mut block_payload_store = BlockPayloadStore::new(consensus_observer_config); // Add some verified blocks to the payload store for the current epoch let current_epoch = 0; let num_blocks_in_store = 100; let verified_blocks = create_and_add_blocks_to_store( - block_payload_store.clone(), + &mut block_payload_store, num_blocks_in_store, current_epoch, true, @@ -573,7 +598,7 @@ mod test { // Add some verified blocks to the payload store for the next epoch let next_epoch = current_epoch + 1; create_and_add_blocks_to_store( - block_payload_store.clone(), + &mut block_payload_store, num_blocks_in_store, next_epoch, true, @@ -591,13 +616,13 @@ mod test { fn test_remove_blocks_for_epoch_round_unverified() { // Create a new block payload store let consensus_observer_config = ConsensusObserverConfig::default(); - let block_payload_store = 
BlockPayloadStore::new(consensus_observer_config); + let mut block_payload_store = BlockPayloadStore::new(consensus_observer_config); // Add some unverified blocks to the payload store for the current epoch let current_epoch = 10; let num_blocks_in_store = 100; let unverified_blocks = create_and_add_blocks_to_store( - block_payload_store.clone(), + &mut block_payload_store, num_blocks_in_store, current_epoch, false, @@ -630,7 +655,7 @@ mod test { // Add some unverified blocks to the payload store for the next epoch let next_epoch = current_epoch + 1; create_and_add_blocks_to_store( - block_payload_store.clone(), + &mut block_payload_store, num_blocks_in_store, next_epoch, false, @@ -648,13 +673,13 @@ mod test { fn test_remove_committed_blocks_verified() { // Create a new block payload store let consensus_observer_config = ConsensusObserverConfig::default(); - let block_payload_store = BlockPayloadStore::new(consensus_observer_config); + let mut block_payload_store = BlockPayloadStore::new(consensus_observer_config); // Add some blocks to the payload store for the current epoch let current_epoch = 0; let num_blocks_in_store = 100; let verified_blocks = create_and_add_blocks_to_store( - block_payload_store.clone(), + &mut block_payload_store, num_blocks_in_store, current_epoch, true, @@ -700,7 +725,7 @@ mod test { // Add some blocks to the payload store for the next epoch let next_epoch = 1; let verified_blocks = create_and_add_blocks_to_store( - block_payload_store.clone(), + &mut block_payload_store, num_blocks_in_store, next_epoch, true, @@ -717,13 +742,13 @@ mod test { fn test_remove_committed_blocks_unverified() { // Create a new block payload store let consensus_observer_config = ConsensusObserverConfig::default(); - let block_payload_store = BlockPayloadStore::new(consensus_observer_config); + let mut block_payload_store = BlockPayloadStore::new(consensus_observer_config); // Add some blocks to the payload store for the current epoch let current_epoch = 10; 
let num_blocks_in_store = 100; let unverified_blocks = create_and_add_blocks_to_store( - block_payload_store.clone(), + &mut block_payload_store, num_blocks_in_store, current_epoch, false, @@ -768,7 +793,7 @@ mod test { // Add some blocks to the payload store for the next epoch let next_epoch = 11; let unverified_blocks = create_and_add_blocks_to_store( - block_payload_store.clone(), + &mut block_payload_store, num_blocks_in_store, next_epoch, false, @@ -791,7 +816,7 @@ mod test { let current_epoch = 0; let num_verified_blocks = 10; create_and_add_blocks_to_store( - block_payload_store.clone(), + &mut block_payload_store, num_verified_blocks, current_epoch, true, @@ -801,7 +826,7 @@ mod test { let next_epoch = current_epoch + 1; let num_unverified_blocks = 20; let unverified_blocks = create_and_add_blocks_to_store( - block_payload_store.clone(), + &mut block_payload_store, num_unverified_blocks, next_epoch, false, @@ -811,7 +836,7 @@ mod test { let future_epoch = current_epoch + 30; let num_future_blocks = 30; let future_unverified_blocks = create_and_add_blocks_to_store( - block_payload_store.clone(), + &mut block_payload_store, num_future_blocks, future_epoch, false, @@ -877,7 +902,7 @@ mod test { let current_epoch = 0; let num_verified_blocks = 10; let verified_blocks = create_and_add_blocks_to_store( - block_payload_store.clone(), + &mut block_payload_store, num_verified_blocks, current_epoch, true, @@ -895,7 +920,7 @@ mod test { .unwrap(); // Mark the first block payload as unverified - mark_payload_as_unverified(block_payload_store.clone(), &verified_blocks[0]); + mark_payload_as_unverified(&block_payload_store, &verified_blocks[0]); // Verify the ordered block and ensure it fails (since the payloads are unverified) let error = block_payload_store @@ -923,7 +948,7 @@ mod test { let current_epoch = 10; let num_verified_blocks = 6; create_and_add_blocks_to_store( - block_payload_store.clone(), + &mut block_payload_store, num_verified_blocks, current_epoch, 
true, @@ -933,7 +958,7 @@ mod test { let next_epoch = current_epoch + 1; let num_unverified_blocks = 15; let unverified_blocks = create_and_add_blocks_to_store( - block_payload_store.clone(), + &mut block_payload_store, num_unverified_blocks, next_epoch, false, @@ -943,7 +968,7 @@ mod test { let future_epoch = next_epoch + 1; let num_future_blocks = 10; let unverified_future_blocks = create_and_add_blocks_to_store( - block_payload_store.clone(), + &mut block_payload_store, num_future_blocks, future_epoch, false, @@ -986,7 +1011,7 @@ mod test { /// Creates and adds the given number of blocks to the block payload store fn create_and_add_blocks_to_store( - mut block_payload_store: BlockPayloadStore, + block_payload_store: &mut BlockPayloadStore, num_blocks: usize, epoch: u64, verified_payload_signatures: bool, @@ -1060,6 +1085,12 @@ mod test { pipelined_blocks } + /// Creates a new block payload with the given epoch and round + fn create_block_payload(epoch: u64, round: Round) -> BlockPayload { + let block_info = BlockInfo::random_with_epoch(epoch, round); + BlockPayload::new(block_info, BlockTransactionPayload::empty()) + } + /// Checks the number of unverified payloads in the block payload store fn check_num_unverified_payloads( block_payload_store: &BlockPayloadStore, @@ -1110,7 +1141,7 @@ mod test { /// Marks the payload of the given block as unverified fn mark_payload_as_unverified( - block_payload_store: BlockPayloadStore, + block_payload_store: &BlockPayloadStore, block: &Arc, ) { // Get the payload entry for the given block diff --git a/consensus/src/consensus_observer/observer/pending_blocks.rs b/consensus/src/consensus_observer/observer/pending_blocks.rs index 46c0586f081..2a7ebbde051 100644 --- a/consensus/src/consensus_observer/observer/pending_blocks.rs +++ b/consensus/src/consensus_observer/observer/pending_blocks.rs @@ -19,41 +19,47 @@ use std::{ }; /// A simple struct to hold blocks that are waiting for payloads -#[derive(Clone)] pub struct 
PendingBlockStore { // The configuration of the consensus observer consensus_observer_config: ConsensusObserverConfig, - // A map of ordered blocks that are without payloads. The key is the - // (epoch, round) of the first block in the ordered block. - blocks_without_payloads: Arc>>, + // A map of ordered blocks that are without payloads. The key is + // the (epoch, round) of the first block in the ordered block. + blocks_without_payloads: BTreeMap<(u64, Round), OrderedBlock>, } impl PendingBlockStore { pub fn new(consensus_observer_config: ConsensusObserverConfig) -> Self { Self { consensus_observer_config, - blocks_without_payloads: Arc::new(Mutex::new(BTreeMap::new())), + blocks_without_payloads: BTreeMap::new(), } } /// Clears all missing blocks from the store - pub fn clear_missing_blocks(&self) { - self.blocks_without_payloads.lock().clear(); + pub fn clear_missing_blocks(&mut self) { + self.blocks_without_payloads.clear(); + } + + /// Returns true iff the store contains an entry for the given ordered block + pub fn existing_pending_block(&self, ordered_block: &OrderedBlock) -> bool { + // Get the epoch and round of the first block + let first_block = ordered_block.first_block(); + let first_block_epoch_round = (first_block.epoch(), first_block.round()); + + // Check if the block is already in the store + self.blocks_without_payloads + .contains_key(&first_block_epoch_round) } /// Inserts a block (without payloads) into the store - pub fn insert_pending_block(&self, ordered_block: OrderedBlock) { + pub fn insert_pending_block(&mut self, ordered_block: OrderedBlock) { // Get the epoch and round of the first block let first_block = ordered_block.first_block(); let first_block_epoch_round = (first_block.epoch(), first_block.round()); // Insert the block into the store using the round of the first block - match self - .blocks_without_payloads - .lock() - .entry(first_block_epoch_round) - { + match self.blocks_without_payloads.entry(first_block_epoch_round) { 
Entry::Occupied(_) => { // The block is already in the store warn!( @@ -75,16 +81,15 @@ impl PendingBlockStore { /// Garbage collects the pending blocks store by removing /// the oldest blocks if the store is too large. - fn garbage_collect_pending_blocks(&self) { + fn garbage_collect_pending_blocks(&mut self) { // Calculate the number of blocks to remove - let mut blocks_without_payloads = self.blocks_without_payloads.lock(); - let num_pending_blocks = blocks_without_payloads.len() as u64; + let num_pending_blocks = self.blocks_without_payloads.len() as u64; let max_pending_blocks = self.consensus_observer_config.max_num_pending_blocks; let num_blocks_to_remove = num_pending_blocks.saturating_sub(max_pending_blocks); // Remove the oldest blocks if the store is too large for _ in 0..num_blocks_to_remove { - if let Some((oldest_epoch_round, _)) = blocks_without_payloads.pop_first() { + if let Some((oldest_epoch_round, _)) = self.blocks_without_payloads.pop_first() { warn!( LogSchema::new(LogEntry::ConsensusObserver).message(&format!( "The pending block store is too large: {:?} blocks. Removing the block for the oldest epoch and round: {:?}", @@ -98,25 +103,28 @@ impl PendingBlockStore { /// Removes and returns the block from the store that is now ready /// to be processed (after the new payload has been received). 
pub fn remove_ready_block( - &self, + &mut self, received_payload_epoch: u64, received_payload_round: Round, - block_payload_store: &BlockPayloadStore, + block_payload_store: Arc>, ) -> Option { // Calculate the round at which to split the blocks let split_round = received_payload_round.saturating_add(1); // Split the blocks at the epoch and round - let mut blocks_without_payloads = self.blocks_without_payloads.lock(); - let mut blocks_at_higher_rounds = - blocks_without_payloads.split_off(&(received_payload_epoch, split_round)); + let mut blocks_at_higher_rounds = self + .blocks_without_payloads + .split_off(&(received_payload_epoch, split_round)); // Check if the last block is ready (this should be the only ready block). // Any earlier blocks are considered out-of-date and will be dropped. let mut ready_block = None; - if let Some((epoch_and_round, ordered_block)) = blocks_without_payloads.pop_last() { + if let Some((epoch_and_round, ordered_block)) = self.blocks_without_payloads.pop_last() { // If all payloads exist for the block, then the block is ready - if block_payload_store.all_payloads_exist(ordered_block.blocks()) { + if block_payload_store + .lock() + .all_payloads_exist(ordered_block.blocks()) + { ready_block = Some(ordered_block); } else { // Otherwise, check if we're still waiting for higher payloads for the block @@ -127,18 +135,18 @@ impl PendingBlockStore { } // Check if any out-of-date blocks were dropped - if !blocks_without_payloads.is_empty() { + if !self.blocks_without_payloads.is_empty() { info!( LogSchema::new(LogEntry::ConsensusObserver).message(&format!( "Dropped {:?} out-of-date pending blocks before epoch and round: {:?}", - blocks_without_payloads.len(), + self.blocks_without_payloads.len(), (received_payload_epoch, received_payload_round) )) ); } // Update the pending blocks to only include the blocks at higher rounds - *blocks_without_payloads = blocks_at_higher_rounds; + self.blocks_without_payloads = blocks_at_higher_rounds; // 
Return the ready block (if one exists) ready_block @@ -147,8 +155,7 @@ impl PendingBlockStore { /// Updates the metrics for the pending blocks pub fn update_pending_blocks_metrics(&self) { // Update the number of pending block entries - let blocks_without_payloads = self.blocks_without_payloads.lock(); - let num_entries = blocks_without_payloads.len() as u64; + let num_entries = self.blocks_without_payloads.len() as u64; metrics::set_gauge_with_label( &metrics::OBSERVER_NUM_PROCESSED_BLOCKS, metrics::PENDING_BLOCK_ENTRIES_LABEL, @@ -156,7 +163,8 @@ impl PendingBlockStore { ); // Update the total number of pending blocks - let num_pending_blocks = blocks_without_payloads + let num_pending_blocks = self + .blocks_without_payloads .values() .map(|block| block.blocks().len() as u64) .sum(); @@ -167,7 +175,8 @@ impl PendingBlockStore { ); // Update the highest round for the pending blocks - let highest_pending_round = blocks_without_payloads + let highest_pending_round = self + .blocks_without_payloads .last_key_value() .map(|(_, pending_block)| pending_block.last_block().round()) .unwrap_or(0); @@ -208,13 +217,15 @@ mod test { max_num_pending_blocks: max_num_pending_blocks as u64, ..ConsensusObserverConfig::default() }; - let pending_block_store = PendingBlockStore::new(consensus_observer_config); + let pending_block_store = Arc::new(Mutex::new(PendingBlockStore::new( + consensus_observer_config, + ))); // Insert the maximum number of blocks into the store let current_epoch = 0; let starting_round = 0; let missing_blocks = create_and_add_pending_blocks( - &pending_block_store, + pending_block_store.clone(), max_num_pending_blocks, current_epoch, starting_round, @@ -223,17 +234,80 @@ mod test { // Verify that the store is not empty verify_pending_blocks( - &pending_block_store, + pending_block_store.clone(), max_num_pending_blocks, &missing_blocks, ); // Clear the missing blocks from the store - pending_block_store.clear_missing_blocks(); + 
pending_block_store.lock().clear_missing_blocks(); // Verify that the store is now empty - let blocks_without_payloads = pending_block_store.blocks_without_payloads.lock(); - assert!(blocks_without_payloads.is_empty()); + assert!(pending_block_store + .lock() + .blocks_without_payloads + .is_empty()); + } + + #[test] + fn test_existing_pending_block() { + // Create a new pending block store + let max_num_pending_blocks = 10; + let consensus_observer_config = ConsensusObserverConfig { + max_num_pending_blocks: max_num_pending_blocks as u64, + ..ConsensusObserverConfig::default() + }; + let pending_block_store = Arc::new(Mutex::new(PendingBlockStore::new( + ConsensusObserverConfig::default(), + ))); + + // Insert the maximum number of blocks into the store + let current_epoch = 10; + let starting_round = 100; + let pending_blocks = create_and_add_pending_blocks( + pending_block_store.clone(), + max_num_pending_blocks, + current_epoch, + starting_round, + 5, + ); + + // Verify that all blocks were inserted correctly + for pending_block in &pending_blocks { + assert!(pending_block_store + .lock() + .existing_pending_block(pending_block)); + } + + // Create a new block payload store and insert payloads for the second block + let block_payload_store = Arc::new(Mutex::new(BlockPayloadStore::new( + consensus_observer_config, + ))); + let second_block = pending_blocks[1].clone(); + insert_payloads_for_ordered_block(block_payload_store.clone(), &second_block); + + // Remove the second block (which is now ready) + let payload_round = second_block.first_block().round(); + let ready_block = pending_block_store.lock().remove_ready_block( + current_epoch, + payload_round, + block_payload_store.clone(), + ); + assert_eq!(ready_block, Some(second_block)); + + // Verify that the first and second blocks were removed + verify_pending_blocks( + pending_block_store.clone(), + max_num_pending_blocks - 2, + &pending_blocks[2..].to_vec(), + ); + + // Verify that the first and second blocks 
are no longer in the store + for pending_block in &pending_blocks[..2] { + assert!(!pending_block_store + .lock() + .existing_pending_block(pending_block)); + } } #[test] @@ -244,13 +318,15 @@ mod test { max_num_pending_blocks: max_num_pending_blocks as u64, ..ConsensusObserverConfig::default() }; - let pending_block_store = PendingBlockStore::new(consensus_observer_config); + let pending_block_store = Arc::new(Mutex::new(PendingBlockStore::new( + consensus_observer_config, + ))); // Insert the maximum number of blocks into the store let current_epoch = 0; let starting_round = 0; let pending_blocks = create_and_add_pending_blocks( - &pending_block_store, + pending_block_store.clone(), max_num_pending_blocks, current_epoch, starting_round, @@ -259,7 +335,7 @@ mod test { // Verify that all blocks were inserted correctly verify_pending_blocks( - &pending_block_store, + pending_block_store.clone(), max_num_pending_blocks, &pending_blocks, ); @@ -267,7 +343,7 @@ mod test { // Insert the maximum number of blocks into the store again let starting_round = (max_num_pending_blocks * 100) as Round; let pending_blocks = create_and_add_pending_blocks( - &pending_block_store, + pending_block_store.clone(), max_num_pending_blocks, current_epoch, starting_round, @@ -276,7 +352,7 @@ mod test { // Verify that all blocks were inserted correctly verify_pending_blocks( - &pending_block_store, + pending_block_store.clone(), max_num_pending_blocks, &pending_blocks, ); @@ -284,12 +360,17 @@ mod test { // Insert one more block into the store (for the next epoch) let next_epoch = 1; let starting_round = 0; - let new_pending_block = - create_and_add_pending_blocks(&pending_block_store, 1, next_epoch, starting_round, 5); + let new_pending_block = create_and_add_pending_blocks( + pending_block_store.clone(), + 1, + next_epoch, + starting_round, + 5, + ); // Verify the new block was inserted correctly verify_pending_blocks( - &pending_block_store, + pending_block_store.clone(), 
max_num_pending_blocks, &new_pending_block, ); @@ -303,13 +384,15 @@ mod test { max_num_pending_blocks: max_num_pending_blocks as u64, ..ConsensusObserverConfig::default() }; - let pending_block_store = PendingBlockStore::new(consensus_observer_config); + let pending_block_store = Arc::new(Mutex::new(PendingBlockStore::new( + consensus_observer_config, + ))); // Insert the maximum number of blocks into the store let current_epoch = 0; let starting_round = 200; let mut pending_blocks = create_and_add_pending_blocks( - &pending_block_store, + pending_block_store.clone(), max_num_pending_blocks, current_epoch, starting_round, @@ -318,7 +401,7 @@ mod test { // Verify that all blocks were inserted correctly verify_pending_blocks( - &pending_block_store, + pending_block_store.clone(), max_num_pending_blocks, &pending_blocks, ); @@ -329,7 +412,7 @@ mod test { // Insert one more block into the store let starting_round = ((max_num_pending_blocks * 10) + (i * 100)) as Round; let new_pending_block = create_and_add_pending_blocks( - &pending_block_store, + pending_block_store.clone(), 1, current_epoch, starting_round, @@ -338,7 +421,7 @@ mod test { // Verify the new block was inserted correctly verify_pending_blocks( - &pending_block_store, + pending_block_store.clone(), max_num_pending_blocks, &new_pending_block, ); @@ -348,7 +431,8 @@ mod test { let oldest_block_round = oldest_block.first_block().round(); // Verify that the oldest block was garbage collected - let blocks_without_payloads = pending_block_store.blocks_without_payloads.lock(); + let blocks_without_payloads = + pending_block_store.lock().blocks_without_payloads.clone(); assert!(!blocks_without_payloads.contains_key(&(current_epoch, oldest_block_round))); } @@ -359,7 +443,7 @@ mod test { // Insert one more block into the store let starting_round = i; let new_pending_block = create_and_add_pending_blocks( - &pending_block_store, + pending_block_store.clone(), 1, next_epoch, starting_round, @@ -368,7 +452,7 @@ mod 
test { // Verify the new block was inserted correctly verify_pending_blocks( - &pending_block_store, + pending_block_store.clone(), max_num_pending_blocks, &new_pending_block, ); @@ -378,7 +462,8 @@ mod test { let oldest_block_round = oldest_block.first_block().round(); // Verify that the oldest block was garbage collected - let blocks_without_payloads = pending_block_store.blocks_without_payloads.lock(); + let blocks_without_payloads = + pending_block_store.lock().blocks_without_payloads.clone(); assert!(!blocks_without_payloads.contains_key(&(current_epoch, oldest_block_round))); } } @@ -391,13 +476,15 @@ mod test { max_num_pending_blocks: max_num_pending_blocks as u64, ..ConsensusObserverConfig::default() }; - let pending_block_store = PendingBlockStore::new(consensus_observer_config); + let pending_block_store = Arc::new(Mutex::new(PendingBlockStore::new( + consensus_observer_config, + ))); // Insert the maximum number of blocks into the store let current_epoch = 0; let starting_round = 0; let pending_blocks = create_and_add_pending_blocks( - &pending_block_store, + pending_block_store.clone(), max_num_pending_blocks, current_epoch, starting_round, @@ -405,43 +492,45 @@ mod test { ); // Create a new block payload store and insert payloads for the second block - let mut block_payload_store = BlockPayloadStore::new(consensus_observer_config); + let block_payload_store = Arc::new(Mutex::new(BlockPayloadStore::new( + consensus_observer_config, + ))); let second_block = pending_blocks[1].clone(); - insert_payloads_for_ordered_block(&mut block_payload_store, &second_block); + insert_payloads_for_ordered_block(block_payload_store.clone(), &second_block); // Remove the second block (which is now ready) let payload_round = second_block.first_block().round(); - let ready_block = pending_block_store.remove_ready_block( + let ready_block = pending_block_store.lock().remove_ready_block( current_epoch, payload_round, - &block_payload_store, + block_payload_store.clone(), ); 
assert_eq!(ready_block, Some(second_block)); // Verify that the first and second blocks were removed verify_pending_blocks( - &pending_block_store, + pending_block_store.clone(), max_num_pending_blocks - 2, &pending_blocks[2..].to_vec(), ); // Insert payloads for the last block let last_block = pending_blocks.last().unwrap().clone(); - insert_payloads_for_ordered_block(&mut block_payload_store, &last_block); + insert_payloads_for_ordered_block(block_payload_store.clone(), &last_block); // Remove the last block (which is now ready) let payload_round = last_block.first_block().round(); - let ready_block = pending_block_store.remove_ready_block( + let ready_block = pending_block_store.lock().remove_ready_block( current_epoch, payload_round, - &block_payload_store, + block_payload_store.clone(), ); // Verify that the last block was removed assert_eq!(ready_block, Some(last_block)); // Verify that the store is empty - verify_pending_blocks(&pending_block_store, 0, &vec![]); + verify_pending_blocks(pending_block_store.clone(), 0, &vec![]); } #[test] @@ -452,13 +541,15 @@ mod test { max_num_pending_blocks: max_num_pending_blocks as u64, ..ConsensusObserverConfig::default() }; - let pending_block_store = PendingBlockStore::new(consensus_observer_config); + let pending_block_store = Arc::new(Mutex::new(PendingBlockStore::new( + consensus_observer_config, + ))); // Insert the maximum number of blocks into the store let current_epoch = 10; let starting_round = 100; let pending_blocks = create_and_add_pending_blocks( - &pending_block_store, + pending_block_store.clone(), max_num_pending_blocks, current_epoch, starting_round, @@ -466,7 +557,9 @@ mod test { ); // Create an empty block payload store - let mut block_payload_store = BlockPayloadStore::new(consensus_observer_config); + let block_payload_store = Arc::new(Mutex::new(BlockPayloadStore::new( + consensus_observer_config, + ))); // Incrementally insert and process each payload for the first block let first_block = 
pending_blocks.first().unwrap().clone(); @@ -474,14 +567,16 @@ mod test { // Insert the block let block_payload = BlockPayload::new(block.block_info(), BlockTransactionPayload::empty()); - block_payload_store.insert_block_payload(block_payload, true); + block_payload_store + .lock() + .insert_block_payload(block_payload, true); // Attempt to remove the block (which might not be ready) let payload_round = block.round(); - let ready_block = pending_block_store.remove_ready_block( + let ready_block = pending_block_store.lock().remove_ready_block( current_epoch, payload_round, - &block_payload_store, + block_payload_store.clone(), ); // If the block is ready, verify that it was removed. @@ -492,7 +587,7 @@ mod test { // Verify that the block was removed verify_pending_blocks( - &pending_block_store, + pending_block_store.clone(), max_num_pending_blocks - 1, &pending_blocks[1..].to_vec(), ); @@ -502,7 +597,7 @@ mod test { // Verify that the block still remains verify_pending_blocks( - &pending_block_store, + pending_block_store.clone(), max_num_pending_blocks, &pending_blocks, ); @@ -517,14 +612,16 @@ mod test { if payload_round != last_block.first_block().round() { let block_payload = BlockPayload::new(block.block_info(), BlockTransactionPayload::empty()); - block_payload_store.insert_block_payload(block_payload, true); + block_payload_store + .lock() + .insert_block_payload(block_payload, true); } // Attempt to remove the block (which might not be ready) - let ready_block = pending_block_store.remove_ready_block( + let ready_block = pending_block_store.lock().remove_ready_block( current_epoch, payload_round, - &block_payload_store, + block_payload_store.clone(), ); // The block should not be ready @@ -532,14 +629,14 @@ mod test { // Verify that the block still remains or has been removed on the last insert if payload_round == last_block.last_block().round() { - verify_pending_blocks(&pending_block_store, 0, &vec![]); + 
verify_pending_blocks(pending_block_store.clone(), 0, &vec![]); } else { - verify_pending_blocks(&pending_block_store, 1, &vec![last_block.clone()]); + verify_pending_blocks(pending_block_store.clone(), 1, &vec![last_block.clone()]); } } // Verify that the store is now empty - verify_pending_blocks(&pending_block_store, 0, &vec![]); + verify_pending_blocks(pending_block_store.clone(), 0, &vec![]); } #[test] @@ -550,13 +647,15 @@ mod test { max_num_pending_blocks: max_num_pending_blocks as u64, ..ConsensusObserverConfig::default() }; - let pending_block_store = PendingBlockStore::new(consensus_observer_config); + let pending_block_store = Arc::new(Mutex::new(PendingBlockStore::new( + consensus_observer_config, + ))); // Insert the maximum number of blocks into the store let current_epoch = 0; let starting_round = 0; let pending_blocks = create_and_add_pending_blocks( - &pending_block_store, + pending_block_store.clone(), max_num_pending_blocks, current_epoch, starting_round, @@ -564,63 +663,65 @@ mod test { ); // Create a new block payload store and insert payloads for the first block - let mut block_payload_store = BlockPayloadStore::new(consensus_observer_config); + let block_payload_store = Arc::new(Mutex::new(BlockPayloadStore::new( + consensus_observer_config, + ))); let first_block = pending_blocks.first().unwrap().clone(); - insert_payloads_for_ordered_block(&mut block_payload_store, &first_block); + insert_payloads_for_ordered_block(block_payload_store.clone(), &first_block); // Remove the first block (which is now ready) let payload_round = first_block.first_block().round(); - let ready_block = pending_block_store.remove_ready_block( + let ready_block = pending_block_store.lock().remove_ready_block( current_epoch, payload_round, - &block_payload_store, + block_payload_store.clone(), ); assert_eq!(ready_block, Some(first_block)); // Verify that the first block was removed verify_pending_blocks( - &pending_block_store, + pending_block_store.clone(), 
max_num_pending_blocks - 1, &pending_blocks[1..].to_vec(), ); // Insert payloads for the second block let second_block = pending_blocks[1].clone(); - insert_payloads_for_ordered_block(&mut block_payload_store, &second_block); + insert_payloads_for_ordered_block(block_payload_store.clone(), &second_block); // Remove the second block (which is now ready) let payload_round = second_block.first_block().round(); - let ready_block = pending_block_store.remove_ready_block( + let ready_block = pending_block_store.lock().remove_ready_block( current_epoch, payload_round, - &block_payload_store, + block_payload_store.clone(), ); assert_eq!(ready_block, Some(second_block)); // Verify that the first and second blocks were removed verify_pending_blocks( - &pending_block_store, + pending_block_store.clone(), max_num_pending_blocks - 2, &pending_blocks[2..].to_vec(), ); // Insert payloads for the last block let last_block = pending_blocks.last().unwrap().clone(); - insert_payloads_for_ordered_block(&mut block_payload_store, &last_block); + insert_payloads_for_ordered_block(block_payload_store.clone(), &last_block); // Remove the last block (which is now ready) let payload_round = last_block.first_block().round(); - let ready_block = pending_block_store.remove_ready_block( + let ready_block = pending_block_store.lock().remove_ready_block( current_epoch, payload_round, - &block_payload_store, + block_payload_store.clone(), ); // Verify that the last block was removed assert_eq!(ready_block, Some(last_block)); // Verify that the store is empty - verify_pending_blocks(&pending_block_store, 0, &vec![]); + verify_pending_blocks(pending_block_store.clone(), 0, &vec![]); } #[test] @@ -631,13 +732,15 @@ mod test { max_num_pending_blocks: max_num_pending_blocks as u64, ..ConsensusObserverConfig::default() }; - let pending_block_store = PendingBlockStore::new(consensus_observer_config); + let pending_block_store = Arc::new(Mutex::new(PendingBlockStore::new( + consensus_observer_config, + 
))); // Insert the maximum number of blocks into the store let current_epoch = 10; let starting_round = 100; let pending_blocks = create_and_add_pending_blocks( - &pending_block_store, + pending_block_store.clone(), max_num_pending_blocks, current_epoch, starting_round, @@ -645,21 +748,23 @@ mod test { ); // Create an empty block payload store - let block_payload_store = BlockPayloadStore::new(consensus_observer_config); + let block_payload_store = Arc::new(Mutex::new(BlockPayloadStore::new( + consensus_observer_config, + ))); // Remove the third block (which is not ready) let third_block = pending_blocks[2].clone(); let third_block_round = third_block.first_block().round(); - let ready_block = pending_block_store.remove_ready_block( + let ready_block = pending_block_store.lock().remove_ready_block( current_epoch, third_block_round, - &block_payload_store, + block_payload_store.clone(), ); assert!(ready_block.is_none()); // Verify that the first three blocks were removed verify_pending_blocks( - &pending_block_store, + pending_block_store.clone(), max_num_pending_blocks - 3, &pending_blocks[3..].to_vec(), ); @@ -667,20 +772,20 @@ mod test { // Remove the last block (which is not ready) let last_block = pending_blocks.last().unwrap().clone(); let last_block_round = last_block.first_block().round(); - let ready_block = pending_block_store.remove_ready_block( + let ready_block = pending_block_store.lock().remove_ready_block( current_epoch, last_block_round, - &block_payload_store, + block_payload_store.clone(), ); assert!(ready_block.is_none()); // Verify that the store is now empty - verify_pending_blocks(&pending_block_store, 0, &vec![]); + verify_pending_blocks(pending_block_store.clone(), 0, &vec![]); } /// Creates and adds the specified number of blocks to the pending block store fn create_and_add_pending_blocks( - pending_block_store: &PendingBlockStore, + pending_block_store: Arc>, num_pending_blocks: usize, epoch: u64, starting_round: Round, @@ -732,7 +837,9 
@@ mod test { let ordered_block = OrderedBlock::new(pipelined_blocks, ordered_proof.clone()); // Insert the ordered block into the pending block store - pending_block_store.insert_pending_block(ordered_block.clone()); + pending_block_store + .lock() + .insert_pending_block(ordered_block.clone()); // Add the ordered block to the pending blocks pending_blocks.push(ordered_block); @@ -743,31 +850,37 @@ mod test { /// Inserts payloads into the payload store for the ordered block fn insert_payloads_for_ordered_block( - block_payload_store: &mut BlockPayloadStore, + block_payload_store: Arc>, ordered_block: &OrderedBlock, ) { for block in ordered_block.blocks() { let block_payload = BlockPayload::new(block.block_info(), BlockTransactionPayload::empty()); - block_payload_store.insert_block_payload(block_payload, true); + block_payload_store + .lock() + .insert_block_payload(block_payload, true); } } /// Verifies that the pending block store contains the expected blocks fn verify_pending_blocks( - pending_block_store: &PendingBlockStore, + pending_block_store: Arc>, num_expected_blocks: usize, pending_blocks: &Vec, ) { // Check the number of pending blocks - let blocks_without_payloads = pending_block_store.blocks_without_payloads.lock(); - assert_eq!(blocks_without_payloads.len(), num_expected_blocks); + assert_eq!( + pending_block_store.lock().blocks_without_payloads.len(), + num_expected_blocks + ); // Check that all pending blocks are in the store for pending_block in pending_blocks { let first_block = pending_block.first_block(); assert_eq!( - blocks_without_payloads + pending_block_store + .lock() + .blocks_without_payloads .get(&(first_block.epoch(), first_block.round())) .unwrap(), pending_block diff --git a/consensus/src/consensus_observer/observer/subscription.rs b/consensus/src/consensus_observer/observer/subscription.rs index fe29aa6a5a5..5d9ae4d43de 100644 --- a/consensus/src/consensus_observer/observer/subscription.rs +++ 
b/consensus/src/consensus_observer/observer/subscription.rs @@ -1,25 +1,17 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 -use crate::consensus_observer::common::{ - error::Error, - logging::{LogEntry, LogSchema}, -}; +use crate::consensus_observer::{common::error::Error, observer::subscription_utils}; use aptos_config::{config::ConsensusObserverConfig, network_id::PeerNetworkId}; -use aptos_logger::{info, warn}; -use aptos_network::{application::metadata::PeerMetadata, ProtocolId}; +use aptos_network::application::metadata::PeerMetadata; use aptos_storage_interface::DbReader; use aptos_time_service::{TimeService, TimeServiceTrait}; -use ordered_float::OrderedFloat; use std::{ - collections::{BTreeMap, HashMap, HashSet}, + collections::{HashMap, HashSet}, sync::Arc, time::{Duration, Instant}, }; -// A useful constant for representing the maximum ping latency -const MAX_PING_LATENCY_SECS: f64 = 10_000.0; - /// A single consensus observer subscription pub struct ConsensusObserverSubscription { // The configuration of the consensus observer @@ -31,7 +23,7 @@ pub struct ConsensusObserverSubscription { // The peer network id of the active subscription peer_network_id: PeerNetworkId, - // The timestamp of the last message received from the peer + // The timestamp of the last message received for the subscription last_message_receive_time: Instant, // The timestamp and connected peers for the last optimality check @@ -66,12 +58,40 @@ impl ConsensusObserverSubscription { } } + /// Checks if the subscription is still healthy. If not, an error + /// is returned indicating the reason for the subscription failure. 
+ pub fn check_subscription_health( + &mut self, + connected_peers_and_metadata: &HashMap, + ) -> Result<(), Error> { + // Verify the subscription peer is still connected + let peer_network_id = self.get_peer_network_id(); + if !connected_peers_and_metadata.contains_key(&peer_network_id) { + return Err(Error::SubscriptionDisconnected(format!( + "The peer: {:?} is no longer connected!", + peer_network_id + ))); + } + + // Verify the subscription has not timed out + self.check_subscription_timeout()?; + + // Verify that the DB is continuing to sync and commit new data + self.check_syncing_progress()?; + + // Verify that the subscription peer is still optimal + self.check_subscription_peer_optimality(connected_peers_and_metadata)?; + + // The subscription seems healthy + Ok(()) + } + /// Verifies that the peer currently selected for the subscription is /// optimal. This is only done if: (i) the peers have changed since the /// last check; or (ii) enough time has elapsed to force a refresh. - pub fn check_subscription_peer_optimality( + fn check_subscription_peer_optimality( &mut self, - peers_and_metadata: HashMap, + peers_and_metadata: &HashMap, ) -> Result<(), Error> { // Get the last optimality check time and connected peers let (last_optimality_check_time, last_optimality_check_peers) = @@ -106,16 +126,21 @@ impl ConsensusObserverSubscription { self.last_optimality_check_time_and_peers = (time_now, current_connected_peers); // Sort the peers by subscription optimality - let sorted_peers = sort_peers_by_subscription_optimality(&peers_and_metadata); - - // Verify that we're subscribed to the most optimal peer - if let Some(optimal_peer) = sorted_peers.first() { - if *optimal_peer != self.peer_network_id { - return Err(Error::SubscriptionSuboptimal(format!( - "Subscription to peer: {} is no longer optimal! 
New optimal peer: {}", - self.peer_network_id, optimal_peer - ))); - } + let sorted_peers = + subscription_utils::sort_peers_by_subscription_optimality(peers_and_metadata); + + // Verify that this peer is one of the most optimal peers + let max_concurrent_subscriptions = + self.consensus_observer_config.max_concurrent_subscriptions as usize; + if !sorted_peers + .iter() + .take(max_concurrent_subscriptions) + .any(|peer| peer == &self.peer_network_id) + { + return Err(Error::SubscriptionSuboptimal(format!( + "Subscription to peer: {} is no longer optimal! New optimal peers: {:?}", + self.peer_network_id, sorted_peers + ))); } Ok(()) @@ -123,7 +148,7 @@ impl ConsensusObserverSubscription { /// Verifies that the subscription has not timed out based /// on the last received message time. - pub fn check_subscription_timeout(&self) -> Result<(), Error> { + fn check_subscription_timeout(&self) -> Result<(), Error> { // Calculate the duration since the last message let time_now = self.time_service.now(); let duration_since_last_message = time_now.duration_since(self.last_message_receive_time); @@ -142,7 +167,7 @@ impl ConsensusObserverSubscription { } /// Verifies that the DB is continuing to sync and commit new data - pub fn check_syncing_progress(&mut self) -> Result<(), Error> { + fn check_syncing_progress(&mut self) -> Result<(), Error> { // Get the current synced version from storage let current_synced_version = self.db_reader @@ -185,151 +210,10 @@ impl ConsensusObserverSubscription { self.peer_network_id } - /// Verifies the given message is from the expected peer - pub fn verify_message_sender(&mut self, peer_network_id: &PeerNetworkId) -> Result<(), Error> { - // Verify the message is from the expected peer - if self.peer_network_id != *peer_network_id { - return Err(Error::UnexpectedError(format!( - "Received message from unexpected peer: {}! 
Subscribed to: {}", - peer_network_id, self.peer_network_id - ))); - } - - // Update the last message receive time + /// Updates the last message receive time to the current time + pub fn update_last_message_receive_time(&mut self) { self.last_message_receive_time = self.time_service.now(); - - Ok(()) - } -} - -/// Gets the distance from the validators for the specified peer from the peer metadata -fn get_distance_for_peer( - peer_network_id: &PeerNetworkId, - peer_metadata: &PeerMetadata, -) -> Option { - // Get the distance for the peer - let peer_monitoring_metadata = peer_metadata.get_peer_monitoring_metadata(); - let distance = peer_monitoring_metadata - .latest_network_info_response - .as_ref() - .map(|response| response.distance_from_validators); - - // If the distance is missing, log a warning - if distance.is_none() { - warn!( - LogSchema::new(LogEntry::ConsensusObserver).message(&format!( - "Unable to get distance for peer! Peer: {:?}", - peer_network_id - )) - ); - } - - distance -} - -/// Gets the latency for the specified peer from the peer metadata -fn get_latency_for_peer( - peer_network_id: &PeerNetworkId, - peer_metadata: &PeerMetadata, -) -> Option { - // Get the latency for the peer - let peer_monitoring_metadata = peer_metadata.get_peer_monitoring_metadata(); - let latency = peer_monitoring_metadata.average_ping_latency_secs; - - // If the latency is missing, log a warning - if latency.is_none() { - warn!( - LogSchema::new(LogEntry::ConsensusObserver).message(&format!( - "Unable to get latency for peer! Peer: {:?}", - peer_network_id - )) - ); - } - - latency -} - -/// Sorts the peers by subscription optimality (in descending order of -/// optimality). This requires: (i) sorting the peers by distance from the -/// validator set and ping latency (lower values are more optimal); and (ii) -/// filtering out peers that don't support consensus observer. 
-/// -/// Note: we prioritize distance over latency as we want to avoid close -/// but not up-to-date peers. If peers don't have sufficient metadata -/// for sorting, they are given a lower priority. -pub fn sort_peers_by_subscription_optimality( - peers_and_metadata: &HashMap, -) -> Vec { - // Group peers and latencies by validator distance, i.e., distance -> [(peer, latency)] - let mut unsupported_peers = Vec::new(); - let mut peers_and_latencies_by_distance = BTreeMap::new(); - for (peer_network_id, peer_metadata) in peers_and_metadata { - // Verify that the peer supports consensus observer - if !supports_consensus_observer(peer_metadata) { - unsupported_peers.push(*peer_network_id); - continue; // Skip the peer - } - - // Get the distance and latency for the peer - let distance = get_distance_for_peer(peer_network_id, peer_metadata); - let latency = get_latency_for_peer(peer_network_id, peer_metadata); - - // If the distance is not found, use the maximum distance - let distance = - distance.unwrap_or(aptos_peer_monitoring_service_types::MAX_DISTANCE_FROM_VALIDATORS); - - // If the latency is not found, use a large latency - let latency = latency.unwrap_or(MAX_PING_LATENCY_SECS); - - // Add the peer and latency to the distance group - peers_and_latencies_by_distance - .entry(distance) - .or_insert_with(Vec::new) - .push((*peer_network_id, OrderedFloat(latency))); - } - - // If there are peers that don't support consensus observer, log them - if !unsupported_peers.is_empty() { - info!( - LogSchema::new(LogEntry::ConsensusObserver).message(&format!( - "Found {} peers that don't support consensus observer! Peers: {:?}", - unsupported_peers.len(), - unsupported_peers - )) - ); } - - // Sort the peers by distance and latency. Note: BTreeMaps are - // sorted by key, so the entries will be sorted by distance in ascending order. 
- let mut sorted_peers = Vec::new(); - for (_, mut peers_and_latencies) in peers_and_latencies_by_distance { - // Sort the peers by latency - peers_and_latencies.sort_by_key(|(_, latency)| *latency); - - // Add the peers to the sorted list (in sorted order) - sorted_peers.extend( - peers_and_latencies - .into_iter() - .map(|(peer_network_id, _)| peer_network_id), - ); - } - - // Log the sorted peers - info!( - LogSchema::new(LogEntry::ConsensusObserver).message(&format!( - "Sorted {} peers by subscription optimality! Peers: {:?}", - sorted_peers.len(), - sorted_peers - )) - ); - - sorted_peers -} - -/// Returns true iff the peer metadata indicates support for consensus observer -fn supports_consensus_observer(peer_metadata: &PeerMetadata) -> bool { - peer_metadata.supports_protocol(ProtocolId::ConsensusObserver) - && peer_metadata.supports_protocol(ProtocolId::ConsensusObserverRpc) } #[cfg(test)] @@ -340,12 +224,12 @@ mod test { use aptos_network::{ protocols::wire::handshake::v1::{MessagingProtocolVersion, ProtocolIdSet}, transport::{ConnectionId, ConnectionMetadata}, + ProtocolId, }; - use aptos_peer_monitoring_service_types::{ - response::NetworkInformationResponse, PeerMonitoringMetadata, - }; + use aptos_peer_monitoring_service_types::PeerMonitoringMetadata; use aptos_storage_interface::Result; use aptos_types::{network_address::NetworkAddress, transaction::Version}; + use claims::assert_matches; use mockall::mock; // This is a simple mock of the DbReader (it generates a MockDatabaseReader) @@ -357,12 +241,15 @@ mod test { } #[test] - fn check_subscription_peer_optimality() { - // Create a consensus observer config and time service - let consensus_observer_config = ConsensusObserverConfig::default(); - let time_service = TimeService::mock(); + fn test_check_subscription_health_connected_and_timeout() { + // Create a consensus observer config + let consensus_observer_config = ConsensusObserverConfig { + max_synced_version_timeout_ms: 100_000_000, // Use a large 
value so that we don't get DB progress errors + ..ConsensusObserverConfig::default() + }; // Create a new observer subscription + let time_service = TimeService::mock(); let peer_network_id = PeerNetworkId::random(); let mut subscription = ConsensusObserverSubscription::new( consensus_observer_config, @@ -371,47 +258,180 @@ mod test { time_service.clone(), ); - // Verify the time and peers for the last optimality check - let (last_check_time, last_check_peers) = - subscription.last_optimality_check_time_and_peers.clone(); - assert_eq!(last_check_time, time_service.now()); - assert!(last_check_peers.is_empty()); + // Verify that the subscription is unhealthy (the peer is not connected) + assert_matches!( + subscription.check_subscription_health(&HashMap::new()), + Err(Error::SubscriptionDisconnected(_)) + ); // Create a peers and metadata map for the subscription let mut peers_and_metadata = HashMap::new(); - peers_and_metadata.insert( + add_metadata_for_peer(&mut peers_and_metadata, peer_network_id, true, false); + + // Elapse enough time to timeout the subscription + let mock_time_service = time_service.into_mock(); + mock_time_service.advance(Duration::from_millis( + consensus_observer_config.max_subscription_timeout_ms + 1, + )); + + // Verify that the subscription has timed out + assert_matches!( + subscription.check_subscription_health(&peers_and_metadata), + Err(Error::SubscriptionTimeout(_)) + ); + } + + #[test] + fn test_check_subscription_health_progress() { + // Create a consensus observer config with a large timeout + let consensus_observer_config = ConsensusObserverConfig { + max_subscription_timeout_ms: 100_000_000, // Use a large value so that we don't time out + ..ConsensusObserverConfig::default() + }; + + // Create a mock DB reader with expectations + let first_synced_version = 1; + let second_synced_version = 2; + let mut mock_db_reader = MockDatabaseReader::new(); + mock_db_reader + .expect_get_latest_ledger_info_version() + .returning(move || 
Ok(first_synced_version)) + .times(1); // Only allow one call for the first version + mock_db_reader + .expect_get_latest_ledger_info_version() + .returning(move || Ok(second_synced_version)); // Allow multiple calls for the second version + + // Create a new observer subscription + let peer_network_id = PeerNetworkId::random(); + let time_service = TimeService::mock(); + let mut subscription = ConsensusObserverSubscription::new( + consensus_observer_config, + Arc::new(mock_db_reader), peer_network_id, - PeerMetadata::new_for_test( - create_connection_metadata(peer_network_id, true), - PeerMonitoringMetadata::new(None, None, None, None, None), - ), + time_service.clone(), + ); + + // Verify that the DB is making sync progress and that the highest synced version is updated + let mock_time_service = time_service.into_mock(); + verify_subscription_syncing_progress( + &mut subscription, + first_synced_version, + mock_time_service.now(), + ); + + // Elapse enough time to timeout the subscription + mock_time_service.advance(Duration::from_millis( + consensus_observer_config.max_synced_version_timeout_ms + 1, + )); + + // Verify that the DB is still making sync progress (the next version is higher) + verify_subscription_syncing_progress( + &mut subscription, + second_synced_version, + mock_time_service.now(), + ); + + // Elapse enough time to timeout the subscription + mock_time_service.advance(Duration::from_millis( + consensus_observer_config.max_synced_version_timeout_ms + 1, + )); + + // Verify that the DB is not making sync progress and that the subscription has timed out + assert_matches!( + subscription.check_syncing_progress(), + Err(Error::SubscriptionProgressStopped(_)) + ); + } + + #[test] + fn test_check_subscription_health_optimality() { + // Create a consensus observer config with a single subscription and large timeouts + let consensus_observer_config = ConsensusObserverConfig { + max_concurrent_subscriptions: 1, + max_subscription_timeout_ms: 100_000_000, 
// Use a large value so that we don't time out + max_synced_version_timeout_ms: 100_000_000, // Use a large value so that we don't get DB progress errors + ..ConsensusObserverConfig::default() + }; + + // Create a mock DB reader with expectations + let mut mock_db_reader = MockDatabaseReader::new(); + mock_db_reader + .expect_get_latest_ledger_info_version() + .returning(move || Ok(1)); + + // Create a new observer subscription + let time_service = TimeService::mock(); + let peer_network_id = PeerNetworkId::random(); + let mut subscription = ConsensusObserverSubscription::new( + consensus_observer_config, + Arc::new(mock_db_reader), + peer_network_id, + time_service.clone(), ); + // Create a peers and metadata map for the subscription + let mut peers_and_metadata = HashMap::new(); + add_metadata_for_peer(&mut peers_and_metadata, peer_network_id, true, false); + + // Verify that the subscription is healthy + assert!(subscription + .check_subscription_health(&peers_and_metadata) + .is_ok()); + // Add a more optimal peer to the set of peers let new_optimal_peer = PeerNetworkId::random(); - peers_and_metadata.insert( - new_optimal_peer, - PeerMetadata::new_for_test( - create_connection_metadata(new_optimal_peer, true), - PeerMonitoringMetadata::new(Some(0.1), None, None, None, None), - ), + add_metadata_for_peer(&mut peers_and_metadata, new_optimal_peer, true, true); + + // Elapse enough time for a peer optimality check + let mock_time_service = time_service.into_mock(); + mock_time_service.advance(Duration::from_millis( + consensus_observer_config.subscription_peer_change_interval_ms + 1, + )); + + // Verify that the subscription is no longer optimal + assert_matches!( + subscription.check_subscription_health(&peers_and_metadata), + Err(Error::SubscriptionSuboptimal(_)) + ); + } + + #[test] + fn test_check_subscription_peer_optimality_single() { + // Create a consensus observer config with a maximum of 1 subscription + let consensus_observer_config = 
create_observer_config(1); + + // Create a new observer subscription + let time_service = TimeService::mock(); + let peer_network_id = PeerNetworkId::random(); + let mut subscription = ConsensusObserverSubscription::new( + consensus_observer_config, + Arc::new(MockDatabaseReader::new()), + peer_network_id, + time_service.clone(), ); + // Verify the time and peers for the last optimality check + let mock_time_service = time_service.into_mock(); + verify_last_check_time_and_peers(&subscription, mock_time_service.now(), HashSet::new()); + + // Create a peers and metadata map for the subscription + let mut peers_and_metadata = HashMap::new(); + add_metadata_for_peer(&mut peers_and_metadata, peer_network_id, true, false); + + // Add a more optimal peer to the set of peers + let new_optimal_peer = PeerNetworkId::random(); + add_metadata_for_peer(&mut peers_and_metadata, new_optimal_peer, true, true); + // Verify that the peer is optimal (not enough time has elapsed to check) - assert!(subscription - .check_subscription_peer_optimality(peers_and_metadata.clone()) - .is_ok()); + verify_subscription_peer_optimality(&mut subscription, &peers_and_metadata, true); // Elapse some amount of time (but not enough to check optimality) - let mock_time_service = time_service.into_mock(); mock_time_service.advance(Duration::from_millis( consensus_observer_config.subscription_peer_change_interval_ms / 2, )); // Verify that the peer is still optimal (not enough time has elapsed to check) - assert!(subscription - .check_subscription_peer_optimality(peers_and_metadata.clone()) - .is_ok()); + verify_subscription_peer_optimality(&mut subscription, &peers_and_metadata, true); // Elapse enough time to check the peer optimality mock_time_service.advance(Duration::from_millis( @@ -419,17 +439,13 @@ mod test { )); // Verify that the peer is no longer optimal (a more optimal peer has been added) - assert!(subscription - .check_subscription_peer_optimality(peers_and_metadata.clone()) - .is_err()); 
+ verify_subscription_peer_optimality(&mut subscription, &peers_and_metadata, false); // Verify the time of the last peer optimality check - let (last_check_time, last_check_peers) = - subscription.last_optimality_check_time_and_peers.clone(); - assert_eq!(last_check_time, mock_time_service.now()); - assert_eq!( - last_check_peers, - peers_and_metadata.keys().cloned().collect() + verify_last_check_time_and_peers( + &subscription, + mock_time_service.now(), + peers_and_metadata.keys().cloned().collect(), ); // Elapse enough time to check the peer optimality @@ -438,35 +454,29 @@ mod test { )); // Verify that the peer is now optimal (the peers haven't changed) - assert!(subscription - .check_subscription_peer_optimality(peers_and_metadata.clone()) - .is_ok()); + verify_subscription_peer_optimality(&mut subscription, &peers_and_metadata, true); // Remove the current peer from the list of peers peers_and_metadata.remove(&peer_network_id); // Verify that the peer is not optimal (the peers have changed) - assert!(subscription - .check_subscription_peer_optimality(peers_and_metadata.clone()) - .is_err()); + verify_subscription_peer_optimality(&mut subscription, &peers_and_metadata, false); // Verify the time of the last peer optimality check - let (last_check_time, last_check_peers) = - subscription.last_optimality_check_time_and_peers.clone(); - assert_eq!(last_check_time, mock_time_service.now()); - assert_eq!( - last_check_peers, - peers_and_metadata.keys().cloned().collect() + verify_last_check_time_and_peers( + &subscription, + mock_time_service.now(), + peers_and_metadata.keys().cloned().collect(), ); } #[test] - fn check_subscription_peer_refresh() { - // Create a consensus observer config and time service - let consensus_observer_config = ConsensusObserverConfig::default(); - let time_service = TimeService::mock(); + fn test_check_subscription_peer_optimality_multiple() { + // Create a consensus observer config with a maximum of 2 subscriptions + let 
consensus_observer_config = create_observer_config(2); // Create a new observer subscription + let time_service = TimeService::mock(); let peer_network_id = PeerNetworkId::random(); let mut subscription = ConsensusObserverSubscription::new( consensus_observer_config, @@ -477,33 +487,73 @@ mod test { // Create a peers and metadata map for the subscription let mut peers_and_metadata = HashMap::new(); - peers_and_metadata.insert( + add_metadata_for_peer(&mut peers_and_metadata, peer_network_id, true, false); + + // Add a more optimal peer to the set of peers + let new_optimal_peer = PeerNetworkId::random(); + add_metadata_for_peer(&mut peers_and_metadata, new_optimal_peer, true, true); + + // Elapse enough time to check the peer optimality + let mock_time_service = time_service.into_mock(); + mock_time_service.advance(Duration::from_millis( + consensus_observer_config.subscription_peer_change_interval_ms + 1, + )); + + // Verify that the peer is optimal (it's in the top 2 most optimal peers) + verify_subscription_peer_optimality(&mut subscription, &peers_and_metadata, true); + + // Add another more optimal peer to the set of peers + let another_optimal_peer = PeerNetworkId::random(); + add_metadata_for_peer(&mut peers_and_metadata, another_optimal_peer, true, true); + + // Elapse enough time to check the peer optimality + mock_time_service.advance(Duration::from_millis( + consensus_observer_config.subscription_peer_change_interval_ms + 1, + )); + + // Verify that the peer is no longer optimal (it's not in the top 2 most optimal peers) + verify_subscription_peer_optimality(&mut subscription, &peers_and_metadata, false); + + // Remove the previous optimal peer from the list of peers + peers_and_metadata.remove(&new_optimal_peer); + + // Elapse enough time to check the peer optimality + mock_time_service.advance(Duration::from_millis( + consensus_observer_config.subscription_peer_change_interval_ms + 1, + )); + + // Verify that the peer is optimal (it's in the top 2 most 
optimal peers) + verify_subscription_peer_optimality(&mut subscription, &peers_and_metadata, true); + } + + #[test] + fn test_check_subscription_peer_optimality_refresh() { + // Create a consensus observer config with a maximum of 1 subscription + let consensus_observer_config = create_observer_config(1); + + // Create a new observer subscription + let time_service = TimeService::mock(); + let peer_network_id = PeerNetworkId::random(); + let mut subscription = ConsensusObserverSubscription::new( + consensus_observer_config, + Arc::new(MockDatabaseReader::new()), peer_network_id, - PeerMetadata::new_for_test( - create_connection_metadata(peer_network_id, true), - PeerMonitoringMetadata::new(None, None, None, None, None), - ), + time_service.clone(), ); + // Create a peers and metadata map for the subscription + let mut peers_and_metadata = HashMap::new(); + add_metadata_for_peer(&mut peers_and_metadata, peer_network_id, true, false); + // Verify that the peer is optimal (not enough time has elapsed to refresh) - assert!(subscription - .check_subscription_peer_optimality(peers_and_metadata.clone()) - .is_ok()); + verify_subscription_peer_optimality(&mut subscription, &peers_and_metadata, true); // Add a more optimal peer to the set of peers let new_optimal_peer = PeerNetworkId::random(); - peers_and_metadata.insert( - new_optimal_peer, - PeerMetadata::new_for_test( - create_connection_metadata(new_optimal_peer, true), - PeerMonitoringMetadata::new(Some(0.1), None, None, None, None), - ), - ); + add_metadata_for_peer(&mut peers_and_metadata, new_optimal_peer, true, true); // Verify that the peer is still optimal (not enough time has elapsed to refresh) - assert!(subscription - .check_subscription_peer_optimality(peers_and_metadata.clone()) - .is_ok()); + verify_subscription_peer_optimality(&mut subscription, &peers_and_metadata, true); // Elapse enough time to refresh optimality let mock_time_service = time_service.into_mock(); @@ -512,9 +562,7 @@ mod test { )); // 
Verify that the peer is no longer optimal - assert!(subscription - .check_subscription_peer_optimality(peers_and_metadata.clone()) - .is_err()); + verify_subscription_peer_optimality(&mut subscription, &peers_and_metadata, false); // Elapse some amount of time (but not enough to refresh) mock_time_service.advance(Duration::from_millis( @@ -522,9 +570,7 @@ mod test { )); // Verify that the peer is now optimal (not enough time has elapsed to refresh) - assert!(subscription - .check_subscription_peer_optimality(peers_and_metadata.clone()) - .is_ok()); + verify_subscription_peer_optimality(&mut subscription, &peers_and_metadata, true); // Remove the more optimal peer from the list of peers peers_and_metadata.remove(&new_optimal_peer); @@ -535,23 +581,23 @@ mod test { )); // Verify that the peer is optimal - assert!(subscription - .check_subscription_peer_optimality(peers_and_metadata) - .is_ok()); + verify_subscription_peer_optimality(&mut subscription, &peers_and_metadata, true); // Verify the time of the last peer optimality check - let current_time = mock_time_service.now(); - let (last_check_time, _) = subscription.last_optimality_check_time_and_peers; - assert_eq!(last_check_time, current_time); + verify_last_check_time_and_peers( + &subscription, + mock_time_service.now(), + peers_and_metadata.keys().cloned().collect(), + ); } #[test] - fn check_subscription_peer_optimality_supported() { - // Create a consensus observer config and time service - let consensus_observer_config = ConsensusObserverConfig::default(); - let time_service = TimeService::mock(); + fn test_check_subscription_peer_optimality_supported() { + // Create a consensus observer config with a maximum of 1 subscription + let consensus_observer_config = create_observer_config(1); // Create a new observer subscription + let time_service = TimeService::mock(); let peer_network_id = PeerNetworkId::random(); let mut subscription = ConsensusObserverSubscription::new( consensus_observer_config, @@ -562,13 
+608,7 @@ mod test { // Insert empty metadata for the subscription peer let mut peers_and_metadata = HashMap::new(); - peers_and_metadata.insert( - peer_network_id, - PeerMetadata::new_for_test( - create_connection_metadata(peer_network_id, true), - PeerMonitoringMetadata::new(None, None, None, None, None), - ), - ); + add_metadata_for_peer(&mut peers_and_metadata, peer_network_id, true, false); // Elapse enough time to check optimality let mock_time_service = time_service.into_mock(); @@ -577,19 +617,11 @@ mod test { )); // Verify that the peer is still optimal (there are no other peers) - assert!(subscription - .check_subscription_peer_optimality(peers_and_metadata.clone()) - .is_ok()); + verify_subscription_peer_optimality(&mut subscription, &peers_and_metadata, true); // Add a more optimal peer without consensus observer support let unsupported_peer = PeerNetworkId::random(); - peers_and_metadata.insert( - unsupported_peer, - PeerMetadata::new_for_test( - create_connection_metadata(unsupported_peer, false), - PeerMonitoringMetadata::new(Some(0.1), None, None, None, None), - ), - ); + add_metadata_for_peer(&mut peers_and_metadata, unsupported_peer, false, false); // Elapse enough time to check optimality mock_time_service.advance(Duration::from_millis( @@ -597,19 +629,11 @@ mod test { )); // Verify that the peer is still optimal (the unsupported peer is ignored) - assert!(subscription - .check_subscription_peer_optimality(peers_and_metadata.clone()) - .is_ok()); + verify_subscription_peer_optimality(&mut subscription, &peers_and_metadata, true); // Add another more optimal peer with consensus observer support let supported_peer = PeerNetworkId::random(); - peers_and_metadata.insert( - supported_peer, - PeerMetadata::new_for_test( - create_connection_metadata(supported_peer, true), - PeerMonitoringMetadata::new(Some(0.01), None, None, None, None), - ), - ); + add_metadata_for_peer(&mut peers_and_metadata, supported_peer, true, true); // Elapse enough time to 
check optimality mock_time_service.advance(Duration::from_millis( @@ -617,9 +641,7 @@ mod test { )); // Verify that the peer is no longer optimal - assert!(subscription - .check_subscription_peer_optimality(peers_and_metadata.clone()) - .is_err()); + verify_subscription_peer_optimality(&mut subscription, &peers_and_metadata, false); } #[test] @@ -637,7 +659,7 @@ mod test { // Verify that the subscription has not timed out and that the last message time is updated let current_time = time_service.now(); - assert!(subscription.check_subscription_timeout().is_ok()); + verify_subscription_time_out(&subscription, false); assert_eq!(subscription.last_message_receive_time, current_time); // Elapse some amount of time (but not enough to timeout) @@ -647,17 +669,15 @@ mod test { )); // Verify that the subscription has not timed out - assert!(subscription.check_subscription_timeout().is_ok()); + verify_subscription_time_out(&subscription, false); - // Verify a new message is received successfully and that the last message time is updated + // Update the last message receive time let current_time = mock_time_service.now(); - subscription - .verify_message_sender(&peer_network_id) - .unwrap(); + subscription.update_last_message_receive_time(); assert_eq!(subscription.last_message_receive_time, current_time); // Verify that the subscription has not timed out - assert!(subscription.check_subscription_timeout().is_ok()); + verify_subscription_time_out(&subscription, false); // Elapse enough time to timeout the subscription mock_time_service.advance(Duration::from_millis( @@ -665,7 +685,7 @@ mod test { )); // Verify that the subscription has timed out - assert!(subscription.check_subscription_timeout().is_err()); + verify_subscription_time_out(&subscription, true); } #[test] @@ -694,25 +714,23 @@ mod test { ); // Verify that the DB is making sync progress and that the highest synced version is updated - let current_time = time_service.now(); - 
assert!(subscription.check_syncing_progress().is_ok()); - assert_eq!( - subscription.highest_synced_version_and_time, - (first_synced_version, current_time) + let mock_time_service = time_service.into_mock(); + verify_subscription_syncing_progress( + &mut subscription, + first_synced_version, + mock_time_service.now(), ); // Elapse some amount of time (not enough to timeout) - let mock_time_service = time_service.into_mock(); mock_time_service.advance(Duration::from_millis( consensus_observer_config.max_synced_version_timeout_ms / 2, )); // Verify that the DB is still making sync progress - let current_time = mock_time_service.now(); - assert!(subscription.check_syncing_progress().is_ok()); - assert_eq!( - subscription.highest_synced_version_and_time, - (first_synced_version, current_time) + verify_subscription_syncing_progress( + &mut subscription, + first_synced_version, + mock_time_service.now(), ); // Elapse enough time to timeout the subscription @@ -721,11 +739,10 @@ mod test { )); // Verify that the DB is still making sync progress (the next version is higher) - let current_time = mock_time_service.now(); - assert!(subscription.check_syncing_progress().is_ok()); - assert_eq!( - subscription.highest_synced_version_and_time, - (second_synced_version, current_time) + verify_subscription_syncing_progress( + &mut subscription, + second_synced_version, + mock_time_service.now(), ); // Elapse enough time to timeout the subscription @@ -734,11 +751,31 @@ mod test { )); // Verify that the DB is not making sync progress and that the subscription has timed out - assert!(subscription.check_syncing_progress().is_err()); + assert_matches!( + subscription.check_syncing_progress(), + Err(Error::SubscriptionProgressStopped(_)) + ); + } + + #[test] + fn test_get_peer_network_id() { + // Create a new observer subscription + let consensus_observer_config = ConsensusObserverConfig::default(); + let peer_network_id = PeerNetworkId::random(); + let time_service = 
TimeService::mock(); + let subscription = ConsensusObserverSubscription::new( + consensus_observer_config, + Arc::new(MockDatabaseReader::new()), + peer_network_id, + time_service.clone(), + ); + + // Verify that the peer network id matches the expected value + assert_eq!(subscription.get_peer_network_id(), peer_network_id); } #[test] - fn test_verify_message_sender() { + fn test_update_last_message_receive_time() { // Create a new observer subscription let consensus_observer_config = ConsensusObserverConfig::default(); let peer_network_id = PeerNetworkId::random(); @@ -750,140 +787,39 @@ mod test { time_service.clone(), ); - // Verify that the message sender is valid - let current_time = time_service.now(); - assert!(subscription.verify_message_sender(&peer_network_id).is_ok()); - assert_eq!(subscription.last_message_receive_time, current_time); + // Verify the initial last message time + assert_eq!(subscription.last_message_receive_time, time_service.now()); // Elapse some amount of time let mock_time_service = time_service.into_mock(); mock_time_service.advance(Duration::from_secs(10)); - // Verify that the message sender is not the expected peer - let other_peer_network_id = PeerNetworkId::random(); - assert!(subscription - .verify_message_sender(&other_peer_network_id) - .is_err()); - assert_eq!(subscription.last_message_receive_time, current_time); - - // Elapse more time - mock_time_service.advance(Duration::from_secs(10)); - - // Verify that the message sender is the expected peer and that the last message time is updated + // Update the last message time let current_time = mock_time_service.now(); - assert!(subscription.verify_message_sender(&peer_network_id).is_ok()); - assert_eq!(subscription.last_message_receive_time, current_time); - } + subscription.update_last_message_receive_time(); - #[test] - fn test_sort_peers_by_distance_and_latency() { - // Sort an empty list of peers - let peers_and_metadata = HashMap::new(); - 
assert!(sort_peers_by_subscription_optimality(&peers_and_metadata).is_empty()); - - // Create a list of peers with empty metadata - let peers_and_metadata = create_peers_and_metadata(true, true, true, 10); - - // Sort the peers and verify the results - let sorted_peers = sort_peers_by_subscription_optimality(&peers_and_metadata); - assert_eq!(sorted_peers.len(), 10); - - // Create a list of peers with valid metadata - let peers_and_metadata = create_peers_and_metadata(false, false, true, 10); - - // Sort the peers - let sorted_peers = sort_peers_by_subscription_optimality(&peers_and_metadata); - - // Verify the order of the peers - verify_increasing_distance_latencies(&peers_and_metadata, &sorted_peers); - assert_eq!(sorted_peers.len(), 10); - - // Create a list of peers with and without metadata - let mut peers_and_metadata = create_peers_and_metadata(false, false, true, 10); - peers_and_metadata.extend(create_peers_and_metadata(true, false, true, 10)); - peers_and_metadata.extend(create_peers_and_metadata(false, true, true, 10)); - peers_and_metadata.extend(create_peers_and_metadata(true, true, true, 10)); - - // Sort the peers - let sorted_peers = sort_peers_by_subscription_optimality(&peers_and_metadata); - assert_eq!(sorted_peers.len(), 40); - - // Verify the order of the first 20 peers - let (first_20_peers, sorted_peers) = sorted_peers.split_at(20); - verify_increasing_distance_latencies(&peers_and_metadata, first_20_peers); - - // Verify that the next 10 peers only have latency metadata - let (next_10_peers, sorted_peers) = sorted_peers.split_at(10); - for sorted_peer in next_10_peers { - let peer_metadata = peers_and_metadata.get(sorted_peer).unwrap(); - assert!(get_distance_for_peer(sorted_peer, peer_metadata).is_none()); - assert!(get_latency_for_peer(sorted_peer, peer_metadata).is_some()); - } - - // Verify that the last 10 peers have no metadata - let (last_10_peers, remaining_peers) = sorted_peers.split_at(10); - for sorted_peer in last_10_peers { - 
let peer_metadata = peers_and_metadata.get(sorted_peer).unwrap(); - assert!(get_distance_for_peer(sorted_peer, peer_metadata).is_none()); - assert!(get_latency_for_peer(sorted_peer, peer_metadata).is_none()); - } - assert!(remaining_peers.is_empty()); + // Verify that the last message time is updated + assert_eq!(subscription.last_message_receive_time, current_time); } - #[test] - fn test_sort_peers_by_distance_and_latency_filter() { - // Sort an empty list of peers - let peers_and_metadata = HashMap::new(); - assert!(sort_peers_by_subscription_optimality(&peers_and_metadata).is_empty()); - - // Create a list of peers with empty metadata (with consensus observer support) - let peers_and_metadata = create_peers_and_metadata(true, true, true, 10); - - // Sort the peers and verify the results - let sorted_peers = sort_peers_by_subscription_optimality(&peers_and_metadata); - assert_eq!(sorted_peers.len(), 10); - - // Create a list of peers with empty metadata (without consensus observer support) - let peers_and_metadata = create_peers_and_metadata(true, true, false, 10); - - // Sort the peers and verify the results - let sorted_peers = sort_peers_by_subscription_optimality(&peers_and_metadata); - assert!(sorted_peers.is_empty()); - - // Create a list of peers with valid metadata (without consensus observer support) - let peers_and_metadata = create_peers_and_metadata(false, false, false, 10); - - // Sort the peers and verify the results - let sorted_peers = sort_peers_by_subscription_optimality(&peers_and_metadata); - assert!(sorted_peers.is_empty()); - - // Create a list of peers with empty metadata (with and without consensus observer support) - let mut peers_and_metadata = create_peers_and_metadata(true, true, true, 5); - peers_and_metadata.extend(create_peers_and_metadata(true, true, false, 50)); - - // Sort the peers and verify the results (only the supported peers are sorted) - let sorted_peers = sort_peers_by_subscription_optimality(&peers_and_metadata); - 
assert_eq!(sorted_peers.len(), 5); - - // Create a list of peers with valid metadata (with and without consensus observer support) - let mut peers_and_metadata = create_peers_and_metadata(false, false, true, 50); - peers_and_metadata.extend(create_peers_and_metadata(false, false, false, 10)); - - // Sort the peers and verify the results (only the supported peers are sorted) - let sorted_peers = sort_peers_by_subscription_optimality(&peers_and_metadata); - assert_eq!(sorted_peers.len(), 50); - - // Create a list of peers with valid metadata (with and without consensus observer support) - let supported_peer_and_metadata = create_peers_and_metadata(false, false, true, 1); - let unsupported_peer_and_metadata = create_peers_and_metadata(false, false, false, 1); - let mut peers_and_metadata = HashMap::new(); - peers_and_metadata.extend(supported_peer_and_metadata.clone()); - peers_and_metadata.extend(unsupported_peer_and_metadata); + /// Adds metadata for the specified peer to the map of peers and metadata + fn add_metadata_for_peer( + peers_and_metadata: &mut HashMap, + peer_network_id: PeerNetworkId, + support_consensus_observer: bool, + set_ping_latency: bool, + ) { + // Determine the ping latency to use for the peer + let average_ping_latency = if set_ping_latency { Some(0.1) } else { None }; - // Sort the peers and verify the results (only the supported peer is sorted) - let supported_peer = supported_peer_and_metadata.keys().next().unwrap(); - let sorted_peers = sort_peers_by_subscription_optimality(&peers_and_metadata); - assert_eq!(sorted_peers, vec![*supported_peer]); + // Add the peer and metadata to the map + peers_and_metadata.insert( + peer_network_id, + PeerMetadata::new_for_test( + create_connection_metadata(peer_network_id, support_consensus_observer), + PeerMonitoringMetadata::new(average_ping_latency, None, None, None, None), + ), + ); } /// Creates a new connection metadata for testing @@ -913,82 +849,69 @@ mod test { } } - /// Creates a new peer and 
metadata for testing - fn create_peer_and_metadata( - latency: Option, - distance_from_validators: Option, - support_consensus_observer: bool, - ) -> (PeerNetworkId, PeerMetadata) { - // Create a random peer - let peer_network_id = PeerNetworkId::random(); - - // Create a new peer metadata with the given latency and distance - let connection_metadata = - create_connection_metadata(peer_network_id, support_consensus_observer); - let network_information_response = - distance_from_validators.map(|distance| NetworkInformationResponse { - connected_peers: BTreeMap::new(), - distance_from_validators: distance, - }); - let peer_monitoring_metadata = - PeerMonitoringMetadata::new(latency, None, network_information_response, None, None); - let peer_metadata = - PeerMetadata::new_for_test(connection_metadata, peer_monitoring_metadata); - - (peer_network_id, peer_metadata) + /// Creates a consensus observer config with the given max concurrent subscriptions + fn create_observer_config(max_concurrent_subscriptions: u64) -> ConsensusObserverConfig { + ConsensusObserverConfig { + max_concurrent_subscriptions, + ..ConsensusObserverConfig::default() + } } - /// Creates a list of peers and metadata for testing - fn create_peers_and_metadata( - empty_latency: bool, - empty_distance: bool, - support_consensus_observer: bool, - num_peers: u64, - ) -> HashMap { - let mut peers_and_metadata = HashMap::new(); - for i in 1..num_peers + 1 { - // Determine the distance for the peer - let distance = if empty_distance { None } else { Some(i) }; + /// Verifies that the last check time and peers are as expected + fn verify_last_check_time_and_peers( + subscription: &ConsensusObserverSubscription, + expected_last_check_time: Instant, + expected_last_check_peers: HashSet, + ) { + // Get the last check time and peers from the subscription + let (last_check_time, last_check_peers) = + subscription.last_optimality_check_time_and_peers.clone(); + + // Verify the last check time and peers match the 
expected values + assert_eq!(last_check_time, expected_last_check_time); + assert_eq!(last_check_peers, expected_last_check_peers); + } - // Determine the latency for the peer - let latency = if empty_latency { None } else { Some(i as f64) }; + /// Verifies that the subscription time out matches the expected value + fn verify_subscription_time_out(subscription: &ConsensusObserverSubscription, timed_out: bool) { + // Check if the subscription has timed out + let result = subscription.check_subscription_timeout(); - // Create a new peer and metadata - let (peer_network_id, peer_metadata) = - create_peer_and_metadata(latency, distance, support_consensus_observer); - peers_and_metadata.insert(peer_network_id, peer_metadata); + // Verify the result + if timed_out { + assert_matches!(result, Err(Error::SubscriptionTimeout(_))); + } else { + assert!(result.is_ok()); } - peers_and_metadata } - /// Verifies that the distance and latencies for the peers are in - /// increasing order (with the distance taking precedence over the latency). 
- fn verify_increasing_distance_latencies( + /// Verifies that the peer optimality matches the expected value + fn verify_subscription_peer_optimality( + subscription: &mut ConsensusObserverSubscription, peers_and_metadata: &HashMap, - sorted_peers: &[PeerNetworkId], + is_optimal: bool, ) { - let mut previous_latency = None; - let mut previous_distance = 0; - for sorted_peer in sorted_peers { - // Get the distance and latency for the peer - let peer_metadata = peers_and_metadata.get(sorted_peer).unwrap(); - let distance = get_distance_for_peer(sorted_peer, peer_metadata).unwrap(); - let latency = get_latency_for_peer(sorted_peer, peer_metadata); - - // Verify the order of the peers - if distance == previous_distance { - if let Some(latency) = latency { - if let Some(previous_latency) = previous_latency { - assert!(latency >= previous_latency); - } - } - } else { - assert!(distance > previous_distance); - } + // Check the subscription peer optimality + let result = subscription.check_subscription_peer_optimality(peers_and_metadata); - // Update the previous latency and distance - previous_latency = latency; - previous_distance = distance; + // Verify the result + if is_optimal { + assert!(result.is_ok()); + } else { + assert_matches!(result, Err(Error::SubscriptionSuboptimal(_))); } } + + /// Verifies that the syncing progress is as expected + fn verify_subscription_syncing_progress( + subscription: &mut ConsensusObserverSubscription, + first_synced_version: Version, + time: Instant, + ) { + assert!(subscription.check_syncing_progress().is_ok()); + assert_eq!( + subscription.highest_synced_version_and_time, + (first_synced_version, time) + ); + } } diff --git a/consensus/src/consensus_observer/observer/subscription_manager.rs b/consensus/src/consensus_observer/observer/subscription_manager.rs index 2f124e5841c..24ae1f7d321 100644 --- a/consensus/src/consensus_observer/observer/subscription_manager.rs +++ 
b/consensus/src/consensus_observer/observer/subscription_manager.rs @@ -13,20 +13,27 @@ use crate::consensus_observer::{ ConsensusObserverMessage, ConsensusObserverRequest, ConsensusObserverResponse, }, }, - observer::{subscription, subscription::ConsensusObserverSubscription}, + observer::{subscription::ConsensusObserverSubscription, subscription_utils}, publisher::consensus_publisher::ConsensusPublisher, }; use aptos_config::{config::ConsensusObserverConfig, network_id::PeerNetworkId}; -use aptos_logger::{error, info, warn}; +use aptos_infallible::Mutex; +use aptos_logger::{info, warn}; use aptos_network::application::{interface::NetworkClient, metadata::PeerMetadata}; use aptos_storage_interface::DbReader; use aptos_time_service::TimeService; +use itertools::Itertools; use std::{collections::HashMap, sync::Arc}; +use tokio::task::JoinHandle; /// The manager for consensus observer subscriptions pub struct SubscriptionManager { - // The currently active consensus observer subscription - active_observer_subscription: Option, + // The currently active set of consensus observer subscriptions + active_observer_subscriptions: + Arc>>, + + // The active subscription creation task (if one is currently running) + active_subscription_creation_task: Arc>>>, // The consensus observer client to send network messages consensus_observer_client: @@ -56,7 +63,8 @@ impl SubscriptionManager { time_service: TimeService, ) -> Self { Self { - active_observer_subscription: None, + active_observer_subscriptions: Arc::new(Mutex::new(HashMap::new())), + active_subscription_creation_task: Arc::new(Mutex::new(None)), consensus_observer_client, consensus_observer_config, consensus_publisher, @@ -65,244 +73,224 @@ impl SubscriptionManager { } } - /// Checks if the active subscription is still healthy. If not, an error is returned. 
- fn check_active_subscription(&mut self) -> Result<(), Error> { - let active_observer_subscription = self.active_observer_subscription.take(); - if let Some(mut active_subscription) = active_observer_subscription { - // Check if the peer for the subscription is still connected - let peer_network_id = active_subscription.get_peer_network_id(); - let peer_still_connected = self - .get_connected_peers_and_metadata() - .map_or(false, |peers_and_metadata| { - peers_and_metadata.contains_key(&peer_network_id) - }); - - // Verify the peer is still connected - if !peer_still_connected { - return Err(Error::SubscriptionDisconnected( - "The peer is no longer connected!".to_string(), - )); - } - - // Verify the subscription has not timed out - active_subscription.check_subscription_timeout()?; - - // Verify that the DB is continuing to sync and commit new data - active_subscription.check_syncing_progress()?; - - // Verify that the subscription peer is optimal - if let Some(peers_and_metadata) = self.get_connected_peers_and_metadata() { - active_subscription.check_subscription_peer_optimality(peers_and_metadata)?; - } - - // The subscription seems healthy, we can keep it - self.active_observer_subscription = Some(active_subscription); + /// Checks if the subscription to the given peer is still healthy. + /// If not, an error explaining why it is unhealthy is returned. 
+ fn check_subscription_health( + &mut self, + connected_peers_and_metadata: &HashMap, + peer_network_id: PeerNetworkId, + ) -> Result<(), Error> { + // Get the active subscription for the peer + let mut active_observer_subscriptions = self.active_observer_subscriptions.lock(); + let active_subscription = active_observer_subscriptions.get_mut(&peer_network_id); + + // Check the health of the subscription + match active_subscription { + Some(active_subscription) => { + active_subscription.check_subscription_health(connected_peers_and_metadata) + }, + None => Err(Error::UnexpectedError(format!( + "The subscription to peer: {:?} is not active!", + peer_network_id + ))), } + } - Ok(()) + /// Checks the health of the active subscriptions. If any subscription is + /// unhealthy, it will be terminated and new subscriptions will be created. + /// This returns an error iff all subscriptions were unhealthy and terminated. + pub async fn check_and_manage_subscriptions(&mut self) -> Result<(), Error> { + // Get the subscription and connected peers + let initial_subscription_peers = self.get_active_subscription_peers(); + let connected_peers_and_metadata = self.get_connected_peers_and_metadata(); + + // Terminate any unhealthy subscriptions + let terminated_subscriptions = + self.terminate_unhealthy_subscriptions(&connected_peers_and_metadata); + + // Check if all subscriptions were terminated + let num_terminated_subscriptions = terminated_subscriptions.len(); + let all_subscriptions_terminated = num_terminated_subscriptions > 0 + && num_terminated_subscriptions == initial_subscription_peers.len(); + + // Calculate the number of new subscriptions to create + let remaining_subscription_peers = self.get_active_subscription_peers(); + let max_concurrent_subscriptions = + self.consensus_observer_config.max_concurrent_subscriptions as usize; + let num_subscriptions_to_create = + max_concurrent_subscriptions.saturating_sub(remaining_subscription_peers.len()); + + // Update the total 
subscription metrics + update_total_subscription_metrics(&remaining_subscription_peers); + + // Spawn a task to create the new subscriptions (asynchronously) + self.spawn_subscription_creation_task( + num_subscriptions_to_create, + remaining_subscription_peers, + terminated_subscriptions, + connected_peers_and_metadata, + ) + .await; + + // Return an error if all subscriptions were terminated + if all_subscriptions_terminated { + Err(Error::SubscriptionsReset(format!( + "All {:?} subscriptions were unhealthy and terminated!", + num_terminated_subscriptions, + ))) + } else { + Ok(()) + } } - /// Checks the health of the active subscription. If the subscription is - /// unhealthy, it will be terminated and a new subscription will be created. - /// This returns true iff a new subscription was created. - pub async fn check_and_manage_subscriptions(&mut self) -> bool { - // Get the peer ID of the currently active subscription (if any) - let active_subscription_peer = self - .active_observer_subscription - .as_ref() - .map(|subscription| subscription.get_peer_network_id()); + /// Returns the currently active subscription peers + fn get_active_subscription_peers(&self) -> Vec { + let active_observer_subscriptions = self.active_observer_subscriptions.lock(); + active_observer_subscriptions.keys().cloned().collect() + } - // If we have an active subscription, verify that the subscription - // is still healthy. If not, the subscription should be terminated. - if let Some(active_subscription_peer) = active_subscription_peer { - if let Err(error) = self.check_active_subscription() { - // Log the subscription termination + /// Gets the connected peers and metadata. If an error + /// occurred, it is logged and an empty map is returned. 
+ fn get_connected_peers_and_metadata(&self) -> HashMap { + self.consensus_observer_client + .get_peers_and_metadata() + .get_connected_peers_and_metadata() + .unwrap_or_else(|error| { + // Log the error warn!( LogSchema::new(LogEntry::ConsensusObserver).message(&format!( - "Terminating subscription to peer: {:?}! Error: {:?}", - active_subscription_peer, error + "Failed to get connected peers and metadata! Error: {:?}", + error )) ); - // Unsubscribe from the peer - self.unsubscribe_from_peer(active_subscription_peer); - - // Update the subscription termination metrics - self.update_subscription_termination_metrics(active_subscription_peer, error); - } - } - - // If we don't have a subscription, we should select a new peer to - // subscribe to. If we had a previous subscription (and it was - // terminated) it should be excluded from the selection process. - if self.active_observer_subscription.is_none() { - // Create a new observer subscription - self.create_new_observer_subscription(active_subscription_peer) - .await; - - // If we successfully created a new subscription, update the metrics - if let Some(active_subscription) = &self.active_observer_subscription { - // Update the subscription creation metrics - self.update_subscription_creation_metrics( - active_subscription.get_peer_network_id(), - ); - - return true; // A new subscription was created - } - } - - false // No new subscription was created + // Return an empty map + HashMap::new() + }) } - /// Creates a new observer subscription by sending subscription requests to - /// appropriate peers and waiting for a successful response. If `previous_subscription_peer` - /// is provided, it will be excluded from the selection process. - async fn create_new_observer_subscription( + /// Spawns a new subscription creation task to create + /// the specified number of new subscriptions. 
+ async fn spawn_subscription_creation_task( &mut self, - previous_subscription_peer: Option, + num_subscriptions_to_create: usize, + active_subscription_peers: Vec, + terminated_subscriptions: Vec<(PeerNetworkId, Error)>, + connected_peers_and_metadata: HashMap, ) { - // Get a set of sorted peers to service our subscription request - let sorted_peers = match self.sort_peers_for_subscription(previous_subscription_peer) { - Some(sorted_peers) => sorted_peers, - None => { - error!(LogSchema::new(LogEntry::ConsensusObserver) - .message("Failed to sort peers for subscription requests!")); - return; - }, - }; - - // Verify that we have potential peers - if sorted_peers.is_empty() { - warn!(LogSchema::new(LogEntry::ConsensusObserver) - .message("There are no peers to subscribe to!")); + // If there are no new subscriptions to create, return early + if num_subscriptions_to_create == 0 { return; } - // Go through the sorted peers and attempt to subscribe to a single peer. - // The first peer that responds successfully will be the selected peer. - for selected_peer in &sorted_peers { - info!( - LogSchema::new(LogEntry::ConsensusObserver).message(&format!( - "Attempting to subscribe to peer: {}!", - selected_peer - )) - ); - - // Send a subscription request to the peer and wait for the response. - // Note: it is fine to block here because we assume only a single active subscription. 
- let subscription_request = ConsensusObserverRequest::Subscribe; - let request_timeout_ms = self.consensus_observer_config.network_request_timeout_ms; - let response = self - .consensus_observer_client - .send_rpc_request_to_peer(selected_peer, subscription_request, request_timeout_ms) - .await; - - // Process the response and update the active subscription - match response { - Ok(ConsensusObserverResponse::SubscribeAck) => { - info!( - LogSchema::new(LogEntry::ConsensusObserver).message(&format!( - "Successfully subscribed to peer: {}!", - selected_peer - )) - ); - - // Update the active subscription - let subscription = ConsensusObserverSubscription::new( - self.consensus_observer_config, - self.db_reader.clone(), - *selected_peer, - self.time_service.clone(), - ); - self.active_observer_subscription = Some(subscription); - - return; // Return after successfully subscribing - }, - Ok(response) => { - // We received an invalid response - warn!( - LogSchema::new(LogEntry::ConsensusObserver).message(&format!( - "Got unexpected response type: {:?}", - response.get_label() - )) - ); - }, - Err(error) => { - // We encountered an error while sending the request - error!( - LogSchema::new(LogEntry::ConsensusObserver).message(&format!( - "Failed to send subscription request to peer: {}! Error: {:?}", - selected_peer, error - )) - ); - }, + // If there is an active subscription creation task, return early + if let Some(subscription_creation_task) = &*self.active_subscription_creation_task.lock() { + if !subscription_creation_task.is_finished() { + return; // The task is still running } } - // We failed to connect to any peers - warn!( - LogSchema::new(LogEntry::ConsensusObserver).message(&format!( - "Failed to subscribe to any peers! 
Num peers attempted: {:?}", - sorted_peers.len() - )) - ); - } + // Clone the shared state for the task + let active_observer_subscriptions = self.active_observer_subscriptions.clone(); + let consensus_observer_config = self.consensus_observer_config; + let consensus_observer_client = self.consensus_observer_client.clone(); + let consensus_publisher = self.consensus_publisher.clone(); + let db_reader = self.db_reader.clone(); + let time_service = self.time_service.clone(); + + // Spawn a new subscription creation task + let subscription_creation_task = tokio::spawn(async move { + // Identify the terminated subscription peers + let terminated_subscription_peers = terminated_subscriptions + .iter() + .map(|(peer, _)| *peer) + .collect(); + + // Create the new subscriptions + let new_subscriptions = subscription_utils::create_new_subscriptions( + consensus_observer_config, + consensus_observer_client, + consensus_publisher, + db_reader, + time_service, + connected_peers_and_metadata, + num_subscriptions_to_create, + active_subscription_peers, + terminated_subscription_peers, + ) + .await; + + // Identify the new subscription peers + let new_subscription_peers = new_subscriptions + .iter() + .map(|subscription| subscription.get_peer_network_id()) + .collect::>(); + + // Add the new subscriptions to the list of active subscriptions + for subscription in new_subscriptions { + active_observer_subscriptions + .lock() + .insert(subscription.get_peer_network_id(), subscription); + } - /// Gets the connected peers and metadata. If an error occurred, - /// it is logged and None is returned. 
- fn get_connected_peers_and_metadata(&self) -> Option> { - match self - .consensus_observer_client - .get_peers_and_metadata() - .get_connected_peers_and_metadata() - { - Ok(connected_peers_and_metadata) => Some(connected_peers_and_metadata), - Err(error) => { - error!( + // Log a warning if we failed to create as many subscriptions as requested + let num_subscriptions_created = new_subscription_peers.len(); + if num_subscriptions_created < num_subscriptions_to_create { + warn!( LogSchema::new(LogEntry::ConsensusObserver).message(&format!( - "Failed to get connected peers and metadata! Error: {:?}", - error + "Failed to create the requested number of subscriptions! Number of subscriptions \ + requested: {:?}, number of subscriptions created: {:?}.", + num_subscriptions_to_create, + num_subscriptions_created )) ); - None - }, - } + } + + // Update the subscription change metrics + update_subscription_change_metrics(new_subscription_peers, terminated_subscriptions); + }); + + // Update the active subscription creation task + *self.active_subscription_creation_task.lock() = Some(subscription_creation_task); } - /// Produces a list of sorted peers to service our subscription request. - /// Note: if `previous_subscription_peer` is provided, it will be excluded - /// from the selection process. Likewise, all peers currently subscribed to us - /// will be excluded from the selection process. 
- fn sort_peers_for_subscription( + /// Terminates any unhealthy subscriptions and returns the list of terminated subscriptions + fn terminate_unhealthy_subscriptions( &mut self, - previous_subscription_peer: Option, - ) -> Option> { - if let Some(mut peers_and_metadata) = self.get_connected_peers_and_metadata() { - // Remove the previous subscription peer (if provided) - if let Some(previous_subscription_peer) = previous_subscription_peer { - let _ = peers_and_metadata.remove(&previous_subscription_peer); - } - - // Remove any peers that are currently subscribed to us - if let Some(consensus_publisher) = &self.consensus_publisher { - for peer_network_id in consensus_publisher.get_active_subscribers() { - let _ = peers_and_metadata.remove(&peer_network_id); - } - } + connected_peers_and_metadata: &HashMap, + ) -> Vec<(PeerNetworkId, Error)> { + let mut terminated_subscriptions = vec![]; + for subscription_peer in self.get_active_subscription_peers() { + // Check the health of the subscription and terminate it if needed + if let Err(error) = + self.check_subscription_health(connected_peers_and_metadata, subscription_peer) + { + // Log the subscription termination error + warn!( + LogSchema::new(LogEntry::ConsensusObserver).message(&format!( + "Terminating subscription to peer: {:?}! 
Termination reason: {:?}", + subscription_peer, error + )) + ); - // Sort the peers by subscription optimality - let sorted_peers = - subscription::sort_peers_by_subscription_optimality(&peers_and_metadata); + // Unsubscribe from the peer and remove the subscription + self.unsubscribe_from_peer(subscription_peer); - // Return the sorted peers - Some(sorted_peers) - } else { - None // No connected peers were found + // Add the peer to the list of terminated subscriptions + terminated_subscriptions.push((subscription_peer, error)); + } } + + terminated_subscriptions } /// Unsubscribes from the given peer by sending an unsubscribe request - fn unsubscribe_from_peer(&self, peer_network_id: PeerNetworkId) { + fn unsubscribe_from_peer(&mut self, peer_network_id: PeerNetworkId) { + // Remove the peer from the active subscriptions + self.active_observer_subscriptions + .lock() + .remove(&peer_network_id); + // Send an unsubscribe request to the peer and process the response. // Note: we execute this asynchronously, as we don't need to wait for the response. let consensus_observer_client = self.consensus_observer_client.clone(); @@ -339,7 +327,7 @@ impl SubscriptionManager { }, Err(error) => { // We encountered an error while sending the request - error!( + warn!( LogSchema::new(LogEntry::ConsensusObserver).message(&format!( "Failed to send unsubscribe request to peer: {}! Error: {:?}", peer_network_id, error @@ -350,64 +338,68 @@ impl SubscriptionManager { }); } - /// Updates the subscription creation metrics for the given peer - fn update_subscription_creation_metrics(&self, peer_network_id: PeerNetworkId) { - // Set the number of active subscriptions - metrics::set_gauge( - &metrics::OBSERVER_NUM_ACTIVE_SUBSCRIPTIONS, - &peer_network_id.network_id(), - 1, - ); + /// Verifies that the message is from an active + /// subscription. If not, an error is returned. 
+ pub fn verify_message_for_subscription( + &mut self, + message_sender: PeerNetworkId, + ) -> Result<(), Error> { + // Check if the message is from an active subscription + if let Some(active_subscription) = self + .active_observer_subscriptions + .lock() + .get_mut(&message_sender) + { + // Update the last message receive time and return early + active_subscription.update_last_message_receive_time(); + return Ok(()); + } + + // Otherwise, the message is not from an active subscription. + // Send another unsubscribe request, and return an error. + self.unsubscribe_from_peer(message_sender); + Err(Error::InvalidMessageError(format!( + "Received message from unexpected peer, and not an active subscription: {}!", + message_sender + ))) + } +} - // Update the number of created subscriptions - metrics::increment_request_counter( +/// Updates the subscription creation and termination metrics +fn update_subscription_change_metrics( + new_subscription_peers: Vec, + terminated_subscription_peers: Vec<(PeerNetworkId, Error)>, +) { + // Update the created subscriptions metrics + for peer_network_id in new_subscription_peers { + metrics::increment_counter( &metrics::OBSERVER_CREATED_SUBSCRIPTIONS, metrics::CREATED_SUBSCRIPTION_LABEL, &peer_network_id, ); } - /// Updates the subscription termination metrics for the given peer - fn update_subscription_termination_metrics( - &self, - peer_network_id: PeerNetworkId, - error: Error, - ) { - // Reset the number of active subscriptions - metrics::set_gauge( - &metrics::OBSERVER_NUM_ACTIVE_SUBSCRIPTIONS, - &peer_network_id.network_id(), - 0, - ); - - // Update the number of terminated subscriptions - metrics::increment_request_counter( + // Update the terminated subscriptions metrics + for (peer_network_id, termination_reason) in terminated_subscription_peers { + metrics::increment_counter( &metrics::OBSERVER_TERMINATED_SUBSCRIPTIONS, - error.get_label(), + termination_reason.get_label(), &peer_network_id, ); } +} - /// Verifies that 
the message sender is the currently subscribed peer. - /// If the sender is not the subscribed peer, an error is returned. - pub fn verify_message_sender(&mut self, message_sender: PeerNetworkId) -> Result<(), Error> { - if let Some(active_subscription) = &mut self.active_observer_subscription { - active_subscription - .verify_message_sender(&message_sender) - .map_err(|error| { - // Send another unsubscription request to the peer (in case the previous was lost) - self.unsubscribe_from_peer(message_sender); - error - }) - } else { - // Send another unsubscription request to the peer (in case the previous was lost) - self.unsubscribe_from_peer(message_sender); - - Err(Error::UnexpectedError(format!( - "Received message from unexpected peer: {}! No active subscription found!", - message_sender - ))) - } +/// Updates the total subscription metrics (grouped by network ID) +fn update_total_subscription_metrics(active_subscription_peers: &[PeerNetworkId]) { + for (network_id, active_subscription_peers) in &active_subscription_peers + .iter() + .chunk_by(|peer_network_id| peer_network_id.network_id()) + { + metrics::set_gauge( + &metrics::OBSERVER_NUM_ACTIVE_SUBSCRIPTIONS, + &network_id, + active_subscription_peers.collect::>().len() as i64, + ); } } @@ -439,7 +431,96 @@ mod test { } #[tokio::test] - async fn test_check_active_subscription_connected() { + async fn test_check_and_manage_subscriptions() { + // Create a consensus observer client + let network_id = NetworkId::Public; + let (peers_and_metadata, consensus_observer_client) = + create_consensus_observer_client(&[network_id]); + + // Create a new subscription manager + let consensus_observer_config = ConsensusObserverConfig::default(); + let db_reader = create_mock_db_reader(); + let time_service = TimeService::mock(); + let mut subscription_manager = SubscriptionManager::new( + consensus_observer_client, + consensus_observer_config, + None, + db_reader.clone(), + time_service.clone(), + ); + + // Verify that no 
subscriptions are active + verify_active_subscription_peers(&subscription_manager, vec![]); + + // Check and manage the subscriptions + let result = subscription_manager.check_and_manage_subscriptions().await; + + // Verify that no subscriptions were terminated + assert!(result.is_ok()); + verify_active_subscription_peers(&subscription_manager, vec![]); + + // Add a new connected peer and subscription + let connected_peer_1 = + create_peer_and_connection(network_id, peers_and_metadata.clone(), 1, None, true); + create_observer_subscription( + &mut subscription_manager, + consensus_observer_config, + db_reader.clone(), + connected_peer_1, + time_service.clone(), + ); + + // Add another connected peer and subscription + let connected_peer_2 = + create_peer_and_connection(network_id, peers_and_metadata.clone(), 2, None, true); + create_observer_subscription( + &mut subscription_manager, + consensus_observer_config, + db_reader.clone(), + connected_peer_2, + TimeService::mock(), // Use a different time service (to avoid timeouts!) 
+ ); + + // Check and manage the subscriptions + subscription_manager + .check_and_manage_subscriptions() + .await + .unwrap(); + + // Verify that the subscriptions are still active + verify_active_subscription_peers(&subscription_manager, vec![ + connected_peer_1, + connected_peer_2, + ]); + + // Elapse time to simulate a timeout for peer 1 + let mock_time_service = time_service.into_mock(); + mock_time_service.advance(Duration::from_millis( + consensus_observer_config.max_subscription_timeout_ms + 1, + )); + + // Check and manage the subscriptions + subscription_manager + .check_and_manage_subscriptions() + .await + .unwrap(); + + // Verify that the first subscription was terminated + verify_active_subscription_peers(&subscription_manager, vec![connected_peer_2]); + + // Disconnect the second peer + remove_peer_and_connection(peers_and_metadata.clone(), connected_peer_2); + + // Check and manage the subscriptions + let result = subscription_manager.check_and_manage_subscriptions().await; + + // Verify that the second subscription was terminated and an error was returned + verify_active_subscription_peers(&subscription_manager, vec![]); + assert_matches!(result, Err(Error::SubscriptionsReset(_))); + } + + #[tokio::test] + async fn test_check_subscription_health_connected() { // Create a consensus observer client let network_id = NetworkId::Public; let (peers_and_metadata, consensus_observer_client) = @@ -457,20 +538,20 @@ mod test { ); // Create a new subscription - let observer_subscription = ConsensusObserverSubscription::new( + let peer_network_id = PeerNetworkId::random(); + create_observer_subscription( + &mut subscription_manager, consensus_observer_config, db_reader.clone(), - PeerNetworkId::random(), + peer_network_id, TimeService::mock(), ); - subscription_manager.active_observer_subscription = Some(observer_subscription); - // Check the active subscription and verify that it is removed (the peer is not connected) - assert_matches!( - 
subscription_manager.check_active_subscription(), - Err(Error::SubscriptionDisconnected(_)) - ); - assert!(subscription_manager.active_observer_subscription.is_none()); + // Check the active subscription and verify that it unhealthy (the peer is not connected) + check_subscription_connection(&mut subscription_manager, peer_network_id, false); + + // Terminate unhealthy subscriptions and verify the subscription was removed + verify_terminated_unhealthy_subscriptions(&mut subscription_manager, vec![peer_network_id]); // Add a new connected peer let connected_peer = @@ -485,14 +566,18 @@ mod test { TimeService::mock(), ); - // Check the active subscription and verify that it is still active (the peer is connected) - assert!(subscription_manager.check_active_subscription().is_ok()); - let active_subscription = subscription_manager.active_observer_subscription.unwrap(); - assert_eq!(active_subscription.get_peer_network_id(), connected_peer); + // Check the active subscription is still healthy + check_subscription_connection(&mut subscription_manager, connected_peer, true); + + // Terminate unhealthy subscriptions and verify none are removed + verify_terminated_unhealthy_subscriptions(&mut subscription_manager, vec![]); + + // Verify that the active subscription is still present + verify_active_subscription_peers(&subscription_manager, vec![connected_peer]); } #[tokio::test] - async fn test_check_active_subscription_progress_stopped() { + async fn test_check_subscription_health_progress_stopped() { // Create a consensus observer config let consensus_observer_config = ConsensusObserverConfig { max_subscription_timeout_ms: 100_000_000, // Use a large value so that we don't time out @@ -528,22 +613,30 @@ mod test { time_service.clone(), ); + // Check the active subscription and verify that it is healthy + check_subscription_progress(&mut subscription_manager, connected_peer, true); + + // Terminate unhealthy subscriptions and verify none are removed + 
verify_terminated_unhealthy_subscriptions(&mut subscription_manager, vec![]); + // Elapse time to simulate a DB progress error let mock_time_service = time_service.clone().into_mock(); mock_time_service.advance(Duration::from_millis( consensus_observer_config.max_synced_version_timeout_ms + 1, )); - // Check the active subscription and verify that it is removed (the DB is not syncing) - assert_matches!( - subscription_manager.check_active_subscription(), - Err(Error::SubscriptionProgressStopped(_)) - ); - assert!(subscription_manager.active_observer_subscription.is_none()); + // Check the active subscription and verify that it is unhealthy (the DB is not syncing) + check_subscription_progress(&mut subscription_manager, connected_peer, false); + + // Terminate unhealthy subscriptions and verify the subscription was removed + verify_terminated_unhealthy_subscriptions(&mut subscription_manager, vec![connected_peer]); + + // Verify the active subscription is no longer present + verify_active_subscription_peers(&subscription_manager, vec![]); } #[tokio::test] - async fn test_check_active_subscription_timeout() { + async fn test_check_subscription_health_timeout() { // Create a consensus observer client let network_id = NetworkId::Public; let (peers_and_metadata, consensus_observer_client) = @@ -574,25 +667,34 @@ mod test { time_service.clone(), ); + // Check the active subscription and verify that it is healthy + check_subscription_timeout(&mut subscription_manager, connected_peer, true); + + // Terminate unhealthy subscriptions and verify none are removed + verify_terminated_unhealthy_subscriptions(&mut subscription_manager, vec![]); + // Elapse time to simulate a timeout let mock_time_service = time_service.clone().into_mock(); mock_time_service.advance(Duration::from_millis( consensus_observer_config.max_subscription_timeout_ms + 1, )); - // Check the active subscription and verify that it is removed (the subscription timed out) - assert_matches!( - 
subscription_manager.check_active_subscription(), - Err(Error::SubscriptionTimeout(_)) - ); - assert!(subscription_manager.active_observer_subscription.is_none()); + // Check the active subscription and verify that it is unhealthy (the subscription timed out) + check_subscription_timeout(&mut subscription_manager, connected_peer, false); + + // Terminate unhealthy subscriptions and verify the subscription was removed + verify_terminated_unhealthy_subscriptions(&mut subscription_manager, vec![connected_peer]); + + // Verify the active subscription is no longer present + verify_active_subscription_peers(&subscription_manager, vec![]); } #[tokio::test] - async fn test_check_active_subscription_suboptimal() { + async fn test_check_subscription_health_suboptimal() { // Create a consensus observer config let consensus_observer_config = ConsensusObserverConfig { max_subscription_timeout_ms: 100_000_000, // Use a large value so that we don't time out + max_concurrent_subscriptions: 1, // Only allow one subscription max_synced_version_timeout_ms: 100_000_000, // Use a large value so that we don't get DB progress errors ..ConsensusObserverConfig::default() }; @@ -618,7 +720,7 @@ mod test { // Add a suboptimal validator peer let suboptimal_peer = - create_peer_and_connection(network_id, peers_and_metadata.clone(), 0, None, true); + create_peer_and_connection(network_id, peers_and_metadata.clone(), 1, None, true); // Create a new subscription to the suboptimal peer create_observer_subscription( @@ -629,106 +731,175 @@ mod test { time_service.clone(), ); + // Check the active subscription and verify that it is healthy + check_subscription_optimality(&mut subscription_manager, suboptimal_peer, true); + + // Terminate unhealthy subscriptions and verify none are removed + verify_terminated_unhealthy_subscriptions(&mut subscription_manager, vec![]); + // Elapse enough time to trigger the peer optimality check let mock_time_service = time_service.clone().into_mock(); 
mock_time_service.advance(Duration::from_millis( consensus_observer_config.subscription_peer_change_interval_ms + 1, )); - // Check the active subscription and verify that it is removed (the peer is suboptimal) - assert_matches!( - subscription_manager.check_active_subscription(), - Err(Error::SubscriptionSuboptimal(_)) - ); - assert!(subscription_manager.active_observer_subscription.is_none()); + // Check the active subscription and verify that it is unhealthy (the peer is suboptimal) + check_subscription_optimality(&mut subscription_manager, suboptimal_peer, false); + + // Elapse enough time to trigger the peer optimality check again + let mock_time_service = time_service.clone().into_mock(); + mock_time_service.advance(Duration::from_millis( + consensus_observer_config.subscription_refresh_interval_ms + 1, + )); + + // Terminate any unhealthy subscriptions and verify the subscription was removed + verify_terminated_unhealthy_subscriptions(&mut subscription_manager, vec![suboptimal_peer]); + + // Verify the active subscription is no longer present + verify_active_subscription_peers(&subscription_manager, vec![]); } #[tokio::test] - async fn test_sort_peers_for_subscription() { + #[allow(clippy::await_holding_lock)] // Required to wait on the subscription creation task + async fn test_spawn_subscription_creation_task() { // Create a consensus observer client - let network_ids = &[NetworkId::Validator, NetworkId::Vfn, NetworkId::Public]; - let (peers_and_metadata, consensus_observer_client) = - create_consensus_observer_client(network_ids); + let network_id = NetworkId::Public; + let (_, consensus_observer_client) = create_consensus_observer_client(&[network_id]); // Create a new subscription manager let consensus_observer_config = ConsensusObserverConfig::default(); let db_reader = create_mock_db_reader(); + let time_service = TimeService::mock(); let mut subscription_manager = SubscriptionManager::new( consensus_observer_client, consensus_observer_config, None, 
db_reader.clone(), - TimeService::mock(), + time_service.clone(), ); - // Sort the peers for a subscription and verify that no peers are returned - let sorted_peers = subscription_manager - .sort_peers_for_subscription(None) - .unwrap(); - assert!(sorted_peers.is_empty()); - - // Add a connected validator peer, VFN peer and public peer - for network_id in network_ids { - let distance_from_validators = match network_id { - NetworkId::Validator => 0, - NetworkId::Vfn => 1, - NetworkId::Public => 2, - }; - create_peer_and_connection( - *network_id, - peers_and_metadata.clone(), - distance_from_validators, - None, - true, - ); + // Verify that the active subscription creation task is empty + verify_subscription_creation_task(&subscription_manager, false); + + // Spawn a subscription creation task with 0 subscriptions to create + subscription_manager + .spawn_subscription_creation_task(0, vec![], vec![], hashmap![]) + .await; + + // Verify that the active subscription creation task is still empty (no task was spawned) + verify_subscription_creation_task(&subscription_manager, false); + + // Spawn a subscription creation task with 1 subscription to create + subscription_manager + .spawn_subscription_creation_task(1, vec![], vec![], hashmap![]) + .await; + + // Verify that the active subscription creation task is now populated + verify_subscription_creation_task(&subscription_manager, true); + + // Wait for the active subscription creation task to finish + if let Some(active_task) = subscription_manager + .active_subscription_creation_task + .lock() + .as_mut() + { + active_task.await.unwrap(); } - // Sort the peers for a subscription and verify the ordering (according to distance) - let sorted_peers = subscription_manager - .sort_peers_for_subscription(None) - .unwrap(); - assert_eq!(sorted_peers[0].network_id(), NetworkId::Validator); - assert_eq!(sorted_peers[1].network_id(), NetworkId::Vfn); - assert_eq!(sorted_peers[2].network_id(), NetworkId::Public); - 
assert_eq!(sorted_peers.len(), 3); - - // Sort the peers, but mark the validator as the last subscribed peer - let previous_subscription_peer = sorted_peers[0]; - let sorted_peer_subset = subscription_manager - .sort_peers_for_subscription(Some(previous_subscription_peer)) - .unwrap(); - assert_eq!(sorted_peer_subset[0].network_id(), NetworkId::Vfn); - assert_eq!(sorted_peer_subset[1].network_id(), NetworkId::Public); - assert_eq!(sorted_peer_subset.len(), 2); + // Verify that the active subscription creation task is still present + verify_subscription_creation_task(&subscription_manager, true); - // Remove all the peers and verify that no peers are returned - for peer_network_id in sorted_peers { - remove_peer_and_connection(peers_and_metadata.clone(), peer_network_id); + // Verify that the active subscription creation task is finished + if let Some(active_task) = subscription_manager + .active_subscription_creation_task + .lock() + .as_ref() + { + assert!(active_task.is_finished()); } - // Add multiple validator peers, with different latencies - let mut validator_peers = vec![]; - for ping_latency_secs in [0.9, 0.8, 0.5, 0.1, 0.05] { - let validator_peer = create_peer_and_connection( - NetworkId::Validator, - peers_and_metadata.clone(), - 0, - Some(ping_latency_secs), - true, + // Spawn a subscription creation task with 2 subscriptions to create + subscription_manager + .spawn_subscription_creation_task(2, vec![], vec![], hashmap![]) + .await; + + // Verify the new active subscription creation task is not finished + if let Some(active_task) = subscription_manager + .active_subscription_creation_task + .lock() + .as_ref() + { + assert!(!active_task.is_finished()); + }; + } + + #[tokio::test] + async fn test_terminate_unhealthy_subscriptions_multiple() { + // Create a consensus observer client + let network_id = NetworkId::Public; + let (peers_and_metadata, consensus_observer_client) = + create_consensus_observer_client(&[network_id]); + + // Create a new 
subscription manager + let consensus_observer_config = ConsensusObserverConfig::default(); + let db_reader = create_mock_db_reader(); + let time_service = TimeService::mock(); + let mut subscription_manager = SubscriptionManager::new( + consensus_observer_client, + consensus_observer_config, + None, + db_reader.clone(), + time_service.clone(), + ); + + // Create two new subscriptions + let subscription_peer_1 = + create_peer_and_connection(network_id, peers_and_metadata.clone(), 1, None, true); + let subscription_peer_2 = + create_peer_and_connection(network_id, peers_and_metadata.clone(), 1, None, true); + for peer in &[subscription_peer_1, subscription_peer_2] { + // Create the subscription + create_observer_subscription( + &mut subscription_manager, + consensus_observer_config, + db_reader.clone(), + *peer, + time_service.clone(), ); - validator_peers.push(validator_peer); } - // Sort the peers for a subscription and verify the ordering (according to latency) - let sorted_peers = subscription_manager - .sort_peers_for_subscription(None) - .unwrap(); - let expected_peers = validator_peers.into_iter().rev().collect::>(); - assert_eq!(sorted_peers, expected_peers); + // Terminate unhealthy subscriptions and verify that both subscriptions are still healthy + verify_terminated_unhealthy_subscriptions(&mut subscription_manager, vec![]); + + // Create another subscription + let subscription_peer_3 = + create_peer_and_connection(network_id, peers_and_metadata.clone(), 1, None, true); + create_observer_subscription( + &mut subscription_manager, + consensus_observer_config, + db_reader.clone(), + subscription_peer_3, + TimeService::mock(), // Use a different time service (to avoid timeouts) + ); + + // Elapse time to simulate a timeout (on the first two subscriptions) + let mock_time_service = time_service.into_mock(); + mock_time_service.advance(Duration::from_millis( + consensus_observer_config.max_subscription_timeout_ms + 1, + )); + + // Terminate unhealthy 
subscriptions and verify the first two subscriptions were terminated + verify_terminated_unhealthy_subscriptions(&mut subscription_manager, vec![ + subscription_peer_1, + subscription_peer_2, + ]); + + // Verify the third subscription is still active + verify_active_subscription_peers(&subscription_manager, vec![subscription_peer_3]); } #[tokio::test] - async fn test_verify_message_sender() { + async fn test_unsubscribe_from_peer() { // Create a consensus observer client let network_id = NetworkId::Public; let (_, consensus_observer_client) = create_consensus_observer_client(&[network_id]); @@ -744,10 +915,68 @@ mod test { TimeService::mock(), ); - // Check that message verification fails (we have no active subscription) - assert!(subscription_manager - .verify_message_sender(PeerNetworkId::random()) - .is_err()); + // Verify that no subscriptions are active + verify_active_subscription_peers(&subscription_manager, vec![]); + + // Create a new subscription + let subscription_peer_1 = PeerNetworkId::random(); + create_observer_subscription( + &mut subscription_manager, + consensus_observer_config, + db_reader.clone(), + subscription_peer_1, + TimeService::mock(), + ); + + // Verify the subscription is active + verify_active_subscription_peers(&subscription_manager, vec![subscription_peer_1]); + + // Create another subscription + let subscription_peer_2 = PeerNetworkId::random(); + create_observer_subscription( + &mut subscription_manager, + consensus_observer_config, + db_reader.clone(), + subscription_peer_2, + TimeService::mock(), + ); + + // Verify the second subscription is active + verify_active_subscription_peers(&subscription_manager, vec![ + subscription_peer_1, + subscription_peer_2, + ]); + + // Unsubscribe from the first peer + subscription_manager.unsubscribe_from_peer(subscription_peer_1); + + // Verify that the first subscription is no longer active + verify_active_subscription_peers(&subscription_manager, vec![subscription_peer_2]); + } + + 
#[tokio::test] + async fn test_verify_message_for_subscription() { + // Create a consensus observer client + let network_id = NetworkId::Public; + let (_, consensus_observer_client) = create_consensus_observer_client(&[network_id]); + + // Create a new subscription manager + let consensus_observer_config = ConsensusObserverConfig::default(); + let db_reader = Arc::new(MockDatabaseReader::new()); + let mut subscription_manager = SubscriptionManager::new( + consensus_observer_client, + consensus_observer_config, + None, + db_reader.clone(), + TimeService::mock(), + ); + + // Check that message verification fails (we have no active subscriptions) + check_message_verification_result( + &mut subscription_manager, + PeerNetworkId::random(), + false, + ); // Create a new subscription let subscription_peer = PeerNetworkId::random(); @@ -759,15 +988,125 @@ mod test { TimeService::mock(), ); - // Check that message verification fails if the peer doesn't match the subscription - assert!(subscription_manager - .verify_message_sender(PeerNetworkId::random()) - .is_err()); + // Check that message verification passes for the subscription + check_message_verification_result(&mut subscription_manager, subscription_peer, true); + + // Create another subscription + let second_subscription_peer = PeerNetworkId::random(); + create_observer_subscription( + &mut subscription_manager, + consensus_observer_config, + db_reader.clone(), + second_subscription_peer, + TimeService::mock(), + ); + + // Check that message verification passes for the second subscription + check_message_verification_result( + &mut subscription_manager, + second_subscription_peer, + true, + ); + + // Check that message verification fails if the peer doesn't match either subscription + check_message_verification_result( + &mut subscription_manager, + PeerNetworkId::random(), + false, + ); + } + + /// Checks the result of verifying a message from a given peer + fn check_message_verification_result( + 
subscription_manager: &mut SubscriptionManager, + peer_network_id: PeerNetworkId, + pass_verification: bool, + ) { + // Verify the message for the given peer + let result = subscription_manager.verify_message_for_subscription(peer_network_id); + + // Ensure the result matches the expected value + if pass_verification { + assert!(result.is_ok()); + } else { + assert_matches!(result, Err(Error::InvalidMessageError(_))); + } + } + + /// Checks the health of a subscription and verifies the connection status + fn check_subscription_connection( + subscription_manager: &mut SubscriptionManager, + subscription_peer: PeerNetworkId, + expect_connected: bool, + ) { + // Check the health of the subscription + let connected_peers_and_metadata = subscription_manager.get_connected_peers_and_metadata(); + let result = subscription_manager + .check_subscription_health(&connected_peers_and_metadata, subscription_peer); + + // Check the result based on the expected connection status + if expect_connected { + assert!(result.is_ok()); + } else { + assert_matches!(result, Err(Error::SubscriptionDisconnected(_))); + } + } + + /// Checks the health of a subscription and verifies the optimality status + fn check_subscription_optimality( + subscription_manager: &mut SubscriptionManager, + subscription_peer: PeerNetworkId, + expect_optimal: bool, + ) { + // Check the health of the subscription + let connected_peers_and_metadata = subscription_manager.get_connected_peers_and_metadata(); + let result = subscription_manager + .check_subscription_health(&connected_peers_and_metadata, subscription_peer); + + // Check the result based on the expected optimality status + if expect_optimal { + assert!(result.is_ok()); + } else { + assert_matches!(result, Err(Error::SubscriptionSuboptimal(_))); + } + } + + /// Checks the health of a subscription and verifies the progress status + fn check_subscription_progress( + subscription_manager: &mut SubscriptionManager, + subscription_peer: PeerNetworkId, + 
expect_progress: bool, + ) { + // Check the health of the subscription + let connected_peers_and_metadata = subscription_manager.get_connected_peers_and_metadata(); + let result = subscription_manager + .check_subscription_health(&connected_peers_and_metadata, subscription_peer); + + // Check the result based on the expected progress status + if expect_progress { + assert!(result.is_ok()); + } else { + assert_matches!(result, Err(Error::SubscriptionProgressStopped(_))); + } + } - // Check that message verification passes if the peer matches the subscription - assert!(subscription_manager - .verify_message_sender(subscription_peer) - .is_ok()); + /// Checks the health of a subscription and verifies the timeout status + fn check_subscription_timeout( + subscription_manager: &mut SubscriptionManager, + subscription_peer: PeerNetworkId, + expect_timeout: bool, + ) { + // Check the health of the subscription + let connected_peers_and_metadata = subscription_manager.get_connected_peers_and_metadata(); + let result = subscription_manager + .check_subscription_health(&connected_peers_and_metadata, subscription_peer); + + // Check the result based on the expected timeout status + if expect_timeout { + assert!(result.is_ok()); + } else { + assert_matches!(result, Err(Error::SubscriptionTimeout(_))); + } } /// Creates a new consensus observer client and a peers and metadata container @@ -808,7 +1147,10 @@ mod test { subscription_peer, time_service, ); - subscription_manager.active_observer_subscription = Some(observer_subscription); + subscription_manager + .active_observer_subscriptions + .lock() + .insert(subscription_peer, observer_subscription); } /// Creates a new peer with the specified connection metadata @@ -879,4 +1221,53 @@ mod test { .remove_peer_metadata(peer_network_id, connection_id) .unwrap(); } + + /// Verifies the active subscription peers + fn verify_active_subscription_peers( + subscription_manager: &SubscriptionManager, + expected_active_peers: Vec, + ) { 
+ // Get the active subscription peers + let active_peers = subscription_manager.get_active_subscription_peers(); + + // Verify the active subscription peers + for peer in &expected_active_peers { + assert!(active_peers.contains(peer)); + } + assert_eq!(active_peers.len(), expected_active_peers.len()); + } + + /// Verifies the status of the active subscription creation task + fn verify_subscription_creation_task( + subscription_manager: &SubscriptionManager, + expect_active_task: bool, + ) { + let current_active_task = subscription_manager + .active_subscription_creation_task + .lock() + .is_some(); + assert_eq!(current_active_task, expect_active_task); + } + + /// Verifies the list of terminated unhealthy subscriptions + fn verify_terminated_unhealthy_subscriptions( + subscription_manager: &mut SubscriptionManager, + expected_terminated_peers: Vec, + ) { + // Get the connected peers and metadata + let connected_peers_and_metadata = subscription_manager.get_connected_peers_and_metadata(); + + // Terminate any unhealthy subscriptions + let terminated_subscriptions = + subscription_manager.terminate_unhealthy_subscriptions(&connected_peers_and_metadata); + + // Verify the terminated subscriptions + for (terminated_subscription_peer, _) in &terminated_subscriptions { + assert!(expected_terminated_peers.contains(terminated_subscription_peer)); + } + assert_eq!( + terminated_subscriptions.len(), + expected_terminated_peers.len() + ); + } } diff --git a/consensus/src/consensus_observer/observer/subscription_utils.rs b/consensus/src/consensus_observer/observer/subscription_utils.rs new file mode 100644 index 00000000000..0bca7c61b00 --- /dev/null +++ b/consensus/src/consensus_observer/observer/subscription_utils.rs @@ -0,0 +1,1186 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::consensus_observer::{ + common::logging::{LogEntry, LogSchema}, + network::{ + observer_client::ConsensusObserverClient, + observer_message::{ + 
ConsensusObserverMessage, ConsensusObserverRequest, ConsensusObserverResponse, + }, + }, + observer::subscription::ConsensusObserverSubscription, + publisher::consensus_publisher::ConsensusPublisher, +}; +use aptos_config::{config::ConsensusObserverConfig, network_id::PeerNetworkId}; +use aptos_logger::{error, info, warn}; +use aptos_network::{ + application::{interface::NetworkClient, metadata::PeerMetadata}, + ProtocolId, +}; +use aptos_storage_interface::DbReader; +use aptos_time_service::TimeService; +use ordered_float::OrderedFloat; +use std::{ + collections::{BTreeMap, HashMap}, + sync::Arc, +}; + +// A useful constant for representing the maximum ping latency +const MAX_PING_LATENCY_SECS: f64 = 10_000.0; + +/// Attempts to create the given number of new subscriptions +/// from the connected peers and metadata. Any active or unhealthy +/// subscriptions are excluded from the selection process. +pub async fn create_new_subscriptions( + consensus_observer_config: ConsensusObserverConfig, + consensus_observer_client: Arc< + ConsensusObserverClient>, + >, + consensus_publisher: Option>, + db_reader: Arc, + time_service: TimeService, + connected_peers_and_metadata: HashMap, + num_subscriptions_to_create: usize, + active_subscription_peers: Vec, + unhealthy_subscription_peers: Vec, +) -> Vec { + // Sort the potential peers for subscription requests + let mut sorted_potential_peers = match sort_peers_for_subscriptions( + connected_peers_and_metadata, + unhealthy_subscription_peers, + active_subscription_peers, + consensus_publisher, + ) { + Some(sorted_peers) => sorted_peers, + None => { + error!(LogSchema::new(LogEntry::ConsensusObserver) + .message("Failed to sort peers for subscription requests!")); + return vec![]; + }, + }; + + // Verify that we have potential peers to subscribe to + if sorted_potential_peers.is_empty() { + warn!(LogSchema::new(LogEntry::ConsensusObserver) + .message("There are no potential peers to subscribe to!")); + return vec![]; + } + + // 
Go through the potential peers and attempt to create new subscriptions + let mut created_subscriptions = vec![]; + for _ in 0..num_subscriptions_to_create { + // If there are no peers left to subscribe to, return early + if sorted_potential_peers.is_empty() { + info!( + LogSchema::new(LogEntry::ConsensusObserver).message(&format!( + "There are no more potential peers to subscribe to! \ + Num created subscriptions: {:?}", + created_subscriptions.len() + )) + ); + break; + } + + // Attempt to create a new subscription + let (observer_subscription, failed_subscription_peers) = create_single_subscription( + consensus_observer_config, + consensus_observer_client.clone(), + db_reader.clone(), + sorted_potential_peers.clone(), + time_service.clone(), + ) + .await; + + // Remove the failed peers from the sorted list + sorted_potential_peers.retain(|peer| !failed_subscription_peers.contains(peer)); + + // Process a successful subscription creation + if let Some(observer_subscription) = observer_subscription { + // Remove the peer from the sorted list (for the next selection) + sorted_potential_peers + .retain(|peer| *peer != observer_subscription.get_peer_network_id()); + + // Add the newly created subscription to the subscription list + created_subscriptions.push(observer_subscription); + } + } + + // Return the list of created subscriptions + created_subscriptions +} + +/// Attempts to create a new subscription to a single peer from the +/// sorted list of potential peers. If successful, the new subscription +/// is returned, alongside any peers with failed attempts. 
+async fn create_single_subscription( + consensus_observer_config: ConsensusObserverConfig, + consensus_observer_client: Arc< + ConsensusObserverClient>, + >, + db_reader: Arc, + sorted_potential_peers: Vec, + time_service: TimeService, +) -> (Option, Vec) { + let mut peers_with_failed_attempts = vec![]; + for potential_peer in sorted_potential_peers { + // Log the subscription attempt + info!( + LogSchema::new(LogEntry::ConsensusObserver).message(&format!( + "Attempting to subscribe to potential peer: {}!", + potential_peer + )) + ); + + // Send a subscription request to the peer and wait for the response + let subscription_request = ConsensusObserverRequest::Subscribe; + let request_timeout_ms = consensus_observer_config.network_request_timeout_ms; + let response = consensus_observer_client + .send_rpc_request_to_peer(&potential_peer, subscription_request, request_timeout_ms) + .await; + + // Process the response and update the active subscription + match response { + Ok(ConsensusObserverResponse::SubscribeAck) => { + // Log the successful subscription + info!( + LogSchema::new(LogEntry::ConsensusObserver).message(&format!( + "Successfully subscribed to peer: {}!", + potential_peer + )) + ); + + // Create the new subscription + let subscription = ConsensusObserverSubscription::new( + consensus_observer_config, + db_reader.clone(), + potential_peer, + time_service.clone(), + ); + + // Return the successful subscription + return (Some(subscription), peers_with_failed_attempts); + }, + Ok(response) => { + // We received an invalid response + warn!( + LogSchema::new(LogEntry::ConsensusObserver).message(&format!( + "Got unexpected response type for subscription request: {:?}", + response.get_label() + )) + ); + + // Add the peer to the list of failed attempts + peers_with_failed_attempts.push(potential_peer); + }, + Err(error) => { + // We encountered an error while sending the request + warn!( + LogSchema::new(LogEntry::ConsensusObserver).message(&format!( + "Failed 
to send subscription request to peer: {}! Error: {:?}", + potential_peer, error + )) + ); + + // Add the peer to the list of failed attempts + peers_with_failed_attempts.push(potential_peer); + }, + } + } + + // We failed to create a new subscription + (None, peers_with_failed_attempts) +} + +/// Gets the distance from the validators for the specified peer from the peer metadata +fn get_distance_for_peer( + peer_network_id: &PeerNetworkId, + peer_metadata: &PeerMetadata, +) -> Option { + // Get the distance for the peer + let peer_monitoring_metadata = peer_metadata.get_peer_monitoring_metadata(); + let distance = peer_monitoring_metadata + .latest_network_info_response + .as_ref() + .map(|response| response.distance_from_validators); + + // If the distance is missing, log a warning + if distance.is_none() { + warn!( + LogSchema::new(LogEntry::ConsensusObserver).message(&format!( + "Unable to get distance for peer! Peer: {:?}", + peer_network_id + )) + ); + } + + distance +} + +/// Gets the latency for the specified peer from the peer metadata +fn get_latency_for_peer( + peer_network_id: &PeerNetworkId, + peer_metadata: &PeerMetadata, +) -> Option { + // Get the latency for the peer + let peer_monitoring_metadata = peer_metadata.get_peer_monitoring_metadata(); + let latency = peer_monitoring_metadata.average_ping_latency_secs; + + // If the latency is missing, log a warning + if latency.is_none() { + warn!( + LogSchema::new(LogEntry::ConsensusObserver).message(&format!( + "Unable to get latency for peer! Peer: {:?}", + peer_network_id + )) + ); + } + + latency +} + +/// Produces a list of sorted peers to service the subscription requests. +/// Any active or unhealthy subscriptions are excluded from the selection process. +/// Likewise, any peers currently subscribed to us are also excluded. 
+fn sort_peers_for_subscriptions( + mut connected_peers_and_metadata: HashMap, + active_subscription_peers: Vec, + unhealthy_subscription_peers: Vec, + consensus_publisher: Option>, +) -> Option> { + // Remove any peers we're already subscribed to + for active_subscription_peer in active_subscription_peers { + let _ = connected_peers_and_metadata.remove(&active_subscription_peer); + } + + // Remove any unhealthy subscription peers + for unhealthy_peer in unhealthy_subscription_peers { + let _ = connected_peers_and_metadata.remove(&unhealthy_peer); + } + + // Remove any peers that are currently subscribed to us + if let Some(consensus_publisher) = consensus_publisher { + for peer_network_id in consensus_publisher.get_active_subscribers() { + let _ = connected_peers_and_metadata.remove(&peer_network_id); + } + } + + // Sort the peers by subscription optimality + let sorted_peers = sort_peers_by_subscription_optimality(&connected_peers_and_metadata); + + // Return the sorted peers + Some(sorted_peers) +} + +/// Sorts the peers by subscription optimality (in descending order of +/// optimality). This requires: (i) sorting the peers by distance from the +/// validator set and ping latency (lower values are more optimal); and (ii) +/// filtering out peers that don't support consensus observer. +/// +/// Note: we prioritize distance over latency as we want to avoid close +/// but not up-to-date peers. If peers don't have sufficient metadata +/// for sorting, they are given a lower priority. 
+pub fn sort_peers_by_subscription_optimality( + peers_and_metadata: &HashMap, +) -> Vec { + // Group peers and latencies by validator distance, i.e., distance -> [(peer, latency)] + let mut unsupported_peers = Vec::new(); + let mut peers_and_latencies_by_distance = BTreeMap::new(); + for (peer_network_id, peer_metadata) in peers_and_metadata { + // Verify that the peer supports consensus observer + if !supports_consensus_observer(peer_metadata) { + unsupported_peers.push(*peer_network_id); + continue; // Skip the peer + } + + // Get the distance and latency for the peer + let distance = get_distance_for_peer(peer_network_id, peer_metadata); + let latency = get_latency_for_peer(peer_network_id, peer_metadata); + + // If the distance is not found, use the maximum distance + let distance = + distance.unwrap_or(aptos_peer_monitoring_service_types::MAX_DISTANCE_FROM_VALIDATORS); + + // If the latency is not found, use a large latency + let latency = latency.unwrap_or(MAX_PING_LATENCY_SECS); + + // Add the peer and latency to the distance group + peers_and_latencies_by_distance + .entry(distance) + .or_insert_with(Vec::new) + .push((*peer_network_id, OrderedFloat(latency))); + } + + // If there are peers that don't support consensus observer, log them + if !unsupported_peers.is_empty() { + info!( + LogSchema::new(LogEntry::ConsensusObserver).message(&format!( + "Found {} peers that don't support consensus observer! Peers: {:?}", + unsupported_peers.len(), + unsupported_peers + )) + ); + } + + // Sort the peers by distance and latency. Note: BTreeMaps are + // sorted by key, so the entries will be sorted by distance in ascending order. 
+ let mut sorted_peers = Vec::new(); + for (_, mut peers_and_latencies) in peers_and_latencies_by_distance { + // Sort the peers by latency + peers_and_latencies.sort_by_key(|(_, latency)| *latency); + + // Add the peers to the sorted list (in sorted order) + sorted_peers.extend( + peers_and_latencies + .into_iter() + .map(|(peer_network_id, _)| peer_network_id), + ); + } + + // Log the sorted peers + info!( + LogSchema::new(LogEntry::ConsensusObserver).message(&format!( + "Sorted {} peers by subscription optimality! Peers: {:?}", + sorted_peers.len(), + sorted_peers + )) + ); + + sorted_peers +} + +/// Returns true iff the peer metadata indicates support for consensus observer +fn supports_consensus_observer(peer_metadata: &PeerMetadata) -> bool { + peer_metadata.supports_protocol(ProtocolId::ConsensusObserver) + && peer_metadata.supports_protocol(ProtocolId::ConsensusObserverRpc) +} + +#[cfg(test)] +mod tests { + use super::*; + use aptos_channels::{aptos_channel, message_queues::QueueStyle}; + use aptos_config::{config::PeerRole, network_id::NetworkId}; + use aptos_netcore::transport::ConnectionOrigin; + use aptos_network::{ + application::storage::PeersAndMetadata, + peer_manager::{ConnectionRequestSender, PeerManagerRequest, PeerManagerRequestSender}, + protocols::{ + network::{NetworkSender, NewNetworkSender}, + wire::handshake::v1::{MessagingProtocolVersion, ProtocolIdSet}, + }, + transport::{ConnectionId, ConnectionMetadata}, + }; + use aptos_peer_monitoring_service_types::{ + response::NetworkInformationResponse, PeerMonitoringMetadata, + }; + use aptos_storage_interface::Result; + use aptos_types::{network_address::NetworkAddress, transaction::Version, PeerId}; + use bytes::Bytes; + use futures::StreamExt; + use mockall::mock; + use std::collections::HashSet; + + // This is a simple mock of the DbReader (it generates a MockDatabaseReader) + mock! 
{ + pub DatabaseReader {} + impl DbReader for DatabaseReader { + fn get_latest_ledger_info_version(&self) -> Result; + } + } + + #[tokio::test(flavor = "multi_thread")] + async fn test_create_new_subscriptions() { + // Create a consensus observer config and client + let consensus_observer_config = ConsensusObserverConfig::default(); + let network_ids = &[NetworkId::Validator, NetworkId::Vfn, NetworkId::Public]; + let (peers_and_metadata, consensus_observer_client, mut peer_manager_request_receivers) = + create_consensus_observer_client(network_ids); + + // Create a list of connected peers (one per network) + let mut connected_peers = vec![]; + for network_id in &[NetworkId::Validator, NetworkId::Vfn, NetworkId::Public] { + // Create a new peer + let peer_network_id = create_peer_and_connection( + *network_id, + peers_and_metadata.clone(), + get_distance_from_validators(network_id), + None, + true, + ); + + // Add the peer to the list of sorted peers + connected_peers.push(peer_network_id); + } + + // Get the connected peers and metadata + let connected_peers_and_metadata = peers_and_metadata + .get_connected_peers_and_metadata() + .unwrap(); + + // Spawn the subscription creation task to create 2 subscriptions + let num_subscriptions_to_create = 2; + let subscription_creation_handle = tokio::spawn(async move { + create_new_subscriptions( + consensus_observer_config, + consensus_observer_client.clone(), + None, + Arc::new(MockDatabaseReader::new()), + TimeService::mock(), + connected_peers_and_metadata, + num_subscriptions_to_create, + vec![], + vec![], + ) + .await + }); + + // Handle the peer manager requests made by the subscription creation task. + // The VFN peer should fail the subscription request. 
+ for connected_peer in &connected_peers { + let network_id = connected_peer.network_id(); + handle_next_subscription_request( + network_id, + &mut peer_manager_request_receivers, + network_id != NetworkId::Vfn, // The VFN peer should fail the subscription request + ) + .await; + } + + // Wait for the subscription creation task to complete + let consensus_observer_subscriptions = subscription_creation_handle.await.unwrap(); + + // Verify the number of created subscriptions + assert_eq!( + consensus_observer_subscriptions.len(), + num_subscriptions_to_create + ); + + // Verify the created subscription peers + let first_peer = *connected_peers.first().unwrap(); + let last_peer = *connected_peers.last().unwrap(); + let expected_subscription_peers = [first_peer, last_peer]; + for consensus_observer_subscription in consensus_observer_subscriptions { + let peer_network_id = consensus_observer_subscription.get_peer_network_id(); + assert!(expected_subscription_peers.contains(&peer_network_id)); + } + } + + #[tokio::test(flavor = "multi_thread")] + async fn test_create_new_subscriptions_multiple() { + // Create a consensus observer config and client + let consensus_observer_config = ConsensusObserverConfig::default(); + let network_ids = &[NetworkId::Validator, NetworkId::Vfn, NetworkId::Public]; + let (peers_and_metadata, consensus_observer_client, mut peer_manager_request_receivers) = + create_consensus_observer_client(network_ids); + + // Create a list of connected peers (one per network) + let mut connected_peers = vec![]; + for network_id in &[NetworkId::Validator, NetworkId::Vfn, NetworkId::Public] { + // Create a new peer + let peer_network_id = create_peer_and_connection( + *network_id, + peers_and_metadata.clone(), + get_distance_from_validators(network_id), + None, + true, + ); + + // Add the peer to the list of sorted peers + connected_peers.push(peer_network_id); + } + + // Create multiple sets of subscriptions and verify the results + for 
num_subscriptions_to_create in [0, 1, 2, 3, 10] { + // Determine the expected subscription peers + let expected_subscription_peers = connected_peers + .iter() + .take(num_subscriptions_to_create) + .cloned() + .collect(); + + // Create the subscriptions and verify the result + create_and_verify_subscriptions( + consensus_observer_config, + peers_and_metadata.clone(), + consensus_observer_client.clone(), + &mut peer_manager_request_receivers, + num_subscriptions_to_create, + expected_subscription_peers, + ) + .await; + } + } + + #[tokio::test(flavor = "multi_thread")] + async fn test_create_single_subscription() { + // Create a consensus observer config and client + let consensus_observer_config = ConsensusObserverConfig::default(); + let network_ids = &[NetworkId::Validator, NetworkId::Vfn, NetworkId::Public]; + let (peers_and_metadata, consensus_observer_client, mut peer_manager_request_receivers) = + create_consensus_observer_client(network_ids); + + // Create a list of connected peers (one per network) + let mut connected_peers = vec![]; + for network_id in &[NetworkId::Validator, NetworkId::Vfn, NetworkId::Public] { + // Create a new peer + let peer_network_id = + create_peer_and_connection(*network_id, peers_and_metadata.clone(), 0, None, true); + + // Add the peer to the list of sorted peers + connected_peers.push(peer_network_id); + } + + // Spawn the subscription creation task + let sorted_potential_peers = connected_peers.clone(); + let subscription_creation_handle = tokio::spawn(async move { + create_single_subscription( + consensus_observer_config, + consensus_observer_client.clone(), + Arc::new(MockDatabaseReader::new()), + sorted_potential_peers, + TimeService::mock(), + ) + .await + }); + + // Handle the peer manager requests made by the subscription creation task. + // We should only respond successfully to the peer on the public network. 
+ handle_next_subscription_request( + NetworkId::Validator, + &mut peer_manager_request_receivers, + false, + ) + .await; + handle_next_subscription_request( + NetworkId::Vfn, + &mut peer_manager_request_receivers, + false, + ) + .await; + handle_next_subscription_request( + NetworkId::Public, + &mut peer_manager_request_receivers, + true, + ) + .await; + + // Wait for the subscription creation task to complete + let (observer_subscription, failed_subscription_peers) = + subscription_creation_handle.await.unwrap(); + + // Verify that the public peer was successfully subscribed to + assert_eq!( + &observer_subscription.unwrap().get_peer_network_id(), + connected_peers.last().unwrap() + ); + + // Verify that the other peers failed our subscription attempts + let expected_failed_peers = connected_peers.iter().take(2).cloned().collect::>(); + assert_eq!(failed_subscription_peers, expected_failed_peers); + } + + #[test] + fn test_sort_peers_by_distance_and_latency() { + // Sort an empty list of peers + let peers_and_metadata = HashMap::new(); + assert!(sort_peers_by_subscription_optimality(&peers_and_metadata).is_empty()); + + // Create a list of peers with empty metadata + let peers_and_metadata = create_peers_and_metadata(true, true, true, 10); + + // Sort the peers and verify the results + let sorted_peers = sort_peers_by_subscription_optimality(&peers_and_metadata); + assert_eq!(sorted_peers.len(), 10); + + // Create a list of peers with valid metadata + let peers_and_metadata = create_peers_and_metadata(false, false, true, 10); + + // Sort the peers + let sorted_peers = sort_peers_by_subscription_optimality(&peers_and_metadata); + + // Verify the order of the peers + verify_increasing_distance_latencies(&peers_and_metadata, &sorted_peers); + assert_eq!(sorted_peers.len(), 10); + + // Create a list of peers with and without metadata + let mut peers_and_metadata = create_peers_and_metadata(false, false, true, 10); + 
peers_and_metadata.extend(create_peers_and_metadata(true, false, true, 10)); + peers_and_metadata.extend(create_peers_and_metadata(false, true, true, 10)); + peers_and_metadata.extend(create_peers_and_metadata(true, true, true, 10)); + + // Sort the peers + let sorted_peers = sort_peers_by_subscription_optimality(&peers_and_metadata); + assert_eq!(sorted_peers.len(), 40); + + // Verify the order of the first 20 peers + let (first_20_peers, sorted_peers) = sorted_peers.split_at(20); + verify_increasing_distance_latencies(&peers_and_metadata, first_20_peers); + + // Verify that the next 10 peers only have latency metadata + let (next_10_peers, sorted_peers) = sorted_peers.split_at(10); + for sorted_peer in next_10_peers { + let peer_metadata = peers_and_metadata.get(sorted_peer).unwrap(); + assert!(get_distance_for_peer(sorted_peer, peer_metadata).is_none()); + assert!(get_latency_for_peer(sorted_peer, peer_metadata).is_some()); + } + + // Verify that the last 10 peers have no metadata + let (last_10_peers, remaining_peers) = sorted_peers.split_at(10); + for sorted_peer in last_10_peers { + let peer_metadata = peers_and_metadata.get(sorted_peer).unwrap(); + assert!(get_distance_for_peer(sorted_peer, peer_metadata).is_none()); + assert!(get_latency_for_peer(sorted_peer, peer_metadata).is_none()); + } + assert!(remaining_peers.is_empty()); + } + + #[test] + fn test_sort_peers_by_distance_and_latency_filter() { + // Sort an empty list of peers + let peers_and_metadata = HashMap::new(); + assert!(sort_peers_by_subscription_optimality(&peers_and_metadata).is_empty()); + + // Create a list of peers with empty metadata (with consensus observer support) + let peers_and_metadata = create_peers_and_metadata(true, true, true, 10); + + // Sort the peers and verify the results + let sorted_peers = sort_peers_by_subscription_optimality(&peers_and_metadata); + assert_eq!(sorted_peers.len(), 10); + + // Create a list of peers with empty metadata (without consensus observer support) 
+ let peers_and_metadata = create_peers_and_metadata(true, true, false, 10); + + // Sort the peers and verify the results + let sorted_peers = sort_peers_by_subscription_optimality(&peers_and_metadata); + assert!(sorted_peers.is_empty()); + + // Create a list of peers with valid metadata (without consensus observer support) + let peers_and_metadata = create_peers_and_metadata(false, false, false, 10); + + // Sort the peers and verify the results + let sorted_peers = sort_peers_by_subscription_optimality(&peers_and_metadata); + assert!(sorted_peers.is_empty()); + + // Create a list of peers with empty metadata (with and without consensus observer support) + let mut peers_and_metadata = create_peers_and_metadata(true, true, true, 5); + peers_and_metadata.extend(create_peers_and_metadata(true, true, false, 50)); + + // Sort the peers and verify the results (only the supported peers are sorted) + let sorted_peers = sort_peers_by_subscription_optimality(&peers_and_metadata); + assert_eq!(sorted_peers.len(), 5); + + // Create a list of peers with valid metadata (with and without consensus observer support) + let mut peers_and_metadata = create_peers_and_metadata(false, false, true, 50); + peers_and_metadata.extend(create_peers_and_metadata(false, false, false, 10)); + + // Sort the peers and verify the results (only the supported peers are sorted) + let sorted_peers = sort_peers_by_subscription_optimality(&peers_and_metadata); + assert_eq!(sorted_peers.len(), 50); + + // Create a list of peers with valid metadata (with and without consensus observer support) + let supported_peer_and_metadata = create_peers_and_metadata(false, false, true, 1); + let unsupported_peer_and_metadata = create_peers_and_metadata(false, false, false, 1); + let mut peers_and_metadata = HashMap::new(); + peers_and_metadata.extend(supported_peer_and_metadata.clone()); + peers_and_metadata.extend(unsupported_peer_and_metadata); + + // Sort the peers and verify the results (only the supported peer is 
sorted) + let supported_peer = supported_peer_and_metadata.keys().next().unwrap(); + let sorted_peers = sort_peers_by_subscription_optimality(&peers_and_metadata); + assert_eq!(sorted_peers, vec![*supported_peer]); + } + + #[tokio::test] + async fn test_sort_peers_for_subscriptions() { + // Create a consensus observer client + let network_ids = &[NetworkId::Validator, NetworkId::Vfn, NetworkId::Public]; + let (peers_and_metadata, consensus_observer_client, _) = + create_consensus_observer_client(network_ids); + + // Create a consensus publisher + let consensus_observer_config = ConsensusObserverConfig::default(); + let (consensus_publisher, _) = + ConsensusPublisher::new(consensus_observer_config, consensus_observer_client.clone()); + let consensus_publisher = Arc::new(consensus_publisher); + + // Sort the peers and verify that no peers are returned + let sorted_peers = sort_subscription_peers( + consensus_publisher.clone(), + peers_and_metadata.clone(), + vec![], + vec![], + ); + assert!(sorted_peers.is_empty()); + + // Add a connected validator peer, VFN peer and public peer + for network_id in network_ids { + create_peer_and_connection( + *network_id, + peers_and_metadata.clone(), + get_distance_from_validators(network_id), + None, + true, + ); + } + + // Sort the peers and verify the ordering (according to distance) + let sorted_peers = sort_subscription_peers( + consensus_publisher.clone(), + peers_and_metadata.clone(), + vec![], + vec![], + ); + assert_eq!(sorted_peers[0].network_id(), NetworkId::Validator); + assert_eq!(sorted_peers[1].network_id(), NetworkId::Vfn); + assert_eq!(sorted_peers[2].network_id(), NetworkId::Public); + assert_eq!(sorted_peers.len(), 3); + + // Sort the peers, but mark the validator as unhealthy (so it's ignored) + let sorted_peer_subset = sort_subscription_peers( + consensus_publisher.clone(), + peers_and_metadata.clone(), + vec![], + vec![sorted_peers[0]], + ); + assert_eq!(sorted_peer_subset[0].network_id(), NetworkId::Vfn); + 
assert_eq!(sorted_peer_subset[1].network_id(), NetworkId::Public); + assert_eq!(sorted_peer_subset.len(), 2); + + // Sort the peers, but mark the VFN and validator as active subscriptions (so they're ignored) + let sorted_peer_subset = sort_subscription_peers( + consensus_publisher.clone(), + peers_and_metadata.clone(), + vec![sorted_peers[0], sorted_peers[1]], + vec![], + ); + assert_eq!(sorted_peer_subset[0].network_id(), NetworkId::Public); + assert_eq!(sorted_peer_subset.len(), 1); + + // Create a consensus publisher with the PFN as an active subscriber + let consensus_publisher_with_subscribers = + Arc::new(ConsensusPublisher::new_with_active_subscribers( + consensus_observer_config, + consensus_observer_client.clone(), + HashSet::from_iter(vec![sorted_peers[2]]), + )); + + // Sort the peers, and verify the PFN is ignored (since it's an active subscriber) + let sorted_peer_subset = sort_subscription_peers( + consensus_publisher_with_subscribers, + peers_and_metadata.clone(), + vec![], + vec![], + ); + assert_eq!(sorted_peer_subset[0].network_id(), NetworkId::Validator); + assert_eq!(sorted_peer_subset[1].network_id(), NetworkId::Vfn); + assert_eq!(sorted_peer_subset.len(), 2); + + // Remove all the peers and verify that no peers are returned upon sorting + for peer_network_id in sorted_peers { + remove_peer_and_connection(peers_and_metadata.clone(), peer_network_id); + } + let sorted_peers = sort_subscription_peers( + consensus_publisher.clone(), + peers_and_metadata.clone(), + vec![], + vec![], + ); + assert!(sorted_peers.is_empty()); + + // Add multiple validator peers, with different latencies + let mut validator_peers = vec![]; + for ping_latency_secs in [0.9, 0.8, 0.5, 0.1, 0.05] { + let validator_peer = create_peer_and_connection( + NetworkId::Validator, + peers_and_metadata.clone(), + 0, + Some(ping_latency_secs), + true, + ); + validator_peers.push(validator_peer); + } + + // Sort the peers and verify the ordering (according to latency) + let 
sorted_peers = sort_subscription_peers( + consensus_publisher, + peers_and_metadata.clone(), + vec![], + vec![], + ); + let expected_peers = validator_peers.into_iter().rev().collect::>(); + assert_eq!(sorted_peers, expected_peers); + } + + /// Creates new subscriptions and verifies the results + async fn create_and_verify_subscriptions( + consensus_observer_config: ConsensusObserverConfig, + peers_and_metadata: Arc, + consensus_observer_client: Arc< + ConsensusObserverClient>, + >, + peer_manager_request_receivers: &mut HashMap< + NetworkId, + aptos_channel::Receiver<(PeerId, ProtocolId), PeerManagerRequest>, + >, + num_subscriptions_to_create: usize, + expected_subscription_peers: Vec, + ) { + // Get the connected peers and metadata + let connected_peers_and_metadata = peers_and_metadata + .get_connected_peers_and_metadata() + .unwrap(); + + // Spawn the subscription creation task + let subscription_creation_handle = tokio::spawn(async move { + create_new_subscriptions( + consensus_observer_config, + consensus_observer_client.clone(), + None, + Arc::new(MockDatabaseReader::new()), + TimeService::mock(), + connected_peers_and_metadata, + num_subscriptions_to_create, + vec![], + vec![], + ) + .await + }); + + // Handle the peer manager requests made by the subscription creation task + for expected_subscription_peer in &expected_subscription_peers { + handle_next_subscription_request( + expected_subscription_peer.network_id(), + peer_manager_request_receivers, + true, + ) + .await; + } + + // Wait for the subscription creation task to complete + let consensus_observer_subscriptions = subscription_creation_handle.await.unwrap(); + + // Verify the created subscriptions + assert_eq!( + consensus_observer_subscriptions.len(), + expected_subscription_peers.len() + ); + for subscription in consensus_observer_subscriptions { + assert!(expected_subscription_peers.contains(&subscription.get_peer_network_id())); + } + } + + /// Creates a new connection metadata for testing + 
fn create_connection_metadata( + peer_network_id: PeerNetworkId, + support_consensus_observer: bool, + ) -> ConnectionMetadata { + if support_consensus_observer { + // Create a protocol set that supports consensus observer + let protocol_set = ProtocolIdSet::from_iter(vec![ + ProtocolId::ConsensusObserver, + ProtocolId::ConsensusObserverRpc, + ]); + + // Create the connection metadata with the protocol set + ConnectionMetadata::new( + peer_network_id.peer_id(), + ConnectionId::default(), + NetworkAddress::mock(), + ConnectionOrigin::Inbound, + MessagingProtocolVersion::V1, + protocol_set, + PeerRole::PreferredUpstream, + ) + } else { + ConnectionMetadata::mock(peer_network_id.peer_id()) + } + } + + /// Creates a new consensus observer client, along with the + /// associated network senders and peers and metadata. + fn create_consensus_observer_client( + network_ids: &[NetworkId], + ) -> ( + Arc, + Arc>>, + HashMap>, + ) { + // Create the network senders and receivers for each network + let mut network_senders = HashMap::new(); + let mut peer_manager_request_receivers = HashMap::new(); + for network_id in network_ids { + // Create the request managers + let queue_cfg = aptos_channel::Config::new(10).queue_style(QueueStyle::FIFO); + let (peer_manager_request_sender, peer_manager_request_receiver) = queue_cfg.build(); + let (connected_request_sender, _) = queue_cfg.build(); + + // Create the network sender + let network_sender = NetworkSender::new( + PeerManagerRequestSender::new(peer_manager_request_sender), + ConnectionRequestSender::new(connected_request_sender), + ); + + // Save the network sender and the request receiver + network_senders.insert(*network_id, network_sender); + peer_manager_request_receivers.insert(*network_id, peer_manager_request_receiver); + } + + // Create the network client + let peers_and_metadata = PeersAndMetadata::new(network_ids); + let network_client = NetworkClient::new( + vec![ProtocolId::ConsensusObserver], + 
vec![ProtocolId::ConsensusObserverRpc], + network_senders, + peers_and_metadata.clone(), + ); + + // Create the consensus observer client + let consensus_observer_client = Arc::new(ConsensusObserverClient::new(network_client)); + + ( + peers_and_metadata, + consensus_observer_client, + peer_manager_request_receivers, + ) + } + + /// Creates a new peer with the specified connection metadata + fn create_peer_and_connection( + network_id: NetworkId, + peers_and_metadata: Arc, + distance_from_validators: u64, + ping_latency_secs: Option, + support_consensus_observer: bool, + ) -> PeerNetworkId { + // Create the connection metadata + let peer_network_id = PeerNetworkId::new(network_id, PeerId::random()); + let connection_metadata = if support_consensus_observer { + // Create a protocol set that supports consensus observer + let protocol_set = ProtocolIdSet::from_iter(vec![ + ProtocolId::ConsensusObserver, + ProtocolId::ConsensusObserverRpc, + ]); + + // Create the connection metadata with the protocol set + ConnectionMetadata::new( + peer_network_id.peer_id(), + ConnectionId::default(), + NetworkAddress::mock(), + ConnectionOrigin::Inbound, + MessagingProtocolVersion::V1, + protocol_set, + PeerRole::PreferredUpstream, + ) + } else { + ConnectionMetadata::mock(peer_network_id.peer_id()) + }; + + // Insert the connection into peers and metadata + peers_and_metadata + .insert_connection_metadata(peer_network_id, connection_metadata.clone()) + .unwrap(); + + // Update the peer monitoring metadata + let latest_network_info_response = NetworkInformationResponse { + connected_peers: BTreeMap::new(), + distance_from_validators, + }; + let monitoring_metdata = PeerMonitoringMetadata::new( + ping_latency_secs, + ping_latency_secs, + Some(latest_network_info_response), + None, + None, + ); + peers_and_metadata + .update_peer_monitoring_metadata(peer_network_id, monitoring_metdata.clone()) + .unwrap(); + + peer_network_id + } + + /// Creates a new peer and metadata for testing + fn 
create_peer_and_metadata( + latency: Option, + distance_from_validators: Option, + support_consensus_observer: bool, + ) -> (PeerNetworkId, PeerMetadata) { + // Create a random peer + let peer_network_id = PeerNetworkId::random(); + + // Create a new peer metadata with the given latency and distance + let connection_metadata = + create_connection_metadata(peer_network_id, support_consensus_observer); + let network_information_response = + distance_from_validators.map(|distance| NetworkInformationResponse { + connected_peers: BTreeMap::new(), + distance_from_validators: distance, + }); + let peer_monitoring_metadata = + PeerMonitoringMetadata::new(latency, None, network_information_response, None, None); + let peer_metadata = + PeerMetadata::new_for_test(connection_metadata, peer_monitoring_metadata); + + (peer_network_id, peer_metadata) + } + + /// Creates a list of peers and metadata for testing + fn create_peers_and_metadata( + empty_latency: bool, + empty_distance: bool, + support_consensus_observer: bool, + num_peers: u64, + ) -> HashMap { + let mut peers_and_metadata = HashMap::new(); + for i in 1..num_peers + 1 { + // Determine the distance for the peer + let distance = if empty_distance { None } else { Some(i) }; + + // Determine the latency for the peer + let latency = if empty_latency { None } else { Some(i as f64) }; + + // Create a new peer and metadata + let (peer_network_id, peer_metadata) = + create_peer_and_metadata(latency, distance, support_consensus_observer); + peers_and_metadata.insert(peer_network_id, peer_metadata); + } + peers_and_metadata + } + + /// Returns the distance from the validators for the specified network + fn get_distance_from_validators(network_id: &NetworkId) -> u64 { + match network_id { + NetworkId::Validator => 0, + NetworkId::Vfn => 1, + NetworkId::Public => 2, + } + } + + /// Fetches and handles the next subscription request from the peer manager + async fn handle_next_subscription_request( + network_id: NetworkId, + 
peer_manager_request_receivers: &mut HashMap< + NetworkId, + aptos_channel::Receiver<(PeerId, ProtocolId), PeerManagerRequest>, + >, + return_successfully: bool, + ) { + // Get the request receiver for the given network + let peer_manager_request_receiver = + peer_manager_request_receivers.get_mut(&network_id).unwrap(); + + // Wait for the next subscription request + match peer_manager_request_receiver.next().await { + Some(PeerManagerRequest::SendRpc(_, network_request)) => { + // Parse the network request + let data = network_request.data; + let response_sender = network_request.res_tx; + let message: ConsensusObserverMessage = bcs::from_bytes(data.as_ref()).unwrap(); + + // Process the network message + match message { + ConsensusObserverMessage::Request(request) => { + // Verify the request is for a new subscription + match request { + ConsensusObserverRequest::Subscribe => (), + _ => panic!( + "Unexpected consensus observer request received: {:?}!", + request + ), + } + + // Determine the response to send + let response = if return_successfully { + // Ack the subscription request + ConsensusObserverResponse::SubscribeAck + } else { + // Respond with the wrong message type + ConsensusObserverResponse::UnsubscribeAck + }; + let response_message = ConsensusObserverMessage::Response(response); + + // Send the response to the peer + let response_bytes = + bcs::to_bytes(&response_message).map(Bytes::from).unwrap(); + let _ = response_sender.send(Ok(response_bytes)); + }, + _ => panic!( + "Unexpected consensus observer message type received: {:?}!", + message + ), + } + }, + Some(PeerManagerRequest::SendDirectSend(_, _)) => { + panic!("Unexpected direct send message received!") + }, + None => panic!("No subscription request received!"), + } + } + + /// Removes the peer and connection metadata for the given peer + fn remove_peer_and_connection( + peers_and_metadata: Arc, + peer_network_id: PeerNetworkId, + ) { + let peer_metadata = peers_and_metadata + 
.get_metadata_for_peer(peer_network_id) + .unwrap(); + let connection_id = peer_metadata.get_connection_metadata().connection_id; + peers_and_metadata + .remove_peer_metadata(peer_network_id, connection_id) + .unwrap(); + } + + /// A simple helper method that sorts the given peers for a subscription + fn sort_subscription_peers( + consensus_publisher: Arc, + peers_and_metadata: Arc, + active_subscription_peers: Vec, + unhealthy_subscription_peers: Vec, + ) -> Vec { + // Get the connected peers and metadata + let connected_peers_and_metadata = peers_and_metadata + .get_connected_peers_and_metadata() + .unwrap(); + + // Sort the peers for subscription requests + sort_peers_for_subscriptions( + connected_peers_and_metadata, + unhealthy_subscription_peers, + active_subscription_peers, + Some(consensus_publisher), + ) + .unwrap() + } + + /// Verifies that the distance and latencies for the peers are in + /// increasing order (with the distance taking precedence over the latency). + fn verify_increasing_distance_latencies( + peers_and_metadata: &HashMap, + sorted_peers: &[PeerNetworkId], + ) { + let mut previous_latency = None; + let mut previous_distance = 0; + for sorted_peer in sorted_peers { + // Get the distance and latency for the peer + let peer_metadata = peers_and_metadata.get(sorted_peer).unwrap(); + let distance = get_distance_for_peer(sorted_peer, peer_metadata).unwrap(); + let latency = get_latency_for_peer(sorted_peer, peer_metadata); + + // Verify the order of the peers + if distance == previous_distance { + if let Some(latency) = latency { + if let Some(previous_latency) = previous_latency { + assert!(latency >= previous_latency); + } + } + } else { + assert!(distance > previous_distance); + } + + // Update the previous latency and distance + previous_latency = latency; + previous_distance = distance; + } + } +} diff --git a/consensus/src/consensus_observer/publisher/consensus_publisher.rs 
b/consensus/src/consensus_observer/publisher/consensus_publisher.rs index 11e2f63aa92..899901593f7 100644 --- a/consensus/src/consensus_observer/publisher/consensus_publisher.rs +++ b/consensus/src/consensus_observer/publisher/consensus_publisher.rs @@ -70,6 +70,26 @@ impl ConsensusPublisher { (consensus_publisher, outbound_message_receiver) } + #[cfg(test)] + /// Creates a new consensus publisher with the given active subscribers + pub fn new_with_active_subscribers( + consensus_observer_config: ConsensusObserverConfig, + consensus_observer_client: Arc< + ConsensusObserverClient>, + >, + active_subscribers: HashSet, + ) -> Self { + // Create the consensus publisher + let (consensus_publisher, _) = + ConsensusPublisher::new(consensus_observer_config, consensus_observer_client); + + // Update the active subscribers + *consensus_publisher.active_subscribers.write() = active_subscribers; + + // Return the publisher + consensus_publisher + } + /// Adds the given subscriber to the set of active subscribers fn add_active_subscriber(&self, peer_network_id: PeerNetworkId) { self.active_subscribers.write().insert(peer_network_id); @@ -150,7 +170,7 @@ impl ConsensusPublisher { let (peer_network_id, message, response_sender) = network_message.into_parts(); // Update the RPC request counter - metrics::increment_request_counter( + metrics::increment_counter( &metrics::PUBLISHER_RECEIVED_REQUESTS, message.get_label(), &peer_network_id, diff --git a/consensus/src/counters.rs b/consensus/src/counters.rs index 214506e6f92..1af6f4f8c6d 100644 --- a/consensus/src/counters.rs +++ b/consensus/src/counters.rs @@ -662,9 +662,9 @@ pub static ORDER_VOTE_ADDED: Lazy = Lazy::new(|| { .unwrap() }); -pub static ORDER_VOTE_VERY_OLD: Lazy = Lazy::new(|| { +pub static ORDER_VOTE_NOT_IN_RANGE: Lazy = Lazy::new(|| { register_int_counter!( - "aptos_consensus_order_vote_very_old", + "aptos_consensus_order_vote_not_in_range", "Count of the number of order votes that are very old" ) .unwrap() diff 
--git a/consensus/src/dag/tests/rb_handler_tests.rs b/consensus/src/dag/tests/rb_handler_tests.rs index 87729b70783..8d3c9b2b429 100644 --- a/consensus/src/dag/tests/rb_handler_tests.rs +++ b/consensus/src/dag/tests/rb_handler_tests.rs @@ -152,7 +152,7 @@ async fn test_node_broadcast_receiver_failure() { let node_cert = NodeCertificate::new( node.metadata().clone(), validator_verifier - .aggregate_signatures(&partial_sigs) + .aggregate_signatures(partial_sigs.signatures_iter()) .unwrap(), ); let node = new_node(2, 20, signers[0].author(), vec![node_cert]); @@ -177,7 +177,7 @@ async fn test_node_broadcast_receiver_failure() { NodeCertificate::new( node.metadata().clone(), validator_verifier - .aggregate_signatures(&partial_sigs) + .aggregate_signatures(partial_sigs.signatures_iter()) .unwrap(), ) }) diff --git a/consensus/src/dag/types.rs b/consensus/src/dag/types.rs index 78b6c4f4bf9..0e4e9928dee 100644 --- a/consensus/src/dag/types.rs +++ b/consensus/src/dag/types.rs @@ -582,7 +582,7 @@ impl BroadcastStatus for Arc { let aggregated_signature = match self .epoch_state .verifier - .aggregate_signatures(partial_signatures) + .aggregate_signatures(partial_signatures.signatures_iter()) { Ok(signature) => signature, Err(_) => return Err(anyhow::anyhow!("Signature aggregation failed")), diff --git a/consensus/src/epoch_manager.rs b/consensus/src/epoch_manager.rs index da8c23ea509..a6c43221f9a 100644 --- a/consensus/src/epoch_manager.rs +++ b/consensus/src/epoch_manager.rs @@ -56,12 +56,9 @@ use crate::{ use anyhow::{anyhow, bail, ensure, Context}; use aptos_bounded_executor::BoundedExecutor; use aptos_channels::{aptos_channel, message_queues::QueueStyle}; -use aptos_config::config::{ - ConsensusConfig, DagConsensusConfig, ExecutionConfig, NodeConfig, QcAggregatorType, -}; +use aptos_config::config::{ConsensusConfig, DagConsensusConfig, ExecutionConfig, NodeConfig}; use aptos_consensus_types::{ common::{Author, Round}, - delayed_qc_msg::DelayedQcMsg, 
epoch_retrieval::EpochRetrievalRequest, proof_of_store::ProofCache, utils::PayloadTxnsSize, @@ -96,11 +93,7 @@ use aptos_types::{ use aptos_validator_transaction_pool::VTxnPoolState; use fail::fail_point; use futures::{ - channel::{ - mpsc, - mpsc::{unbounded, Sender, UnboundedSender}, - oneshot, - }, + channel::{mpsc, mpsc::Sender, oneshot}, SinkExt, StreamExt, }; use itertools::Itertools; @@ -265,21 +258,13 @@ impl EpochManager

{ &self, time_service: Arc, timeout_sender: aptos_channels::Sender, - delayed_qc_tx: UnboundedSender, - qc_aggregator_type: QcAggregatorType, ) -> RoundState { let time_interval = Box::new(ExponentialTimeInterval::new( Duration::from_millis(self.config.round_initial_timeout_ms), self.config.round_timeout_backoff_exponent_base, self.config.round_timeout_backoff_max_exponent, )); - RoundState::new( - time_interval, - time_service, - timeout_sender, - delayed_qc_tx, - qc_aggregator_type, - ) + RoundState::new(time_interval, time_service, timeout_sender) } /// Create a proposer election handler based on proposers @@ -793,15 +778,10 @@ impl EpochManager

{ "Unable to initialize safety rules.", ); } - let (delayed_qc_tx, delayed_qc_rx) = unbounded(); info!(epoch = epoch, "Create RoundState"); - let round_state = self.create_round_state( - self.time_service.clone(), - self.timeout_sender.clone(), - delayed_qc_tx, - self.config.qc_aggregator_type.clone(), - ); + let round_state = + self.create_round_state(self.time_service.clone(), self.timeout_sender.clone()); info!(epoch = epoch, "Create ProposerElection"); let proposer_election = @@ -913,12 +893,7 @@ impl EpochManager

{ let (close_tx, close_rx) = oneshot::channel(); self.round_manager_close_tx = Some(close_tx); - tokio::spawn(round_manager.start( - round_manager_rx, - buffered_proposal_rx, - delayed_qc_rx, - close_rx, - )); + tokio::spawn(round_manager.start(round_manager_rx, buffered_proposal_rx, close_rx)); self.spawn_block_retrieval_task(epoch, block_store, max_blocks_allowed); } diff --git a/consensus/src/lib.rs b/consensus/src/lib.rs index 87eb81e0f40..f8545073966 100644 --- a/consensus/src/lib.rs +++ b/consensus/src/lib.rs @@ -58,7 +58,6 @@ mod execution_pipeline; /// AptosNet interface. pub mod network_interface; mod payload_manager; -mod qc_aggregator; mod transaction_deduper; mod transaction_filter; mod transaction_shuffler; diff --git a/consensus/src/liveness/proposal_generator.rs b/consensus/src/liveness/proposal_generator.rs index 411d24c7ac2..334b0a76fbf 100644 --- a/consensus/src/liveness/proposal_generator.rs +++ b/consensus/src/liveness/proposal_generator.rs @@ -29,7 +29,7 @@ use aptos_consensus_types::{ }; use aptos_crypto::{hash::CryptoHash, HashValue}; use aptos_infallible::Mutex; -use aptos_logger::{error, info, sample, sample::SampleRate, warn}; +use aptos_logger::{error, sample, sample::SampleRate, warn}; use aptos_types::{on_chain_config::ValidatorTxnConfig, validator_txn::ValidatorTransaction}; use aptos_validator_transaction_pool as vtxn_pool; use futures::future::BoxFuture; @@ -203,7 +203,7 @@ impl PipelineBackpressureConfig { PROPOSER_ESTIMATED_CALIBRATED_BLOCK_TXNS.observe(calibrated_block_size as f64); // Check if calibrated block size is reduction in size, to turn on backpressure. 
if max_block_txns > calibrated_block_size { - info!( + warn!( block_execution_times = format!("{:?}", block_execution_times), estimated_calibrated_block_sizes = format!("{:?}", sizes), calibrated_block_size = calibrated_block_size, diff --git a/consensus/src/liveness/round_state.rs b/consensus/src/liveness/round_state.rs index ea7e6e7f5b3..74e78e9c9f0 100644 --- a/consensus/src/liveness/round_state.rs +++ b/consensus/src/liveness/round_state.rs @@ -7,18 +7,16 @@ use crate::{ pending_votes::{PendingVotes, VoteReceptionResult}, util::time_service::{SendTask, TimeService}, }; -use aptos_config::config::QcAggregatorType; use aptos_consensus_types::{ - common::Round, delayed_qc_msg::DelayedQcMsg, sync_info::SyncInfo, - timeout_2chain::TwoChainTimeoutWithPartialSignatures, vote::Vote, + common::Round, sync_info::SyncInfo, timeout_2chain::TwoChainTimeoutWithPartialSignatures, + vote::Vote, }; use aptos_crypto::HashValue; use aptos_logger::{prelude::*, Schema}; use aptos_types::{ - ledger_info::LedgerInfoWithPartialSignatures, validator_verifier::ValidatorVerifier, + ledger_info::LedgerInfoWithVerifiedSignatures, validator_verifier::ValidatorVerifier, }; use futures::future::AbortHandle; -use futures_channel::mpsc::UnboundedSender; use serde::Serialize; use std::{fmt, sync::Arc, time::Duration}; @@ -47,7 +45,7 @@ pub struct NewRoundEvent { pub round: Round, pub reason: NewRoundReason, pub timeout: Duration, - pub prev_round_votes: Vec<(HashValue, LedgerInfoWithPartialSignatures)>, + pub prev_round_votes: Vec<(HashValue, LedgerInfoWithVerifiedSignatures)>, pub prev_round_timeout_votes: Option, } @@ -163,9 +161,6 @@ pub struct RoundState { vote_sent: Option, // The handle to cancel previous timeout task when moving to next round. abort_handle: Option, - // Self sender to send delayed QC aggregation events to the round manager. 
- delayed_qc_tx: UnboundedSender, - qc_aggregator_type: QcAggregatorType, } #[derive(Default, Schema)] @@ -194,8 +189,6 @@ impl RoundState { time_interval: Box, time_service: Arc, timeout_sender: aptos_channels::Sender, - delayed_qc_tx: UnboundedSender, - qc_aggregator_type: QcAggregatorType, ) -> Self { // Our counters are initialized lazily, so they're not going to appear in // Prometheus if some conditions never happen. Invoking get() function enforces creation. @@ -203,11 +196,7 @@ impl RoundState { counters::TIMEOUT_ROUNDS_COUNT.get(); counters::TIMEOUT_COUNT.get(); - let pending_votes = PendingVotes::new( - time_service.clone(), - delayed_qc_tx.clone(), - qc_aggregator_type.clone(), - ); + let pending_votes = PendingVotes::new(); Self { time_interval, highest_ordered_round: 0, @@ -218,8 +207,6 @@ impl RoundState { pending_votes, vote_sent: None, abort_handle: None, - delayed_qc_tx, - qc_aggregator_type, } } @@ -262,11 +249,7 @@ impl RoundState { // Start a new round. self.current_round = new_round; - self.pending_votes = PendingVotes::new( - self.time_service.clone(), - self.delayed_qc_tx.clone(), - self.qc_aggregator_type.clone(), - ); + self.pending_votes = PendingVotes::new(); self.vote_sent = None; let timeout = self.setup_timeout(1); // The new round reason is QCReady in case both QC.round + 1 == new_round, otherwise @@ -310,16 +293,6 @@ impl RoundState { } } - pub fn process_delayed_qc_msg( - &mut self, - validator_verifier: &ValidatorVerifier, - msg: DelayedQcMsg, - ) -> VoteReceptionResult { - let DelayedQcMsg { vote } = msg; - self.pending_votes - .process_delayed_qc(validator_verifier, vote) - } - pub fn vote_sent(&self) -> Option { self.vote_sent.clone() } diff --git a/consensus/src/liveness/round_state_test.rs b/consensus/src/liveness/round_state_test.rs index 03f1d245359..ad2eec8809e 100644 --- a/consensus/src/liveness/round_state_test.rs +++ b/consensus/src/liveness/round_state_test.rs @@ -8,7 +8,6 @@ use crate::{ }, 
util::mock_time_service::SimulatedTimeService, }; -use aptos_config::config::QcAggregatorType; use aptos_consensus_types::{ common::Round, quorum_cert::QuorumCert, @@ -23,7 +22,6 @@ use aptos_types::{ ledger_info::{LedgerInfo, LedgerInfoWithSignatures}, }; use futures::StreamExt; -use futures_channel::mpsc::unbounded; use std::{sync::Arc, time::Duration}; #[test] @@ -88,15 +86,8 @@ fn make_round_state() -> (RoundState, aptos_channels::Receiver) { let time_interval = Box::new(ExponentialTimeInterval::fixed(Duration::from_millis(2))); let simulated_time = SimulatedTimeService::auto_advance_until(Duration::from_millis(4)); let (timeout_tx, timeout_rx) = aptos_channels::new_test(1_024); - let (delayed_qc_tx, _) = unbounded(); ( - RoundState::new( - time_interval, - Arc::new(simulated_time), - timeout_tx, - delayed_qc_tx, - QcAggregatorType::NoDelay, - ), + RoundState::new(time_interval, Arc::new(simulated_time), timeout_tx), timeout_rx, ) } diff --git a/consensus/src/network.rs b/consensus/src/network.rs index 698e0896385..517c01fce47 100644 --- a/consensus/src/network.rs +++ b/consensus/src/network.rs @@ -346,7 +346,7 @@ impl NetworkSender { if self.author == peer { let self_msg = Event::Message(self.author, msg.clone()); if let Err(err) = self_sender.send(self_msg).await { - error!(error = ?err, "Error delivering a self msg"); + warn!(error = ?err, "Error delivering a self msg"); } continue; } diff --git a/consensus/src/payload_manager.rs b/consensus/src/payload_manager.rs index 4749efb10c6..c2e7c580fb9 100644 --- a/consensus/src/payload_manager.rs +++ b/consensus/src/payload_manager.rs @@ -471,7 +471,7 @@ async fn get_transactions_for_observer( }; // If the payload is valid, publish it to any downstream observers - let transaction_payload = block_payload.transaction_payload; + let transaction_payload = block_payload.transaction_payload(); if let Some(consensus_publisher) = consensus_publisher { let message = ConsensusObserverMessage::new_block_payload_message( 
block.gen_block_info(HashValue::zero(), 0, None), diff --git a/consensus/src/pending_order_votes.rs b/consensus/src/pending_order_votes.rs index 7420b565ce3..46cf23cfe2b 100644 --- a/consensus/src/pending_order_votes.rs +++ b/consensus/src/pending_order_votes.rs @@ -2,15 +2,15 @@ // Parts of the project are originally copyright © Meta Platforms, Inc. // SPDX-License-Identifier: Apache-2.0 -use aptos_consensus_types::{common::Author, order_vote::OrderVote}; +use aptos_consensus_types::{common::Author, order_vote::OrderVote, quorum_cert::QuorumCert}; use aptos_crypto::{hash::CryptoHash, HashValue}; use aptos_logger::prelude::*; use aptos_types::{ aggregate_signature::PartialSignatures, - ledger_info::{LedgerInfo, LedgerInfoWithPartialSignatures, LedgerInfoWithSignatures}, + ledger_info::{LedgerInfo, LedgerInfoWithSignatures, LedgerInfoWithVerifiedSignatures}, validator_verifier::{ValidatorVerifier, VerifyError}, }; -use std::collections::HashMap; +use std::{collections::HashMap, sync::Arc}; /// Result of the order vote processing. The failure case (Verification error) is returned /// as the Error part of the result. @@ -20,7 +20,8 @@ pub enum OrderVoteReceptionResult { /// QC currently has. VoteAdded(u128), /// This block has just been certified after adding the vote. - NewLedgerInfoWithSignatures(LedgerInfoWithSignatures), + /// Returns the created order certificate and the QC on which the order certificate is based. 
+ NewLedgerInfoWithSignatures((Arc, LedgerInfoWithSignatures)), /// There might be some issues adding a vote ErrorAddingVote(VerifyError), /// Error happens when aggregating signature @@ -32,14 +33,16 @@ pub enum OrderVoteReceptionResult { #[derive(Debug, PartialEq, Eq)] enum OrderVoteStatus { EnoughVotes(LedgerInfoWithSignatures), - NotEnoughVotes(LedgerInfoWithPartialSignatures), + NotEnoughVotes(LedgerInfoWithVerifiedSignatures), } /// A PendingVotes structure keep track of order votes for the last few rounds pub struct PendingOrderVotes { /// Maps LedgerInfo digest to associated signatures (contained in a partial LedgerInfoWithSignatures). /// Order vote status stores caches the information on whether the votes are enough to form a QC. - li_digest_to_votes: HashMap, + /// We also store the QC that the order votes certify. + li_digest_to_votes: + HashMap, } impl PendingOrderVotes { @@ -50,29 +53,42 @@ impl PendingOrderVotes { } } + pub fn exists(&self, li_digest: &HashValue) -> bool { + self.li_digest_to_votes.contains_key(li_digest) + } + /// Add a vote to the pending votes // TODO: Should we add any counters here? 
pub fn insert_order_vote( &mut self, order_vote: &OrderVote, validator_verifier: &ValidatorVerifier, + verified_quorum_cert: Option, ) -> OrderVoteReceptionResult { // derive data from order vote let li_digest = order_vote.ledger_info().hash(); // obtain the ledger info with signatures associated to the order vote's ledger info - let status = self.li_digest_to_votes.entry(li_digest).or_insert_with(|| { + let (quorum_cert, status) = self.li_digest_to_votes.entry(li_digest).or_insert_with(|| { // if the ledger info with signatures doesn't exist yet, create it - OrderVoteStatus::NotEnoughVotes(LedgerInfoWithPartialSignatures::new( - order_vote.ledger_info().clone(), - PartialSignatures::empty(), - )) + ( + verified_quorum_cert.expect( + "Quorum Cert is expected when creating a new entry in pending order votes", + ), + OrderVoteStatus::NotEnoughVotes(LedgerInfoWithVerifiedSignatures::new( + order_vote.ledger_info().clone(), + PartialSignatures::empty(), + )), + ) }); match status { OrderVoteStatus::EnoughVotes(li_with_sig) => { // we already have enough votes for this ledger info - OrderVoteReceptionResult::NewLedgerInfoWithSignatures(li_with_sig.clone()) + OrderVoteReceptionResult::NewLedgerInfoWithSignatures(( + Arc::new(quorum_cert.clone()), + li_with_sig.clone(), + )) }, OrderVoteStatus::NotEnoughVotes(li_with_sig) => { // we don't have enough votes for this ledger info yet @@ -107,9 +123,10 @@ impl PendingOrderVotes { Ok(ledger_info_with_sig) => { *status = OrderVoteStatus::EnoughVotes(ledger_info_with_sig.clone()); - OrderVoteReceptionResult::NewLedgerInfoWithSignatures( + OrderVoteReceptionResult::NewLedgerInfoWithSignatures(( + Arc::new(quorum_cert.clone()), ledger_info_with_sig, - ) + )) }, Err(e) => OrderVoteReceptionResult::ErrorAggregatingSignature(e), } @@ -135,19 +152,21 @@ impl PendingOrderVotes { // Removes votes older than highest_ordered_round pub fn garbage_collect(&mut self, highest_ordered_round: u64) { - self.li_digest_to_votes.retain(|_, status| 
match status { - OrderVoteStatus::EnoughVotes(li_with_sig) => { - li_with_sig.ledger_info().round() > highest_ordered_round - }, - OrderVoteStatus::NotEnoughVotes(li_with_sig) => { - li_with_sig.ledger_info().round() > highest_ordered_round - }, - }); + self.li_digest_to_votes + .retain(|_, (_, status)| match status { + OrderVoteStatus::EnoughVotes(li_with_sig) => { + li_with_sig.ledger_info().round() > highest_ordered_round + }, + OrderVoteStatus::NotEnoughVotes(li_with_sig) => { + li_with_sig.ledger_info().round() > highest_ordered_round + }, + }); } pub fn has_enough_order_votes(&self, ledger_info: &LedgerInfo) -> bool { let li_digest = ledger_info.hash(); - if let Some(OrderVoteStatus::EnoughVotes(_)) = self.li_digest_to_votes.get(&li_digest) { + if let Some((_, OrderVoteStatus::EnoughVotes(_))) = self.li_digest_to_votes.get(&li_digest) + { return true; } false @@ -157,7 +176,7 @@ impl PendingOrderVotes { #[cfg(test)] mod tests { use super::{OrderVoteReceptionResult, PendingOrderVotes}; - use aptos_consensus_types::order_vote::OrderVote; + use aptos_consensus_types::{order_vote::OrderVote, quorum_cert::QuorumCert}; use aptos_crypto::HashValue; use aptos_types::{ block_info::BlockInfo, ledger_info::LedgerInfo, @@ -182,6 +201,7 @@ mod tests { // create random vote from validator[0] let li1 = random_ledger_info(); + let qc = QuorumCert::dummy(); let order_vote_1_author_0 = OrderVote::new_with_signature( signers[0].author(), li1.clone(), @@ -190,13 +210,21 @@ mod tests { // first time a new order vote is added -> OrderVoteAdded assert_eq!( - pending_order_votes.insert_order_vote(&order_vote_1_author_0, &validator), - OrderVoteReceptionResult::VoteAdded(1) + pending_order_votes.insert_order_vote( + &order_vote_1_author_0, + &validator, + Some(qc.clone()) + ), + OrderVoteReceptionResult::VoteAdded(1), ); // same author voting for the same thing -> OrderVoteAdded assert_eq!( - pending_order_votes.insert_order_vote(&order_vote_1_author_0, &validator), + 
pending_order_votes.insert_order_vote( + &order_vote_1_author_0, + &validator, + Some(qc.clone()) + ), OrderVoteReceptionResult::VoteAdded(1) ); @@ -208,8 +236,12 @@ mod tests { signers[1].sign(&li2).expect("Unable to sign ledger info"), ); assert_eq!( - pending_order_votes.insert_order_vote(&order_vote_2_author_1, &validator), - OrderVoteReceptionResult::VoteAdded(1) + pending_order_votes.insert_order_vote( + &order_vote_2_author_1, + &validator, + Some(qc.clone()) + ), + OrderVoteReceptionResult::VoteAdded(1), ); assert!(!pending_order_votes.has_enough_order_votes(&li1)); @@ -220,8 +252,12 @@ mod tests { li2.clone(), signers[2].sign(&li2).expect("Unable to sign ledger info"), ); - match pending_order_votes.insert_order_vote(&order_vote_2_author_2, &validator) { - OrderVoteReceptionResult::NewLedgerInfoWithSignatures(li_with_sig) => { + match pending_order_votes.insert_order_vote( + &order_vote_2_author_2, + &validator, + Some(qc.clone()), + ) { + OrderVoteReceptionResult::NewLedgerInfoWithSignatures((_, li_with_sig)) => { assert!(li_with_sig.check_voting_power(&validator).is_ok()); }, _ => { diff --git a/consensus/src/pending_votes.rs b/consensus/src/pending_votes.rs index ff8bc37a1ae..b2177d2c588 100644 --- a/consensus/src/pending_votes.rs +++ b/consensus/src/pending_votes.rs @@ -8,15 +8,9 @@ //! when enough votes (or timeout votes) have been observed. //! Votes are automatically dropped when the structure goes out of scope. 
-use crate::{ - counters, - qc_aggregator::{create_qc_aggregator, QcAggregator}, - util::time_service::TimeService, -}; -use aptos_config::config::QcAggregatorType; +use crate::counters; use aptos_consensus_types::{ common::Author, - delayed_qc_msg::DelayedQcMsg, quorum_cert::QuorumCert, timeout_2chain::{TwoChainTimeoutCertificate, TwoChainTimeoutWithPartialSignatures}, vote::Vote, @@ -26,10 +20,9 @@ use aptos_crypto::{hash::CryptoHash, HashValue}; use aptos_logger::prelude::*; use aptos_types::{ aggregate_signature::PartialSignatures, - ledger_info::LedgerInfoWithPartialSignatures, + ledger_info::LedgerInfoWithVerifiedSignatures, validator_verifier::{ValidatorVerifier, VerifyError}, }; -use futures_channel::mpsc::UnboundedSender; use std::{ collections::{BTreeMap, HashMap}, fmt, @@ -43,9 +36,6 @@ pub enum VoteReceptionResult { /// The vote has been added but QC has not been formed yet. Return the amount of voting power /// QC currently has. VoteAdded(u128), - /// The vote has been added and we have gather enough voting power to form the QC but we have - /// delayed the QC to aggregate as many signatures as possible. - VoteAddedQCDelayed(u128), /// The very same vote message has been processed in past. DuplicateVote, /// The very same author has already voted for another proposal in this round (equivocation). @@ -72,30 +62,23 @@ pub struct PendingVotes { /// This might keep multiple LedgerInfos for the current round: either due to different proposals (byzantine behavior) /// or due to different NIL proposals (clients can have a different view of what block to extend). li_digest_to_votes: - HashMap, + HashMap, /// Tracks all the signatures of the 2-chain timeout for the given round. maybe_partial_2chain_tc: Option, /// Map of Author to (vote, li_digest). This is useful to discard multiple votes. author_to_vote: HashMap, /// Whether we have echoed timeout for this round. 
echo_timeout: bool, - - qc_aggregator: Box, } impl PendingVotes { /// Creates an empty PendingVotes structure for a specific epoch and round - pub fn new( - time_service: Arc, - delayed_qc_tx: UnboundedSender, - qc_aggregator_type: QcAggregatorType, - ) -> Self { + pub fn new() -> Self { PendingVotes { li_digest_to_votes: HashMap::new(), maybe_partial_2chain_tc: None, author_to_vote: HashMap::new(), echo_timeout: false, - qc_aggregator: create_qc_aggregator(qc_aggregator_type, time_service, delayed_qc_tx), } } @@ -155,7 +138,7 @@ impl PendingVotes { // if the ledger info with signatures doesn't exist yet, create it ( len, - LedgerInfoWithPartialSignatures::new( + LedgerInfoWithVerifiedSignatures::new( vote.ledger_info().clone(), PartialSignatures::empty(), ), @@ -189,30 +172,37 @@ impl PendingVotes { li_with_sig.add_signature(vote.author(), vote.signature().clone()); // check if we have enough signatures to create a QC - let voting_power = - match validator_verifier.check_voting_power(li_with_sig.signatures().keys(), true) { - // a quorum of signature was reached, a new QC is formed - Ok(aggregated_voting_power) => { - return self.qc_aggregator.handle_aggregated_qc( - validator_verifier, - aggregated_voting_power, - vote, - li_with_sig, + let voting_power = match validator_verifier + .check_voting_power(li_with_sig.signatures().keys(), true) + { + // a quorum of signature was reached, a new QC is formed + Ok(aggregated_voting_power) => { + assert!( + aggregated_voting_power >= validator_verifier.quorum_voting_power(), + "QC aggregation should not be triggered if we don't have enough votes to form a QC" ); - }, + match li_with_sig.aggregate_signatures(validator_verifier) { + Ok(ledger_info_with_sig) => { + return VoteReceptionResult::NewQuorumCertificate(Arc::new( + QuorumCert::new(vote.vote_data().clone(), ledger_info_with_sig), + )) + }, + Err(e) => return VoteReceptionResult::ErrorAggregatingSignature(e), + } + }, - // not enough votes - 
Err(VerifyError::TooLittleVotingPower { voting_power, .. }) => voting_power, + // not enough votes + Err(VerifyError::TooLittleVotingPower { voting_power, .. }) => voting_power, - // error - Err(error) => { - error!( - "MUST_FIX: vote received could not be added: {}, vote: {}", - error, vote - ); - return VoteReceptionResult::ErrorAddingVote(error); - }, - }; + // error + Err(error) => { + error!( + "MUST_FIX: vote received could not be added: {}, vote: {}", + error, vote + ); + return VoteReceptionResult::ErrorAddingVote(error); + }, + }; // // 4. We couldn't form a QC, let's check if we can create a TC @@ -274,7 +264,7 @@ impl PendingVotes { pub fn aggregate_qc_now( validator_verifier: &ValidatorVerifier, - li_with_sig: &LedgerInfoWithPartialSignatures, + li_with_sig: &LedgerInfoWithVerifiedSignatures, vote_data: &VoteData, ) -> VoteReceptionResult { match li_with_sig.aggregate_signatures(validator_verifier) { @@ -327,7 +317,7 @@ impl PendingVotes { pub fn drain_votes( &mut self, ) -> ( - Vec<(HashValue, LedgerInfoWithPartialSignatures)>, + Vec<(HashValue, LedgerInfoWithVerifiedSignatures)>, Option, ) { for (hash_index, _) in self.li_digest_to_votes.values() { @@ -405,8 +395,6 @@ impl fmt::Display for PendingVotes { #[cfg(test)] mod tests { use super::{PendingVotes, VoteReceptionResult}; - use crate::util::mock_time_service::SimulatedTimeService; - use aptos_config::config::QcAggregatorType; use aptos_consensus_types::{ block::block_test_utils::certificate_for_genesis, vote::Vote, vote_data::VoteData, }; @@ -415,9 +403,7 @@ mod tests { block_info::BlockInfo, ledger_info::LedgerInfo, validator_verifier::random_validator_verifier, }; - use futures_channel::mpsc::unbounded; use itertools::Itertools; - use std::sync::Arc; /// Creates a random ledger info for epoch 1 and round 1. 
fn random_ledger_info() -> LedgerInfo { @@ -440,12 +426,7 @@ mod tests { // set up 4 validators let (signers, validator) = random_validator_verifier(4, Some(2), false); - let (delayed_qc_tx, _) = unbounded(); - let mut pending_votes = PendingVotes::new( - Arc::new(SimulatedTimeService::new()), - delayed_qc_tx, - QcAggregatorType::NoDelay, - ); + let mut pending_votes = PendingVotes::new(); // create random vote from validator[0] let li1 = random_ledger_info(); @@ -512,12 +493,7 @@ mod tests { // set up 4 validators let (signers, validator) = random_validator_verifier(4, None, false); - let (delayed_qc_tx, _) = unbounded(); - let mut pending_votes = PendingVotes::new( - Arc::new(SimulatedTimeService::new()), - delayed_qc_tx, - QcAggregatorType::NoDelay, - ); + let mut pending_votes = PendingVotes::new(); // submit a new vote from validator[0] -> VoteAdded let li0 = random_ledger_info(); diff --git a/consensus/src/pipeline/buffer_item.rs b/consensus/src/pipeline/buffer_item.rs index f44cf291c04..4854574575a 100644 --- a/consensus/src/pipeline/buffer_item.rs +++ b/consensus/src/pipeline/buffer_item.rs @@ -16,7 +16,7 @@ use aptos_reliable_broadcast::DropGuard; use aptos_types::{ aggregate_signature::PartialSignatures, block_info::BlockInfo, - ledger_info::{LedgerInfo, LedgerInfoWithPartialSignatures, LedgerInfoWithSignatures}, + ledger_info::{LedgerInfo, LedgerInfoWithSignatures, LedgerInfoWithVerifiedSignatures}, validator_verifier::ValidatorVerifier, }; use futures::future::BoxFuture; @@ -68,7 +68,7 @@ fn generate_executed_item_from_ordered( order_vote_enabled: bool, ) -> BufferItem { debug!("{} advance to executed from ordered", commit_info); - let partial_commit_proof = LedgerInfoWithPartialSignatures::new( + let partial_commit_proof = LedgerInfoWithVerifiedSignatures::new( generate_commit_ledger_info(&commit_info, &ordered_proof, order_vote_enabled), verified_signatures, ); @@ -87,7 +87,7 @@ fn aggregate_commit_proof( validator: &ValidatorVerifier, ) -> 
LedgerInfoWithSignatures { let aggregated_sig = validator - .aggregate_signatures(verified_signatures) + .aggregate_signatures(verified_signatures.signatures_iter()) .expect("Failed to generate aggregated signature"); LedgerInfoWithSignatures::new(commit_ledger_info.clone(), aggregated_sig) } @@ -106,7 +106,7 @@ pub struct OrderedItem { pub struct ExecutedItem { pub executed_blocks: Vec, - pub partial_commit_proof: LedgerInfoWithPartialSignatures, + pub partial_commit_proof: LedgerInfoWithVerifiedSignatures, pub callback: StateComputerCommitCallBackType, pub commit_info: BlockInfo, pub ordered_proof: LedgerInfoWithSignatures, @@ -114,7 +114,7 @@ pub struct ExecutedItem { pub struct SignedItem { pub executed_blocks: Vec, - pub partial_commit_proof: LedgerInfoWithPartialSignatures, + pub partial_commit_proof: LedgerInfoWithVerifiedSignatures, pub callback: StateComputerCommitCallBackType, pub commit_vote: CommitVote, pub rb_handle: Option<(Instant, DropGuard)>, @@ -146,9 +146,10 @@ impl BufferItem { ordered_blocks: Vec, ordered_proof: LedgerInfoWithSignatures, callback: StateComputerCommitCallBackType, + unverified_signatures: PartialSignatures, ) -> Self { Self::Ordered(Box::new(OrderedItem { - unverified_signatures: PartialSignatures::empty(), + unverified_signatures, commit_proof: None, callback, ordered_blocks, diff --git a/consensus/src/pipeline/buffer_manager.rs b/consensus/src/pipeline/buffer_manager.rs index b3ebe706f60..38d5aa85788 100644 --- a/consensus/src/pipeline/buffer_manager.rs +++ b/consensus/src/pipeline/buffer_manager.rs @@ -28,6 +28,7 @@ use aptos_bounded_executor::BoundedExecutor; use aptos_config::config::ConsensusObserverConfig; use aptos_consensus_types::{ common::{Author, Round}, + pipeline::commit_vote::CommitVote, pipelined_block::PipelinedBlock, }; use aptos_crypto::HashValue; @@ -37,8 +38,8 @@ use aptos_network::protocols::{rpc::error::RpcError, wire::handshake::v1::Protoc use aptos_reliable_broadcast::{DropGuard, ReliableBroadcast}; use 
aptos_time_service::TimeService; use aptos_types::{ - account_address::AccountAddress, epoch_change::EpochChangeProof, epoch_state::EpochState, - ledger_info::LedgerInfoWithSignatures, + account_address::AccountAddress, aggregate_signature::PartialSignatures, + epoch_change::EpochChangeProof, epoch_state::EpochState, ledger_info::LedgerInfoWithSignatures, }; use bytes::Bytes; use futures::{ @@ -51,7 +52,7 @@ use futures::{ }; use once_cell::sync::OnceCell; use std::{ - collections::BTreeMap, + collections::{BTreeMap, HashMap}, sync::{ atomic::{AtomicBool, AtomicU64, Ordering}, Arc, @@ -164,6 +165,11 @@ pub struct BufferManager { consensus_publisher: Option>, pending_commit_proofs: BTreeMap, + + max_pending_rounds_in_commit_vote_cache: u64, + // If the buffer manager receives a commit vote for a block that is not in buffer items, then + // the vote will be cached. We can cache upto max_pending_rounds_in_commit_vote_cache (100) blocks. + pending_commit_votes: BTreeMap>, } impl BufferManager { @@ -194,6 +200,7 @@ impl BufferManager { highest_committed_round: Round, consensus_observer_config: ConsensusObserverConfig, consensus_publisher: Option>, + max_pending_rounds_in_commit_vote_cache: u64, ) -> Self { let buffer = Buffer::::new(); @@ -257,6 +264,9 @@ impl BufferManager { consensus_publisher, pending_commit_proofs: BTreeMap::new(), + + max_pending_rounds_in_commit_vote_cache, + pending_commit_votes: BTreeMap::new(), } } @@ -333,6 +343,30 @@ impl BufferManager { } } + fn try_add_pending_commit_vote(&mut self, vote: CommitVote) -> bool { + let block_id = vote.commit_info().id(); + let round = vote.commit_info().round(); + + // Store the commit vote only if it is for one of the next 100 rounds. 
+ if round > self.highest_committed_round + && self.highest_committed_round + self.max_pending_rounds_in_commit_vote_cache > round + { + self.pending_commit_votes + .entry(round) + .or_default() + .insert(vote.author(), vote); + true + } else { + debug!( + round = round, + highest_committed_round = self.highest_committed_round, + block_id = block_id, + "Received a commit vote not in the next 100 rounds, ignored." + ); + false + } + } + fn drain_pending_commit_proof_till( &mut self, round: Round, @@ -381,7 +415,23 @@ impl BufferManager { .await .expect("Failed to send execution schedule request"); - let item = BufferItem::new_ordered(ordered_blocks, ordered_proof, callback); + let mut unverified_signatures = PartialSignatures::empty(); + if let Some(block) = ordered_blocks.last() { + if let Some(votes) = self.pending_commit_votes.remove(&block.round()) { + votes + .values() + .filter(|vote| vote.commit_info().id() == block.id()) + .for_each(|vote| { + unverified_signatures.add_signature(vote.author(), vote.signature().clone()) + }); + } + } + let item = BufferItem::new_ordered( + ordered_blocks, + ordered_proof, + callback, + unverified_signatures, + ); self.buffer.push_back(item); } @@ -708,7 +758,7 @@ impl BufferManager { // find the corresponding item let author = vote.author(); let commit_info = vote.commit_info().clone(); - info!("Receive commit vote {} from {}", commit_info, author); + trace!("Receive commit vote {} from {}", commit_info, author); let target_block_id = vote.commit_info().id(); let current_cursor = self .buffer @@ -741,6 +791,8 @@ impl BufferManager { } else { return None; } + } else if self.try_add_pending_commit_vote(vote) { + reply_ack(protocol, response_sender); } else { reply_nack(protocol, response_sender); // TODO: send_commit_vote() doesn't care about the response and this should be direct send not RPC } @@ -944,6 +996,7 @@ impl BufferManager { }, Some(Ok(round)) = self.persisting_phase_rx.next() => { // see where `need_backpressure()` 
is called. + self.pending_commit_votes.retain(|rnd, _| *rnd > round); self.highest_committed_round = round }, Some(rpc_request) = verified_commit_msg_rx.next() => { diff --git a/consensus/src/pipeline/decoupled_execution_utils.rs b/consensus/src/pipeline/decoupled_execution_utils.rs index 039834497bc..8178d871e7e 100644 --- a/consensus/src/pipeline/decoupled_execution_utils.rs +++ b/consensus/src/pipeline/decoupled_execution_utils.rs @@ -44,6 +44,7 @@ pub fn prepare_phases_and_buffer_manager( highest_committed_round: u64, consensus_observer_config: ConsensusObserverConfig, consensus_publisher: Option>, + max_pending_rounds_in_commit_vote_cache: u64, ) -> ( PipelinePhase, PipelinePhase, @@ -134,6 +135,7 @@ pub fn prepare_phases_and_buffer_manager( highest_committed_round, consensus_observer_config, consensus_publisher, + max_pending_rounds_in_commit_vote_cache, ), ) } diff --git a/consensus/src/pipeline/execution_client.rs b/consensus/src/pipeline/execution_client.rs index 9228c2dcaed..9d50fe08e4a 100644 --- a/consensus/src/pipeline/execution_client.rs +++ b/consensus/src/pipeline/execution_client.rs @@ -282,6 +282,8 @@ impl ExecutionProxyClient { highest_committed_round, consensus_observer_config, consensus_publisher, + self.consensus_config + .max_pending_rounds_in_commit_vote_cache, ); tokio::spawn(execution_schedule_phase.start()); diff --git a/consensus/src/pipeline/tests/buffer_manager_tests.rs b/consensus/src/pipeline/tests/buffer_manager_tests.rs index d8ca6523d1c..9ef9ed94600 100644 --- a/consensus/src/pipeline/tests/buffer_manager_tests.rs +++ b/consensus/src/pipeline/tests/buffer_manager_tests.rs @@ -161,6 +161,7 @@ pub fn prepare_buffer_manager( 0, ConsensusObserverConfig::default(), None, + 100, ); ( diff --git a/consensus/src/qc_aggregator.rs b/consensus/src/qc_aggregator.rs deleted file mode 100644 index 2f695c65192..00000000000 --- a/consensus/src/qc_aggregator.rs +++ /dev/null @@ -1,181 +0,0 @@ -// Copyright © Aptos Foundation -// Parts of the 
project are originally copyright © Meta Platforms, Inc. -// SPDX-License-Identifier: Apache-2.0 - -use crate::{ - pending_votes::{PendingVotes, VoteReceptionResult}, - util::time_service::TimeService, -}; -use aptos_config::config::{DelayedQcAggregatorConfig, QcAggregatorType}; -use aptos_consensus_types::{delayed_qc_msg::DelayedQcMsg, vote::Vote}; -use aptos_logger::{error, info}; -use aptos_types::{ - ledger_info::LedgerInfoWithPartialSignatures, validator_verifier::ValidatorVerifier, -}; -use futures::SinkExt; -use futures_channel::mpsc::UnboundedSender; -use std::{sync::Arc, time::Duration}; -use tokio::time::sleep; - -pub trait QcAggregator: Send + Sync { - fn handle_aggregated_qc( - &mut self, - validator_verifier: &ValidatorVerifier, - aggregated_voting_power: u128, - vote: &Vote, - li_with_sig: &LedgerInfoWithPartialSignatures, - ) -> VoteReceptionResult; -} - -struct NoDelayQcAggregator {} - -pub fn create_qc_aggregator( - qc_aggregator_type: QcAggregatorType, - time_service: Arc, - delayed_qc_tx: UnboundedSender, -) -> Box { - match qc_aggregator_type { - QcAggregatorType::NoDelay => Box::new(NoDelayQcAggregator {}), - QcAggregatorType::Delayed(delay_config) => { - let DelayedQcAggregatorConfig { - max_delay_after_round_start_ms, - aggregated_voting_power_pct_to_wait, - pct_delay_after_qc_aggregated, - } = delay_config; - Box::new(DelayedQcAggregator::new( - Duration::from_millis(max_delay_after_round_start_ms), - aggregated_voting_power_pct_to_wait, - pct_delay_after_qc_aggregated, - time_service, - delayed_qc_tx, - )) - }, - } -} - -impl QcAggregator for NoDelayQcAggregator { - fn handle_aggregated_qc( - &mut self, - validator_verifier: &ValidatorVerifier, - aggregated_voting_power: u128, - vote: &Vote, - li_with_sig: &LedgerInfoWithPartialSignatures, - ) -> VoteReceptionResult { - assert!( - aggregated_voting_power >= validator_verifier.quorum_voting_power(), - "QC aggregation should not be triggered if we don't have enough votes to form a QC" - ); - 
PendingVotes::aggregate_qc_now(validator_verifier, li_with_sig, vote.vote_data()) - } -} - -struct DelayedQcAggregator { - round_start_time: Duration, - max_delay_after_round_start: Duration, - aggregated_voting_power_pct_to_wait: usize, - pct_delay_after_qc_aggregated: usize, - time_service: Arc, - // True, if we already have enough vote to aggregate a QC, but we have trigged a delayed QC - // aggregation event to collect as many votes as possible. - qc_aggregation_delayed: bool, - // To send delayed QC aggregation events to the round manager. - delayed_qc_tx: UnboundedSender, -} - -impl DelayedQcAggregator { - pub fn new( - max_delay_after_round_start: Duration, - aggregated_voting_power_pct_to_wait: usize, - pct_delay_after_qc_aggregated: usize, - time_service: Arc, - delayed_qc_tx: UnboundedSender, - ) -> Self { - let round_start_time = time_service.get_current_timestamp(); - Self { - round_start_time, - max_delay_after_round_start, - aggregated_voting_power_pct_to_wait, - pct_delay_after_qc_aggregated, - time_service, - qc_aggregation_delayed: false, - delayed_qc_tx, - } - } -} - -impl QcAggregator for DelayedQcAggregator { - fn handle_aggregated_qc( - &mut self, - validator_verifier: &ValidatorVerifier, - aggregated_voting_power: u128, - vote: &Vote, - li_with_sig: &LedgerInfoWithPartialSignatures, - ) -> VoteReceptionResult { - assert!( - aggregated_voting_power >= validator_verifier.quorum_voting_power(), - "QC aggregation should not be triggered if we don't have enough votes to form a QC" - ); - let current_time = self.time_service.get_current_timestamp(); - - // If we have reached the aggregated voting power threshold, we should aggregate the QC now. - if aggregated_voting_power - >= self.aggregated_voting_power_pct_to_wait as u128 - * validator_verifier.total_voting_power() - / 100 - { - // Voting power is u128 so there is no overflow here. 
- info!( - "QC aggregation triggered by aggregated voting power: {}", - aggregated_voting_power - ); - return PendingVotes::aggregate_qc_now( - validator_verifier, - li_with_sig, - vote.vote_data(), - ); - } - - // If we have not reached the aggregated voting power threshold and have - // already triggered a delayed QC aggregation event, we should not trigger another - // one. - if self.qc_aggregation_delayed { - return VoteReceptionResult::VoteAddedQCDelayed(aggregated_voting_power); - } - - let time_since_round_start = current_time - self.round_start_time; - if time_since_round_start >= self.max_delay_after_round_start { - info!( - "QC aggregation triggered by time: {} ms", - time_since_round_start.as_millis() - ); - return PendingVotes::aggregate_qc_now( - validator_verifier, - li_with_sig, - vote.vote_data(), - ); - } - - let wait_time = (self.max_delay_after_round_start - time_since_round_start) - .min(time_since_round_start * self.pct_delay_after_qc_aggregated as u32 / 100); - - let delayed_qc_event = DelayedQcMsg::new(vote.clone()); - self.qc_aggregation_delayed = true; - - let mut delayed_qc_sender = self.delayed_qc_tx.clone(); - - info!( - "QC aggregation delayed by {} ms, wait time: {} ms", - time_since_round_start.as_millis(), - wait_time.as_millis() - ); - - tokio::spawn(async move { - sleep(wait_time).await; - if let Err(e) = delayed_qc_sender.send(delayed_qc_event).await { - error!("Failed to send event to round manager {:?}", e); - } - }); - - VoteReceptionResult::VoteAddedQCDelayed(aggregated_voting_power) - } -} diff --git a/consensus/src/quorum_store/batch_proof_queue.rs b/consensus/src/quorum_store/batch_proof_queue.rs index 382625eb246..cfff3bb9c70 100644 --- a/consensus/src/quorum_store/batch_proof_queue.rs +++ b/consensus/src/quorum_store/batch_proof_queue.rs @@ -310,6 +310,9 @@ impl BatchProofQueue { self.author_to_batches .get_mut(&item.info.author()) .map(|queue| queue.remove(&BatchSortKey::from_info(&item.info))); + 
counters::GARBAGE_COLLECTED_IN_PROOF_QUEUE_COUNTER + .with_label_values(&["expired_batch_without_proof"]) + .inc(); false } }); @@ -625,6 +628,11 @@ impl BatchProofQueue { "Decreasing block timestamp" ); self.latest_block_timestamp = block_timestamp; + if let Some(time_lag) = aptos_infallible::duration_since_epoch() + .checked_sub(Duration::from_micros(block_timestamp)) + { + counters::TIME_LAG_IN_BATCH_PROOF_QUEUE.observe_duration(time_lag); + } let expired = self.expirations.expire(block_timestamp); let mut num_expired_but_not_committed = 0; @@ -653,6 +661,9 @@ impl BatchProofQueue { } } self.dec_remaining_proofs(&batch.author(), batch.num_txns()); + counters::GARBAGE_COLLECTED_IN_PROOF_QUEUE_COUNTER + .with_label_values(&["expired_proof"]) + .inc(); } claims::assert_some!(self.items.remove(&key.batch_key)); } @@ -754,6 +765,9 @@ impl BatchProofQueue { insertion_time.elapsed().as_secs_f64(), ); self.dec_remaining_proofs(&batch.author(), batch.num_txns()); + counters::GARBAGE_COLLECTED_IN_PROOF_QUEUE_COUNTER + .with_label_values(&["committed_proof"]) + .inc(); } let item = self .items @@ -773,7 +787,13 @@ impl BatchProofQueue { }; } } + } else if !item.is_committed() { + counters::GARBAGE_COLLECTED_IN_PROOF_QUEUE_COUNTER + .with_label_values(&["committed_batch_without_proof"]) + .inc(); } + // The item is just marked committed for now. + // When the batch is expired, then it will be removed from items. 
item.mark_committed(); } else { let batch_sort_key = BatchSortKey::from_info(batch.info()); diff --git a/consensus/src/quorum_store/counters.rs b/consensus/src/quorum_store/counters.rs index 858dbdf60b5..0be7db69d78 100644 --- a/consensus/src/quorum_store/counters.rs +++ b/consensus/src/quorum_store/counters.rs @@ -809,6 +809,15 @@ pub static EMPTY_BATCH_CREATION_DURATION: Lazy = Lazy::new(|| ) }); +pub static GARBAGE_COLLECTED_IN_PROOF_QUEUE_COUNTER: Lazy = Lazy::new(|| { + register_int_counter_vec!( + "quorum_store_garbage_collected_batch_count", + "Count of the number of garbage collected batches.", + &["reason"] + ) + .unwrap() +}); + /// Histogram of the time it takes to compute bucketed batches after txns are pulled from mempool. pub static BATCH_CREATION_COMPUTE_LATENCY: Lazy = Lazy::new(|| { DurationHistogram::new( @@ -859,6 +868,16 @@ pub static QUORUM_STORE_MSG_COUNT: Lazy = Lazy::new(|| { .unwrap() }); +pub static TIME_LAG_IN_BATCH_PROOF_QUEUE: Lazy = Lazy::new(|| { + DurationHistogram::new( + register_histogram!( + "quorum_store_time_lag_in_proof_queue", + "Time lag between txn timestamp and current time when txn is added to proof queue", + ) + .unwrap(), + ) +}); + /// Number of validators for which we received signed replies pub static BATCH_RECEIVED_REPLIES_COUNT: Lazy = Lazy::new(|| { register_histogram!( diff --git a/consensus/src/quorum_store/proof_coordinator.rs b/consensus/src/quorum_store/proof_coordinator.rs index f13a898ba4a..73a1ebabe9c 100644 --- a/consensus/src/quorum_store/proof_coordinator.rs +++ b/consensus/src/quorum_store/proof_coordinator.rs @@ -14,9 +14,7 @@ use aptos_consensus_types::proof_of_store::{ }; use aptos_crypto::bls12381; use aptos_logger::prelude::*; -use aptos_types::{ - aggregate_signature::PartialSignatures, validator_verifier::ValidatorVerifier, PeerId, -}; +use aptos_types::{validator_verifier::ValidatorVerifier, PeerId}; use std::{ collections::{hash_map::Entry, BTreeMap, HashMap}, sync::Arc, @@ -122,9 +120,7 @@ 
impl IncrementalProofState { } self.completed = true; - match validator_verifier - .aggregate_signatures(&PartialSignatures::new(self.aggregated_signature.clone())) - { + match validator_verifier.aggregate_signatures(self.aggregated_signature.iter()) { Ok(sig) => ProofOfStore::new(self.info.clone(), sig), Err(e) => unreachable!("Cannot aggregate signatures on digest err = {:?}", e), } diff --git a/consensus/src/quorum_store/proof_manager.rs b/consensus/src/quorum_store/proof_manager.rs index 6a278c5b522..a33e0c11652 100644 --- a/consensus/src/quorum_store/proof_manager.rs +++ b/consensus/src/quorum_store/proof_manager.rs @@ -77,8 +77,7 @@ impl ProofManager { batch_summaries: Vec<(BatchInfo, Vec)>, ) { self.batch_proof_queue.insert_batches(batch_summaries); - (self.remaining_total_txn_num, self.remaining_total_proof_num) = - self.batch_proof_queue.remaining_txns_and_proofs(); + self.update_remaining_txns_and_proofs(); } pub(crate) fn handle_commit_notification( diff --git a/consensus/src/quorum_store/tests/batch_requester_test.rs b/consensus/src/quorum_store/tests/batch_requester_test.rs index e9975b35eae..018fce0c486 100644 --- a/consensus/src/quorum_store/tests/batch_requester_test.rs +++ b/consensus/src/quorum_store/tests/batch_requester_test.rs @@ -149,7 +149,7 @@ fn create_ledger_info_with_timestamp( ValidatorVerifier::new_with_quorum_voting_power(validator_infos, NUM_SIGNERS as u128) .expect("Incorrect quorum size."); let aggregated_signature = validator_verifier - .aggregate_signatures(&partial_signature) + .aggregate_signatures(partial_signature.signatures_iter()) .unwrap(); let ledger_info_with_signatures = LedgerInfoWithSignatures::new(ledger_info, aggregated_signature); diff --git a/consensus/src/rand/rand_gen/reliable_broadcast_state.rs b/consensus/src/rand/rand_gen/reliable_broadcast_state.rs index 3639c523f2d..d7d0d464bfc 100644 --- a/consensus/src/rand/rand_gen/reliable_broadcast_state.rs +++ b/consensus/src/rand/rand_gen/reliable_broadcast_state.rs 
@@ -58,7 +58,7 @@ impl BroadcastStatus, RandMessag let aggregated_signature = self .epoch_state .verifier - .aggregate_signatures(&parital_signatures_guard) + .aggregate_signatures(parital_signatures_guard.signatures_iter()) .expect("Signature aggregation should succeed"); CertifiedAugData::new(self.aug_data.clone(), aggregated_signature) }); diff --git a/consensus/src/round_manager.rs b/consensus/src/round_manager.rs index 748d01f29ad..f423d93d1e0 100644 --- a/consensus/src/round_manager.rs +++ b/consensus/src/round_manager.rs @@ -9,8 +9,8 @@ use crate::{ }, counters::{ self, ORDER_CERT_CREATED_WITHOUT_BLOCK_IN_BLOCK_STORE, ORDER_VOTE_ADDED, - ORDER_VOTE_BROADCASTED, ORDER_VOTE_OTHER_ERRORS, ORDER_VOTE_VERY_OLD, PROPOSAL_VOTE_ADDED, - PROPOSAL_VOTE_BROADCASTED, PROPOSED_VTXN_BYTES, PROPOSED_VTXN_COUNT, + ORDER_VOTE_BROADCASTED, ORDER_VOTE_NOT_IN_RANGE, ORDER_VOTE_OTHER_ERRORS, + PROPOSAL_VOTE_ADDED, PROPOSAL_VOTE_BROADCASTED, PROPOSED_VTXN_BYTES, PROPOSED_VTXN_COUNT, QC_AGGREGATED_FROM_VOTES, SYNC_INFO_RECEIVED_WITH_NEWER_CERT, }, error::{error_kind, VerifyError}, @@ -39,7 +39,6 @@ use aptos_consensus_types::{ block::Block, block_data::BlockType, common::{Author, Round}, - delayed_qc_msg::DelayedQcMsg, order_vote_msg::OrderVoteMsg, proof_of_store::{ProofCache, ProofOfStoreMsg, SignedBatchInfoMsg}, proposal_msg::ProposalMsg, @@ -51,7 +50,7 @@ use aptos_consensus_types::{ vote_msg::VoteMsg, wrapped_ledger_info::WrappedLedgerInfo, }; -use aptos_crypto::HashValue; +use aptos_crypto::{hash::CryptoHash, HashValue}; use aptos_infallible::{checked, Mutex}; use aptos_logger::prelude::*; #[cfg(test)] @@ -70,7 +69,6 @@ use aptos_types::{ }; use fail::fail_point; use futures::{channel::oneshot, stream::FuturesUnordered, Future, FutureExt, StreamExt}; -use futures_channel::mpsc::UnboundedReceiver; use lru::LruCache; use serde::Serialize; use std::{mem::Discriminant, pin::Pin, sync::Arc, time::Duration}; @@ -555,20 +553,28 @@ impl RoundManager { block_parent_hash = 
proposal_msg.proposal().quorum_cert().certified_block().id(), ); - ensure!( - self.ensure_round_and_sync_up( + let in_correct_round = self + .ensure_round_and_sync_up( proposal_msg.proposal().round(), proposal_msg.sync_info(), proposal_msg.proposer(), ) .await - .context("[RoundManager] Process proposal")?, - "Stale proposal {}, current round {}", - proposal_msg.proposal(), - self.round_state.current_round() - ); - - self.process_proposal(proposal_msg.take_proposal()).await + .context("[RoundManager] Process proposal")?; + if in_correct_round { + self.process_proposal(proposal_msg.take_proposal()).await + } else { + sample!( + SampleRate::Duration(Duration::from_secs(30)), + warn!( + "[sampled] Stale proposal {}, current round {}", + proposal_msg.proposal(), + self.round_state.current_round() + ) + ); + counters::ERROR_COUNT.inc(); + Ok(()) + } } pub async fn process_delayed_proposal_msg(&mut self, proposal: Block) -> anyhow::Result<()> { @@ -583,25 +589,6 @@ impl RoundManager { self.process_verified_proposal(proposal).await } - pub async fn process_delayed_qc_msg(&mut self, msg: DelayedQcMsg) -> anyhow::Result<()> { - ensure!( - msg.vote.vote_data().proposed().round() == self.round_state.current_round(), - "Discarding stale delayed QC for round {}, current round {}", - msg.vote.vote_data().proposed().round(), - self.round_state.current_round() - ); - let vote = msg.vote().clone(); - let vote_reception_result = self - .round_state - .process_delayed_qc_msg(&self.epoch_state.verifier, msg); - trace!( - "Received delayed QC message and vote reception result is {:?}", - vote_reception_result - ); - self.process_vote_reception_result(&vote, vote_reception_result) - .await - } - /// Sync to the sync info sending from peer if it has newer certificates. 
async fn sync_up(&mut self, sync_info: &SyncInfo, author: Author) -> anyhow::Result<()> { let local_sync_info = self.block_store.sync_info(); @@ -1082,8 +1069,6 @@ impl RoundManager { }); let order_vote = order_vote_msg.order_vote(); - self.new_qc_from_order_vote_msg(&order_vote_msg).await?; - debug!( self.new_log(LogEvent::ReceiveOrderVote) .remote_peer(order_vote.author()), @@ -1099,21 +1084,56 @@ impl RoundManager { return Ok(()); } - if order_vote_msg.order_vote().ledger_info().round() - > self.block_store.sync_info().highest_ordered_round() + let highest_ordered_round = self.block_store.sync_info().highest_ordered_round(); + let order_vote_round = order_vote_msg.order_vote().ledger_info().round(); + let li_digest = order_vote_msg.order_vote().ledger_info().hash(); + if order_vote_round > highest_ordered_round + && order_vote_round < highest_ordered_round + 100 { - let vote_reception_result = self - .pending_order_votes - .insert_order_vote(order_vote_msg.order_vote(), &self.epoch_state.verifier); - self.process_order_vote_reception_result(vote_reception_result) - .await?; + // If it is the first order vote received for the block, verify the QC and insert along with QC. + // For the subsequent order votes for the same block, we don't have to verify the QC. Just inserting the + // order vote is enough. 
+ let vote_reception_result = if !self.pending_order_votes.exists(&li_digest) { + let start = Instant::now(); + order_vote_msg + .quorum_cert() + .verify(&self.epoch_state().verifier) + .context("[OrderVoteMsg QuorumCert verification failed")?; + counters::VERIFY_MSG + .with_label_values(&["order_vote_qc"]) + .observe(start.elapsed().as_secs_f64()); + self.pending_order_votes.insert_order_vote( + order_vote_msg.order_vote(), + &self.epoch_state.verifier, + Some(order_vote_msg.quorum_cert().clone()), + ) + } else { + self.pending_order_votes.insert_order_vote( + order_vote_msg.order_vote(), + &self.epoch_state.verifier, + None, + ) + }; + self.process_order_vote_reception_result( + vote_reception_result, + order_vote_msg.order_vote().author(), + ) + .await?; } else { - ORDER_VOTE_VERY_OLD.inc(); - info!( - "Received old order vote. Order vote round: {:?}, Highest ordered round: {:?}", + ORDER_VOTE_NOT_IN_RANGE.inc(); + sample!( + SampleRate::Duration(Duration::from_secs(1)), + info!( + "[sampled] Received an order vote not in the 100 rounds. Order vote round: {:?}, Highest ordered round: {:?}", + order_vote_msg.order_vote().ledger_info().round(), + self.block_store.sync_info().highest_ordered_round() + ) + ); + debug!( + "Received an order vote not in the next 100 rounds. 
Order vote round: {:?}, Highest ordered round: {:?}", order_vote_msg.order_vote().ledger_info().round(), self.block_store.sync_info().highest_ordered_round() - ); + ) } } Ok(()) @@ -1289,9 +1309,7 @@ impl RoundManager { PROPOSAL_VOTE_ADDED.inc(); Ok(()) }, - VoteReceptionResult::VoteAddedQCDelayed(_) - | VoteReceptionResult::EchoTimeout(_) - | VoteReceptionResult::DuplicateVote => Ok(()), + VoteReceptionResult::EchoTimeout(_) | VoteReceptionResult::DuplicateVote => Ok(()), e => Err(anyhow::anyhow!("{:?}", e)), } } @@ -1299,13 +1317,18 @@ impl RoundManager { async fn process_order_vote_reception_result( &mut self, result: OrderVoteReceptionResult, + preferred_peer: Author, ) -> anyhow::Result<()> { match result { - OrderVoteReceptionResult::NewLedgerInfoWithSignatures(ledger_info_with_signatures) => { - self.new_ordered_cert(WrappedLedgerInfo::new( - VoteData::dummy(), - ledger_info_with_signatures, - )) + OrderVoteReceptionResult::NewLedgerInfoWithSignatures(( + verified_qc, + ledger_info_with_signatures, + )) => { + self.new_ordered_cert( + WrappedLedgerInfo::new(VoteData::dummy(), ledger_info_with_signatures), + verified_qc, + preferred_peer, + ) .await }, OrderVoteReceptionResult::VoteAdded(_) => { @@ -1335,49 +1358,61 @@ impl RoundManager { async fn new_qc_from_order_vote_msg( &mut self, - order_vote_msg: &OrderVoteMsg, + verified_qc: Arc, + preferred_peer: Author, ) -> anyhow::Result<()> { - if let NeedFetchResult::QCAlreadyExist = self + match self .block_store - .need_fetch_for_quorum_cert(order_vote_msg.quorum_cert()) + .need_fetch_for_quorum_cert(verified_qc.as_ref()) { - return Ok(()); + NeedFetchResult::QCAlreadyExist => Ok(()), + NeedFetchResult::QCBlockExist => { + // If the block is already in the block store, but QC isn't available in the block store, insert QC. 
+ let result = self + .block_store + .insert_quorum_cert( + verified_qc.as_ref(), + &mut self.create_block_retriever(preferred_peer), + ) + .await + .context("[RoundManager] Failed to process the QC from order vote msg"); + self.process_certificates().await?; + result + }, + NeedFetchResult::NeedFetch => { + // If the block doesn't exist, we could ideally do sync up based on the qc. + // But this could trigger fetching a lot of past blocks in case the node is lagging behind. + // So, we just log a warning here to avoid a long sequence of block fetchs. + // One of the subsequence syncinfo messages will trigger the block fetch or state sync if required. + ORDER_CERT_CREATED_WITHOUT_BLOCK_IN_BLOCK_STORE.inc(); + sample!( + SampleRate::Duration(Duration::from_millis(200)), + info!( + "Ordered certificate created without block in block store: {:?}", + verified_qc.certified_block() + ); + ); + Err(anyhow::anyhow!( + "Ordered certificate created without block in block store" + )) + }, + NeedFetchResult::QCRoundBeforeRoot => { + Err(anyhow::anyhow!("Ordered certificate is old")) + }, } - - let start = Instant::now(); - order_vote_msg - .quorum_cert() - .verify(&self.epoch_state().verifier) - .context("[OrderVoteMsg QuorumCert verification failed")?; - counters::VERIFY_MSG - .with_label_values(&["order_vote_qc"]) - .observe(start.elapsed().as_secs_f64()); - - let result = self - .block_store - .insert_quorum_cert( - order_vote_msg.quorum_cert(), - &mut self.create_block_retriever(order_vote_msg.order_vote().author()), - ) - .await - .context("[RoundManager] Failed to process the QC from order vote msg"); - self.process_certificates().await?; - result } // Insert ordered certificate formed by aggregating order votes - async fn new_ordered_cert(&mut self, ordered_cert: WrappedLedgerInfo) -> anyhow::Result<()> { - if self - .block_store - .get_block(ordered_cert.commit_info().id()) - .is_none() - { - ORDER_CERT_CREATED_WITHOUT_BLOCK_IN_BLOCK_STORE.inc(); - error!( - "Ordered 
certificate created without block in block store: {:?}", - ordered_cert - ); - } + async fn new_ordered_cert( + &mut self, + ordered_cert: WrappedLedgerInfo, + verified_qc: Arc, + preferred_peer: Author, + ) -> anyhow::Result<()> { + self.new_qc_from_order_vote_msg(verified_qc, preferred_peer) + .await?; + + // If the block and qc now exist in the quorum store, insert the ordered cert let result = self .block_store .insert_ordered_cert(&ordered_cert) @@ -1453,7 +1488,6 @@ impl RoundManager { (Author, VerifiedEvent), >, mut buffered_proposal_rx: aptos_channel::Receiver, - mut delayed_qc_rx: UnboundedReceiver, close_rx: oneshot::Receiver>, ) { info!(epoch = self.epoch_state().epoch, "RoundManager started"); @@ -1466,19 +1500,6 @@ impl RoundManager { ack_sender.send(()).expect("[RoundManager] Fail to ack shutdown"); } break; - } - delayed_qc_msg = delayed_qc_rx.select_next_some() => { - let result = monitor!( - "process_delayed_qc", - self.process_delayed_qc_msg(delayed_qc_msg).await - ); - match result { - Ok(_) => trace!(RoundStateLogSchema::new(self.round_state())), - Err(e) => { - counters::ERROR_COUNT.inc(); - warn!(error = ?e, kind = error_kind(&e), RoundStateLogSchema::new(self.round_state())); - } - } }, proposal = buffered_proposal_rx.select_next_some() => { let mut proposals = vec![proposal]; @@ -1519,7 +1540,7 @@ impl RoundManager { Ok(_) => trace!(RoundStateLogSchema::new(round_state)), Err(e) => { counters::ERROR_COUNT.inc(); - warn!(error = ?e, kind = error_kind(&e), RoundStateLogSchema::new(round_state)); + warn!(kind = error_kind(&e), RoundStateLogSchema::new(round_state), "Error: {:#}", e); } } } @@ -1567,7 +1588,7 @@ impl RoundManager { Ok(_) => trace!(RoundStateLogSchema::new(round_state)), Err(e) => { counters::ERROR_COUNT.inc(); - warn!(error = ?e, kind = error_kind(&e), RoundStateLogSchema::new(round_state)); + warn!(kind = error_kind(&e), RoundStateLogSchema::new(round_state), "Error: {:#}", e); } } }, diff --git 
a/consensus/src/round_manager_fuzzing.rs b/consensus/src/round_manager_fuzzing.rs index ab7a1474062..2eefa70b075 100644 --- a/consensus/src/round_manager_fuzzing.rs +++ b/consensus/src/round_manager_fuzzing.rs @@ -24,10 +24,7 @@ use crate::{ util::{mock_time_service::SimulatedTimeService, time_service::TimeService}, }; use aptos_channels::{self, aptos_channel, message_queues::QueueStyle}; -use aptos_config::{ - config::{ConsensusConfig, QcAggregatorType}, - network_id::NetworkId, -}; +use aptos_config::{config::ConsensusConfig, network_id::NetworkId}; use aptos_consensus_types::{proposal_msg::ProposalMsg, utils::PayloadTxnsSize}; use aptos_infallible::Mutex; use aptos_network::{ @@ -50,7 +47,6 @@ use aptos_types::{ validator_verifier::ValidatorVerifier, }; use futures::{channel::mpsc, executor::block_on}; -use futures_channel::mpsc::unbounded; use maplit::hashmap; use once_cell::sync::Lazy; use std::{sync::Arc, time::Duration}; @@ -113,16 +109,9 @@ fn create_round_state() -> RoundState { let base_timeout = std::time::Duration::new(60, 0); let time_interval = Box::new(ExponentialTimeInterval::fixed(base_timeout)); let (round_timeout_sender, _) = aptos_channels::new_test(1_024); - let (delayed_qc_tx, _) = unbounded(); let time_service = Arc::new(SimulatedTimeService::new()); - RoundState::new( - time_interval, - time_service, - round_timeout_sender, - delayed_qc_tx, - QcAggregatorType::NoDelay, - ) + RoundState::new(time_interval, time_service, round_timeout_sender) } // Creates an RoundManager for fuzzing diff --git a/consensus/src/round_manager_test.rs b/consensus/src/round_manager_test.rs index cf34840d95a..c12e476a7f5 100644 --- a/consensus/src/round_manager_test.rs +++ b/consensus/src/round_manager_test.rs @@ -4,6 +4,7 @@ use crate::{ block_storage::{pending_blocks::PendingBlocks, BlockReader, BlockStore}, + counters, liveness::{ proposal_generator::{ ChainHealthBackoffConfig, PipelineBackpressureConfig, ProposalGenerator, @@ -29,7 +30,7 @@ use crate::{ }; use 
aptos_channels::{self, aptos_channel, message_queues::QueueStyle}; use aptos_config::{ - config::{ConsensusConfig, QcAggregatorType}, + config::ConsensusConfig, network_id::{NetworkId, PeerNetworkId}, }; use aptos_consensus_types::{ @@ -82,7 +83,6 @@ use futures::{ stream::select, FutureExt, Stream, StreamExt, }; -use futures_channel::mpsc::unbounded; use maplit::hashmap; use std::{ iter::FromIterator, @@ -123,14 +123,7 @@ impl NodeSetup { let base_timeout = Duration::new(60, 0); let time_interval = Box::new(ExponentialTimeInterval::fixed(base_timeout)); let (round_timeout_sender, _) = aptos_channels::new_test(1_024); - let (delayed_qc_tx, _) = unbounded(); - RoundState::new( - time_interval, - time_service, - round_timeout_sender, - delayed_qc_tx, - QcAggregatorType::NoDelay, - ) + RoundState::new(time_interval, time_service, round_timeout_sender) } fn create_proposer_election(proposers: Vec) -> Arc { @@ -1147,11 +1140,13 @@ fn new_round_on_timeout_certificate() { None, ), ); + let before = counters::ERROR_COUNT.get(); assert!(node .round_manager .process_proposal_msg(old_good_proposal) .await - .is_err()); + .is_ok()); // we eat the error + assert_eq!(counters::ERROR_COUNT.get(), before + 1); // but increase the counter }); } diff --git a/consensus/src/state_computer.rs b/consensus/src/state_computer.rs index 0038929bbfa..b06d254aea4 100644 --- a/consensus/src/state_computer.rs +++ b/consensus/src/state_computer.rs @@ -315,6 +315,7 @@ impl StateComputer for ExecutionProxy { let blocks = blocks.to_vec(); let wrapped_callback = move || { + payload_manager.notify_commit(block_timestamp, payloads); callback(&blocks, finality_proof); }; self.async_state_sync_notifier @@ -324,7 +325,6 @@ impl StateComputer for ExecutionProxy { .expect("Failed to send async state sync notification"); *latest_logical_time = logical_time; - payload_manager.notify_commit(block_timestamp, payloads); Ok(()) } diff --git a/crates/aptos-faucet/core/Cargo.toml 
b/crates/aptos-faucet/core/Cargo.toml index 0653a1eacc5..7d3ffa848d9 100644 --- a/crates/aptos-faucet/core/Cargo.toml +++ b/crates/aptos-faucet/core/Cargo.toml @@ -39,7 +39,6 @@ serde = { workspace = true } serde_json = { workspace = true } serde_yaml = { workspace = true } tokio = { workspace = true } -url = { workspace = true } [features] integration-tests = [] diff --git a/crates/aptos-faucet/core/src/funder/transfer.rs b/crates/aptos-faucet/core/src/funder/transfer.rs index d78344e9b14..c47fdad80fe 100644 --- a/crates/aptos-faucet/core/src/funder/transfer.rs +++ b/crates/aptos-faucet/core/src/funder/transfer.rs @@ -22,7 +22,7 @@ use aptos_sdk::{ account_address::AccountAddress, chain_id::ChainId, transaction::{authenticator::AuthenticationKey, SignedTransaction, TransactionPayload}, - LocalAccount, + AptosCoinType, LocalAccount, }, }; use async_trait::async_trait; @@ -314,7 +314,7 @@ impl FunderTrait for TransferFunder { let account_address = self.faucet_account.read().await.address(); let funder_balance = match self .get_api_client() - .get_account_balance_bcs(account_address, "0x1::aptos_coin::AptosCoin") + .get_account_balance_bcs::(account_address) .await { Ok(response) => response.into_inner(), diff --git a/crates/aptos-jwk-consensus/src/observation_aggregation/mod.rs b/crates/aptos-jwk-consensus/src/observation_aggregation/mod.rs index ad69bed4743..f43f17b35f5 100644 --- a/crates/aptos-jwk-consensus/src/observation_aggregation/mod.rs +++ b/crates/aptos-jwk-consensus/src/observation_aggregation/mod.rs @@ -110,7 +110,7 @@ impl BroadcastStatus for Arc { if power_check_result.is_err() { return Ok(None); } - let multi_sig = self.epoch_state.verifier.aggregate_signatures(&partial_sigs).map_err(|e|anyhow!("adding peer observation failed with partial-to-aggregated conversion error: {e}"))?; + let multi_sig = self.epoch_state.verifier.aggregate_signatures(partial_sigs.signatures_iter()).map_err(|e|anyhow!("adding peer observation failed with partial-to-aggregated 
conversion error: {e}"))?; Ok(Some(QuorumCertifiedUpdate { update: peer_view, diff --git a/crates/aptos-metrics-core/src/const_metric.rs b/crates/aptos-metrics-core/src/const_metric.rs index c354b0eed49..744ff1b7c98 100644 --- a/crates/aptos-metrics-core/src/const_metric.rs +++ b/crates/aptos-metrics-core/src/const_metric.rs @@ -37,7 +37,7 @@ impl ConstMetric { let mut metric = Metric::default(); metric.set_counter(counter); - metric.set_label(labels); + metric.set_label(labels.into()); Ok(ConstMetric { desc, @@ -63,7 +63,7 @@ impl ConstMetric { let mut metric = Metric::default(); metric.set_gauge(guage); - metric.set_label(labels); + metric.set_label(labels.into()); Ok(ConstMetric { desc, @@ -84,7 +84,7 @@ impl Collector for ConstMetric { met.set_name(self.desc.fq_name.clone()); met.set_help(self.desc.help.clone()); met.set_field_type(self.metric_type); - met.set_metric(vec![self.metric.clone()]); + met.set_metric(vec![self.metric.clone()].into()); vec![met] } diff --git a/crates/aptos-rest-client/src/lib.rs b/crates/aptos-rest-client/src/lib.rs index 199023c8771..ef4ca6d8ff6 100644 --- a/crates/aptos-rest-client/src/lib.rs +++ b/crates/aptos-rest-client/src/lib.rs @@ -38,6 +38,7 @@ use aptos_types::{ contract_event::EventWithVersion, state_store::state_key::StateKey, transaction::SignedTransaction, + CoinType, }; use move_core_types::language_storage::StructTag; use reqwest::{ @@ -220,16 +221,12 @@ impl Client { }) } - pub async fn get_account_balance_bcs( + pub async fn get_account_balance_bcs( &self, address: AccountAddress, - coin_type: &str, ) -> AptosResult> { let resp = self - .get_account_resource_bcs::( - address, - &format!("0x1::coin::CoinStore<{}>", coin_type), - ) + .get_account_resource_bcs::>(address, &C::type_tag().to_string()) .await?; resp.and_then(|resource| Ok(resource.coin())) } diff --git a/crates/aptos-rosetta/src/account.rs b/crates/aptos-rosetta/src/account.rs index f60151ce372..f75322c2fd9 100644 --- a/crates/aptos-rosetta/src/account.rs 
+++ b/crates/aptos-rosetta/src/account.rs @@ -18,7 +18,7 @@ use crate::{ use aptos_logger::{debug, trace, warn}; use aptos_types::{ account_address::AccountAddress, - account_config::{AccountResource, CoinStoreResource}, + account_config::{AccountResource, CoinStoreResourceUntyped}, }; use std::{collections::HashSet, str::FromStr}; use warp::Filter; @@ -165,7 +165,7 @@ async fn get_balances( (AccountAddress::ONE, COIN_MODULE, COIN_STORE_RESOURCE) => { // Only show coins on the base account if account.is_base_account() { - let coin_store: CoinStoreResource = bcs::from_bytes(&bytes)?; + let coin_store: CoinStoreResourceUntyped = bcs::from_bytes(&bytes)?; if let Some(coin_type) = struct_tag.type_args.first() { // Only display supported coins if coin_type == &native_coin_tag() { diff --git a/crates/aptos-rosetta/src/types/objects.rs b/crates/aptos-rosetta/src/types/objects.rs index 29122925f66..f58c1fb50a7 100644 --- a/crates/aptos-rosetta/src/types/objects.rs +++ b/crates/aptos-rosetta/src/types/objects.rs @@ -28,7 +28,7 @@ use aptos_logger::warn; use aptos_rest_client::aptos_api_types::{TransactionOnChainData, U64}; use aptos_types::{ account_address::AccountAddress, - account_config::{AccountResource, CoinStoreResource, WithdrawEvent}, + account_config::{AccountResource, CoinStoreResourceUntyped, WithdrawEvent}, contract_event::{ContractEvent, FEE_STATEMENT_EVENT_TYPE}, event::EventKey, fee_statement::FeeStatement, @@ -1793,7 +1793,7 @@ async fn parse_coinstore_changes( events: &[ContractEvent], mut operation_index: u64, ) -> ApiResult> { - let coin_store: CoinStoreResource = if let Ok(coin_store) = bcs::from_bytes(data) { + let coin_store: CoinStoreResourceUntyped = if let Ok(coin_store) = bcs::from_bytes(data) { coin_store } else { warn!( diff --git a/crates/aptos/CHANGELOG.md b/crates/aptos/CHANGELOG.md index 79547763c1b..db5ed7ada5e 100644 --- a/crates/aptos/CHANGELOG.md +++ b/crates/aptos/CHANGELOG.md @@ -3,6 +3,20 @@ All notable changes to the Aptos CLI will 
be captured in this file. This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html) and the format set out by [Keep a Changelog](https://keepachangelog.com/en/1.0.0/). ## Unreleased +- `aptos move fmt` formats move files inside the `tests` and `examples` directory of a package. + +## [4.2.3] - 2024/09/20 +- Fix the broken indexer in localnet in 4.2.2, which migrates table info from sycn to async ways. + +## [4.2.2] - 2024/09/20 +- Fix localnet indexer processors that were emitting spamming logs in 4.2.1. + +## [4.2.1] - 2024/09/19 +- Fix localnet indexer processors that were failing to startup in 4.2.0 + +## [4.2.0] - 2024/09/16 +- Update latest VM and associated changes +- Update to latest compiler ## [4.1.0] - 2024/08/30 - Marks Move 2 and compiler v2 as stable. diff --git a/crates/aptos/CONTRIBUTING.md b/crates/aptos/CONTRIBUTING.md new file mode 100644 index 00000000000..7bd0fe7d976 --- /dev/null +++ b/crates/aptos/CONTRIBUTING.md @@ -0,0 +1,247 @@ +# Aptos CLI Development Guide + +This is a list of design decisions and guidelines for adding commands to the Aptos CLI. + +## Command Groups + +Commands should be grouped into the existing categories. The current categories are: + +- account +- config +- genesis +- governance +- key +- move +- multisig +- node +- stake +- update + +All categories must have a doc comment that describes the command. It must also derive `Parser` and `Subcommand`. For +example: + +```rust +/// Tool for interacting with accounts +/// +/// This tool is used to create accounts, get information about the +/// account's resources, and transfer resources between accounts. 
+#[derive(Debug, Subcommand)] +pub enum AccountTool { + Create(create::CreateAccount), + CreateResourceAccount(create_resource_account::CreateResourceAccount), + DeriveResourceAccountAddress(derive_resource_account::DeriveResourceAccount), + FundWithFaucet(fund::FundWithFaucet), + Balance(balance::Balance), + List(list::ListAccount), + LookupAddress(key_rotation::LookupAddress), + RotateKey(key_rotation::RotateKey), + Transfer(transfer::TransferCoins), +} +``` + +Then it must also be added to the top level command structure: + +```rust +/// Command Line Interface (CLI) for developing and interacting with the Aptos blockchain +#[derive(Parser)] +#[clap(name = "aptos", author, version, propagate_version = true, styles = aptos_cli_common::aptos_cli_style())] +pub enum Tool { + #[clap(subcommand)] + Account(account::AccountTool), + #[clap(subcommand)] + Config(config::ConfigTool), + #[clap(subcommand)] + Genesis(genesis::GenesisTool), + #[clap(subcommand)] + Governance(governance::GovernanceTool), + Info(InfoTool), + Init(common::init::InitTool), + #[clap(subcommand)] + Key(op::key::KeyTool), + #[clap(subcommand)] + Move(move_tool::MoveTool), + #[clap(subcommand)] + Multisig(account::MultisigAccountTool), + #[clap(subcommand)] + Node(node::NodeTool), + #[clap(subcommand)] + Stake(stake::StakeTool), + #[clap(subcommand)] + Update(update::UpdateTool), +} +``` + +## Commands + +A command is a single top level command for the CLI. The CLI command must complete it's action in the single command +execution. + +### Command Names + +```rust +/// Compiles a package and returns the associated ModuleIds +#[derive(Parser)] +pub struct CompilePackage { + /// Save the package metadata in the package's build directory + /// + /// If set, package metadata should be generated and stored in the package's build directory. + /// This metadata can be used to construct a transaction to publish a package. 
+ #[clap(long)] + pub(crate) save_metadata: bool, + + #[clap(flatten)] + pub(crate) included_artifacts_args: IncludedArtifactsArgs, + #[clap(flatten)] + pub(crate) move_options: MovePackageDir, +} +``` + +Command names should be simple, identifiable, and easy to use. For example, compilation is grouped in `move` and uses +the subcommand `compile`. + +```bash +aptos move compile +``` + +Once the new command is created, it should have `#[derive(Parser)]` added above. Additionally, it will need to be added +the higher level tool: + +```rust +#[derive(Subcommand)] +pub enum MoveTool { + #[clap(alias = "build")] + Compile(CompilePackage), + #[clap(alias = "build-script")] + CompileScript(CompileScript), + Init(Init), + // ... +} + +impl MoveTool { + pub async fn execute(self) -> CliResult { + match self { + MoveTool::Compile(tool) => tool.execute_serialized().await, + MoveTool::CompileScript(tool) => tool.execute_serialized().await, + MoveTool::Init(tool) => tool.execute_serialized_success().await, + } + } +} +``` + +Note that, there are two types of commands here `execute_serialized()` and `execute_serialized_success()`, if the +command must be returning a value, then it should call `execute_serialized()`, which will convert the input type as JSON +to `stdout`. + +Additionally, `alias` is allowed, but discouraged for new commands. This is mostly to provide either backwards +compatibility or reduce confusion for new users. + +### Command flags + +```rust +#[derive(Parser)] +pub struct CompilePackage { + /// Save the package metadata in the package's build directory + /// + /// If set, package metadata should be generated and stored in the package's build directory. + /// This metadata can be used to construct a transaction to publish a package. + #[clap(long)] + pub(crate) save_metadata: bool, + + // ... +} +``` + +Command inputs should always be documented for help to show up in the CLI. for example, below is the example for +`save_metadata`. 
They should be snake case, and will show up as a flag. Do not use `short` commands, as they can be +confused between different commands. + +```bash +aptos move compile --save-metadata +``` + +### Command flag groupings + +```rust +/// Compiles a package and returns the associated ModuleIds +#[derive(Parser)] +pub struct CompilePackage { + // ... + #[clap(flatten)] + pub(crate) included_artifacts_args: IncludedArtifactsArgs, + #[clap(flatten)] + pub(crate) move_options: MovePackageDir, +} +``` + +Command flags can be grouped into common structs to be used across multiple commands. These should be flattened by +adding the struct associated and using `#[clap(flatten)]` like above. These should not have a doc comment, and any doc +comments will not end up in the command. Instead, document the structs directly like so: + +```rust +#[derive(Parser)] +pub struct IncludedArtifactsArgs { + /// Artifacts to be generated when building the package + /// + /// Which artifacts to include in the package. This can be one of `none`, `sparse`, and + /// `all`. `none` is the most compact form and does not allow to reconstruct a source + /// package from chain; `sparse` is the minimal set of artifacts needed to reconstruct + /// a source package; `all` includes all available artifacts. The choice of included + /// artifacts heavily influences the size and therefore gas cost of publishing: `none` + /// is the size of bytecode alone; `sparse` is roughly 2 times as much; and `all` 3-4 + /// as much. 
+ #[clap(long, default_value_t = IncludedArtifacts::Sparse)] + pub(crate) included_artifacts: IncludedArtifacts, +} +``` + +### Command Implementation + +```rust +#[async_trait] +impl CliCommand> for CompilePackage { + fn command_name(&self) -> &'static str { + "CompilePackage" + } + + async fn execute(self) -> CliTypedResult> { + let build_options = BuildOptions { + install_dir: self.move_options.output_dir.clone(), + ..self + .included_artifacts_args + .included_artifacts + .build_options( + self.move_options.dev, + self.move_options.skip_fetch_latest_git_deps, + self.move_options.named_addresses(), + self.move_options.override_std.clone(), + self.move_options.bytecode_version, + self.move_options.compiler_version, + self.move_options.language_version, + self.move_options.skip_attribute_checks, + self.move_options.check_test_code, + ) + }; + let pack = BuiltPackage::build(self.move_options.get_package_path()?, build_options) + .map_err(|e| CliError::MoveCompilationError(format!("{:#}", e)))?; + if self.save_metadata { + pack.extract_metadata_and_save()?; + } + let ids = pack + .modules() + .map(|m| m.self_id().to_string()) + .collect::>(); + // TODO: Also say how many scripts are compiled + Ok(ids) + } +} +``` + +Commands should implement the `CliCommand` trait for the package. This allows it to be called upstream generically +and `T` will automatically be serialized to JSON for the output. This allows for typed testing in unit tests, while +still having output converted for the total CLI. + +It's an anti-pattern to `panic`, please avoid panicking, and instead provide `CliError` or `CliError` conversion for the +current types. + +All output from the CLI should use `eprintln!()`, rather than `println!()`. `stdout` is reserved for the JSON output at +the end of the command, `stderr` is used for the rest of the output. 
diff --git a/crates/aptos/Cargo.toml b/crates/aptos/Cargo.toml index 17f95169b81..df8ba8c7871 100644 --- a/crates/aptos/Cargo.toml +++ b/crates/aptos/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "aptos" description = "Aptos tool for management of nodes and interacting with the blockchain" -version = "4.1.0" +version = "4.2.3" # Workspace inherited keys authors = { workspace = true } @@ -55,6 +55,7 @@ bollard = { workspace = true } chrono = { workspace = true } clap = { workspace = true, features = ["env", "unstable-styles", "wrap_help"] } clap_complete = { workspace = true } +colored = { workspace = true } dashmap = { workspace = true } diesel = { workspace = true, features = [ "postgres_backend", @@ -84,7 +85,7 @@ pathsearch = { workspace = true } poem = { workspace = true } # We set default-features to false so we don't onboard the libpq dep. See more here: # https://github.com/aptos-labs/aptos-core/pull/12568 -processor = { git = "https://github.com/aptos-labs/aptos-indexer-processors.git", rev = "fa1ce4947f4c2be57529f1c9732529e05a06cb7f", default-features = false } +processor = { git = "https://github.com/aptos-labs/aptos-indexer-processors.git", rev = "51a34901b40d7f75767ac907b4d2478104d6a515", default-features = false } rand = { workspace = true } regex = { workspace = true } reqwest = { workspace = true } @@ -92,7 +93,7 @@ self_update = { git = "https://github.com/banool/self_update.git", rev = "830615 serde = { workspace = true } serde_json = { workspace = true } serde_yaml = { workspace = true } -server-framework = { git = "https://github.com/aptos-labs/aptos-indexer-processors.git", rev = "fa1ce4947f4c2be57529f1c9732529e05a06cb7f" } +server-framework = { git = "https://github.com/aptos-labs/aptos-indexer-processors.git", rev = "51a34901b40d7f75767ac907b4d2478104d6a515" } tempfile = { workspace = true } thiserror = { workspace = true } tokio = { workspace = true } diff --git a/crates/aptos/src/account/balance.rs b/crates/aptos/src/account/balance.rs index 
dbb6dd19314..8176b22beba 100644 --- a/crates/aptos/src/account/balance.rs +++ b/crates/aptos/src/account/balance.rs @@ -5,7 +5,7 @@ use crate::common::types::{ CliCommand, CliConfig, CliError, CliTypedResult, ConfigSearchMode, ProfileOptions, RestOptions, }; use aptos_api_types::ViewFunction; -use aptos_types::{account_address::AccountAddress, APTOS_COIN_TYPE}; +use aptos_types::{account_address::AccountAddress, AptosCoinType, CoinType}; use async_trait::async_trait; use clap::Parser; use move_core_types::{ident_str, language_storage::ModuleId, parser::parse_type_tag}; @@ -66,7 +66,7 @@ impl CliCommand> for Balance { })? } else { // If nothing is given, use the default APT - APTOS_COIN_TYPE.to_owned() + AptosCoinType::type_tag() }; let client = self.rest_options.client(&self.profile_options)?; diff --git a/crates/aptos/src/account/fund.rs b/crates/aptos/src/account/fund.rs index 5bc6be9df11..227cb70c0c4 100644 --- a/crates/aptos/src/account/fund.rs +++ b/crates/aptos/src/account/fund.rs @@ -13,20 +13,20 @@ use clap::Parser; /// /// This will create an account if it doesn't exist with the faucet. This is mostly useful /// for local development and devnet. -#[derive(Debug, Parser)] +#[derive(Debug, Default, Parser)] pub struct FundWithFaucet { /// Address to fund /// /// If the account wasn't previously created, it will be created when being funded #[clap(long, value_parser = crate::common::types::load_account_arg)] - pub(crate) account: Option, + pub account: Option, /// Number of Octas to fund the account from the faucet /// /// The amount added to the account may be limited by the faucet, and may be less /// than the amount requested. 
#[clap(long, default_value_t = DEFAULT_FUNDED_COINS)] - pub(crate) amount: u64, + pub amount: u64, #[clap(flatten)] pub(crate) faucet_options: FaucetOptions, diff --git a/crates/aptos/src/common/types.rs b/crates/aptos/src/common/types.rs index 106783926e1..c9a6be08e4c 100644 --- a/crates/aptos/src/common/types.rs +++ b/crates/aptos/src/common/types.rs @@ -106,6 +106,14 @@ pub enum CliError { MoveTestError, #[error("Move Prover failed: {0}")] MoveProverError(String), + #[error( + "The package is larger than {1} bytes ({0} bytes)! \ + To lower the size you may want to include less artifacts via `--included-artifacts`. \ + You can also override this check with `--override-size-check`. \ + Alternatively, you can use the `--chunked-publish` to enable chunked publish mode, \ + which chunks down the package and deploys it in several stages." + )] + PackageSizeExceeded(usize, usize), #[error("Unable to parse '{0}': error: {1}")] UnableToParse(&'static str, String), #[error("Unable to read file '{0}', error: {1}")] @@ -131,6 +139,7 @@ impl CliError { CliError::MoveCompilationError(_) => "MoveCompilationError", CliError::MoveTestError => "MoveTestError", CliError::MoveProverError(_) => "MoveProverError", + CliError::PackageSizeExceeded(_, _) => "PackageSizeExceeded", CliError::UnableToParse(_, _) => "UnableToParse", CliError::UnableToReadFile(_, _) => "UnableToReadFile", CliError::UnexpectedError(_) => "UnexpectedError", @@ -966,7 +975,7 @@ impl SaveFile { } /// Options specific to using the Rest endpoint -#[derive(Debug, Default, Parser)] +#[derive(Debug, Parser)] pub struct RestOptions { /// URL to a fullnode on the network /// @@ -985,6 +994,16 @@ pub struct RestOptions { pub node_api_key: Option, } +impl Default for RestOptions { + fn default() -> Self { + Self { + url: None, + connection_timeout_secs: DEFAULT_EXPIRATION_SECS, + node_api_key: None, + } + } +} + impl RestOptions { pub fn new(url: Option, connection_timeout_secs: Option) -> Self { RestOptions { @@ -1604,7 
+1623,7 @@ pub struct TransactionOptions { #[clap(flatten)] pub(crate) gas_options: GasOptions, #[clap(flatten)] - pub(crate) prompt_options: PromptOptions, + pub prompt_options: PromptOptions, /// If this option is set, simulate the transaction locally. #[clap(long)] @@ -2018,7 +2037,7 @@ pub struct MultisigAccountWithSequenceNumber { pub(crate) sequence_number: u64, } -#[derive(Debug, Parser)] +#[derive(Debug, Default, Parser)] pub struct TypeArgVec { /// TypeTag arguments separated by spaces. /// @@ -2057,7 +2076,7 @@ impl TryInto> for TypeArgVec { } } -#[derive(Clone, Debug, Parser)] +#[derive(Clone, Debug, Default, Parser)] pub struct ArgWithTypeVec { /// Arguments combined with their type separated by spaces. /// @@ -2223,7 +2242,7 @@ impl TryInto for EntryFunctionArguments { } /// Common options for constructing a script payload -#[derive(Debug, Parser)] +#[derive(Debug, Default, Parser)] pub struct ScriptFunctionArguments { #[clap(flatten)] pub(crate) type_arg_vec: TypeArgVec, @@ -2312,3 +2331,13 @@ pub struct OverrideSizeCheckOption { #[clap(long)] pub(crate) override_size_check: bool, } + +#[derive(Parser)] +pub struct ChunkedPublishOption { + /// Whether to publish a package in a chunked mode. This may require more than one transaction + /// for publishing the Move package. + /// + /// Use this option for publishing large packages exceeding `MAX_PUBLISH_PACKAGE_SIZE`. 
+ #[clap(long)] + pub(crate) chunked_publish: bool, +} diff --git a/crates/aptos/src/governance/mod.rs b/crates/aptos/src/governance/mod.rs index 3ea547676f9..e5506839ff7 100644 --- a/crates/aptos/src/governance/mod.rs +++ b/crates/aptos/src/governance/mod.rs @@ -905,10 +905,10 @@ pub struct CompileScriptFunction { pub compiled_script_path: Option, #[clap(flatten)] - pub(crate) framework_package_args: FrameworkPackageArgs, + pub framework_package_args: FrameworkPackageArgs, #[clap(long, default_value_if("move_2", "true", "7"))] - pub(crate) bytecode_version: Option, + pub bytecode_version: Option, #[clap(long, value_parser = clap::value_parser!(CompilerVersion), default_value_if("move_2", "true", "2.0"))] diff --git a/crates/aptos/src/move_tool/bytecode.rs b/crates/aptos/src/move_tool/bytecode.rs index 7a80844d5b5..6a662300b20 100644 --- a/crates/aptos/src/move_tool/bytecode.rs +++ b/crates/aptos/src/move_tool/bytecode.rs @@ -39,7 +39,7 @@ const DECOMPILER_EXTENSION: &str = "mv.move"; /// /// For example, if you want to disassemble an on-chain package `PackName` at account `0x42`: /// 1. Download the package with `aptos move download --account 0x42 --package PackName --bytecode` -/// 2. Disassemble the package bytecode with `aptos disassemble --package-path PackName/bytecode_modules` +/// 2. 
Disassemble the package bytecode with `aptos move disassemble --package-path PackName/bytecode_modules` #[derive(Debug, Parser)] pub struct Disassemble { #[clap(flatten)] diff --git a/crates/aptos/src/move_tool/fmt.rs b/crates/aptos/src/move_tool/fmt.rs index ae1766c6528..995a79efd3e 100644 --- a/crates/aptos/src/move_tool/fmt.rs +++ b/crates/aptos/src/move_tool/fmt.rs @@ -125,6 +125,14 @@ impl FmtCommand { if scripts_path.exists() { path_vec.push(scripts_path.clone()); } + let tests_path = root_package_path.join(SourcePackageLayout::Tests.path()); + if tests_path.exists() { + path_vec.push(tests_path.clone()); + } + let examples_path = root_package_path.join(SourcePackageLayout::Examples.path()); + if examples_path.exists() { + path_vec.push(examples_path.clone()); + } if let Ok(move_sources) = find_move_filenames(&path_vec, false) { for source in &move_sources { let mut cur_cmd = create_cmd(); diff --git a/crates/aptos/src/move_tool/mod.rs b/crates/aptos/src/move_tool/mod.rs index 3abe1f72846..72b8c243b89 100644 --- a/crates/aptos/src/move_tool/mod.rs +++ b/crates/aptos/src/move_tool/mod.rs @@ -6,8 +6,8 @@ use crate::{ common::{ local_simulation, types::{ - load_account_arg, ArgWithTypeJSON, CliConfig, CliError, CliTypedResult, - ConfigSearchMode, EntryFunctionArguments, EntryFunctionArgumentsJSON, + load_account_arg, ArgWithTypeJSON, ChunkedPublishOption, CliConfig, CliError, + CliTypedResult, ConfigSearchMode, EntryFunctionArguments, EntryFunctionArgumentsJSON, MoveManifestAccountWrapper, MovePackageDir, OptimizationLevel, OverrideSizeCheckOption, ProfileOptions, PromptOptions, RestOptions, SaveFile, ScriptFunctionArguments, TransactionOptions, TransactionSummary, @@ -27,15 +27,24 @@ use crate::{ }, CliCommand, CliResult, }; +use aptos_api_types::AptosErrorCode; use aptos_crypto::HashValue; use aptos_framework::{ - docgen::DocgenOptions, extended_checks, natives::code::UpgradePolicy, prover::ProverOptions, + chunked_publish::{ + 
chunk_package_and_create_payloads, large_packages_cleanup_staging_area, PublishType, + LARGE_PACKAGES_MODULE_ADDRESS, + }, + docgen::DocgenOptions, + extended_checks, + natives::code::UpgradePolicy, + prover::ProverOptions, BuildOptions, BuiltPackage, }; use aptos_gas_schedule::{MiscGasParameters, NativeGasParameters}; use aptos_move_debugger::aptos_debugger::AptosDebugger; use aptos_rest_client::{ aptos_api_types::{EntryFunctionId, HexEncodedBytes, IdentifierWrapper, MoveModuleId}, + error::RestError, Client, }; use aptos_types::{ @@ -47,6 +56,7 @@ use aptos_types::{ use aptos_vm::data_cache::AsMoveResolver; use async_trait::async_trait; use clap::{Parser, Subcommand, ValueEnum}; +use colored::Colorize; use itertools::Itertools; use move_cli::{self, base::test::UnitTestResult}; use move_command_line_common::{address::NumericalAddress, env::MOVE_HOME}; @@ -68,7 +78,7 @@ pub use stored_package::*; use tokio::task; use url::Url; -mod aptos_debug_natives; +pub mod aptos_debug_natives; mod bytecode; pub mod coverage; mod fmt; @@ -91,6 +101,7 @@ const HELLO_BLOCKCHAIN_EXAMPLE: &str = include_str!( pub enum MoveTool { BuildPublishPayload(BuildPublishPayload), Clean(CleanPackage), + ClearStagingArea(ClearStagingArea), #[clap(alias = "build")] Compile(CompilePackage), #[clap(alias = "build-script")] @@ -129,6 +140,7 @@ impl MoveTool { match self { MoveTool::BuildPublishPayload(tool) => tool.execute_serialized().await, MoveTool::Clean(tool) => tool.execute_serialized().await, + MoveTool::ClearStagingArea(tool) => tool.execute_serialized().await, MoveTool::Compile(tool) => tool.execute_serialized().await, MoveTool::CompileScript(tool) => tool.execute_serialized().await, MoveTool::Coverage(tool) => tool.execute().await, @@ -162,7 +174,7 @@ impl MoveTool { } } -#[derive(Parser, Default)] +#[derive(Default, Parser)] pub struct FrameworkPackageArgs { /// Git revision or branch for the Aptos framework /// @@ -367,12 +379,12 @@ pub struct CompilePackage { /// If set, package 
metadata should be generated and stored in the package's build directory. /// This metadata can be used to construct a transaction to publish a package. #[clap(long)] - pub(crate) save_metadata: bool, + pub save_metadata: bool, #[clap(flatten)] - pub(crate) included_artifacts_args: IncludedArtifactsArgs, + pub included_artifacts_args: IncludedArtifactsArgs, #[clap(flatten)] - pub(crate) move_options: MovePackageDir, + pub move_options: MovePackageDir, } #[async_trait] @@ -714,7 +726,7 @@ pub struct IncludedArtifactsArgs { /// is the size of bytecode alone; `sparse` is roughly 2 times as much; and `all` 3-4 /// as much. #[clap(long, default_value_t = IncludedArtifacts::Sparse)] - pub(crate) included_artifacts: IncludedArtifacts, + pub included_artifacts: IncludedArtifacts, } /// Publishes the modules in a Move package to the Aptos blockchain @@ -722,7 +734,8 @@ pub struct IncludedArtifactsArgs { pub struct PublishPackage { #[clap(flatten)] pub(crate) override_size_check_option: OverrideSizeCheckOption, - + #[clap(flatten)] + pub(crate) chunked_publish_option: ChunkedPublishOption, #[clap(flatten)] pub(crate) included_artifacts_args: IncludedArtifactsArgs, #[clap(flatten)] @@ -731,12 +744,16 @@ pub struct PublishPackage { pub(crate) txn_options: TransactionOptions, } -struct PackagePublicationData { +pub(crate) struct PackagePublicationData { metadata_serialized: Vec, compiled_units: Vec>, payload: TransactionPayload, } +pub(crate) struct ChunkedPublishPayloads { + payloads: Vec, +} + /// Build a publication transaction payload and store it in a JSON output file. 
#[derive(Parser)] pub struct BuildPublishPayload { @@ -751,35 +768,51 @@ impl TryInto for &PublishPackage { type Error = CliError; fn try_into(self) -> Result { - let package_path = self.move_options.get_package_path()?; - let options = self - .included_artifacts_args - .included_artifacts - .build_options(&self.move_options)?; - let package = BuiltPackage::build(package_path, options) - .map_err(|e| CliError::MoveCompilationError(format!("{:#}", e)))?; - let compiled_units = package.extract_code(); - let metadata_serialized = - bcs::to_bytes(&package.extract_metadata()?).expect("PackageMetadata has BCS"); - let payload = aptos_cached_packages::aptos_stdlib::code_publish_package_txn( - metadata_serialized.clone(), - compiled_units.clone(), - ); - let size = bcs::serialized_size(&payload)?; + let package = + build_package_options(&self.move_options, &self.included_artifacts_args).unwrap(); + + let package_publication_data = + create_package_publication_data(package, PublishType::AccountDeploy, None)?; + + let size = bcs::serialized_size(&package_publication_data.payload)?; println!("package size {} bytes", size); if !self.override_size_check_option.override_size_check && size > MAX_PUBLISH_PACKAGE_SIZE { - return Err(CliError::UnexpectedError(format!( - "The package is larger than {} bytes ({} bytes)! To lower the size \ - you may want to include fewer artifacts via `--included-artifacts`. 
\ - You can also override this check with `--override-size-check", - MAX_PUBLISH_PACKAGE_SIZE, size - ))); + return Err(CliError::PackageSizeExceeded( + size, + MAX_PUBLISH_PACKAGE_SIZE, + )); } - Ok(PackagePublicationData { - metadata_serialized, - compiled_units, - payload, - }) + + Ok(package_publication_data) + } +} + +#[async_trait] +pub trait AsyncTryInto { + type Error; + + async fn async_try_into(self) -> Result; +} + +#[async_trait] +impl AsyncTryInto for &PublishPackage { + type Error = CliError; + + async fn async_try_into(self) -> Result { + let package = + build_package_options(&self.move_options, &self.included_artifacts_args).unwrap(); + + let chunked_publish_payloads = + create_chunked_publish_payloads(package, PublishType::AccountDeploy, None)?; + + let size = &chunked_publish_payloads + .payloads + .iter() + .map(bcs::serialized_size) + .sum::>()?; + println!("package size {} bytes", size); + + Ok(chunked_publish_payloads) } } @@ -815,7 +848,7 @@ impl FromStr for IncludedArtifacts { } } -pub(crate) fn experiments_from_opt_level(optlevel: &Option) -> Vec { +pub fn experiments_from_opt_level(optlevel: &Option) -> Vec { match optlevel { None | Some(OptimizationLevel::Default) => { vec![format!("{}=on", Experiment::OPTIMIZE.to_string())] @@ -917,6 +950,71 @@ impl IncludedArtifacts { pub const MAX_PUBLISH_PACKAGE_SIZE: usize = 60_000; +// Get publication data for standard publish mode, which submits a single transaction for publishing. 
+fn create_package_publication_data( + package: BuiltPackage, + publish_type: PublishType, + object_address: Option, +) -> CliTypedResult { + let compiled_units = package.extract_code(); + let metadata = package.extract_metadata()?; + let metadata_serialized = bcs::to_bytes(&metadata).expect("PackageMetadata has BCS"); + + let payload = match publish_type { + PublishType::AccountDeploy => { + aptos_cached_packages::aptos_stdlib::code_publish_package_txn( + metadata_serialized.clone(), + compiled_units.clone(), + ) + }, + PublishType::ObjectDeploy => { + aptos_cached_packages::aptos_stdlib::object_code_deployment_publish( + metadata_serialized.clone(), + compiled_units.clone(), + ) + }, + PublishType::ObjectUpgrade => { + aptos_cached_packages::aptos_stdlib::object_code_deployment_upgrade( + metadata_serialized.clone(), + compiled_units.clone(), + object_address.expect("Object address must be provided for upgrading object code."), + ) + }, + }; + + Ok(PackagePublicationData { + metadata_serialized, + compiled_units, + payload, + }) +} + +// Get publication data for chunked publish mode, which submits multiple transactions for publishing. 
+fn create_chunked_publish_payloads( + package: BuiltPackage, + publish_type: PublishType, + object_address: Option, +) -> CliTypedResult { + let compiled_units = package.extract_code(); + let metadata = package.extract_metadata()?; + let metadata_serialized = bcs::to_bytes(&metadata).expect("PackageMetadata has BCS"); + + let maybe_object_address = if let PublishType::ObjectUpgrade = publish_type { + object_address + } else { + None + }; + + let payloads = chunk_package_and_create_payloads( + metadata_serialized, + compiled_units, + publish_type, + maybe_object_address, + ); + + Ok(ChunkedPublishPayloads { payloads }) +} + #[async_trait] impl CliCommand for PublishPackage { fn command_name(&self) -> &'static str { @@ -924,8 +1022,20 @@ impl CliCommand for PublishPackage { } async fn execute(self) -> CliTypedResult { - let package_publication_data: PackagePublicationData = (&self).try_into()?; - profile_or_submit(package_publication_data.payload, &self.txn_options).await + if self.chunked_publish_option.chunked_publish { + let chunked_package_payloads: ChunkedPublishPayloads = (&self).async_try_into().await?; + + let message = format!("Publishing package in chunked mode will submit {} transactions for staging and publishing code.\n", &chunked_package_payloads.payloads.len()); + println!("{}", message.bold()); + submit_chunked_publish_transactions( + chunked_package_payloads.payloads, + &self.txn_options, + ) + .await + } else { + let package_publication_data: PackagePublicationData = (&self).try_into()?; + profile_or_submit(package_publication_data.payload, &self.txn_options).await + } } } @@ -985,7 +1095,7 @@ impl CliCommand for BuildPublishPayload { } } -/// Publishes the modules in a Move package to the Aptos blockchain, under an object. 
+/// Publishes the modules in a Move package to the Aptos blockchain, under an object (legacy version of `deploy-object`) #[derive(Parser)] pub struct CreateObjectAndPublishPackage { /// The named address for compiling and using in the contract @@ -996,6 +1106,8 @@ pub struct CreateObjectAndPublishPackage { #[clap(flatten)] pub(crate) override_size_check_option: OverrideSizeCheckOption, #[clap(flatten)] + pub(crate) chunked_publish_option: ChunkedPublishOption, + #[clap(flatten)] pub(crate) included_artifacts_args: IncludedArtifactsArgs, #[clap(flatten)] pub(crate) move_options: MovePackageDir, @@ -1011,48 +1123,77 @@ impl CliCommand for CreateObjectAndPublishPackage { async fn execute(mut self) -> CliTypedResult { let sender_address = self.txn_options.get_public_key_and_address()?.1; - let sequence_number = self.txn_options.sequence_number(sender_address).await? + 1; + + let sequence_number = if self.chunked_publish_option.chunked_publish { + // Perform a preliminary build to determine the number of transactions needed for chunked publish mode. + // This involves building the package with mock account address `0xcafe` to calculate the transaction count. + let mock_object_address = AccountAddress::from_hex_literal("0xcafe").unwrap(); + self.move_options + .add_named_address(self.address_name.clone(), mock_object_address.to_string()); + let package = + build_package_options(&self.move_options, &self.included_artifacts_args).unwrap(); + let mock_payloads = + create_chunked_publish_payloads(package, PublishType::AccountDeploy, None)? + .payloads; + let staging_tx_count = (mock_payloads.len() - 1) as u64; + self.txn_options.sequence_number(sender_address).await? + staging_tx_count + 1 + } else { + self.txn_options.sequence_number(sender_address).await? 
+ 1 + }; + let object_address = create_object_code_deployment_address(sender_address, sequence_number); self.move_options .add_named_address(self.address_name, object_address.to_string()); - let options = self - .included_artifacts_args - .included_artifacts - .build_options(&self.move_options)?; - let package = BuiltPackage::build(self.move_options.get_package_path()?, options)?; + let package = + build_package_options(&self.move_options, &self.included_artifacts_args).unwrap(); let message = format!( "Do you want to publish this package at object address {}", object_address ); prompt_yes_with_override(&message, self.txn_options.prompt_options)?; - let payload = aptos_cached_packages::aptos_stdlib::object_code_deployment_publish( - bcs::to_bytes(&package.extract_metadata()?) - .expect("Failed to serialize PackageMetadata"), - package.extract_code(), - ); - let size = bcs::serialized_size(&payload)?; - println!("package size {} bytes", size); + let result = if self.chunked_publish_option.chunked_publish { + let payloads = + create_chunked_publish_payloads(package, PublishType::ObjectDeploy, None)?.payloads; - if !self.override_size_check_option.override_size_check && size > MAX_PUBLISH_PACKAGE_SIZE { - return Err(CliError::UnexpectedError(format!( - "The package is larger than {} bytes ({} bytes)! To lower the size \ - you may want to include less artifacts via `--included-artifacts`. 
\ - You can also override this check with `--override-size-check", - MAX_PUBLISH_PACKAGE_SIZE, size - ))); - } - let result = self - .txn_options - .submit_transaction(payload) - .await - .map(TransactionSummary::from); + let size = &payloads + .iter() + .map(bcs::serialized_size) + .sum::>()?; + println!("package size {} bytes", size); + let message = format!("Publishing package in chunked mode will submit {} transactions for staging and publishing code.\n", &payloads.len()); + println!("{}", message.bold()); + + submit_chunked_publish_transactions(payloads, &self.txn_options).await + } else { + let payload = create_package_publication_data( + package, + PublishType::ObjectDeploy, + Some(object_address), + )? + .payload; + let size = bcs::serialized_size(&payload)?; + println!("package size {} bytes", size); + + if !self.override_size_check_option.override_size_check + && size > MAX_PUBLISH_PACKAGE_SIZE + { + return Err(CliError::PackageSizeExceeded( + size, + MAX_PUBLISH_PACKAGE_SIZE, + )); + } + self.txn_options + .submit_transaction(payload) + .await + .map(TransactionSummary::from) + }; if result.is_ok() { println!( - "Code was successfully deployed to object address {}.", + "Code was successfully deployed to object address {}", object_address ); } @@ -1060,6 +1201,7 @@ impl CliCommand for CreateObjectAndPublishPackage { } } +/// Upgrades the modules in a Move package deployed under an object (legacy version of `upgrade-object`) #[derive(Parser)] pub struct UpgradeObjectPackage { /// Address of the object the package was deployed to @@ -1071,6 +1213,8 @@ pub struct UpgradeObjectPackage { #[clap(flatten)] pub(crate) override_size_check_option: OverrideSizeCheckOption, #[clap(flatten)] + pub(crate) chunked_publish_option: ChunkedPublishOption, + #[clap(flatten)] pub(crate) included_artifacts_args: IncludedArtifactsArgs, #[clap(flatten)] pub(crate) move_options: MovePackageDir, @@ -1085,11 +1229,8 @@ impl CliCommand for UpgradeObjectPackage { } async fn 
execute(self) -> CliTypedResult { - let options = self - .included_artifacts_args - .included_artifacts - .build_options(&self.move_options)?; - let built_package = BuiltPackage::build(self.move_options.get_package_path()?, options)?; + let built_package = + build_package_options(&self.move_options, &self.included_artifacts_args).unwrap(); let url = self .txn_options .rest_options @@ -1115,32 +1256,50 @@ impl CliCommand for UpgradeObjectPackage { ); prompt_yes_with_override(&message, self.txn_options.prompt_options)?; - let payload = aptos_cached_packages::aptos_stdlib::object_code_deployment_upgrade( - bcs::to_bytes(&built_package.extract_metadata()?) - .expect("Failed to serialize PackageMetadata"), - built_package.extract_code(), - self.object_address, - ); - let size = bcs::serialized_size(&payload)?; - println!("package size {} bytes", size); + let result = if self.chunked_publish_option.chunked_publish { + let payloads = create_chunked_publish_payloads( + built_package, + PublishType::ObjectUpgrade, + Some(self.object_address), + )? + .payloads; + + let size = &payloads + .iter() + .map(bcs::serialized_size) + .sum::>()?; + println!("package size {} bytes", size); + let message = format!("Upgrading package in chunked mode will submit {} transactions for staging and upgrading code.\n", &payloads.len()); + println!("{}", message.bold()); + submit_chunked_publish_transactions(payloads, &self.txn_options).await + } else { + let payload = create_package_publication_data( + built_package, + PublishType::ObjectUpgrade, + Some(self.object_address), + )? + .payload; - if !self.override_size_check_option.override_size_check && size > MAX_PUBLISH_PACKAGE_SIZE { - return Err(CliError::UnexpectedError(format!( - "The package is larger than {} bytes ({} bytes)! To lower the size \ - you may want to include less artifacts via `--included-artifacts`. 
\ - You can also override this check with `--override-size-check", - MAX_PUBLISH_PACKAGE_SIZE, size - ))); - } - let result = self - .txn_options - .submit_transaction(payload) - .await - .map(TransactionSummary::from); + let size = bcs::serialized_size(&payload)?; + println!("package size {} bytes", size); + + if !self.override_size_check_option.override_size_check + && size > MAX_PUBLISH_PACKAGE_SIZE + { + return Err(CliError::PackageSizeExceeded( + size, + MAX_PUBLISH_PACKAGE_SIZE, + )); + } + self.txn_options + .submit_transaction(payload) + .await + .map(TransactionSummary::from) + }; if result.is_ok() { println!( - "Code was successfully upgraded at object address {}.", + "Code was successfully upgraded at object address {}", self.object_address ); } @@ -1159,6 +1318,8 @@ pub struct DeployObjectCode { #[clap(flatten)] pub(crate) override_size_check_option: OverrideSizeCheckOption, #[clap(flatten)] + pub(crate) chunked_publish_option: ChunkedPublishOption, + #[clap(flatten)] pub(crate) included_artifacts_args: IncludedArtifactsArgs, #[clap(flatten)] pub(crate) move_options: MovePackageDir, @@ -1174,7 +1335,23 @@ impl CliCommand for DeployObjectCode { async fn execute(mut self) -> CliTypedResult { let sender_address = self.txn_options.get_public_key_and_address()?.1; - let sequence_number = self.txn_options.sequence_number(sender_address).await? + 1; + let sequence_number = if self.chunked_publish_option.chunked_publish { + // Perform a preliminary build to determine the number of transactions needed for chunked publish mode. + // This involves building the package with mock account address `0xcafe` to calculate the transaction count. 
+ let mock_object_address = AccountAddress::from_hex_literal("0xcafe").unwrap(); + self.move_options + .add_named_address(self.address_name.clone(), mock_object_address.to_string()); + let package = + build_package_options(&self.move_options, &self.included_artifacts_args).unwrap(); + let mock_payloads = + create_chunked_publish_payloads(package, PublishType::AccountDeploy, None)? + .payloads; + let staging_tx_count = (mock_payloads.len() - 1) as u64; + self.txn_options.sequence_number(sender_address).await? + staging_tx_count + 1 + } else { + self.txn_options.sequence_number(sender_address).await? + 1 + }; + let object_address = create_object_code_deployment_address(sender_address, sequence_number); self.move_options @@ -1188,23 +1365,55 @@ impl CliCommand for DeployObjectCode { ); prompt_yes_with_override(&message, self.txn_options.prompt_options)?; - let payload = aptos_cached_packages::aptos_stdlib::object_code_deployment_publish( - bcs::to_bytes(&package.extract_metadata()?) - .expect("Failed to serialize PackageMetadata"), - package.extract_code(), - ); + let result = if self.chunked_publish_option.chunked_publish { + let payloads = + create_chunked_publish_payloads(package, PublishType::ObjectDeploy, None)?.payloads; - submit_tx_and_check( - &self.txn_options, - payload, - &object_address.to_string(), - self.override_size_check_option.override_size_check, - "Code was successfully deployed to object address {}.", - ) - .await + let size = &payloads + .iter() + .map(bcs::serialized_size) + .sum::>()?; + println!("package size {} bytes", size); + let message = format!("Publishing package in chunked mode will submit {} transactions for staging and publishing code.\n", &payloads.len()); + println!("{}", message.bold()); + + submit_chunked_publish_transactions(payloads, &self.txn_options).await + } else { + let payload = create_package_publication_data( + package, + PublishType::ObjectDeploy, + Some(object_address), + )? 
+ .payload; + + let size = bcs::serialized_size(&payload)?; + println!("package size {} bytes", size); + + if !self.override_size_check_option.override_size_check + && size > MAX_PUBLISH_PACKAGE_SIZE + { + return Err(CliError::PackageSizeExceeded( + size, + MAX_PUBLISH_PACKAGE_SIZE, + )); + } + self.txn_options + .submit_transaction(payload) + .await + .map(TransactionSummary::from) + }; + + if result.is_ok() { + println!( + "Code was successfully deployed to object address {}", + object_address + ); + } + result } } +/// Upgrades the modules in a Move package deployed under an object. #[derive(Parser)] pub struct UpgradeCodeObject { /// The named address for compiling and using in the contract @@ -1219,6 +1428,8 @@ pub struct UpgradeCodeObject { #[clap(flatten)] pub(crate) override_size_check_option: OverrideSizeCheckOption, #[clap(flatten)] + pub(crate) chunked_publish_option: ChunkedPublishOption, + #[clap(flatten)] pub(crate) included_artifacts_args: IncludedArtifactsArgs, #[clap(flatten)] pub(crate) move_options: MovePackageDir, @@ -1263,21 +1474,54 @@ impl CliCommand for UpgradeCodeObject { ); prompt_yes_with_override(&message, self.txn_options.prompt_options)?; - let payload = aptos_cached_packages::aptos_stdlib::object_code_deployment_upgrade( - bcs::to_bytes(&package.extract_metadata()?) - .expect("Failed to serialize PackageMetadata"), - package.extract_code(), - self.object_address, - ); + let result = if self.chunked_publish_option.chunked_publish { + let payloads = create_chunked_publish_payloads( + package, + PublishType::ObjectUpgrade, + Some(self.object_address), + )? 
+ .payloads; + + let size = &payloads + .iter() + .map(bcs::serialized_size) + .sum::>()?; + println!("package size {} bytes", size); + let message = format!("Upgrading package in chunked mode will submit {} transactions for staging and upgrading code.\n", &payloads.len()); + println!("{}", message.bold()); + submit_chunked_publish_transactions(payloads, &self.txn_options).await + } else { + let payload = create_package_publication_data( + package, + PublishType::ObjectUpgrade, + Some(self.object_address), + )? + .payload; - submit_tx_and_check( - &self.txn_options, - payload, - &self.object_address.to_string(), - self.override_size_check_option.override_size_check, - "Code was successfully upgraded at object address {}.", - ) - .await + let size = bcs::serialized_size(&payload)?; + println!("package size {} bytes", size); + + if !self.override_size_check_option.override_size_check + && size > MAX_PUBLISH_PACKAGE_SIZE + { + return Err(CliError::PackageSizeExceeded( + size, + MAX_PUBLISH_PACKAGE_SIZE, + )); + } + self.txn_options + .submit_transaction(payload) + .await + .map(TransactionSummary::from) + }; + + if result.is_ok() { + println!( + "Code was successfully upgraded at object address {}", + self.object_address + ); + } + result } } @@ -1291,34 +1535,131 @@ fn build_package_options( BuiltPackage::build(move_options.get_package_path()?, options) } -async fn submit_tx_and_check( +async fn submit_chunked_publish_transactions( + payloads: Vec, txn_options: &TransactionOptions, - payload: TransactionPayload, - object_address: &str, - override_size_check: bool, - success_message: &str, ) -> CliTypedResult { - let size = bcs::serialized_size(&payload)?; - println!("package size {} bytes", size); + let mut publishing_result = Err(CliError::UnexpectedError( + "No payload provided for batch transaction run".to_string(), + )); + let payloads_length = payloads.len() as u64; + let mut tx_hashes = vec![]; + + let account_address = 
txn_options.profile_options.account_address()?; - if !override_size_check && size > MAX_PUBLISH_PACKAGE_SIZE { - return Err(CliError::UnexpectedError(format!( - "The package is larger than {} bytes ({} bytes)! To lower the size \ - you may want to include fewer artifacts via `--included-artifacts`. \ - You can also override this check with `--override-size-check", - MAX_PUBLISH_PACKAGE_SIZE, size - ))); + if !is_staging_area_empty(txn_options).await? { + let message = format!( + "The resource {}::large_packages::StagingArea under account {} is not empty.\ + \nThis may cause package publishing to fail if the data is unexpected. \ + \nUse the `aptos move clear-staging-area` command to clean up the `StagingArea` resource under the account.", + LARGE_PACKAGES_MODULE_ADDRESS, account_address, + ) + .bold(); + println!("{}", message); + prompt_yes_with_override("Do you want to proceed?", txn_options.prompt_options)?; } - let result = txn_options - .submit_transaction(payload) - .await - .map(TransactionSummary::from); + for (idx, payload) in payloads.into_iter().enumerate() { + println!("Transaction {} of {}", idx + 1, payloads_length); + let result = txn_options + .submit_transaction(payload) + .await + .map(TransactionSummary::from); + + match result { + Ok(tx_summary) => { + let tx_hash = tx_summary.transaction_hash.to_string(); + let status = tx_summary.success.map_or("".to_string(), |success| { + if success { + "Success".to_string() + } else { + "Failed".to_string() + } + }); + println!("Transaction executed: {} ({})\n", status, &tx_hash); + tx_hashes.push(tx_hash); + publishing_result = Ok(tx_summary); + }, + + Err(e) => { + println!("{}", "Caution: An error occurred while submitting chunked publish transactions. \ + \nDue to this error, there may be incomplete data left in the `StagingArea` resource. \ + \nThis could cause further errors if you attempt to run the chunked publish command again. 
\ + \nTo avoid this, use the `aptos move clear-staging-area` command to clean up the `StagingArea` resource under your account before retrying.".bold()); + return Err(e); + }, + } + } - if result.is_ok() { - println!("{} {}", success_message, object_address); + println!( + "{}", + "All Transactions Submitted Successfully.".bold().green() + ); + let tx_hash_formatted = format!( + "Submitted Transactions:\n[\n {}\n]", + tx_hashes + .iter() + .map(|tx| format!("\"{}\"", tx)) + .collect::>() + .join(",\n ") + ); + println!("\n{}\n", tx_hash_formatted); + publishing_result +} + +async fn is_staging_area_empty(txn_options: &TransactionOptions) -> CliTypedResult { + let url = txn_options.rest_options.url(&txn_options.profile_options)?; + let client = Client::new(url); + + let staging_area_response = client + .get_account_resource( + txn_options.profile_options.account_address()?, + &format!( + "{}::large_packages::StagingArea", + LARGE_PACKAGES_MODULE_ADDRESS + ), + ) + .await; + + match staging_area_response { + Ok(response) => match response.into_inner() { + Some(_) => Ok(false), // StagingArea is not empty + None => Ok(true), // TODO: determine which case this is + }, + Err(RestError::Api(aptos_error_response)) + if aptos_error_response.error.error_code == AptosErrorCode::ResourceNotFound => + { + Ok(true) // The resource doesn't exist + }, + Err(rest_err) => Err(CliError::from(rest_err)), + } +} + +/// Cleans up the `StagingArea` resource under an account, which is used for chunked publish operations +#[derive(Parser)] +pub struct ClearStagingArea { + #[clap(flatten)] + pub(crate) txn_options: TransactionOptions, +} + +#[async_trait] +impl CliCommand for ClearStagingArea { + fn command_name(&self) -> &'static str { + "ClearStagingArea" + } + + async fn execute(self) -> CliTypedResult { + println!( + "Cleaning up resource {}::large_packages::StagingArea under account {}.", + LARGE_PACKAGES_MODULE_ADDRESS, + self.txn_options.profile_options.account_address()? 
+ ); + let payload = large_packages_cleanup_staging_area(); + self.txn_options + .submit_transaction(payload) + .await + .map(TransactionSummary::from) } - result } /// Publishes the modules in a Move package to the Aptos blockchain under a resource account @@ -1715,11 +2056,11 @@ impl CliCommand> for ViewFunction { #[derive(Parser)] pub struct RunScript { #[clap(flatten)] - pub(crate) txn_options: TransactionOptions, + pub txn_options: TransactionOptions, #[clap(flatten)] - pub(crate) compile_proposal_args: CompileScriptFunction, + pub compile_proposal_args: CompileScriptFunction, #[clap(flatten)] - pub(crate) script_function_args: ScriptFunctionArguments, + pub script_function_args: ScriptFunctionArguments, } #[async_trait] diff --git a/crates/aptos/src/node/local_testnet/faucet.rs b/crates/aptos/src/node/local_testnet/faucet.rs index 5a334c729fb..64f7f3befe9 100644 --- a/crates/aptos/src/node/local_testnet/faucet.rs +++ b/crates/aptos/src/node/local_testnet/faucet.rs @@ -40,8 +40,8 @@ pub struct FaucetArgs { #[derive(Clone, Debug)] pub struct FaucetManager { - config: RunConfig, - prerequisite_health_checkers: HashSet, + pub config: RunConfig, + pub prerequisite_health_checkers: HashSet, } impl FaucetManager { diff --git a/crates/aptos/src/node/local_testnet/hasura_metadata.json b/crates/aptos/src/node/local_testnet/hasura_metadata.json index 7a4906b33ec..d5f1a87a594 100644 --- a/crates/aptos/src/node/local_testnet/hasura_metadata.json +++ b/crates/aptos/src/node/local_testnet/hasura_metadata.json @@ -1,5 +1,5 @@ { - "resource_version": 11, + "resource_version": 38, "metadata": { "version": 3, "sources": [ @@ -1225,6 +1225,7 @@ "columns": [ "collection_id", "collection_name", + "collection_properties", "creator_address", "current_supply", "description", @@ -1349,6 +1350,59 @@ } ] }, + { + "table": { + "name": "current_fungible_asset_balances", + "schema": "public" + }, + "object_relationships": [ + { + "name": "metadata", + "using": { + "manual_configuration": { 
+ "column_mapping": { + "asset_type": "asset_type" + }, + "insertion_order": null, + "remote_table": { + "name": "fungible_asset_metadata", + "schema": "public" + } + } + } + } + ], + "select_permissions": [ + { + "role": "anonymous", + "permission": { + "columns": [ + "amount", + "amount_v1", + "amount_v2", + "asset_type", + "asset_type_v1", + "asset_type_v2", + "is_frozen", + "is_primary", + "last_transaction_timestamp", + "last_transaction_timestamp_v1", + "last_transaction_timestamp_v2", + "last_transaction_version", + "last_transaction_version_v1", + "last_transaction_version_v2", + "owner_address", + "storage_id", + "token_standard" + ], + "filter": {}, + "limit": 100, + "allow_aggregations": true + }, + "comment": "" + } + ] + }, { "table": { "name": "current_objects", @@ -1485,6 +1539,21 @@ } } } + }, + { + "name": "current_royalty_v1", + "using": { + "manual_configuration": { + "column_mapping": { + "token_data_id": "token_data_id" + }, + "insertion_order": null, + "remote_table": { + "name": "current_token_royalty_v1", + "schema": "public" + } + } + } } ], "array_relationships": [ @@ -1708,50 +1777,23 @@ }, { "table": { - "name": "current_unified_fungible_asset_balances_to_be_renamed", + "name": "current_token_royalty_v1", "schema": "public" }, - "configuration": { - "column_config": {}, - "custom_column_names": {}, - "custom_name": "current_fungible_asset_balances", - "custom_root_fields": {} - }, - "object_relationships": [ - { - "name": "metadata", - "using": { - "manual_configuration": { - "column_mapping": { - "asset_type": "asset_type" - }, - "insertion_order": null, - "remote_table": { - "name": "fungible_asset_metadata", - "schema": "public" - } - } - } - } - ], "select_permissions": [ { "role": "anonymous", "permission": { "columns": [ - "amount", - "asset_type", - "is_frozen", - "is_primary", "last_transaction_timestamp", "last_transaction_version", - "owner_address", - "storage_id", - "token_standard" + "payee_address", + 
"royalty_points_denominator", + "royalty_points_numerator", + "token_data_id" ], "filter": {}, - "limit": 100, - "allow_aggregations": true + "limit": 100 }, "comment": "" } @@ -2308,6 +2350,10 @@ "from_env": "INDEXER_V2_POSTGRES_URL" }, "isolation_level": "read-committed", + "pool_settings": { + "connection_lifetime": 600, + "max_connections": 100 + }, "use_prepared_statements": false } } diff --git a/crates/aptos/src/node/local_testnet/mod.rs b/crates/aptos/src/node/local_testnet/mod.rs index cf1a3081370..752851dde43 100644 --- a/crates/aptos/src/node/local_testnet/mod.rs +++ b/crates/aptos/src/node/local_testnet/mod.rs @@ -2,17 +2,19 @@ // SPDX-License-Identifier: Apache-2.0 mod docker; -mod faucet; -mod health_checker; mod indexer_api; mod logging; -mod node; mod postgres; mod processors; mod ready_server; -mod traits; mod utils; +// This is to allow external crates to use the localnode. +pub mod faucet; +pub mod health_checker; +pub mod node; +pub mod traits; + use self::{ faucet::FaucetArgs, health_checker::HealthChecker, @@ -190,13 +192,8 @@ impl CliCommand<()> for RunLocalnet { setup_logging(None); } - let global_config = GlobalConfig::load().context("Failed to load global config")?; - let test_dir = match &self.test_dir { - Some(test_dir) => test_dir.clone(), - None => global_config - .get_config_location(ConfigSearchMode::CurrentDirAndParents)? - .join(TESTNET_FOLDER), - }; + // Based on the input and global config, get the test directory. + let test_dir = get_derived_test_dir(&self.test_dir)?; // If asked, remove the current test directory and start with a new node. 
if self.force_restart && test_dir.exists() { @@ -462,3 +459,13 @@ async fn run_shutdown_steps(shutdown_steps: Vec>) -> Resul } Ok(()) } + +pub fn get_derived_test_dir(input_test_dir: &Option) -> Result { + let global_config = GlobalConfig::load().context("Failed to load global config")?; + match input_test_dir { + Some(test_dir) => Ok(test_dir.clone()), + None => Ok(global_config + .get_config_location(ConfigSearchMode::CurrentDirAndParents)? + .join(TESTNET_FOLDER)), + } +} diff --git a/crates/aptos/src/node/local_testnet/node.rs b/crates/aptos/src/node/local_testnet/node.rs index 62696da50e1..e51a63a73fe 100644 --- a/crates/aptos/src/node/local_testnet/node.rs +++ b/crates/aptos/src/node/local_testnet/node.rs @@ -90,6 +90,29 @@ pub struct NodeManager { no_node: bool, } +pub fn build_node_config( + rng: StdRng, + config_path: &Option, + test_config_override: &Option, + performance: bool, + test_dir: PathBuf, +) -> Result { + // If there is a config on disk, this function will use that. If not, it will + // create a new one, taking the config_path and test_config_override arguments + // into account. + load_node_config( + config_path, + test_config_override, + &test_dir, + false, + false, + performance, + aptos_cached_packages::head_release_bundle(), + rng, + ) + .context("Failed to load / create config for node") +} + impl NodeManager { pub fn new(args: &RunLocalnet, bind_to: Ipv4Addr, test_dir: PathBuf) -> Result { let rng = args @@ -98,35 +121,43 @@ impl NodeManager { .map(StdRng::from_seed) .unwrap_or_else(StdRng::from_entropy); - // If there is a config on disk, this function will use that. If not, it will - // create a new one, taking the config_path and test_config_override arguments - // into account. 
- let mut node_config = load_node_config( + let node_config = build_node_config( + rng, &args.node_args.config_path, &args.node_args.test_config_override, - &test_dir, - false, - false, args.node_args.performance, - aptos_cached_packages::head_release_bundle(), - rng, + test_dir.clone(), + )?; + Self::new_with_config( + node_config, + bind_to, + test_dir, + !args.node_args.no_txn_stream, + args.node_args.txn_stream_port, + args.node_args.no_node, ) - .context("Failed to load / create config for node")?; + } + pub fn new_with_config( + mut node_config: NodeConfig, + bind_to: Ipv4Addr, + test_dir: PathBuf, + run_txn_stream: bool, + txn_stream_port: u16, + no_node: bool, + ) -> Result { eprintln!(); // Enable the grpc stream on the node if we will run a txn stream service. - let run_txn_stream = !args.node_args.no_txn_stream; node_config.indexer_grpc.enabled = run_txn_stream; node_config.indexer_grpc.use_data_service_interface = run_txn_stream; - node_config - .indexer_grpc - .address - .set_port(args.node_args.txn_stream_port); + node_config.indexer_grpc.address.set_port(txn_stream_port); - // So long as the indexer relies on storage indexing tables, this must be set - // for the indexer GRPC stream on the node to work. - node_config.storage.enable_indexer = run_txn_stream; + node_config.indexer_table_info.table_info_service_mode = match run_txn_stream { + // Localnet should be responsible for backup or restore of table info tables. + true => aptos_config::config::TableInfoServiceMode::IndexingOnly, + false => aptos_config::config::TableInfoServiceMode::Disabled, + }; // Bind to the requested address. 
node_config.api.address.set_ip(IpAddr::V4(bind_to)); @@ -137,7 +168,7 @@ impl NodeManager { Ok(NodeManager { config: node_config, test_dir, - no_node: args.node_args.no_node, + no_node, }) } diff --git a/crates/aptos/src/node/local_testnet/processors.rs b/crates/aptos/src/node/local_testnet/processors.rs index efd9baf8c89..f9654f9c662 100644 --- a/crates/aptos/src/node/local_testnet/processors.rs +++ b/crates/aptos/src/node/local_testnet/processors.rs @@ -92,6 +92,18 @@ impl ProcessorManager { ProcessorName::ParquetFungibleAssetProcessor => { bail!("ParquetFungibleAssetProcessor is not supported in the localnet") }, + ProcessorName::ParquetTransactionMetadataProcessor => { + bail!("ParquetTransactionMetadataProcessor is not supported in the localnet") + }, + ProcessorName::ParquetAnsProcessor => { + bail!("ParquetAnsProcessor is not supported in the localnet") + }, + ProcessorName::ParquetEventsProcessor => { + bail!("ParquetEventsProcessor is not supported in the localnet") + }, + ProcessorName::ParquetTokenV2Processor => { + bail!("ParquetTokenV2Processor is not supported in the localnet") + }, ProcessorName::StakeProcessor => { ProcessorConfig::StakeProcessor(StakeProcessorConfig { query_retries: Default::default(), diff --git a/crates/aptos/src/test/mod.rs b/crates/aptos/src/test/mod.rs index 25873452c73..7f2b139885f 100644 --- a/crates/aptos/src/test/mod.rs +++ b/crates/aptos/src/test/mod.rs @@ -15,11 +15,12 @@ use crate::{ init::{InitTool, Network}, types::{ account_address_from_public_key, AccountAddressWrapper, ArgWithTypeVec, - AuthenticationKeyInputOptions, CliError, CliTypedResult, EncodingOptions, - EntryFunctionArguments, FaucetOptions, GasOptions, KeyType, MoveManifestAccountWrapper, - MovePackageDir, OptionalPoolAddressArgs, OverrideSizeCheckOption, PoolAddressArgs, - PrivateKeyInputOptions, PromptOptions, PublicKeyInputOptions, RestOptions, RngArgs, - SaveFile, ScriptFunctionArguments, TransactionOptions, TransactionSummary, TypeArgVec, + 
AuthenticationKeyInputOptions, ChunkedPublishOption, CliError, CliTypedResult, + EncodingOptions, EntryFunctionArguments, FaucetOptions, GasOptions, KeyType, + MoveManifestAccountWrapper, MovePackageDir, OptionalPoolAddressArgs, + OverrideSizeCheckOption, PoolAddressArgs, PrivateKeyInputOptions, PromptOptions, + PublicKeyInputOptions, RestOptions, RngArgs, SaveFile, ScriptFunctionArguments, + TransactionOptions, TransactionSummary, TypeArgVec, }, utils::write_to_file, }, @@ -892,6 +893,9 @@ impl CliTestFramework { included_artifacts_args: IncludedArtifactsArgs { included_artifacts: included_artifacts.unwrap_or(IncludedArtifacts::Sparse), }, + chunked_publish_option: ChunkedPublishOption { + chunked_publish: false, + }, } .execute() .await diff --git a/crates/indexer/src/models/coin_models/coin_activities.rs b/crates/indexer/src/models/coin_models/coin_activities.rs index 50a419a966c..cb51cb6d3d0 100644 --- a/crates/indexer/src/models/coin_models/coin_activities.rs +++ b/crates/indexer/src/models/coin_models/coin_activities.rs @@ -19,7 +19,7 @@ use aptos_api_types::{ Event as APIEvent, Transaction as APITransaction, TransactionInfo as APITransactionInfo, TransactionPayload, UserTransactionRequest, WriteSetChange as APIWriteSetChange, }; -use aptos_types::APTOS_COIN_TYPE; +use aptos_types::{AptosCoinType, CoinType as CoinTypeTrait}; use bigdecimal::BigDecimal; use field_count::FieldCount; use serde::{Deserialize, Serialize}; @@ -267,7 +267,7 @@ impl CoinActivity { event_creation_number: BURN_GAS_EVENT_CREATION_NUM, event_sequence_number: user_transaction_request.sequence_number.0 as i64, owner_address: standardize_address(&user_transaction_request.sender.to_string()), - coin_type: APTOS_COIN_TYPE.to_string(), + coin_type: AptosCoinType::type_tag().to_string(), amount: aptos_coin_burned, activity_type: GAS_FEE_EVENT.to_string(), is_gas_fee: true, diff --git a/crates/indexer/src/processors/coin_processor.rs b/crates/indexer/src/processors/coin_processor.rs index 
9c65d422713..45bb1e34208 100644 --- a/crates/indexer/src/processors/coin_processor.rs +++ b/crates/indexer/src/processors/coin_processor.rs @@ -19,7 +19,7 @@ use crate::{ schema, }; use aptos_api_types::Transaction as APITransaction; -use aptos_types::APTOS_COIN_TYPE; +use aptos_types::{AptosCoinType, CoinType}; use async_trait::async_trait; use diesel::{pg::upsert::excluded, result::Error, ExpressionMethods, PgConnection}; use field_count::FieldCount; @@ -280,7 +280,8 @@ impl TransactionProcessor for CoinTransactionProcessor { // get aptos_coin info for supply tracking // TODO: This only needs to be fetched once. Need to persist somehow let maybe_aptos_coin_info = - &CoinInfoQuery::get_by_coin_type(APTOS_COIN_TYPE.to_string(), &mut conn).unwrap(); + &CoinInfoQuery::get_by_coin_type(AptosCoinType::type_tag().to_string(), &mut conn) + .unwrap(); let mut all_coin_activities = vec![]; let mut all_coin_balances = vec![]; diff --git a/crates/reliable-broadcast/src/lib.rs b/crates/reliable-broadcast/src/lib.rs index a46e806f9ac..7246f2b729a 100644 --- a/crates/reliable-broadcast/src/lib.rs +++ b/crates/reliable-broadcast/src/lib.rs @@ -210,13 +210,13 @@ where fn log_rpc_failure(error: anyhow::Error, receiver: Author) { // Log a sampled warning (to prevent spam) sample!( - SampleRate::Duration(Duration::from_secs(1)), - warn!(error = ?error, "rpc to {} failed, error {}", receiver, error) + SampleRate::Duration(Duration::from_secs(30)), + warn!("[sampled] rpc to {} failed, error {:#}", receiver, error) ); // Log at the debug level (this is useful for debugging // and won't spam the logs in a production environment). 
- debug!(error = ?error, "rpc to {} failed, error {}", receiver, error); + debug!("rpc to {} failed, error {:#}", receiver, error); } pub struct DropGuard { diff --git a/devtools/aptos-cargo-cli/src/lib.rs b/devtools/aptos-cargo-cli/src/lib.rs index 15688ea0ace..4dea19a6ec0 100644 --- a/devtools/aptos-cargo-cli/src/lib.rs +++ b/devtools/aptos-cargo-cli/src/lib.rs @@ -34,8 +34,12 @@ const RELEVANT_FILE_PATHS_FOR_EXECUTION_PERFORMANCE_TESTS: [&str; 5] = [ "execution/aptos-executor-benchmark", "testsuite/single_node_performance.py", ]; -const RELEVANT_FILE_PATHS_FOR_FRAMEWORK_UPGRADE_TESTS: [&str; 2] = - ["aptos-move/aptos-release-builder", "aptos-move/framework"]; +const RELEVANT_FILE_PATHS_FOR_FRAMEWORK_UPGRADE_TESTS: [&str; 4] = [ + ".github", + "testsuite", + "aptos-move/aptos-release-builder", + "aptos-move/framework", +]; // Relevant packages to monitor when deciding to run the targeted tests const RELEVANT_PACKAGES_FOR_COMPILER_V2: [&str; 2] = ["aptos-framework", "e2e-move-tests"]; diff --git a/docker/builder/docker-bake-rust-all.hcl b/docker/builder/docker-bake-rust-all.hcl index a5c01f2e616..63c658dc59f 100644 --- a/docker/builder/docker-bake-rust-all.hcl +++ b/docker/builder/docker-bake-rust-all.hcl @@ -69,7 +69,7 @@ target "debian-base" { dockerfile = "docker/builder/debian-base.Dockerfile" contexts = { # Run `docker buildx imagetools inspect debian:bullseye` to find the latest multi-platform hash - debian = "docker-image://debian:bullseye@sha256:0bb606aad3307370c8b4502eff11fde298e5b7721e59a0da3ce9b30cb92045ed" + debian = "docker-image://debian:bullseye@sha256:152b9a5dc2a03f18ddfd88fbe7b1df41bd2b16be9f2df573a373caf46ce78c08" } } diff --git a/ecosystem/indexer-grpc/indexer-grpc-table-info/src/internal_indexer_db_service.rs b/ecosystem/indexer-grpc/indexer-grpc-table-info/src/internal_indexer_db_service.rs index ca26fd0c6e4..2feaf9c3b57 100644 --- a/ecosystem/indexer-grpc/indexer-grpc-table-info/src/internal_indexer_db_service.rs +++ 
b/ecosystem/indexer-grpc/indexer-grpc-table-info/src/internal_indexer_db_service.rs @@ -9,26 +9,34 @@ use aptos_db_indexer::{ indexer_reader::IndexerReaders, }; use aptos_indexer_grpc_utils::counters::{log_grpc_step, IndexerGrpcStep}; +use aptos_logger::info; use aptos_storage_interface::DbReader; use aptos_types::{indexer::indexer_db_reader::IndexerReader, transaction::Version}; use std::{ path::{Path, PathBuf}, sync::Arc, + time::Duration, }; -use tokio::runtime::Handle; +use tokio::{runtime::Handle, sync::watch::Receiver as WatchReceiver}; const SERVICE_TYPE: &str = "internal_indexer_db_service"; const INTERNAL_INDEXER_DB: &str = "internal_indexer_db"; pub struct InternalIndexerDBService { pub db_indexer: Arc, + pub update_receiver: WatchReceiver, } impl InternalIndexerDBService { - pub fn new(db_reader: Arc, internal_indexer_db: InternalIndexerDB) -> Self { + pub fn new( + db_reader: Arc, + internal_indexer_db: InternalIndexerDB, + update_receiver: WatchReceiver, + ) -> Self { let internal_db_indexer = Arc::new(DBIndexer::new(internal_indexer_db, db_reader)); Self { db_indexer: internal_db_indexer, + update_receiver, } } @@ -140,9 +148,17 @@ impl InternalIndexerDBService { let next_version = self.db_indexer.process_a_batch(start_version)?; if next_version == start_version { - tokio::time::sleep(std::time::Duration::from_millis(100)).await; + if let Ok(recv_res) = + tokio::time::timeout(Duration::from_millis(100), self.update_receiver.changed()) + .await + { + if recv_res.is_err() { + info!("update sender is dropped"); + return Ok(()); + } + } continue; - } + }; log_grpc_step( SERVICE_TYPE, IndexerGrpcStep::InternalIndexerDBProcessed, @@ -166,7 +182,11 @@ pub struct MockInternalIndexerDBService { } impl MockInternalIndexerDBService { - pub fn new_for_test(db_reader: Arc, node_config: &NodeConfig) -> Self { + pub fn new_for_test( + db_reader: Arc, + node_config: &NodeConfig, + update_receiver: WatchReceiver, + ) -> Self { if !node_config .indexer_db_config 
.is_internal_indexer_db_enabled() @@ -179,7 +199,8 @@ impl MockInternalIndexerDBService { let db = InternalIndexerDBService::get_indexer_db(node_config).unwrap(); let handle = Handle::current(); - let mut internal_indexer_db_service = InternalIndexerDBService::new(db_reader, db); + let mut internal_indexer_db_service = + InternalIndexerDBService::new(db_reader, db, update_receiver); let db_indexer = internal_indexer_db_service.get_db_indexer(); let config_clone = node_config.to_owned(); handle.spawn(async move { diff --git a/ecosystem/indexer-grpc/indexer-grpc-table-info/src/runtime.rs b/ecosystem/indexer-grpc/indexer-grpc-table-info/src/runtime.rs index ff5c17d5d9b..cfd9dfc2ce5 100644 --- a/ecosystem/indexer-grpc/indexer-grpc-table-info/src/runtime.rs +++ b/ecosystem/indexer-grpc/indexer-grpc-table-info/src/runtime.rs @@ -14,9 +14,9 @@ use aptos_db_indexer::{ }; use aptos_mempool::MempoolClientSender; use aptos_storage_interface::DbReaderWriter; -use aptos_types::chain_id::ChainId; +use aptos_types::{chain_id::ChainId, transaction::Version}; use std::sync::Arc; -use tokio::runtime::Runtime; +use tokio::{runtime::Runtime, sync::watch::Receiver as WatchReceiver}; const INDEX_ASYNC_V2_DB_NAME: &str = "index_indexer_async_v2_db"; @@ -24,14 +24,18 @@ pub fn bootstrap_internal_indexer_db( config: &NodeConfig, db_rw: DbReaderWriter, internal_indexer_db: Option, + update_receiver: Option>, ) -> Option<(Runtime, Arc)> { if !config.indexer_db_config.is_internal_indexer_db_enabled() || internal_indexer_db.is_none() { return None; } let runtime = aptos_runtimes::spawn_named_runtime("index-db".to_string(), None); // Set up db config and open up the db initially to read metadata - let mut indexer_service = - InternalIndexerDBService::new(db_rw.reader, internal_indexer_db.unwrap()); + let mut indexer_service = InternalIndexerDBService::new( + db_rw.reader, + internal_indexer_db.unwrap(), + update_receiver.expect("Internal indexer db update receiver is missing"), + ); let 
db_indexer = indexer_service.get_db_indexer(); // Spawn task for db indexer let config_clone = config.to_owned(); diff --git a/ecosystem/indexer-grpc/indexer-transaction-generator/Cargo.toml b/ecosystem/indexer-grpc/indexer-transaction-generator/Cargo.toml new file mode 100644 index 00000000000..68fa26769da --- /dev/null +++ b/ecosystem/indexer-grpc/indexer-transaction-generator/Cargo.toml @@ -0,0 +1,37 @@ +[package] +name = "aptos-indexer-transaction-generator" +description = "Indexer integration testing framework." +version = "1.0.0" + +# Workspace inherited keys +authors = { workspace = true } +edition = { workspace = true } +homepage = { workspace = true } +license = { workspace = true } +publish = { workspace = true } +repository = { workspace = true } +rust-version = { workspace = true } + +[dependencies] +anyhow = { workspace = true } +# used for localnode. +aptos = { workspace = true } +aptos-config = { workspace = true } +aptos-faucet-core = { workspace = true } +aptos-indexer-grpc-utils = { workspace = true } +aptos-protos ={ workspace = true } +clap = { workspace = true } +futures = { workspace = true } +rand = { workspace = true } +serde = { workspace = true } +serde_json = { workspace = true } +serde_yaml = { workspace = true } +tokio = { workspace = true } +toml = { workspace = true } +tonic = { workspace = true } +url = { workspace = true } + +[dev-dependencies] +itertools = { workspace = true } +tempfile = { workspace = true } +tokio-stream = { workspace = true } diff --git a/ecosystem/indexer-grpc/indexer-transaction-generator/README.md b/ecosystem/indexer-grpc/indexer-transaction-generator/README.md new file mode 100644 index 00000000000..f15ca877e7b --- /dev/null +++ b/ecosystem/indexer-grpc/indexer-transaction-generator/README.md @@ -0,0 +1,69 @@ +# Indexer Transaction Generator + +This tool is to generate transactions for testing purpose. 
+ +## Usage + +Under root folder, i.e., `aptos-core`, run + +```bash +cargo run -p aptos-indexer-transaction-generator -- \ + --testing-folder ecosystem/indexer-grpc/indexer-transaction-generator/example_tests \ + --output-folder ecosystem/indexer-grpc/indexer-transaction-generator/example_tests +``` + +**You can also use absolute path, run(using binary as an example)** + +```bash +./aptos-indexer-transaction-generator \ + --testing-folder /your/aptos-core/ecosystem/indexer-grpc/indexer-transaction-generator/example_tests \ + --output-folder /tmp/ttt +``` + +### Config overview + +* Your testing folder should contain: + * One file called `testing_accounts.yaml`, which contains testing accounts used. + ```yaml + accounts: + - private_key: "0x99978d48e7b2d50d0a7a3273db0929447ae59635e71118fa256af654c0ce56c9" + public_key: "0x39b4acc85e026dc056464a5ea00b98f858260eaad2b74dd30b86ae0d4d94ddf5" + account: a531b7fdd7917f73ca216d89a8d9ce0cf7e7cfb9086ca6f6cbf9521532748d16 + - ... + ``` + * One file called `imported_transactions.yaml`, which is used for importing transactions. + + ```yaml + testnet: + # Transaction Stream endpoint addresss. + transaction_stream_endpoint: https://grpc.testnet.aptoslabs.com:443 + # (Optional) The key to use with developers.aptoslabs.com + api_key: YOUR_KEY_HERE + # A map from versions to dump and their output names. + versions_to_import: + 123: testnet_v1.json + mainnet: + ... + ``` + * One folder called `move_fixtures`, which contains move scripts and configs. + * An example script transaction config looks like: + ```yaml + transactions: + - output_name: simple_user_script1 + script_path: simple_user_script + sender_address: __ACCOUNT_A__ + - output_name: simple_user_script2 + script_path: simple_user_script2 + sender_address: __ACCOUNT_A__ + ``` + + +You can check the example [here](example_tests). 
+ + +### Account Management +Each sender_address specified in script transaction config is a place holder string; +the actual account address will be allocated by account manager. + +TODO: account manager handles address as script argument. + diff --git a/ecosystem/indexer-grpc/indexer-transaction-generator/example_tests/README.md b/ecosystem/indexer-grpc/indexer-transaction-generator/example_tests/README.md new file mode 100644 index 00000000000..eb25fd5788f --- /dev/null +++ b/ecosystem/indexer-grpc/indexer-transaction-generator/example_tests/README.md @@ -0,0 +1 @@ +This folder demonstrates the testing folder structure. \ No newline at end of file diff --git a/ecosystem/indexer-grpc/indexer-transaction-generator/example_tests/imported_transactions.yaml b/ecosystem/indexer-grpc/indexer-transaction-generator/example_tests/imported_transactions.yaml new file mode 100644 index 00000000000..5eb53e8a4c4 --- /dev/null +++ b/ecosystem/indexer-grpc/indexer-transaction-generator/example_tests/imported_transactions.yaml @@ -0,0 +1,3 @@ +# Empty configs to import. +{ +} \ No newline at end of file diff --git a/ecosystem/indexer-grpc/indexer-transaction-generator/example_tests/move_fixtures/README.md b/ecosystem/indexer-grpc/indexer-transaction-generator/example_tests/move_fixtures/README.md new file mode 100644 index 00000000000..eac2ef54363 --- /dev/null +++ b/ecosystem/indexer-grpc/indexer-transaction-generator/example_tests/move_fixtures/README.md @@ -0,0 +1 @@ +This folder hosts all the example move scripts. 
\ No newline at end of file diff --git a/ecosystem/indexer-grpc/indexer-transaction-generator/example_tests/move_fixtures/simple_user_script.yaml b/ecosystem/indexer-grpc/indexer-transaction-generator/example_tests/move_fixtures/simple_user_script.yaml new file mode 100644 index 00000000000..07b7428a361 --- /dev/null +++ b/ecosystem/indexer-grpc/indexer-transaction-generator/example_tests/move_fixtures/simple_user_script.yaml @@ -0,0 +1,8 @@ +transactions: + - output_name: simple_user_script1 + script_path: simple_user_script + sender_address: __ACCOUNT_A__ + - output_name: simple_user_script2 + script_path: simple_user_script2 + sender_address: __ACCOUNT_A__ + \ No newline at end of file diff --git a/ecosystem/indexer-grpc/indexer-transaction-generator/example_tests/move_fixtures/simple_user_script/Move.toml b/ecosystem/indexer-grpc/indexer-transaction-generator/example_tests/move_fixtures/simple_user_script/Move.toml new file mode 100644 index 00000000000..4431bebd35a --- /dev/null +++ b/ecosystem/indexer-grpc/indexer-transaction-generator/example_tests/move_fixtures/simple_user_script/Move.toml @@ -0,0 +1,15 @@ +[package] +name = "test_case" +version = "1.0.0" +authors = [] + +[addresses] + +[dev-addresses] + +[dependencies.AptosFramework] +git = "https://github.com/aptos-labs/aptos-core.git" +rev = "mainnet" +subdir = "aptos-move/framework/aptos-framework" + +[dev-dependencies] diff --git a/ecosystem/indexer-grpc/indexer-transaction-generator/example_tests/move_fixtures/simple_user_script/script.mv b/ecosystem/indexer-grpc/indexer-transaction-generator/example_tests/move_fixtures/simple_user_script/script.mv new file mode 100644 index 00000000000..6e2db2756ea Binary files /dev/null and b/ecosystem/indexer-grpc/indexer-transaction-generator/example_tests/move_fixtures/simple_user_script/script.mv differ diff --git a/ecosystem/indexer-grpc/indexer-transaction-generator/example_tests/move_fixtures/simple_user_script/sources/main.move 
b/ecosystem/indexer-grpc/indexer-transaction-generator/example_tests/move_fixtures/simple_user_script/sources/main.move new file mode 100644 index 00000000000..68fa0f4d90f --- /dev/null +++ b/ecosystem/indexer-grpc/indexer-transaction-generator/example_tests/move_fixtures/simple_user_script/sources/main.move @@ -0,0 +1,4 @@ +script { + fun main(src: &signer) { + } +} diff --git a/ecosystem/indexer-grpc/indexer-transaction-generator/example_tests/move_fixtures/simple_user_script2.yaml b/ecosystem/indexer-grpc/indexer-transaction-generator/example_tests/move_fixtures/simple_user_script2.yaml new file mode 100644 index 00000000000..548e0a53f27 --- /dev/null +++ b/ecosystem/indexer-grpc/indexer-transaction-generator/example_tests/move_fixtures/simple_user_script2.yaml @@ -0,0 +1,8 @@ +transactions: + - output_name: simple_user_script3 + script_path: simple_user_script + sender_address: __ACCOUNT_A__ + - output_name: simple_user_script4 + script_path: simple_user_script2 + sender_address: __ACCOUNT_B__ + \ No newline at end of file diff --git a/ecosystem/indexer-grpc/indexer-transaction-generator/example_tests/move_fixtures/simple_user_script2/Move.toml b/ecosystem/indexer-grpc/indexer-transaction-generator/example_tests/move_fixtures/simple_user_script2/Move.toml new file mode 100644 index 00000000000..4431bebd35a --- /dev/null +++ b/ecosystem/indexer-grpc/indexer-transaction-generator/example_tests/move_fixtures/simple_user_script2/Move.toml @@ -0,0 +1,15 @@ +[package] +name = "test_case" +version = "1.0.0" +authors = [] + +[addresses] + +[dev-addresses] + +[dependencies.AptosFramework] +git = "https://github.com/aptos-labs/aptos-core.git" +rev = "mainnet" +subdir = "aptos-move/framework/aptos-framework" + +[dev-dependencies] diff --git a/ecosystem/indexer-grpc/indexer-transaction-generator/example_tests/move_fixtures/simple_user_script2/script.mv b/ecosystem/indexer-grpc/indexer-transaction-generator/example_tests/move_fixtures/simple_user_script2/script.mv new file 
mode 100644 index 00000000000..6e2db2756ea Binary files /dev/null and b/ecosystem/indexer-grpc/indexer-transaction-generator/example_tests/move_fixtures/simple_user_script2/script.mv differ diff --git a/ecosystem/indexer-grpc/indexer-transaction-generator/example_tests/move_fixtures/simple_user_script2/sources/main.move b/ecosystem/indexer-grpc/indexer-transaction-generator/example_tests/move_fixtures/simple_user_script2/sources/main.move new file mode 100644 index 00000000000..68fa0f4d90f --- /dev/null +++ b/ecosystem/indexer-grpc/indexer-transaction-generator/example_tests/move_fixtures/simple_user_script2/sources/main.move @@ -0,0 +1,4 @@ +script { + fun main(src: &signer) { + } +} diff --git a/ecosystem/indexer-grpc/indexer-transaction-generator/example_tests/testing_accounts.yaml b/ecosystem/indexer-grpc/indexer-transaction-generator/example_tests/testing_accounts.yaml new file mode 100644 index 00000000000..cec489415cd --- /dev/null +++ b/ecosystem/indexer-grpc/indexer-transaction-generator/example_tests/testing_accounts.yaml @@ -0,0 +1,16 @@ +# This file serves the accounts to use during account generation. +# These are generated in localnet and safe to share. 
+accounts: + - private_key: "0x99978d48e7b2d50d0a7a3273db0929447ae59635e71118fa256af654c0ce56c9" + public_key: "0x39b4acc85e026dc056464a5ea00b98f858260eaad2b74dd30b86ae0d4d94ddf5" + account: a531b7fdd7917f73ca216d89a8d9ce0cf7e7cfb9086ca6f6cbf9521532748d16 + - private_key: "0xe77498ac20ca67e8f642a6521077c8d5cba54853e7bed1e2c33b67e5a7b6c76e" + public_key: "0xc92c8e7b4467e629ca8cd201a21564de39eea7cbe45b59bfd37f10b56e0a728c" + account: 501b015c58f2a1a62a330a6da80dfee723f528f719d25a4232751986f9a9f43f + - private_key: "0x76a641118ffb5ca5f9de4fe414a7d216d89616e74bec2c445324a2f0ab609ab6" + public_key: "0xef05bede15f422e16c0002e3cee8b4d4341518d99c4695352a1869b0779864fb" + account: 8f0de18409d6fca18c72fac4062fc0f9baa6404296fed93a3ad0250fb671f8b3 + - private_key: "0xf24423f014e6f2fdd1914d6961b49a2cfc9eac59b88a57457e4cd9424fc140c8" + public_key: "0xb298975d27dbff3020e5ee7fdbbad8a969d4f2a2d5286e097d1db9760d04dd31" + account: 765d8c8d4d5859f43a56e2756fbf5f3d2483dbaa14f3fb62872df820d6e64eff + \ No newline at end of file diff --git a/ecosystem/indexer-grpc/indexer-transaction-generator/src/accont_manager.rs b/ecosystem/indexer-grpc/indexer-transaction-generator/src/accont_manager.rs new file mode 100644 index 00000000000..393573a3e51 --- /dev/null +++ b/ecosystem/indexer-grpc/indexer-transaction-generator/src/accont_manager.rs @@ -0,0 +1,112 @@ +// Copyright (c) Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use anyhow::Context; +use serde::{Deserialize, Serialize}; +use std::path::Path; + +const FAUCET_URL: &str = "http://localhost:8081"; +const REST_URL: &str = "http://localhost:8080"; + +#[derive(Debug, Serialize, Deserialize)] +pub struct AccountManager { + pub accounts: Vec, +} + +#[derive(Debug, Serialize, Deserialize)] +pub struct Account { + pub public_key: String, + pub private_key: String, + pub account: String, +} + +impl Account { + pub async fn save_as_profile_file(&self, profile_file_path: &Path) -> anyhow::Result<()> { + // TODO: refactor this to use 
serde to write the file. + let content = format!( + "---\nprofiles:\n default:\n public_key: {}\n private_key: {}\n account: {}\n rest_url: {REST_URL}\n faucet_url: {FAUCET_URL}", + self.public_key, self.private_key, self.account + ); + // create the folder. + let account_folder = profile_file_path.join(".aptos"); + tokio::fs::create_dir_all(account_folder.clone()) + .await + .context(format!( + "[Account] Failed to create account profile folder at path: {:?}", + profile_file_path + ))?; + tokio::fs::write(account_folder.join("config.yaml"), content) + .await + .context(format!( + "[Account] Failed to save account profile to path: {:?}", + profile_file_path + ))?; + Ok(()) + } + + pub async fn delete_profile_file(&self, profile_file_path: &Path) -> anyhow::Result<()> { + let account_folder = profile_file_path.join(".aptos"); + tokio::fs::remove_dir_all(account_folder) + .await + .context(format!( + "[Account] Failed to delete account profile folder at path: {:?}", + profile_file_path + ))?; + Ok(()) + } +} + +impl AccountManager { + pub async fn load(account_manager_file_path: &Path) -> anyhow::Result { + let file = tokio::fs::read_to_string(account_manager_file_path) + .await + .context(format!( + "[Account Manager] The account list file is not found or readable at path: {:?}", + account_manager_file_path + ))?; + let account_manager: AccountManager = serde_yaml::from_str(&file) + .context("[Account Manager] Failed to parse account list file")?; + Ok(account_manager) + } + + pub fn allocate_account(&mut self) -> anyhow::Result { + match self.accounts.pop() { + Some(account) => Ok(account), + None => { + anyhow::bail!("[Account Manager] No more account to allocate; please add more.") + }, + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[tokio::test] + async fn test_load() { + // create a temp file to load. 
+ let testing_folder_root_path = std::env::temp_dir().join("root_folder"); + let _ = tokio::fs::create_dir(&testing_folder_root_path).await; + // Example content of the file. + let content = r#"accounts: + - private_key: "0x99978d48e7b2d50d0a7a3273db0929447ae59635e71118fa256af654c0ce56c9" + public_key: "0x39b4acc85e026dc056464a5ea00b98f858260eaad2b74dd30b86ae0d4d94ddf5" + account: a531b7fdd7917f73ca216d89a8d9ce0cf7e7cfb9086ca6f6cbf9521532748d16 + - private_key: "0xe77498ac20ca67e8f642a6521077c8d5cba54853e7bed1e2c33b67e5a7b6c76e" + public_key: "0xc92c8e7b4467e629ca8cd201a21564de39eea7cbe45b59bfd37f10b56e0a728c" + account: 501b015c58f2a1a62a330a6da80dfee723f528f719d25a4232751986f9a9f43f + "#; + let account_manager_file_path = testing_folder_root_path + .clone() + .join("testing_accounts.yaml"); + tokio::fs::write(&account_manager_file_path, content) + .await + .unwrap(); + // create an account manager. + let account_manager = AccountManager::load(&account_manager_file_path).await; + assert!(account_manager.is_ok()); + let account_manager = account_manager.unwrap(); + assert_eq!(account_manager.accounts.len(), 2); + } +} diff --git a/ecosystem/indexer-grpc/indexer-transaction-generator/src/config.rs b/ecosystem/indexer-grpc/indexer-transaction-generator/src/config.rs new file mode 100644 index 00000000000..2164d1b729a --- /dev/null +++ b/ecosystem/indexer-grpc/indexer-transaction-generator/src/config.rs @@ -0,0 +1,305 @@ +// Copyright (c) Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::{accont_manager::AccountManager, managed_node::ManagedNode}; +use anyhow::Context; +use clap::Parser; +use serde::{Deserialize, Serialize}; +use std::{ + collections::{HashMap, HashSet}, + path::{Path, PathBuf}, +}; +use url::Url; + +const IMPORTED_TRANSACTIONS_FOLDER: &str = "imported_transactions"; +const SCRIPTED_TRANSACTIONS_FOLDER: &str = "scripted_transactions"; +const MOVE_SCRIPTS_FOLDER: &str = "move_fixtures"; +const IMPORTED_TRANSACTION_CONFIG_FILE: &str 
= "imported_transactions.yaml"; +const ACCOUNT_MANAGER_FILE_NAME: &str = "testing_accounts.yaml"; + +#[derive(Parser)] +pub struct IndexerCliArgs { + /// Path to the testing folder, which includes: + /// - The configuration file for importing transactions: `imported_transactions.yaml`. + /// - The folder containing the Move scripts to generate transactions: `move_fixtures`. + /// - The file containing the accounts for testing: `testing_accounts.yaml`. + #[clap(long)] + pub testing_folder: PathBuf, + + /// Path to the output folder where the generated transactions will be saved. + #[clap(long)] + pub output_folder: PathBuf, +} + +impl IndexerCliArgs { + pub async fn run(&self) -> anyhow::Result<()> { + let output_folder = convert_relative_path_to_absolute_path(&self.output_folder); + let testing_folder = convert_relative_path_to_absolute_path(&self.testing_folder); + + // Run the transaction importer. + let imported_transactions_output_folder = output_folder.join(IMPORTED_TRANSACTIONS_FOLDER); + let imported_transactions_config_path = + testing_folder.join(IMPORTED_TRANSACTION_CONFIG_FILE); + + // TODO: refactor this further to reduce the nesting. + // if the imported transactions config file exists, run the transaction importer. + if imported_transactions_config_path.exists() { + let imported_transactions_config_raw: String = + tokio::fs::read_to_string(&imported_transactions_config_path).await?; + let imported_transactions_config: TransactionImporterConfig = + serde_yaml::from_str(&imported_transactions_config_raw)?; + // Check if the output folder exists. + if !imported_transactions_output_folder.exists() { + tokio::fs::create_dir_all(&imported_transactions_output_folder).await?; + } + imported_transactions_config + .validate_and_run(&imported_transactions_output_folder) + .await + .context("Importing transactions failed.")?; + } + + // Run the script transaction generator. 
+ let script_transactions_output_folder = output_folder.join(SCRIPTED_TRANSACTIONS_FOLDER); + let move_folder_path = testing_folder.join(MOVE_SCRIPTS_FOLDER); + // If the move fixtures folder does not exist, skip the script transaction generator. + if !move_folder_path.exists() { + return Ok(()); + } + if !script_transactions_output_folder.exists() { + tokio::fs::create_dir_all(&script_transactions_output_folder).await?; + } + // 1. Validate. + // Scan all yaml files in the move folder path. + let mut script_transactions_vec: Vec<(String, ScriptTransactions)> = vec![]; + let move_files = std::fs::read_dir(&move_folder_path)?; + for entry in move_files { + let entry = entry?; + // entry has to be a file. + if !entry.file_type()?.is_file() { + continue; + } + let path = entry.path(); + if path.extension().unwrap_or_default() == "yaml" { + let file_name = path.file_name().unwrap().to_str().unwrap(); + let script_transactions_raw: String = tokio::fs::read_to_string(&path).await?; + let script_transactions: ScriptTransactions = + serde_yaml::from_str(&script_transactions_raw)?; + script_transactions_vec.push((file_name.to_string(), script_transactions)); + } + } + // Validate the configuration. + let mut output_script_transactions_set = HashSet::new(); + for (file_name, script_transactions) in script_transactions_vec.iter() { + if script_transactions.transactions.is_empty() { + return Err(anyhow::anyhow!( + "[Script Transaction Generator] No transactions found in file `{}`", + file_name + )); + } + for script_transaction in script_transactions.transactions.iter() { + if let Some(output_name) = &script_transaction.output_name { + if !output_script_transactions_set.insert(output_name.clone()) { + return Err(anyhow::anyhow!( + "[Script Transaction Generator] Output file name `{}` is duplicated in file `{}`", + output_name.clone(), + file_name + )); + } + } + } + } + + // Run each config. 
+ let account_manager_file_path = testing_folder.join(ACCOUNT_MANAGER_FILE_NAME); + let mut account_manager = AccountManager::load(&account_manager_file_path).await?; + let mut managed_node = ManagedNode::start(None, None).await?; + for (file_name, script_transactions) in script_transactions_vec { + script_transactions + .run( + &move_folder_path, + &script_transactions_output_folder, + &mut account_manager, + ) + .await + .context(format!( + "Failed to generate script transaction for file `{}`", + file_name + ))?; + } + // Stop the localnet. + managed_node.stop().await + } +} + +/// Configuration for importing transactions from multiple networks. +#[derive(Debug, Default, Serialize, Deserialize)] +pub struct TransactionImporterConfig { + // Config is a map from network name to the configuration for that network. + #[serde(flatten)] + pub configs: HashMap, +} + +impl TransactionImporterConfig { + fn validate(&self) -> anyhow::Result<()> { + // Validate the configuration. This is to make sure that no output file shares the same name. + let mut output_files = HashSet::new(); + for (_, network_config) in self.configs.iter() { + for output_file in network_config.versions_to_import.values() { + if !output_files.insert(output_file) { + return Err(anyhow::anyhow!( + "[Transaction Importer] Output file name {} is duplicated", + output_file + )); + } + } + } + Ok(()) + } + + pub async fn validate_and_run(&self, output_path: &Path) -> anyhow::Result<()> { + // Validate the configuration. + self.validate()?; + + // Run the transaction importer for each network. 
+ for (network_name, network_config) in self.configs.iter() { + // Modify the output path by appending the network name to the base path + let modified_output_path = match network_name.as_str() { + "mainnet" => output_path.join("imported_mainnet_txns"), + "testnet" => output_path.join("imported_testnet_txns"), + _ => { + return Err(anyhow::anyhow!( + "[Transaction Importer] Unknown network: {}", + network_name + )); + }, + }; + + tokio::fs::create_dir_all(&modified_output_path) + .await + .context(format!( + "[Transaction Importer] Failed to create output directory for network: {}", + network_name + ))?; + + network_config + .run(modified_output_path.as_path()) + .await + .context(format!( + "[Transaction Importer] Failed for network: {}", + network_name + ))?; + } + Ok(()) + } +} + +/// Configuration for importing transactions from a network. +/// This includes the URL of the network, the API key, the version of the transaction to fetch, +#[derive(Debug, Serialize, Deserialize)] +pub struct TransactionImporterPerNetworkConfig { + /// The endpoint of the transaction stream. + pub transaction_stream_endpoint: Url, + /// The API key to use for the transaction stream if required. + pub api_key: Option, + /// The version of the transaction to fetch and their output file names. + pub versions_to_import: HashMap, +} + +/// Configuration for generating transactions from a script. +/// `ScriptTransactions` will generate a list of transactions and output if specified. +/// A managed-node will be used to execute the scripts in sequence. +#[derive(Debug, Serialize, Deserialize)] +pub struct ScriptTransactions { + pub transactions: Vec, +} + +/// A step that can optionally output one transaction. +#[derive(Debug, Serialize, Deserialize)] +pub struct ScriptTransaction { + pub script_path: PathBuf, + pub output_name: Option, + // Fund the address and execute the script with the account. + pub sender_address: String, +} + +/// Convert relative path to absolute path. 
+fn convert_relative_path_to_absolute_path(path: &Path) -> PathBuf { + if path.is_relative() { + let current_dir = std::env::current_dir().unwrap(); + current_dir.join(path) + } else { + path.to_path_buf() + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[tokio::test] + async fn test_import_transactions_duplicate_output_name() { + let importing_transactions_config = r#" + { + "mainnet": { + "transaction_stream_endpoint": "http://mainnet.com", + "api_key": "mainnet_api_key", + "versions_to_import": { + 1: "mainnet_v1.json" + } + }, + "testnet": { + "transaction_stream_endpoint": "http://testnet.com", + "api_key": "testnet_api_key", + "versions_to_import": { + 1: "mainnet_v1.json" + } + } + } + "#; + let transaction_generator_config: TransactionImporterConfig = + serde_yaml::from_str(importing_transactions_config).unwrap(); + // create a temporary folder for the output. + let tempfile = tempfile::tempdir().unwrap(); + let result = transaction_generator_config + .validate_and_run(tempfile.path()) + .await; + assert!(result.is_err()); + } + + #[tokio::test] + async fn test_script_transactions_duplicate_output_name() { + // Create a temporary folder for the move scripts. 
+ let tempfile = tempfile::tempdir().unwrap(); + let output_folder = tempfile.path().join("output"); + tokio::fs::create_dir(&output_folder).await.unwrap(); + let move_folder_path = tempfile.path().join("move_fixtures"); + tokio::fs::create_dir(&move_folder_path).await.unwrap(); + + let first_script_transactions = r#" + transactions: + - script_path: "simple_script_1" + output_name: "output.json" + "#; + let second_script_transactions = r#" + transactions: + - script_path: "simple_script_2" + output_name: "output.json" + "#; + let first_script_transactions_path = + move_folder_path.join("first_script_transactions.yaml"); + let second_script_transactions_path = + move_folder_path.join("second_script_transactions.yaml"); + tokio::fs::write(&first_script_transactions_path, first_script_transactions) + .await + .unwrap(); + tokio::fs::write(&second_script_transactions_path, second_script_transactions) + .await + .unwrap(); + let indexer_cli_args = IndexerCliArgs { + testing_folder: tempfile.path().to_path_buf(), + output_folder, + }; + let result = indexer_cli_args.run().await; + assert!(result.is_err()); + } +} diff --git a/ecosystem/indexer-grpc/indexer-transaction-generator/src/config_template.yaml b/ecosystem/indexer-grpc/indexer-transaction-generator/src/config_template.yaml new file mode 100644 index 00000000000..bb5780d4538 --- /dev/null +++ b/ecosystem/indexer-grpc/indexer-transaction-generator/src/config_template.yaml @@ -0,0 +1,28 @@ +import_config: + testnet: + # Transaction Stream endpoint addresss. + transaction_stream_endpoint: https://grpc.testnet.aptoslabs.com:443 + # (Optional) The key to use with developers.aptoslabs.com + api_key: YOUR_KEY_HERE + # A map from versions to dump and their output names. + versions_to_import: + 123: testnet_v1.json + # mainnet: + # # Transaction Stream endpoint addresss. 
+ # transaction_stream_endpoint: https://grpc.mainnet.aptoslabs.com:443 + # # (Optional) The key to use with developers.aptoslabs.com + # api_key: YOUR_KEY_HERE + # versions_to_import: + # 123: mainnetnet_v1.json + # devnet: + # # Transaction Stream endpoint addresss. + # transaction_stream_endpoint: https://grpc.devnet.aptoslabs.com:443 + # # (Optional) The key to use with developers.aptoslabs.com + # api_key: YOUR_KEY_HERE + # versions_to_import: + # 123: devnet_v1.json + # custom: + # # Transaction Stream endpoint addresss. + # transaction_stream_endpoint: YOUR_CUSTOM_ENDPOINT + # versions_to_import: + # 123: custom_v1.json \ No newline at end of file diff --git a/ecosystem/indexer-grpc/indexer-transaction-generator/src/lib.rs b/ecosystem/indexer-grpc/indexer-transaction-generator/src/lib.rs new file mode 100644 index 00000000000..b30a5b2879f --- /dev/null +++ b/ecosystem/indexer-grpc/indexer-transaction-generator/src/lib.rs @@ -0,0 +1,8 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +pub mod accont_manager; +pub mod config; +pub mod managed_node; +pub mod script_transaction_generator; +pub mod transaction_importer; diff --git a/ecosystem/indexer-grpc/indexer-transaction-generator/src/main.rs b/ecosystem/indexer-grpc/indexer-transaction-generator/src/main.rs new file mode 100644 index 00000000000..ce68a99c9ac --- /dev/null +++ b/ecosystem/indexer-grpc/indexer-transaction-generator/src/main.rs @@ -0,0 +1,13 @@ +// Copyright (c) Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use anyhow::Result; +use aptos_indexer_transaction_generator::config::IndexerCliArgs; +use clap::Parser; + +#[tokio::main] +async fn main() -> Result<()> { + // Parse the command line arguments. 
+ let args = IndexerCliArgs::parse(); + args.run().await +} diff --git a/ecosystem/indexer-grpc/indexer-transaction-generator/src/managed_node.rs b/ecosystem/indexer-grpc/indexer-transaction-generator/src/managed_node.rs new file mode 100644 index 00000000000..7361f402d75 --- /dev/null +++ b/ecosystem/indexer-grpc/indexer-transaction-generator/src/managed_node.rs @@ -0,0 +1,129 @@ +// Copyright (c) Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use anyhow::Context; +use aptos::node::local_testnet::{ + faucet::FaucetManager, + get_derived_test_dir, + health_checker::HealthChecker, + node::{build_node_config, NodeManager}, + traits::ServiceManager, +}; +use aptos_config::config::DEFAULT_GRPC_STREAM_PORT; +use aptos_faucet_core::server::{FunderKeyEnum, RunConfig}; +use rand::{rngs::StdRng, SeedableRng}; +use std::{collections::HashSet, net::Ipv4Addr, path::PathBuf}; +use tokio::{ + fs::{create_dir_all, remove_dir_all}, + task::JoinSet, +}; + +const DEFAULT_SEED: [u8; 32] = [123; 32]; +use url::Url; +/// Use a subfolder to store the indexer testing data, this is to avoid conflicts with localnet testing. +const INDEXER_TESTING_FOLDER: &str = "indexer-testing"; +const FAUCET_DEFAULT_PORT: u16 = 8081; + +/// ManagedNode is a managed node that can execute Move scripts and modules. +#[derive(Debug)] +pub struct ManagedNode { + pub transaction_stream_url: Url, + + pub node: JoinSet>, +} + +impl ManagedNode { + pub async fn start( + node_config_path: Option, + node_data_dir: Option, + ) -> anyhow::Result { + let node_dir = get_derived_test_dir(&node_data_dir)?.join(INDEXER_TESTING_FOLDER); + // By default, we don't reuse the testnet folder. 
+ if node_dir.exists() { + remove_dir_all(node_dir.as_path()).await.context(format!( + "Failed to remove testnet folder at {:?}", + &node_dir + ))?; + } + create_dir_all(node_dir.as_path()).await.context(format!( + "Failed to create testnet folder at {:?}", + &node_dir + ))?; + let rng = StdRng::from_seed(DEFAULT_SEED); + let node = build_node_config(rng, &node_config_path, &None, false, node_dir.clone()) + .context("Failed to build node config")?; + + let node_manager = NodeManager::new_with_config( + node, + Ipv4Addr::LOCALHOST, + node_dir.clone(), + true, + DEFAULT_GRPC_STREAM_PORT, + false, + ) + .context("Failed to start node service manager")?; + + let node_health_checkers = node_manager.get_health_checkers(); + let faucet_manager = create_faucet_manager( + node_health_checkers.clone(), + FAUCET_DEFAULT_PORT, + node_dir.clone(), + node_manager.get_node_api_url(), + ) + .context("Failed to build faucet service manager")?; + let faucet_health_checkers = faucet_manager.get_health_checkers(); + + let managers: Vec> = + vec![Box::new(node_manager), Box::new(faucet_manager)]; + let mut join_set = JoinSet::new(); + for manager in managers { + join_set.spawn(manager.run()); + } + + let wait_for_startup_futures = faucet_health_checkers + .iter() + .map(|checker| checker.wait(None)); + for f in futures::future::join_all(wait_for_startup_futures).await { + f.context("Faucet service did not start up successfully")?; + } + + let transaction_stream_url = Url::parse("http://localhost:50051").unwrap(); + + println!("\nTransaction generator is ready to execute.\n"); + Ok(Self { + node: join_set, + transaction_stream_url, + }) + } + + /// Stops the node and the faucet. + pub async fn stop(&mut self) -> anyhow::Result<()> { + println!("Stopping node service task..."); + self.node.abort_all(); + // The tasks spawned are cancelled; so the errors here(Err::Cancelled) are expected and ignored. 
+ while self.node.join_next().await.is_some() { + println!("Node service task stopped."); + } + println!("===================="); + Ok(()) + } +} + +fn create_faucet_manager( + prerequisite_health_checkers: HashSet, + faucet_port: u16, + test_dir: PathBuf, + node_api_url: Url, +) -> anyhow::Result { + Ok(FaucetManager { + config: RunConfig::build_for_cli( + node_api_url.clone(), + Ipv4Addr::LOCALHOST.to_string(), + faucet_port, + FunderKeyEnum::KeyFile(test_dir.join("mint.key")), + true, + None, + ), + prerequisite_health_checkers, + }) +} diff --git a/ecosystem/indexer-grpc/indexer-transaction-generator/src/script_transaction_generator.rs b/ecosystem/indexer-grpc/indexer-transaction-generator/src/script_transaction_generator.rs new file mode 100644 index 00000000000..79d404f65ea --- /dev/null +++ b/ecosystem/indexer-grpc/indexer-transaction-generator/src/script_transaction_generator.rs @@ -0,0 +1,251 @@ +// Copyright (c) Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::{ + accont_manager::{Account, AccountManager}, + config::{ScriptTransaction, ScriptTransactions}, +}; +use anyhow::Context; +use aptos::{ + account::fund::FundWithFaucet, + common::types::{CliCommand, MovePackageDir, ScriptFunctionArguments, TransactionOptions}, + governance::CompileScriptFunction, + move_tool::{CompileScript, RunScript}, +}; +use aptos_protos::{ + indexer::v1::{raw_data_client::RawDataClient, GetTransactionsRequest}, + transaction::v1::Transaction, +}; +use std::{ + collections::HashMap, + path::{Path, PathBuf}, +}; + +/// GRPC request metadata key for the token ID. +const LOCAL_INDEXER_GRPC_URL: &str = "http://127.0.0.1:50051"; +const TRANSACTION_STREAM_TIMEOUT_IN_SECS: u64 = 60; +const DEFAULT_FUND_AMOUNT_IN_OCTA: u64 = 100_000_000; + +impl ScriptTransactions { + /// Run the script transaction generator using a localnode. 
+ pub async fn run( + &self, + move_folder_path: &Path, + output_path: &Path, + account_manager: &mut AccountManager, + ) -> anyhow::Result<()> { + // Get all accounts that'll be used in this run. + // TODO: improve this to support account address as argument. + let mut account_symbols: HashMap = HashMap::new(); + for transaction in &self.transactions { + account_symbols.insert( + transaction.sender_address.clone(), + account_manager.allocate_account()?, + ); + } + let mut versions_to_capture = vec![]; + for transaction in &self.transactions { + let sender_account = account_symbols + .get(transaction.sender_address.as_str()) + .unwrap(); + let version = self + .execute_script_transaction(move_folder_path, transaction, sender_account) + .await?; + if let Some(output_name) = &transaction.output_name { + versions_to_capture.push((version, output_name.clone())); + } + } + self.capture_transaction(output_path, versions_to_capture) + .await + } + + /// Prepare the script transaction run, including: + /// - Validate the script. + /// - Fund the account. + async fn prepare_script_transaction( + &self, + move_folder_path: &Path, + transaction: &ScriptTransaction, + sender_account: &Account, + ) -> anyhow::Result<()> { + let script_path = move_folder_path.join(&transaction.script_path); + std::env::set_current_dir(&script_path).context(format!( + "Failed to set the current directory to the script folder: {:?}", + script_path + ))?; + // Create temporary profile for the account. + sender_account + .save_as_profile_file(&script_path) + .await + .context("Failed to save the account profile.")?; + let fund_cmd = create_fund_cmd(DEFAULT_FUND_AMOUNT_IN_OCTA, sender_account); + let _ = fund_cmd.execute().await.context(format!( + "Failed to fund the account for account: {}.", + sender_account.account + ))?; + Ok(()) + } + + /// Execute a script transaction run and return the version of the transaction: + /// - Compile the script. + /// - Run the script. 
+ async fn execute_script_transaction( + &self, + move_folder_path: &Path, + transaction: &ScriptTransaction, + sender_account: &Account, + ) -> anyhow::Result { + let script_path = move_folder_path.join(&transaction.script_path); + self.prepare_script_transaction(move_folder_path, transaction, sender_account) + .await?; + // Compile the setup script. + let script_current_dir = std::env::current_dir().unwrap(); + let cmd = create_compile_script_cmd(script_current_dir.clone()); + let _ = cmd.execute().await.context(format!( + "Failed to compile the script: {:?}", + &script_current_dir + ))?; + + // Read the content of the TOML file. This is to get the package name. + let content = std::fs::read_to_string(script_current_dir.join("Move.toml")) + .expect("Failed to read TOML file"); + let value = content + .parse::() + .expect("Failed to parse TOML"); + let package_name = value + .get("package") + .context("Malformed Move.toml file: No package.")? + .get("name") + .context("Malformed Move.toml file: No package name.")? + .as_str() + .context("Malformed package name.")?; + + // Run the compiled script. + let compiled_build_path = script_current_dir + .join("build") + .join(package_name) + .join("bytecode_scripts") + .join("main.mv"); + + let cmd = create_run_script_cmd(compiled_build_path); + let transaction_summary = cmd.execute().await.context(format!( + "Failed to run the script: {:?}", + &script_current_dir + ))?; + sender_account + .delete_profile_file(&script_path) + .await + .context("Failed to delete the account profile.")?; + + if let Some(true) = transaction_summary.success { + Ok(transaction_summary.version.unwrap()) + } else { + anyhow::bail!("Failed to execute the script: {:?}", &script_current_dir); + } + } + + /// Capture the transactions based on input versions and write them to the output files. 
+ async fn capture_transaction( + &self, + output_path: &Path, + versions_to_capture: Vec<(u64, String)>, + ) -> anyhow::Result<()> { + if versions_to_capture.is_empty() { + anyhow::bail!("No transaction versions provided to capture."); + } + println!( + "Capturing transactions at versions: {:?}", + versions_to_capture + ); + // Build the request. + let first_version = versions_to_capture.first().unwrap().0; + let last_version = versions_to_capture.last().unwrap().0; + let transactions_count = last_version + 1 - first_version; + let request = tonic::Request::new(aptos_protos::indexer::v1::GetTransactionsRequest { + starting_version: Some(first_version), + transactions_count: Some(transactions_count), + ..GetTransactionsRequest::default() + }); + + // Create a client and send the request. + let mut client = RawDataClient::connect(LOCAL_INDEXER_GRPC_URL).await?; + let response = client.get_transactions(request).await?; + let mut response = response.into_inner(); + let mut transactions: Vec = Vec::new(); + + tokio::time::timeout( + std::time::Duration::from_secs(TRANSACTION_STREAM_TIMEOUT_IN_SECS), + async { + while let Ok(Some(resp_item)) = response.message().await { + for transaction in resp_item.transactions { + transactions.push(transaction); + } + } + }, + ) + .await + .context("Transaction stream timeout.")?; + // Create version to transaction hash map. + let transaction_versions_with_names: HashMap = + versions_to_capture.into_iter().collect(); + + // Write the transactions to the output files. + for txn in transactions { + let version = txn.version; + // This is not the transaction we want to capture. 
+ if !transaction_versions_with_names.contains_key(&version) { + continue; + } + let output_name = transaction_versions_with_names.get(&version).unwrap(); + let json_string = serde_json::to_string_pretty(&txn).context(format!( + "[Script Transaction Generator] Transaction at version {} failed to serialized to json string.", + version + ))?; + let output_path = output_path.join(output_name).with_extension("json"); + tokio::fs::write(&output_path, json_string) + .await + .context(format!( + "[Script Transaction Generator] Failed to write transaction at version {} to file.", + version + ))?; + + // Output the transaction to the console. + println!( + "Transaction {} is captured, path\n\t {:?}", + output_name, output_path + ); + } + Ok(()) + } +} + +fn create_compile_script_cmd(package_dir: PathBuf) -> CompileScript { + let mut move_package_dir = MovePackageDir::default(); + move_package_dir.package_dir = Some(package_dir); + + CompileScript { + output_file: None, + move_options: move_package_dir, + } +} + +fn create_run_script_cmd(script_path: PathBuf) -> RunScript { + let mut transaction_options = TransactionOptions::default(); + transaction_options.prompt_options.assume_yes = true; + transaction_options.prompt_options.assume_no = false; + RunScript { + txn_options: transaction_options, + compile_proposal_args: CompileScriptFunction { + compiled_script_path: Some(script_path), + ..CompileScriptFunction::default() + }, + script_function_args: ScriptFunctionArguments::default(), + } +} + +fn create_fund_cmd(amount: u64, account: &Account) -> FundWithFaucet { + let mut fund_with_faucet = FundWithFaucet::default(); + fund_with_faucet.amount = amount; + fund_with_faucet.account = Some(account.account.clone().parse().unwrap()); + fund_with_faucet +} diff --git a/ecosystem/indexer-grpc/indexer-transaction-generator/src/transaction_importer.rs b/ecosystem/indexer-grpc/indexer-transaction-generator/src/transaction_importer.rs new file mode 100644 index 
00000000000..3ac92ee95c2 --- /dev/null +++ b/ecosystem/indexer-grpc/indexer-transaction-generator/src/transaction_importer.rs @@ -0,0 +1,146 @@ +// Copyright (c) Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::config::TransactionImporterPerNetworkConfig; +use anyhow::Context; +use aptos_indexer_grpc_utils::create_data_service_grpc_client; +use aptos_protos::indexer::v1::GetTransactionsRequest; +use std::{path::Path, time::Duration}; + +/// GRPC request metadata key for the token ID. +const GRPC_API_GATEWAY_API_KEY_HEADER: &str = "authorization"; +const GRPC_REQUEST_NAME_HEADER: &str = "x-request-name"; +const GRPC_REQUEST_NAME_VALUE: &str = "testing-framework"; +const TRANSACTION_STREAM_TIMEOUT_IN_SECS: u64 = 60; + +impl TransactionImporterPerNetworkConfig { + pub async fn run(&self, output_path: &Path) -> anyhow::Result<()> { + let mut client = create_data_service_grpc_client( + self.transaction_stream_endpoint.clone(), + Some(Duration::from_secs(TRANSACTION_STREAM_TIMEOUT_IN_SECS)), + ) + .await?; + + for (version, output_file) in &self.versions_to_import { + let mut request = tonic::Request::new(GetTransactionsRequest { + starting_version: Some(*version), + transactions_count: Some(1), + ..GetTransactionsRequest::default() + }); + request.metadata_mut().insert( + GRPC_REQUEST_NAME_HEADER, + GRPC_REQUEST_NAME_VALUE.parse().unwrap(), + ); + if let Some(api_key) = &self.api_key { + request.metadata_mut().insert( + GRPC_API_GATEWAY_API_KEY_HEADER, + format!("Bearer {}", api_key.clone()).parse().unwrap(), + ); + } + let mut stream = client.get_transactions(request).await?.into_inner(); + while let Some(resp) = stream.message().await.context(format!( + "[Transaction Importer] Stream ended unexpected for endpoint {:?}", + self.transaction_stream_endpoint + ))? 
{ + let transaction = resp.transactions.first().context(format!( + "[Transaction Importer] Transaction at version {} is not in response.", + version + ))?; + let json_string = serde_json::to_string_pretty(transaction).context( + format!("[Transaction Importer] Transaction at version {} failed to serialized to json string.", version))?; + let output_path = output_path.join(output_file).with_extension("json"); + // TODO: add a diffing process here. + tokio::fs::write(output_path, json_string) + .await + .context(format!( + "[Transaction Importer] Failed to write transaction at version {} to file.", + version + ))?; + } + } + + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use crate::config::TransactionImporterPerNetworkConfig; + use aptos_protos::{ + indexer::v1::{ + raw_data_server::{RawData, RawDataServer}, + GetTransactionsRequest, TransactionsResponse, + }, + transaction::v1::Transaction, + }; + use futures::Stream; + use std::pin::Pin; + use tonic::{Request, Response, Status}; + + type ResponseStream = Pin> + Send>>; + + #[derive(Debug, Default)] + pub struct DummyServer { + pub transactions: Vec, + } + + #[tonic::async_trait] + impl RawData for DummyServer { + type GetTransactionsStream = ResponseStream; + + async fn get_transactions( + &self, + req: Request, + ) -> Result, Status> { + let version = req.into_inner().starting_version.unwrap(); + let transaction = self + .transactions + .iter() + .find(|t| t.transactions.first().unwrap().version == version) + .unwrap(); + let stream = futures::stream::iter(vec![Ok(transaction.clone())]); + Ok(Response::new(Box::pin(stream))) + } + } + + #[tokio::test] + async fn test_run() { + // Create a dummy transaction server. 
+ let transaction = Transaction { + version: 1, + ..Transaction::default() + }; + let transactions = vec![TransactionsResponse { + transactions: vec![transaction], + ..TransactionsResponse::default() + }]; + let server = DummyServer { transactions }; + tokio::spawn(async move { + tonic::transport::Server::builder() + .add_service(RawDataServer::new(server)) + .serve("127.0.0.1:51254".parse().unwrap()) + .await + .unwrap(); + }); + // Note: do not sleep here; client connection will be retried. + + // create temp dir + let temp_dir = tempfile::tempdir().unwrap(); + + let config_json = r#" + transaction_stream_endpoint: "http://localhost:51254" + versions_to_import: + 1: "testing_transaction" + "#; + + let config = + serde_yaml::from_str::(config_json).unwrap(); + config.run(temp_dir.path()).await.unwrap(); + + // Validate the output. + let output_path = temp_dir.path().join("testing_transaction.json"); + let output = tokio::fs::read_to_string(output_path).await.unwrap(); + let transaction = serde_json::from_str::(&output).unwrap(); + assert_eq!(transaction.version, 1); + } +} diff --git a/ecosystem/indexer-grpc/indexer-transaction-generator/template_config.yaml b/ecosystem/indexer-grpc/indexer-transaction-generator/template_config.yaml new file mode 100644 index 00000000000..a0193e83604 --- /dev/null +++ b/ecosystem/indexer-grpc/indexer-transaction-generator/template_config.yaml @@ -0,0 +1,25 @@ +# Config to import transactions onchain. +mainnet: + # Transaction Stream endpoint address. + transaction_stream_endpoint: https://grpc.mainnet.aptoslabs.com:443 + # (Optional) The key to use with developers.aptoslabs.com + api_key: YOUR_KEY_HERE + # A map from versions to dump and their output names. 
+ versions_to_import: { + # example: + # 123: "example_transaction" + } +# testnet: +# transaction_stream_endpoint: https://grpc.testnet.aptoslabs.com:443 +# api_key: YOUR_KEY_HERE +# versions_to_import: { +# } +# devnet: +# transaction_stream_endpoint: https://grpc.devnet.aptoslabs.com:443 +# api_key: YOUR_KEY_HERE +# versions_to_import: { +# } +# custom: +# transaction_stream_endpoint: https://CUSTOM_ENDPOINT:443 +# versions_to_import: { +# } \ No newline at end of file diff --git a/ecosystem/typescript/sdk/.eslintignore b/ecosystem/typescript/sdk/.eslintignore deleted file mode 100644 index aa46a3a980c..00000000000 --- a/ecosystem/typescript/sdk/.eslintignore +++ /dev/null @@ -1,3 +0,0 @@ -node_modules -dist/** -**/*.test.ts diff --git a/ecosystem/typescript/sdk/.eslintrc.js b/ecosystem/typescript/sdk/.eslintrc.js deleted file mode 100644 index 86745e95fa5..00000000000 --- a/ecosystem/typescript/sdk/.eslintrc.js +++ /dev/null @@ -1,36 +0,0 @@ -module.exports = { - env: { - browser: true, - es2021: true, - node: true, - }, - ignorePatterns: ["*.js", "examples/*", "src/indexer/generated/**", "scripts/publish_ans_contract.ts"], - extends: ["airbnb-base", "airbnb-typescript/base", "prettier"], - parser: "@typescript-eslint/parser", - parserOptions: { - tsconfigRootDir: __dirname, - project: ["tsconfig.json"], - ecmaVersion: "latest", - sourceType: "module", - }, - plugins: ["@typescript-eslint"], - rules: { - quotes: ["error", "double"], - "max-len": ["error", 120], - "import/extensions": ["error", "never"], - "max-classes-per-file": ["error", 10], - "import/prefer-default-export": "off", - "object-curly-newline": "off", - "no-use-before-define": "off", - "no-unused-vars": "off", - "@typescript-eslint/no-use-before-define": ["error", { functions: false, classes: false }], - "@typescript-eslint/no-unused-vars": ["error"], - }, - settings: { - "import/resolver": { - node: { - extensions: [".js", ".jsx", ".ts", ".tsx"], - }, - }, - }, -}; diff --git 
a/ecosystem/typescript/sdk/.gitignore b/ecosystem/typescript/sdk/.gitignore deleted file mode 100644 index a40daefe756..00000000000 --- a/ecosystem/typescript/sdk/.gitignore +++ /dev/null @@ -1,22 +0,0 @@ -.env -.DS_Store -*/**/.DS_Store -npm-debug.log -.npm/ -/coverage -/tmp -node_modules -.idea/ -.history/ -.vscode/ -dist/ -.nyc_output/ -build/ - -# Doc generation output -docs/ - -# Other build systems that are not used -yarn.lock -yarn-error.log -package-lock.json diff --git a/ecosystem/typescript/sdk/.npmignore b/ecosystem/typescript/sdk/.npmignore deleted file mode 100644 index 74dad3b85ec..00000000000 --- a/ecosystem/typescript/sdk/.npmignore +++ /dev/null @@ -1,5 +0,0 @@ -coverage -node_modules -.aptos -.env -examples/ diff --git a/ecosystem/typescript/sdk/.nvmrc b/ecosystem/typescript/sdk/.nvmrc deleted file mode 100644 index bf79505bb85..00000000000 --- a/ecosystem/typescript/sdk/.nvmrc +++ /dev/null @@ -1 +0,0 @@ -v16.14.0 diff --git a/ecosystem/typescript/sdk/.prettierignore b/ecosystem/typescript/sdk/.prettierignore deleted file mode 100644 index 9543bddf671..00000000000 --- a/ecosystem/typescript/sdk/.prettierignore +++ /dev/null @@ -1,2 +0,0 @@ -src/generated/* -src/indexer/generated/** diff --git a/ecosystem/typescript/sdk/.versionrc.json b/ecosystem/typescript/sdk/.versionrc.json deleted file mode 100644 index 94df5f41131..00000000000 --- a/ecosystem/typescript/sdk/.versionrc.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "types": [ - { "type": "feat", "section": "Features" }, - { "type": "fix", "section": "Bug Fixes" }, - { "type": "chore", "hidden": true }, - { "type": "docs", "hidden": true }, - { "type": "style", "hidden": true }, - { "type": "refactor", "hidden": true }, - { "type": "perf", "hidden": true }, - { "type": "test", "hidden": true } - ], - "skip": { - "bump": true, - "commit": true, - "tag": true - }, - "path": ".", - "header": "# Changelog\n\nAll notable changes to this project will be documented in this file.\n\nNote: This changelog is 
generated automatically.\n\n" -} diff --git a/ecosystem/typescript/sdk/CHANGELOG.md b/ecosystem/typescript/sdk/CHANGELOG.md deleted file mode 100644 index 543353b4b0d..00000000000 --- a/ecosystem/typescript/sdk/CHANGELOG.md +++ /dev/null @@ -1,325 +0,0 @@ -# Aptos TS SDK Changelog - -All notable changes to the Aptos Node SDK will be captured in this file. This changelog is written by hand for now. It adheres to the format set out by [Keep a Changelog](https://keepachangelog.com/en/1.0.0/). - -## Unreleased - -## 1.21.0 (2023-12-13) -- Fix nested type tag parsing in `Object` types -- Update all dependencies including `@aptos-labs/aptos-client` - -## 1.20.0 (2023-09-22) - -- Add current objects queries support - `getAccountOwnedObjects` -- Add `burnObject` transaction support in `AptosToken` - -## 1.19.0 (2023-08-24) - -- Add fungible asset queries support - `getAccountCoinsData`, `getAccountCoinsDataCount` - -## 1.18.0 (2023-08-10) - -- Fix default behavior for coin client to transfer and create account by default -- Filter amount > 0 on `getTokenOwnersData` -- Include missing fields for all Indexer queries - -## 1.17.0 (2023-08-04) - -- Add support for a fee payer transaction -- Return transaction message error when transaction has failed when `checkSuccess` is set to true - -## 1.16.0 (2023-08-02) - -- Export all remaining types and functions in TS SDK -- Add improvements for `IndexerClient` - - Export indexer types - `export * from "/indexer/generated/types"` - - Support for token v2 activities - `getTokenActivities`, response structure has changed - - Aggregate query suports token v2 activities - `getTokenActivitiesCount` - - Support for sorting indexer queries - `orderBy` optional argument in `extraArgs` arguments - - Support for get owned tokens by token address or token data id - `getOwnedTokensByTokenData` -- Add support for local/custom networks without an indexer client -- Move to use `account_transactions` query in `getAccountTransactionsData` on 
`IndexerClient` -- Move to use `account_transaction_aggregate` query in `getAccountTransactionsCount` on `IndexerClient` -- Optional `startVersion` argument on `getUserTransactions` is not positional and part of the object param - -## 1.15.0 (2023-07-28) - -- Implementing `TransactionWorker` - a layer for managing and submitting as many transactions from a single account at once -- Memoize account modules by adding `memoize` decorator to `getAccountModules` - -## 1.14.0 (2023-07-20) - -- Introduce and use `@aptos-labs/aptos-client` package to manage and handle the client used in the SDK - -## 1.13.3 (2023-07-19) - -- Add support for `null` or `undefined` as absense of an Option -- Fix `Option` for input arguments in entry functions - -## 1.13.2 (2023-07-12) - -- Add support for `Option` and `vector>` in the SDK - -## 1.13.1 (2023-07-06) - -- Fixed serialization of arguments like `u16`, `u32` and `u256` -- Support `contentType` in request so custom headers would not override required headers -- Set `WITH_CREDENTIALS` to false on Indexer requests - -## 1.13.0 (2023-07-05) - -- Use client that is not generated by openAPI - -## 1.12.0 (2023-06-30) - -- Add token standard v2 support to `getTokenOwnersData` query - - `propertyVersion` parameter is optional to support fetching token v2 data -- Introduce `getTokenCurrentOwnerData` to fetch the current owner of a token -- Add `mintAptosSubdomain` and `setSubdomainAddress` functions to `AnsClient` - -## 1.11.0 (2023-06-22) - -- Export `bcsSerializeU256` from `bcs/helper.ts` -- Add `examples` folder to `.npmignore` -- Use `0x1::aptos_account::transfer` in tests -- Support transfer a fungible token. - - Add a `transfer` function to the `AptosToken` class that accepts `NonFungibleTokenParameters` or `FungibleTokenParameters` types. -- `getTokenData` query supports token standard v2. Return fields have changed. 
- -## 1.10.0 (2023-06-07) - -- Add `x-aptos-client` header to `IndexerClient` requests -- Add `standardizeAddress` static function to `AccountAddress` class to standardizes an address to the format "0x" followed by 64 lowercase hexadecimal digits. -- Change `indexerUrl` param on `Provider` class to an optional parameter -- Add `getCollectionsWithOwnedTokens` query to fetch all collections that an account has tokens for -- Support `tokenStandard` param in `getOwnedTokens` and `getTokenOwnedFromCollectionAddress` queries -- Add `FungibleAssetClient` plugin to support fungible assets -- Support fungible assets in `CoinClient` class operations - -## 1.9.1 (2023-05-24) - -- Add `x-aptos-client` header to `AptosClient` requests - -## 1.9.0 (2023-05-17) - -- Fix get number of delegators Indexer query -- Include static methods from `AptosClient` and `IndexerClient` classes in the `Provider` class -- Add Indexer queries for tokens - `getOwnedTokens`, `getTokenOwnedFromCollectionAddress`, `getTokenOwnedFromCollectionNameAndCreatorAddress`, `getCollectionData`, `getCollectionAddress` - -## 1.8.5 (2023-04-29) - -- Add local tests for `AnsClient` -- Add `AptosToken` plugin to support tokenv2 -- Add generic support to input params in move entry functions -- Add signature verification method to AptosAccount. 
- -## 1.8.4 (2023-04-13) - -- Move `TypeTagParser` from `builder_utils.ts` to `type_tag.ts` -- Update `StructTag.fromString()` to use and relies on TypeTagParser - -## 1.8.3 (2023-04-10) - -- Add `publish-ans-contract` script and pnpm command for tests -- Revert User-Agent header from both `AptosClient` and `IndexerClient` due to a browser error - -## 1.8.2 (2023-04-06) - -- Introduce `AnsClient` class to support ANS (Aptos Names Service) data fetching queries -- Add `User-Agent` header to `AptosClient` and `IndexerClient` queries -- Add Indexer queries to `IndexerClient` - `getAccountCoinsData`, `getAccountTokensCount`, `getAccountTransactionsCount`, `getAccountTransactionsData`, `getCurrentDelegatorBalancesCount`, `getDelegatedStakingActivities`, `getTokensActivitiesCount`, `getTokenData`, `getTokenOwnersData`, `getTopUserTransactions`, `getUserTransactions` -- Add convertion layer to `IndexerClient` queries to handle missing `0x` -- Add validation layer to `IndexerClient` to validate queried account address is in the long format, i.e 66 chars long (0x<64 chars>) -- Change `queryIndexer` function in `IndexerClient` class visibility to public -- Add mint Aptos Name function `mintAptosName()` to `AnsClient` class - -## 1.7.2 (2023-03-13) - -- `CoinClient` and `TokenClient` to use remote ABI instead of local ABIs -- Reorganize SDK files structure for a better readability and maintainability -- Add `getIndexerLedgerInfo` query to `IndexerClient` - -## 1.7.1 (2023-03-02) - -- Fix IndexerClient error parsing using JSON.stringify() to display the error message correctly on the console - -## 1.7.0 (2023-03-01) - -- Add Indexer support. We introduce a new class `IndexerClient` that queries our Indexer to support data shaping fetching and providing users with a seamless experience. 
-- Introduces a `Provider` class we can initialize and query our blockchain by hiding the underlying implementation (fullnode vs indexer) - -## 1.6.0 (2023-01-20) - -- Add support to Move view functions - -## 1.5.0 (2023-01-05) - -- Export classes from property_map_serde -- User can specify token string property type using "string", "String" or "0x1::string::String" to serde the string token property on-chain -- Use `getAccountResource` to replace `getAccountResources` in `CoinClient#checkBalance`, which can reduce network load. - -## 1.4.0 (2022-11-30) - -- Add missing fields to TokenData class -- Add PropertyMap and PropertyValue type to match on-chain data -- Support token property map deseralizer to read the property map in the original data format. -- Allow `checkBalance` in `CoinClient` to take in a `MaybeHexString` as well as `AptosAccount`, since users might want to check the balance of accounts they don't own (which is generally how you use `AptosAccount`). -- Similar to `checkBalance`, allow `transfer` in `CoinClient` to take in a `MaybeHexString` for the `receiver` argument. -- Add a new `createReceiverIfMissing` argument to `transfer` in `CoinClient`. If set, the `0x1::aptos_account::transfer` function will be called instead of `0x1::coin::transfer`, which will create the account on chain if it doesn't exist instead of failing. - -## 1.3.17 (2022-11-08) - -- Support computing resource account address based off a source address and a seed -- Exported ABI types -- `getAccountModules` and `getAccountResources` now use pagination under the hood. This addresses the issue raised here: https://github.com/aptos-labs/aptos-core/issues/5298. The changes are non-breaking, if you use these functions with an older node that hasn't updated to include the relevant support in its API service, it will still work as it did before. 
-- To support the above, the generated client has been updated to attach the headers to the response object, as per the changes here: https://github.com/aptos-labs/openapi-typescript-codegen/compare/v0.23.0...aptos-labs:openapi-typescript-codegen:0.24.0?expand=1. Consider this an implementation detail, not a supported part of the SDK interface. -- Add functions to token client support - - direct transfer with opt-in - - burn token by owner - - burn token by creator - - mutate token properties -- Add property map serializer to serialize input to BCS encode - -## 1.3.16 (2022-10-12) - -- Add `estimatePrioritizedGasUnitPrice` to the simulation interface. If set to true, the estimated gas unit price is higher than the original estimate. Therefore, transactions have a higher chance to be executed during congestion period. -- `esitmateGasPrice` now returns `deprioritized_gas_estimate` and `prioritized_gas_estimate` along with `gas_estimate`. `deprioritized_gas_estimate` is a conservative price estimate. Users might end up paying less gas eventually, but the transaction execution is deprioritized by the block chain. On the other hand, `prioritized_gas_estimate` is a higher price esitmate. Transactions need to be executed sooner could use `prioritized_gas_estimate`. - -## 1.3.15 (2022-09-30) - -- **[Breaking Changes]** Following the deprecation notice in the release notes of 1.3.13, the following breaking changes have landed in this release. Please see the notes from last release for information on the new endpoints you must migrate to: - - The `getEventsByEventKey` function has been removed. - - The `key` field in the `Event` struct has been removed. -- Turn on `strict` in tsconfig - -## 1.3.14 (2022-09-20) - -- Enable SDK axios client to carry cookies for both the browser and node environments. -- Added new functions `getBlockByHeight` and `getBlockByVersion`. - -## 1.3.13 (2022-09-15) - -- Increase the default wait time for `waitForTransactionWithResult` to 20s. 
-- A new function called `getEventsByCreationNumber` has been added, corresponding to the new endpoint on the API. For more information on this change, see the [API changelog](https://github.com/aptos-labs/aptos-core/blob/main/api/doc/CHANGELOG.md) for API version 1.1.0. -- **[Deprecated]** The `getEventsByEventKey` function is now deprecated. In the next release it will be removed entirely. You must migrate to the new function, `getEventsByCreationNumber`, by then. -- Included in the `Event` struct (which is what the events endpoints return) is a new field called `guid`. This is a more easily interpretable representation of an event identifier than the `key` field. See the [API changelog](https://github.com/aptos-labs/aptos-core/blob/main/api/doc/CHANGELOG.md) for an example of the new field. -- **[Deprecated]** The `key` field in the `Event` struct is now deprecated. In the next release it will be removed entirely. You must migrate to using the `guid` field by then. -- Removed NPM dependencies ed25519-hd-key and typescript-memoize. -- Added IIFE bundle that can be served from CDN. No NPM is required to use the SDK in browser environment. - -## 1.3.12 (2022-09-08) - -- Feature to rotate auth key for single signature account - -## 1.3.11 (2022-08-31) - -- Upgraded typescript version from 4.7.4 to 4.8.2, as well as linter package versions. -- **[Breaking Change]** ModuleBundle transaction support is removed. Instead, SDK users should use `AptosClient.publishPackage` to publish Move packages. -- Expose detailed API errors. -- Accept stringified values as transaction payload parameters. - -## 1.3.10 (2022-08-26) - -- Fix the bug in `waitForTransactionWithResult`. When API returns `404`, the function should continue waiting rather than returning early. The reason is that the txn might not be committed promptly. `waitForTransactionWithResult` should either timeout or get an error in such case. 
- -## 1.3.9 (2022-08-25) - -- **[Breaking Change]** Reimplemented the JSON transaction submission interfaces with BCS. This is a breaking change. `createSigningMessage` is removed. Before the changes, the transaction payloads take string aruguments. But now, Typescript payload arguments have to match the smart contract arugment types. e.g. `number` matches `u8`, `number | bigint` matches `u64` and `u128`, etc. -- **[Breaking Change]** `getTokenBalance` and `getTokenBalanceForAccount` have been renamed to `getToken` and `getTokenForAccount`, since they were never getting just the balance, but the full token. -- Added `CoinClient` to help working with coins. This contains common operations such as `transfer`, `checkBalance`, etc. -- Added `generateSignSubmitWaitForTransaction`, a function that provides a simple way to execute the full end to end transaction submission flow. You may also leverage `generateSignSubmit`, a helper that does the same but without waiting, instead returning teh transaction hash. -- Added `fromDerivePath` to `AptosAccount`. You can use this to create an `AptosAccount` (which is a local representation of an account) using a bip44 path and mnemonics. - -## 1.3.7 (2022-08-17) - -- Add a transaction builder that is able to serialize transaction arguments with remote ABIs. Remote ABIs are fetchable through REST APIs. With the remote ABI transaction builder, developers can build BCS transactions by only providing the native JS values. -- Make all functions that accept `BigInt` parameters accept `BigInt | number` instead. - -## 1.3.6 (2022-08-10) - -- Switch back to representing certain move types (MoveModuleId, MoveStructTag, ScriptFunctionId) as strings, for both requests and responses. This reverts the change made in 1.3.2. See [#2663](https://github.com/aptos-labs/aptos-core/pull/2663) for more. -- Represent certain fields with slightly different snake casing, e.g. `ed25519_signature` now instead of `ed_25519_signature`. 
-- Add generated types for healthcheck endpoint. -- If the given URL is missing `/v1`, the `AptosClient` constructor will add it for you. You can opt out of this behavior by setting `doNotFixNodeUrl` to true when calling the constructor. - -## 1.3.5 (2022-08-08) - -- Re-expose BCS and items from `transaction_builder/builder` from the root of the module. - -## 1.3.4 (2022-08-07) - -- Downscaled default value for `max_gas`. - -## 1.3.3 (2022-08-05) - -- Update the token clients to submit transactions through BCS interface. The new token client doesn't hex-code "name", "decription" and "uri" any more. String properties are passed and saved just as strings. -- Expose `buildTransactionPayload` from ABI transaction builder. In some scenarios, developers just want to get a TransactionPayload rather than a RawTransaction. - -## 1.3.2 (2022-08-04) - -This special entry does not conform to the format set out by [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) as there are noteworthy breaking changes with necessary rationale. Future entries will follow this format. - -This release updates the SDK to work with V1 of the Aptos Node API. There are some key changes between V0 and V1 that you can read about in the [API changelog](https://github.com/aptos-labs/aptos-core/blob/main/api/doc/v1/CHANGELOG.md), refer to the notes for version 1.0.0. Accordingly, this SDK version represents breaking changes compared to 1.2.1. - -- The SDK now communicates by default with the `/v1` path of the API. It will not work correctly with the v0 API. If you provide a path yourself when instantiating a client, make sure you include `/v1`, e.g. http://fullnode.devnet.aptoslabs.com/v1. -- As of this release, the API, API spec, client generated from that spec, SDK wrapper, and examples are all tested together in CI. Previously it was possible for these to be out of sync, or in some cases, they would test against a different deployment entirely, such as devnet. 
Now we make the guarantee that all these pieces from the same commit work together. Notably this means exactly that; there is no guarantee that the latest version of the SDK will work with a particular Aptos network, such as devnet, except for a network built from the same commit as the SDK. -- The generated client within the SDK is generated using a different tool, [openapi-typescript-codegen](https://www.npmjs.com/package/openapi-typescript-codegen). Most of these changes are transparent to the user, as we continue to wrap the generated client, but some of the generated types are different, which we mention here. -- Token types are no longer exposed from the generated client (under `Types`) as they are no longer part of the API (indeed, they never truly were). Instead you can find these definitions exposed at `TokenTypes`. -- Some functions, such as for getting account resources and events, no longer accept resource types as concatenated strings. For example: - -```tsx -# Before: -const aptosCoin = "0x1::coin::CoinStore<0x1::aptos_coin::AptosCoin>"; -# After -const aptosCoin = const aptosCoin = { - address: "0x1", - module: "coin", - name: "CoinStore", - generic_type_params: ["0x1::aptos_coin::AptosCoin"], -}; -``` - -- Similarly, some endpoints no longer return this data as a string, but in a structured format, e.g. `MoveStructTag`. Remember to use something like `lodash.isEqual` to do equality checks with these structs. -- To help work with these different formats, functions for converting between them have been added to `utils`. -- A new function, `waitForTransactionWithResult`, has been added to help wait for a transaction and then get access to the response from the server once the function exits. - -For help with migration, we recommend you see the updated examples under `examples/`, they demonstrate how to deal with some of these changes, such as the more structured responses. 
We are also available to assist in the [Aptos Discord](https://discord.gg/aptosnetwork). - -**Deprecation Notice**: On September 1st we will remove the v0 API from the running nodes. As a user of the TS SDK, the best way you can migrate prior to this is by upgrading to version 1.3.2 or higher of the SDK. We will repeatedly remind developers of this upcoming deprecation as we approach that date. - -## 1.3.1 (2022-08-04) - -See release notes for 1.3.2. - -## 1.3.0 (2022-08-03) - -See release notes for 1.3.2. - -## 1.2.1 (2022-07-23) - -**Note:** This entry and earlier do not conform to the format set out by [Keep a Changelog](https://keepachangelog.com/en/1.0.0/). - -### Features - -- Deprecate getTokenBalance api in SDK ([2ec554e](https://github.com/aptos-labs/aptos-core/commit/2ec554e6e40a81cee4e760f6f84ef7362c570240)) -- Memoize chain id in aptos client ([#1589](https://github.com/aptos-labs/aptos-core/issues/1589)) ([4a6453b](https://github.com/aptos-labs/aptos-core/commit/4a6453bf0e620247557854053b661446bff807a7)) -- **Multiagent:** Support multiagent transaction submission ([#1543](https://github.com/aptos-labs/aptos-core/issues/1543)) ([0f0c70e](https://github.com/aptos-labs/aptos-core/commit/0f0c70e8ed2fefa952f0c89b7edb78edc174cb49)) -- Support retrieving token balance for any account ([7f93c21](https://github.com/aptos-labs/aptos-core/commit/7f93c2100f8b8e848461a0b5a395bfb76ade8667)) - -### Bug Fixes - -- Get rid of "natual" calls ([#1678](https://github.com/aptos-labs/aptos-core/issues/1678)) ([54601f7](https://github.com/aptos-labs/aptos-core/commit/54601f79206ea0f8b8b1b0d6599d31832fc4d195)) - -## 1.2.0 (2022-06-28) - -### Features - -- Vector tests for transaction signing ([6210c10](https://github.com/aptos-labs/aptos-core/commit/6210c10d3192fd0417b35709545fae850099e4d4)) -- Add royalty support for NFT tokens ([93a2cd0](https://github.com/aptos-labs/aptos-core/commit/93a2cd0bfd644725ac524f419e94077e0b16343b)) -- Add transaction builder examples 
([a710a50](https://github.com/aptos-labs/aptos-core/commit/a710a50e8177258d9c0766762b3c2959fc231259)) -- Support transaction simulation ([93073bf](https://github.com/aptos-labs/aptos-core/commit/93073bf1b508d00cfa1f8bb441ed57085fd08a82)) - -### Bug Fixes - -- Fix a typo, natual now becomes natural ([1b7d295](https://github.com/aptos-labs/aptos-core/commit/1b7d2957b79a5d2821ada0c5096cf43c412e0c2d)), closes [#1526](https://github.com/aptos-labs/aptos-core/issues/1526) -- Fix Javascript example ([5781fee](https://github.com/aptos-labs/aptos-core/commit/5781fee74b8f2b065e7f04c2f76952026860751d)), closes [#1405](https://github.com/aptos-labs/aptos-core/issues/1405) diff --git a/ecosystem/typescript/sdk/CONTRIBUTING.md b/ecosystem/typescript/sdk/CONTRIBUTING.md deleted file mode 100644 index 61c7e798bd9..00000000000 --- a/ecosystem/typescript/sdk/CONTRIBUTING.md +++ /dev/null @@ -1,57 +0,0 @@ -# Contribution Guidelines for Typescript SDK - -- Coding Styles - - File names must use Snake case. For example, `aptos_account.ts` . - - Class names must use Pascal case. For example, `class AuthenticationKey` . - - Function and method names must use Camel case. For example, `derivedAddress(): HexString` . - - Constants must use all caps (upper case) words separated by `_`. For example, `MAX_U8_NUMBER` . -- Comments - - Comments are required for new classes and functions. - - Comments should follow the TSDoc standard, [https://tsdoc.org/](https://tsdoc.org/). -- Lints and Formats - - ESlint (eslint) and Prettier (prettier) should be used for code checking and code formatting. Make sure to run `pnpm lint` and `pnpm fmt` after making changes to the code. -- Tests - - Unit tests are required for any non-trivial changes you make. - - The Jest testing framework is used in the repo and we recommend you use it. See Jest: [https://jestjs.io/](https://jestjs.io/). - - Make sure to run `pnpm test` after making changes. 
-- Commits - - Commit messages follow the [Angular convention](https://www.conventionalcommits.org/en/v1.0.0-beta.4/#summary). - -## Creating a pull request - -You are welcome to create a pull request against the main branch. - -Before creating a PR, - -- Make sure your branch is up to date with the `main` branch. -- On the root folder, run `pnpm test`. -- On the root folder, run `pnpm fmt`. -- On the root folder, run `pnpm lint`. - -If everything passes, you should be able to create a PR. - -#### Changelog - -This project keeps a changelog. If a pull request created needs to bump a package version, please follow those steps to create a changelog - -1. Bump the version in `package.json` according to [semver](https://semver.org/). -2. Bump the version in `version.ts`. -3. Add the change description in the CHANGELOG under the "Unreleased" section. - -## Release process - -To release a new version of the SDK do the following. - -1. Check that the commit you're deploying from (likely just the latest commit of `main`) is green in CI. Go to GitHub and make sure there is a green tick, specifically for the `sdk-release` release CI step. This ensures that the all tests, formatters, and linters passed, including server / client compatibility tests (within that commit) and tests to ensure the API, API spec, and client were all generated and match up. -2. Bump the version in `package.json` according to [semver](https://semver.org/). -3. Bump the version in `version.ts`. -4. Add an entry in the CHANGELOG for the version. We adhere to [Keep a Changelog](https://keepachangelog.com/en/1.0.0/). Generally this means changing the "Unreleased" section to a version and then making a new "Unreleased" section. -5. Once you're confident everything is correct, submit your PR. The CI will ensure that you have followed all the previous steps, specifically ensuring that the API, API spec, and SDK client are all compatible, that you've updated the changelog, that the tests pass, etc. -6. 
Land the PR into the main branch. Make sure this commit comes up green in CI too. -7. Check out the latest commit on main. -8. Get the auth token from our password manager. Search for "npmjs". It should look like similar to this: `npm_cccaCVg0bWaaR741D5Gdsd12T4JpQre444aaaa`. -9. Run `pnpm publish --dry-run`. From here, make some sanity checks: - a. Look closely at the output of the command. {ay close attention to what is packaged. Make sure we're not including some files that were included accidentally. For example `.aptos`. Add those to .npmignore if needed. - b. Compare the summary with the public npm package summary on npmjs. The number of files and sizes should not vary too much. -10. Run `NODE_AUTH_TOKEN= pnpm checked-publish`. -11. Double check that the release worked by visitng npmjs: https://www.npmjs.com/package/aptos. diff --git a/ecosystem/typescript/sdk/LICENSE b/ecosystem/typescript/sdk/LICENSE deleted file mode 100644 index c61b66391a3..00000000000 --- a/ecosystem/typescript/sdk/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. 
- - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- -Copyright [yyyy] [name of copyright owner] - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff --git a/ecosystem/typescript/sdk/README.md b/ecosystem/typescript/sdk/README.md index 1d0319e2bbd..79556a11ed0 100644 --- a/ecosystem/typescript/sdk/README.md +++ b/ecosystem/typescript/sdk/README.md @@ -1,90 +1,3 @@ -# SDK for Aptos Node API +# TypeScript SDK -> **_NOTE:_** -> This is the `legacy TypeScript SDK`, aka the npm package `aptos`. For a more robust SDK and better support, we recommend upgrading to the `new TypeScript SDK` [@aptos-labs/ts-sdk](https://github.com/aptos-labs/aptos-ts-sdk). Take a look at the [documentation](https://aptos.dev/sdks/new-ts-sdk/) and the [migration guide](https://aptos.dev/sdks/new-ts-sdk/migration-guide). - -[![Discord][discord-image]][discord-url] -[![NPM Package Version][npm-image-version]][npm-url] -[![NPM Package Downloads][npm-image-downloads]][npm-url] - -The Aptos TypeScript SDK provides a convenient way to interact with the Aptos blockchain using TypeScript. It offers a set of utility functions, classes, and types to simplify the integration process and enhance developer productivity. - -## Installation - -##### For use in Node.js or a web application - -```ts -pnpm install aptos -``` - -You can also use your preferred npm client, such as yarn or npm. - -##### For use in a browser - -```ts -