diff --git a/.config/nextest.toml b/.config/nextest.toml index f090f0c9bd9..f8b1b63abcf 100644 --- a/.config/nextest.toml +++ b/.config/nextest.toml @@ -5,6 +5,14 @@ fail-fast = true status-level = "pass" +# --- Platform-specific overrides --- + +# Skip Windows-incompatible tests +[[profile.default.overrides]] +platform = 'cfg(target_os = "windows")' + +filter = "not test(=trusted_chain_sync_handles_forks_correctly) and not test(=delete_old_databases)" + # --- All Tests profile --- # CI-friendly test selection. # @@ -21,6 +29,7 @@ status-level = "pass" # below for running them when needed. # TODO: We need a better test architecture to run all non-stateful [profile.all-tests] +failure-output = "immediate" default-filter = "not test(check_no_git_dependencies) and not test(=fully_synced_rpc_z_getsubtreesbyindex_snapshot_test) and not test(=lwd_rpc_test) and not test(=lwd_rpc_send_tx) and not test(=lwd_grpc_wallet) and not test(=lwd_integration) and not test(=lwd_sync_full) and not test(=lwd_sync_update) and not test(=lightwalletd_test_suite) and not test(=rpc_get_block_template) and not test(=rpc_submit_block) and not test(=get_peer_info) and not test(~generate_checkpoints_) and not test(=sync_update_mainnet) and not test(=activate_mempool_mainnet)" # --- Individual Test Profiles --- diff --git a/.github/PULL_REQUEST_TEMPLATE/hotfix-release-checklist.md b/.github/PULL_REQUEST_TEMPLATE/hotfix-release-checklist.md index ab730cac1b4..9f4149fe690 100644 --- a/.github/PULL_REQUEST_TEMPLATE/hotfix-release-checklist.md +++ b/.github/PULL_REQUEST_TEMPLATE/hotfix-release-checklist.md @@ -65,7 +65,7 @@ cargo release commit --verbose --execute --allow-branch '*' - [ ] Wait until the Docker binaries have been built on the hotfix release branch, and the quick tests have passed: - [ ] [ci-tests.yml](https://github.com/ZcashFoundation/zebra/actions/workflows/ci-tests.yml) -- [ ] Wait until the [pre-release deployment machines have successfully 
launched](https://github.com/ZcashFoundation/zebra/actions/workflows/cd-deploy-nodes-gcp.yml?query=event%3Arelease) +- [ ] Wait until the [pre-release deployment machines have successfully launched](https://github.com/ZcashFoundation/zebra/actions/workflows/zfnd-deploy-nodes-gcp.yml?query=event%3Arelease) ## Publish Release diff --git a/.github/PULL_REQUEST_TEMPLATE/release-checklist.md b/.github/PULL_REQUEST_TEMPLATE/release-checklist.md index a99cd9d308a..efe0c920dbd 100644 --- a/.github/PULL_REQUEST_TEMPLATE/release-checklist.md +++ b/.github/PULL_REQUEST_TEMPLATE/release-checklist.md @@ -166,7 +166,7 @@ The end of support height is calculated from the current blockchain height: - [ ] Wait until the Docker binaries have been built on `main`, and the quick tests have passed: - [ ] [ci-tests.yml](https://github.com/ZcashFoundation/zebra/actions/workflows/ci-tests.yml?query=branch%3Amain) -- [ ] Wait until the [pre-release deployment machines have successfully launched](https://github.com/ZcashFoundation/zebra/actions/workflows/cd-deploy-nodes-gcp.yml?query=event%3Arelease) +- [ ] Wait until the [pre-release deployment machines have successfully launched](https://github.com/ZcashFoundation/zebra/actions/workflows/zfnd-deploy-nodes-gcp.yml?query=event%3Arelease) ## Publish Release diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index 70ebc94f6df..b89e9d945a0 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -39,5 +39,7 @@ - [ ] The PR name is suitable for the release notes. +- [ ] The PR follows the [contribution guidelines](https://github.com/ZcashFoundation/zebra/blob/main/CONTRIBUTING.md). +- [ ] The library crate changelogs are up to date. - [ ] The solution is tested. - [ ] The documentation is up to date. 
diff --git a/.github/workflows/README.md b/.github/workflows/README.md index ed9666a1d6f..4bf01956091 100644 --- a/.github/workflows/README.md +++ b/.github/workflows/README.md @@ -19,85 +19,58 @@ Zebra's CI/CD system is built on GitHub Actions, providing a unified platform fo ## CI/CD Workflow Diagram -Below is a Mermaid diagram illustrating how our CI workflows relate to each other, with a focus on parallel execution patterns and job dependencies. The diagram shows the main CI pipeline, integration test flow, unit test flow, underlying infrastructure, and the various triggers that initiate the pipeline. +Below is a simplified Mermaid diagram showing the current workflows, their key triggers, and major dependencies. ```mermaid graph TB - %% Define Triggers subgraph with parallel triggers - subgraph "Triggers" - direction TB - P[Pull Request] & Q[Push to main] & R[Weekly Schedule] & S[Manual Trigger] & T[Merge Queue] - end - - %% Main CI Pipeline with parallel flows after build - subgraph "Main CI Pipeline" - direction TB - A[ci-tests.yml] - B[sub-build-docker-image.yml] - A --> B - end - - %% Infrastructure dependencies - subgraph "Infrastructure" - direction TB - M[Docker Build Cloud] - N[GCP Resources] - O[GitHub Runners] - end - - %% Unit Test Flow with parallel test execution - subgraph "Unit Test Flow" - direction TB - C[sub-ci-unit-tests-docker.yml] - H[test-all] & I[test-fake-activation-heights] & J[test-empty-sync] & K[test-lightwalletd-integration] & L[test-docker-configurations] - C --> H - C --> I - C --> J - C --> K - C --> L - end - - %% Integration Test Flow with some parallel and some sequential steps - subgraph "Integration Test Flow" - direction TB - D[sub-ci-integration-tests-gcp.yml] - E[sub-find-cached-disks.yml] - F[sub-deploy-integration-tests-gcp.yml] - G[sub-test-zebra-config.yml] - D --> E - D --> F - E --> F - F --> G - end - - %% Connect triggers to main pipeline - P --> A - Q --> A - R --> A - S --> A - T --> A - - %% Connect 
infrastructure to respective components - M --> B - N --> D - O --> C - - %% Connect main pipeline to test flows - B --> C - B --> D - - %% Style definitions - classDef primary fill:#2374ab,stroke:#2374ab,color:white - classDef secondary fill:#48a9a6,stroke:#48a9a6,color:white - classDef infra fill:#4b4e6d,stroke:#4b4e6d,color:white - classDef trigger fill:#95a5a6,stroke:#95a5a6,color:white - - %% Apply styles - class A,B primary - class C,D,E,F,G secondary - class H,I,J,K,L secondary - class M,N,O infra - class P,Q,R,S,T trigger + %% Triggers + subgraph Triggers + PR[Pull Request] & Push[Push to main] & Schedule[Weekly] & Manual[Manual] + end + + %% Reusable build + subgraph Build + BuildDocker[zfnd-build-docker-image.yml] + end + + %% CI workflows + subgraph CI + Unit[tests-unit.yml] + Lint[lint.yml] + Coverage[coverage.yml] + DockerCfg[test-docker.yml] + CrateBuild[test-crates.yml] + Docs[book.yml] + Security[zizmor.yml] + end + + %% Integration tests on GCP + subgraph GCP Integration + IT[zfnd-ci-integration-tests-gcp.yml] + FindDisks[zfnd-find-cached-disks.yml] + Deploy[zfnd-deploy-integration-tests-gcp.yml] + DeployNodes[zfnd-deploy-nodes-gcp.yml] + Cleanup[zfnd-delete-gcp-resources.yml] + end + + %% Trigger wiring + PR --> Unit & Lint & DockerCfg & CrateBuild & IT & Security + Push --> Unit & Lint & Coverage & Docs & Security + Schedule --> IT + Manual --> IT & DeployNodes & Cleanup + + %% Build dependency + BuildDocker --> IT + IT --> FindDisks --> Deploy + + %% Styling + classDef primary fill:#2374ab,stroke:#2374ab,color:white + classDef secondary fill:#48a9a6,stroke:#48a9a6,color:white + classDef trigger fill:#95a5a6,stroke:#95a5a6,color:white + class BuildDocker primary + class Unit,Lint,Coverage,DockerCfg,CrateBuild,Docs,Security secondary + class IT,FindDisks,Deploy,DeployNodes,Cleanup secondary + class PR,Push,Schedule,Manual trigger ``` *The diagram above illustrates the parallel execution patterns in our CI/CD system. 
All triggers can initiate the pipeline concurrently, unit tests run in parallel after the Docker image build, and integration tests follow a mix of parallel and sequential steps. The infrastructure components support their respective workflow parts concurrently.* @@ -168,41 +141,25 @@ graph TB ### Main Workflows -- **CI Tests** (`ci-*.yml`): Core testing workflows - - Unit tests - - Integration tests - - Code coverage - - Linting -- **CD Deployments** (`cd-*.yml`): Deployment workflows - - Node deployment to GCP - - Documentation deployment -- **Release Management** (`release-*.yml`): Version and release workflows - -### Supporting Workflows - -- **Sub-workflows** (`sub-*.yml`): Reusable workflow components - - Docker image building - - Test configurations - - GCP resource management -- **Patch Workflows** (`*.patch.yml`, `*.patch-external.yml`): Handle GitHub Actions limitations for required checks - -### Patch Workflows Rationale - -Our use of patch workflows (`.patch.yml` and `.patch-external.yml`) is a workaround for a [known limitation in GitHub Actions](https://github.com/orgs/community/discussions/44490) regarding path filters and required checks. When a workflow is marked as required for PR merging: - -1. **Path Filtering Limitation**: GitHub Actions does not properly handle the case where a required workflow is skipped due to path filters. Instead of marking the check as "skipped" or "passed", it remains in a "pending" state, blocking PR merges. - -2. **Our Solution**: We maintain parallel "patch" workflows that: - - - Run without path filters - - Contain minimal steps that always pass when the original workflow would have been skipped - - Allow PRs to merge when changes don't affect relevant paths - -3. 
**Impact**: - - - Doubled number of workflow files to maintain - - Additional complexity in workflow management - - Extra status checks in PR UI +- **Unit Tests** (`tests-unit.yml`): OS matrix unit tests via nextest +- **Lint** (`lint.yml`): Clippy, fmt, deny, features, docs build checks +- **Coverage** (`coverage.yml`): llvm-cov with nextest, uploads to Codecov +- **Test Docker Config** (`test-docker.yml`): Validates zebrad configs against built test image +- **Test Crate Build** (`test-crates.yml`): Builds each crate under various feature sets +- **Docs (Book + internal)** (`book.yml`): Builds mdBook and internal rustdoc, publishes to Pages +- **Security Analysis** (`zizmor.yml`): GitHub Actions security lint (SARIF) +- **Release Binaries** (`release-binaries.yml`): Build and publish release artifacts +- **Release Drafter** (`release-drafter.yml`): Automates release notes +- **Integration Tests on GCP** (`zfnd-ci-integration-tests-gcp.yml`): Stateful tests, cached disks, lwd flows + +### Supporting/Re-usable Workflows + +- **Build docker image** (`zfnd-build-docker-image.yml`): Reusable image build with caching and tagging +- **Find cached disks** (`zfnd-find-cached-disks.yml`): Discovers GCP disks for stateful tests +- **Deploy integration tests** (`zfnd-deploy-integration-tests-gcp.yml`): Orchestrates GCP VMs and test runs +- **Deploy nodes** (`zfnd-deploy-nodes-gcp.yml`): Provision long-lived nodes +- **Delete GCP resources** (`zfnd-delete-gcp-resources.yml`): Cleanup utilities +- Helper scripts in `.github/workflows/scripts/` used by the above ## Test Execution Strategy diff --git a/.github/workflows/cd-deploy-nodes-gcp.patch-external.yml b/.github/workflows/cd-deploy-nodes-gcp.patch-external.yml deleted file mode 100644 index da0153bc92b..00000000000 --- a/.github/workflows/cd-deploy-nodes-gcp.patch-external.yml +++ /dev/null @@ -1,29 +0,0 @@ -# Workflow patches for skipping Google Cloud CD deployments on PRs from external repositories. 
-name: Deploy Nodes to GCP - -# Run on PRs from external repositories, let them pass, and then GitHub's Merge Queue will check them. -# GitHub doesn't support filtering workflows by source branch names, so we have to do it for each -# job. -on: - pull_request: - -#! IMPORTANT -#! -#! The job names in `cd-deploy-nodes-gcp.yml`, `cd-deploy-nodes-gcp.patch.yml` and -#! `cd-deploy-nodes-gcp.patch-external.yml` must be kept in sync. -jobs: - # We don't patch the testnet job, because testnet isn't required to merge (it's too unstable) - get-disk-name: - name: Get disk name / Get Mainnet cached disk - if: ${{ (github.event_name != 'release' && !(github.event.pull_request.head.repo.fork)) && (github.event_name != 'workflow_dispatch' || inputs.need_cached_disk) }} - runs-on: ubuntu-latest - steps: - - run: 'echo "Skipping job on fork"' - - build: - name: Build CD Docker / Build images - # Only run on PRs from external repositories, skipping ZF branches and tags. - runs-on: ubuntu-latest - if: ${{ github.event_name == 'pull_request' && !github.event.pull_request.head.repo.fork }} - steps: - - run: 'echo "Skipping job on fork"' diff --git a/.github/workflows/cd-deploy-nodes-gcp.patch.yml b/.github/workflows/cd-deploy-nodes-gcp.patch.yml deleted file mode 100644 index 8ca9804f4ed..00000000000 --- a/.github/workflows/cd-deploy-nodes-gcp.patch.yml +++ /dev/null @@ -1,43 +0,0 @@ -# Workflow patches for skipping Google Cloud CD deployments, when Rust code or dependencies aren't -# modified in a PR. -name: Deploy Nodes to GCP - -# Run on PRs with unmodified code and dependency files. 
-on: - pull_request: - paths-ignore: - # code and tests - - '**/*.rs' - # hard-coded checkpoints and proptest regressions - - '**/*.txt' - # dependencies - - '**/Cargo.toml' - - '**/Cargo.lock' - # configuration files - - '.cargo/config.toml' - - '**/clippy.toml' - # workflow definitions - - 'docker/**' - - '.dockerignore' - - '.github/workflows/cd-deploy-nodes-gcp.yml' - - '.github/workflows/sub-build-docker-image.yml' - -#! IMPORTANT -#! -#! The job names in `cd-deploy-nodes-gcp.yml`, `cd-deploy-nodes-gcp.patch.yml` and -#! `cd-deploy-nodes-gcp.patch-external.yml` must be kept in sync. -jobs: - # We don't patch the testnet job, because testnet isn't required to merge (it's too unstable) - get-disk-name: - name: Get disk name / Get Mainnet cached disk - runs-on: ubuntu-latest - if: ${{ (github.event_name != 'release' && !(github.event.pull_request.head.repo.fork)) && (github.event_name != 'workflow_dispatch' || inputs.need_cached_disk) }} - steps: - - run: 'echo "No build required"' - - build: - name: Build CD Docker / Build images - runs-on: ubuntu-latest - if: ${{ github.event_name == 'pull_request' && !github.event.pull_request.head.repo.fork }} - steps: - - run: 'echo "No build required"' diff --git a/.github/workflows/ci-build-crates.patch.yml b/.github/workflows/ci-build-crates.patch.yml deleted file mode 100644 index 5f0518880fc..00000000000 --- a/.github/workflows/ci-build-crates.patch.yml +++ /dev/null @@ -1,71 +0,0 @@ -name: Build crates individually - -# We need to keep the `matrix` job in this workflow as-is, as we need the results -# to actually match the same `build` job names from the original file. 
-on: - pull_request: - paths-ignore: - # production code and test code - - "**/*.rs" - # dependencies - - "**/Cargo.toml" - - "**/Cargo.lock" - # configuration files - - ".cargo/config.toml" - - "**/clippy.toml" - # workflow definitions - - ".github/workflows/ci-build-crates.yml" - -jobs: - matrix: - name: Generate crates matrix - runs-on: ubuntu-latest - outputs: - matrix: ${{ steps.set-matrix.outputs.matrix }} - steps: - - uses: actions/checkout@v4.2.2 - - # Setup Rust with stable toolchain and minimal profile - - name: Setup Rust - run: | - rustup toolchain install stable --profile minimal - - # This step is meant to dynamically create a JSON containing the values of each crate - # available in this repo in the root directory. We use `cargo tree` to accomplish this task. - # - # The result from `cargo tree` is then transform to JSON values between double quotes, - # and separated by commas, then added to a `crates.txt` and assigned to a $JSON_CRATES variable. - # - # A JSON object is created and assigned to a $MATRIX variable, which is use to create an output - # named `matrix`, which is then used as the input in following steps, - # using ` ${{ fromJson(needs.matrix.outputs.matrix) }}` - - id: set-matrix - name: Dynamically build crates JSON - run: | - TEMP_DIR=$(mktemp -d) - echo "$(cargo tree --depth 0 --edges no-normal,no-dev,no-build,no-proc-macro --prefix none | cut -d ' ' -f1 | sed '/^$/d' | awk '{ printf "\"%s\",\n", $0 }' | sed '$ s/.$//')" > $TEMP_DIR/crates.txt - MATRIX=$( ( - echo '{ "crate" : [' - echo "$(cat $TEMP_DIR/crates.txt)" - echo " ]}" - ) | jq -c .) - echo $MATRIX - echo $MATRIX | jq . 
- echo "matrix=$MATRIX" >> "$GITHUB_OUTPUT" - - check-matrix: - name: Check crates matrix - runs-on: ubuntu-latest - needs: [matrix] - steps: - - run: 'echo "No job required"' - - build: - name: Build ${{ matrix.crate }} crate - needs: [matrix, check-matrix] - runs-on: ubuntu-latest - strategy: - matrix: ${{ fromJson(needs.matrix.outputs.matrix) }} - - steps: - - run: 'echo "No job required"' diff --git a/.github/workflows/ci-coverage.patch.yml b/.github/workflows/ci-coverage.patch.yml deleted file mode 100644 index 9b2320d82ee..00000000000 --- a/.github/workflows/ci-coverage.patch.yml +++ /dev/null @@ -1,23 +0,0 @@ -name: Coverage - -on: - pull_request: - paths-ignore: - - '**/*.rs' - - '**/*.txt' - - '**/*.snap' - - '**/Cargo.toml' - - '**/Cargo.lock' - # configuration files - - '.cargo/config.toml' - - '**/clippy.toml' - - 'codecov.yml' - - '.github/workflows/ci-coverage.yml' - -jobs: - coverage: - name: Coverage on stable - runs-on: ubuntu-latest - - steps: - - run: 'echo "No build required"' diff --git a/.github/workflows/ci-coverage.yml b/.github/workflows/ci-coverage.yml deleted file mode 100644 index 879398aed54..00000000000 --- a/.github/workflows/ci-coverage.yml +++ /dev/null @@ -1,106 +0,0 @@ -# This workflow calculates the test coverage for the Rust codebase. -# 1. The code is checked out. -# 2. Rust with the stable toolchain, minimal profile, and llvm-tools-preview component is set up. -# 3. Necessary tools like 'cargo-llvm-cov' are installed. -# 4. Proptest is minimized for efficient coverage test runs. -# 5. Tests are run without producing a report to gather coverage information. -# 6. A coverage report (lcov format) is generated based on the gathered information. -# 7. Finally, this report is uploaded to Codecov for visualization and analysis. -name: Coverage - -# Ensures that only one workflow task will run at a time. Previous builds, if -# already in process, will get cancelled. 
Only the latest commit will be allowed -# to run, cancelling any workflows in between -concurrency: - group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} - cancel-in-progress: true - -on: - workflow_dispatch: - - # we build Rust caches on main, - # so they can be shared by all branches: - # https://docs.github.com/en/actions/using-workflows/caching-dependencies-to-speed-up-workflows#restrictions-for-accessing-a-cache - push: - branches: - - main - paths: - # code and tests - - "**/*.rs" - # hard-coded checkpoints and proptest regressions - - "**/*.txt" - # test data snapshots - - "**/*.snap" - # dependencies - - "**/Cargo.toml" - - "**/Cargo.lock" - # configuration files - - ".cargo/config.toml" - - "**/clippy.toml" - # workflow definitions - - "codecov.yml" - - ".github/workflows/ci-coverage.yml" - - pull_request: - paths: - - "**/*.rs" - - "**/*.txt" - - "**/*.snap" - - "**/Cargo.toml" - - "**/Cargo.lock" - - ".cargo/config.toml" - - "**/clippy.toml" - - "codecov.yml" - - ".github/workflows/ci-coverage.yml" - -env: - CARGO_INCREMENTAL: ${{ vars.CARGO_INCREMENTAL }} - RUST_LOG: ${{ vars.RUST_LOG }} - RUST_BACKTRACE: ${{ vars.RUST_BACKTRACE }} - RUST_LIB_BACKTRACE: ${{ vars.RUST_LIB_BACKTRACE }} - COLORBT_SHOW_HIDDEN: ${{ vars.COLORBT_SHOW_HIDDEN }} - -jobs: - coverage: - name: Coverage on stable - # The large timeout is to accommodate: - # - stable builds (typically 50-90 minutes), and - timeout-minutes: 120 - runs-on: ${{ github.repository_owner == 'ZcashFoundation' && 'ubuntu-latest-xl' || 'ubuntu-latest' }} - - steps: - - uses: actions/checkout@v4.2.2 - with: - persist-credentials: false - - # Setup Rust with stable toolchain and minimal profile - - name: Setup Rust - run: | - rustup toolchain install stable --profile minimal - - - name: Install cargo-llvm-cov cargo command - run: cargo install cargo-llvm-cov - - - name: Skip network tests on Ubuntu - # Ubuntu runners don't have reliable network or DNS during test steps. 
- shell: bash - run: echo "SKIP_NETWORK_TESTS=1" >> $GITHUB_ENV - - - name: Minimise proptest cases in Coverage tests - # We set cases to 1, because some tests already run 1 case by default. - # We set maximum shrink iterations to 0, because we don't expect failures in these tests. - # - # Coverage tests are much slower than other tests, particularly in hot loops. - shell: bash - run: | - echo "PROPTEST_CASES=1" >> $GITHUB_ENV - echo "PROPTEST_MAX_SHRINK_ITERS=0" >> $GITHUB_ENV - - - name: Run Zebra tests - run: cargo llvm-cov --lcov --no-report - - - name: Generate coverage report - run: cargo llvm-cov --lcov --no-run --output-path lcov.info - - - name: Upload coverage report to Codecov - uses: codecov/codecov-action@v5.4.3 diff --git a/.github/workflows/ci-lint.patch.yml b/.github/workflows/ci-lint.patch.yml deleted file mode 100644 index a6f443c25c2..00000000000 --- a/.github/workflows/ci-lint.patch.yml +++ /dev/null @@ -1,17 +0,0 @@ -name: Lint - -on: - pull_request: - -jobs: - clippy: - name: Clippy - runs-on: ubuntu-latest - steps: - - run: 'echo "No build required"' - - fmt: - name: Rustfmt - runs-on: ubuntu-latest - steps: - - run: 'echo "No build required"' diff --git a/.github/workflows/ci-lint.yml b/.github/workflows/ci-lint.yml deleted file mode 100644 index 83059e95aed..00000000000 --- a/.github/workflows/ci-lint.yml +++ /dev/null @@ -1,172 +0,0 @@ -# This workflow conducts various linting checks for a Rust-based project. -# 1. Determines if Rust or workflow files have been modified. -# 2. Runs the Clippy linter on Rust files, producing annotations and failing on warnings. -# 3. Ensures Rust code formatting complies with 'rustfmt' standards. -# 4. Lints GitHub Actions workflow files for common issues. -# 5. Checks for common spelling errors in the codebase. -# The workflow is designed to maintain code quality and consistency, running checks conditionally based on the changed files. 
-name: Lint - -# Ensures that only one workflow task will run at a time. Previous builds, if -# already in process, will get cancelled. Only the latest commit will be allowed -# to run, cancelling any workflows in between -concurrency: - group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} - cancel-in-progress: true - -on: - # we build Rust caches on main, so they can be shared by all branches: - # https://docs.github.com/en/actions/using-workflows/caching-dependencies-to-speed-up-workflows#restrictions-for-accessing-a-cache - push: - branches: - - main - pull_request: - -env: - CARGO_INCREMENTAL: ${{ vars.CARGO_INCREMENTAL }} - RUST_LOG: ${{ vars.RUST_LOG }} - RUST_BACKTRACE: ${{ vars.RUST_BACKTRACE }} - RUST_LIB_BACKTRACE: ${{ vars.RUST_LIB_BACKTRACE }} - COLORBT_SHOW_HIDDEN: ${{ vars.COLORBT_SHOW_HIDDEN }} - -jobs: - changed-files: - runs-on: ubuntu-latest - name: Checks changed-files - outputs: - rust: ${{ steps.changed-files-rust.outputs.any_changed == 'true' }} - workflows: ${{ steps.changed-files-workflows.outputs.any_changed == 'true' }} - steps: - - uses: actions/checkout@v4.2.2 - with: - persist-credentials: false - fetch-depth: 0 - - - name: Rust files - id: changed-files-rust - uses: tj-actions/changed-files@v46.0.5 - with: - files: | - **/*.rs - **/Cargo.toml - **/Cargo.lock - clippy.toml - .cargo/config.toml - .github/workflows/ci-lint.yml - - - name: Workflow files - id: changed-files-workflows - uses: tj-actions/changed-files@v46.0.5 - with: - files: | - .github/workflows/*.yml - - clippy: - name: Clippy - timeout-minutes: 45 - runs-on: ubuntu-latest - needs: changed-files - if: ${{ needs.changed-files.outputs.rust == 'true' }} - - steps: - - uses: actions/checkout@v4.2.2 - with: - persist-credentials: false - - - name: Install last version of Protoc - uses: arduino/setup-protoc@v3.0.0 - with: - # TODO: increase to latest version after https://github.com/arduino/setup-protoc/issues/33 is fixed - version: "23.x" - repo-token: ${{ 
secrets.GITHUB_TOKEN }} - - - name: Check workflow permissions - id: check_permissions - uses: scherermichael-oss/action-has-permission@1.0.6 - with: - required-permission: write - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - - # Setup Rust with stable toolchain and default profile - - name: Setup Rust - run: | - rustup toolchain install stable --profile default - - - uses: Swatinem/rust-cache@v2.7.8 - with: - shared-key: "clippy-cargo-lock" - - # TODO: keep this action until we find a better solution - - name: Run clippy action to produce annotations - uses: actions-rs/clippy-check@v1.0.7 - if: ${{ steps.check_permissions.outputs.has-permission }} - with: - # GitHub displays the clippy job and its results as separate entries - name: Clippy (stable) Results - token: ${{ secrets.GITHUB_TOKEN }} - args: --workspace --all-features --all-targets -- -D warnings - - - name: Run clippy manually without annotations - if: ${{ !steps.check_permissions.outputs.has-permission }} - run: cargo clippy --workspace --all-features --all-targets -- -D warnings - - fmt: - name: Rustfmt - timeout-minutes: 30 - runs-on: ubuntu-latest - needs: changed-files - if: ${{ needs.changed-files.outputs.rust == 'true' }} - - steps: - - uses: actions/checkout@v4.2.2 - with: - persist-credentials: false - - uses: r7kamura/rust-problem-matchers@v1.5.0 - - - name: Install last version of Protoc - uses: arduino/setup-protoc@v3.0.0 - with: - # TODO: increase to latest version after https://github.com/arduino/setup-protoc/issues/33 is fixed - version: "23.x" - repo-token: ${{ secrets.GITHUB_TOKEN }} - - # Setup Rust with stable toolchain and default profile - - name: Setup Rust - run: | - rustup toolchain install stable --profile default - - # We don't cache `fmt` outputs because the job is quick, - # and we want to use the limited GitHub actions cache space for slower jobs. 
- #- uses: Swatinem/rust-cache@v2.7.8 - - - run: | - cargo fmt --all -- --check - - actionlint: - runs-on: ubuntu-latest - continue-on-error: true - needs: changed-files - if: ${{ needs.changed-files.outputs.workflows == 'true' }} - steps: - - uses: actions/checkout@v4.2.2 - - name: actionlint - uses: reviewdog/action-actionlint@v1.48.0 - with: - level: warning - fail_on_error: false - # This is failing with a JSON schema error, see #8028 for details. - #- name: validate-dependabot - # # This gives an error when run on PRs from external repositories, so we skip it. - # # If this is a PR, check that the PR source is a local branch. Always runs on non-PRs. - # if: ${{ !startsWith(github.event_name, 'pull') || !github.event.pull_request.head.repo.fork }} - # uses: marocchino/validate-dependabot@v2.1.0 - - codespell: - runs-on: ubuntu-latest - needs: changed-files - steps: - - uses: actions/checkout@v4.2.2 - - uses: codespell-project/actions-codespell@v2.1 - with: - only_warn: 1 diff --git a/.github/workflows/ci-tests.patch-external.yml b/.github/workflows/ci-tests.patch-external.yml deleted file mode 100644 index b0f86ddcb12..00000000000 --- a/.github/workflows/ci-tests.patch-external.yml +++ /dev/null @@ -1,201 +0,0 @@ -# Workflow patches for skipping CI tests on PRs from external repositories -name: Run tests - -# Run on PRs from external repositories, let them pass, and then GitHub's Merge Queue will check them. -# GitHub doesn't support filtering workflows by source branch names, so we have to do it for each -# job. -on: - pull_request: - -#! IMPORTANT -#! -#! The job names in `sub-ci-unit-tests-docker.yml`, `sub-ci-integration-tests-gcp.yml`, -#! `ci-tests.patch.yml` and `ci-tests.patch-external.yml` must be kept in sync. -jobs: - build: - name: Build CI Docker / Build images - # Only run on PRs from external repositories. 
- if: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.repo.fork }} - runs-on: ubuntu-latest - steps: - - run: 'echo "Skipping job on fork"' - - #### - ## The following jobs are related to sub-ci-unit-tests-docker.yml - ### - test-all: - name: Unit tests / Test all - runs-on: ubuntu-latest - if: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.repo.fork }} - steps: - - run: 'echo "Skipping job on fork"' - - sync-large-checkpoints-empty: - name: Unit tests / Test checkpoint sync from empty state - runs-on: ubuntu-latest - if: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.repo.fork }} - steps: - - run: 'echo "Skipping job on fork"' - - test-lightwalletd-integration: - name: Unit tests / Lightwalletd integration - runs-on: ubuntu-latest - if: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.repo.fork }} - steps: - - run: 'echo "Skipping job on fork"' - - #### - ## The following jobs are related to sub-test-zebra-config.yml - ### - test-docker-configurations-default: - name: Unit tests / Test Zebra Docker configurations / Test Default config - runs-on: ubuntu-latest - if: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.repo.fork }} - steps: - - run: 'echo "No build required"' - - test-docker-configurations-custom: - name: Unit tests / Test Zebra Docker configurations / Test Custom cache and cookie directories - runs-on: ubuntu-latest - if: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.repo.fork }} - steps: - - run: 'echo "No build required"' - - test-docker-configurations-custom-config: - name: Unit tests / Test Zebra Docker configurations / Test Custom config - runs-on: ubuntu-latest - if: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.repo.fork }} - steps: - - run: 'echo "No build required"' - - test-docker-configurations-mining: - name: Unit tests / Test Zebra Docker configurations / 
Test Mining configuration - runs-on: ubuntu-latest - if: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.repo.fork }} - steps: - - run: 'echo "No build required"' - - test-docker-configurations-prometheus: - name: Unit tests / Test Zebra Docker configurations / Test Prometheus metrics - runs-on: ubuntu-latest - if: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.repo.fork }} - steps: - - run: 'echo "No build required"' - - test-docker-configurations-rpc: - name: Unit tests / Test Zebra Docker configurations / Test RPC config - runs-on: ubuntu-latest - if: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.repo.fork }} - steps: - - run: 'echo "No build required"' - - test-docker-configurations-rpc-custom-cookie: - name: Unit tests / Test Zebra Docker configurations / Test RPC with custom cookie dir - runs-on: ubuntu-latest - if: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.repo.fork }} - steps: - - run: 'echo "No build required"' - - test-docker-configurations-rpc-custom-port: - name: Unit tests / Test Zebra Docker configurations / Test RPC with custom port - runs-on: ubuntu-latest - if: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.repo.fork }} - steps: - - run: 'echo "No build required"' - - test-docker-configurations-testnet: - name: Unit tests / Test Zebra Docker configurations / Test Testnet config - runs-on: ubuntu-latest - if: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.repo.fork }} - steps: - - run: 'echo "No build required"' - - #### - #### - ## The following jobs are related to sub-ci-integration-tests-gcp.yml - ### - - # We don't patch the testnet job, because testnet isn't required to merge (it's too unstable) - get-available-disks: - name: Integration tests / Check if cached state disks exist for Mainnet / Get Mainnet cached disk - runs-on: ubuntu-latest - if: ${{ github.event_name == 
'pull_request' && github.event.pull_request.head.repo.fork }} - steps: - - run: 'echo "Skipping job on fork"' - - sync-past-mandatory-checkpoint: - name: Integration tests / Zebra checkpoint update / Run sync-past-checkpoint test - runs-on: ubuntu-latest - if: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.repo.fork }} - steps: - - run: 'echo "Skipping job on fork"' - - sync-update-mainnet: - name: Integration tests / Zebra tip update / Run update-to-tip test - runs-on: ubuntu-latest - if: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.repo.fork }} - steps: - - run: 'echo "Skipping job on fork"' - - generate-checkpoints-mainnet: - name: Integration tests / Generate checkpoints mainnet / Run checkpoints-mainnet test - runs-on: ubuntu-latest - if: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.repo.fork }} - steps: - - run: 'echo "Skipping job on fork"' - - generate-checkpoints-testnet: - name: Integration tests / Generate checkpoints testnet / Run checkpoints-testnet test - runs-on: ubuntu-latest - if: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.repo.fork }} - steps: - - run: 'echo "Skipping job on fork"' - - rpc-fully-synced-test: - name: Integration tests / Zebra tip JSON-RPC / Run fully-synced-rpc test - runs-on: ubuntu-latest - if: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.repo.fork }} - steps: - - run: 'echo "Skipping job on fork"' - - lwd-rpc-send-tx: - name: Integration tests / lightwalletd tip send / Run lwd-send-transactions test - runs-on: ubuntu-latest - if: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.repo.fork }} - steps: - - run: 'echo "Skipping job on fork"' - - rpc-get-template: - name: Integration tests / get block template / Run get-block-template test - runs-on: ubuntu-latest - if: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.repo.fork }} - steps: - 
- run: 'echo "Skipping job on fork"' - - rpc-submit-block: - name: Integration tests / submit block / Run submit-block test - runs-on: ubuntu-latest - if: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.repo.fork }} - steps: - - run: 'echo "Skipping job on fork"' - - lwd-sync-full: - name: Integration tests / lightwalletd tip / Run lwd-full-sync test - runs-on: ubuntu-latest - if: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.repo.fork }} - steps: - - run: 'echo "Skipping job on fork"' - - lwd-sync-update: - name: Integration tests / lightwalletd tip update / Run lwd-update-sync test - runs-on: ubuntu-latest - if: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.repo.fork }} - steps: - - run: 'echo "Skipping job on fork"' - - lwd-grpc-wallet: - name: Integration tests / lightwalletd GRPC tests / Run lwd-grpc-wallet test - runs-on: ubuntu-latest - if: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.repo.fork }} - steps: - - run: 'echo "Skipping job on fork"' diff --git a/.github/workflows/ci-tests.patch.yml b/.github/workflows/ci-tests.patch.yml deleted file mode 100644 index adf09fc5f66..00000000000 --- a/.github/workflows/ci-tests.patch.yml +++ /dev/null @@ -1,219 +0,0 @@ -# Workflow patches for skipping CI tests when Rust code or dependencies -# aren't modified in a PR. -name: Run tests - -# Run on PRs with unmodified code and dependency files. 
-on: - pull_request: - paths-ignore: - # code and tests - - "**/*.rs" - # hard-coded checkpoints and proptest regressions - - "**/*.txt" - # test data snapshots - - "**/*.snap" - # dependencies - - "**/Cargo.toml" - - "**/Cargo.lock" - # configuration files - - ".cargo/config.toml" - - "**/clippy.toml" - # workflow definitions - - "docker/**" - - ".dockerignore" - - ".github/workflows/ci-tests.yml" - - ".github/workflows/sub-ci-unit-tests-docker.yml" - - ".github/workflows/sub-ci-integration-tests-gcp.yml" - - ".github/workflows/sub-deploy-integration-tests-gcp.yml" - - ".github/workflows/sub-find-cached-disks.yml" - - ".github/workflows/sub-build-docker-image.yml" - -#! IMPORTANT -#! -#! The job names in `sub-ci-unit-tests-docker.yml`, `sub-ci-integration-tests-gcp.yml`, -#! `ci-tests.patch.yml` and `ci-tests.patch-external.yml` must be kept in sync. -jobs: - build: - name: Build CI Docker / Build images - runs-on: ubuntu-latest - steps: - - run: 'echo "No build required"' - - #### - ## The following jobs are related to sub-ci-unit-tests-docker.yml - ### - test-all: - name: Unit tests / Test all - runs-on: ubuntu-latest - if: ${{ github.event_name == 'pull_request' && !github.event.pull_request.head.repo.fork }} - steps: - - run: 'echo "No build required"' - - sync-large-checkpoints-empty: - name: Unit tests / Test checkpoint sync from empty state - runs-on: ubuntu-latest - if: ${{ github.event_name == 'pull_request' && !github.event.pull_request.head.repo.fork }} - steps: - - run: 'echo "No build required"' - - test-lightwalletd-integration: - name: Unit tests / Lightwalletd integration - runs-on: ubuntu-latest - if: ${{ github.event_name == 'pull_request' && !github.event.pull_request.head.repo.fork }} - steps: - - run: 'echo "No build required"' - - #### - ## The following jobs are related to sub-test-zebra-config.yml - ### - test-docker-configurations-default: - name: Unit tests / Test Zebra Docker configurations / Test Default config - runs-on: ubuntu-latest 
- if: ${{ github.event_name == 'pull_request' && !github.event.pull_request.head.repo.fork }} - steps: - - run: 'echo "No build required"' - - test-docker-configurations-custom: - name: Unit tests / Test Zebra Docker configurations / Test Custom cache and cookie directories - runs-on: ubuntu-latest - if: ${{ github.event_name == 'pull_request' && !github.event.pull_request.head.repo.fork }} - steps: - - run: 'echo "No build required"' - - test-docker-configurations-custom-config: - name: Unit tests / Test Zebra Docker configurations / Test Custom config - runs-on: ubuntu-latest - if: ${{ github.event_name == 'pull_request' && !github.event.pull_request.head.repo.fork }} - steps: - - run: 'echo "No build required"' - - test-docker-configurations-mining: - name: Unit tests / Test Zebra Docker configurations / Test Mining configuration - runs-on: ubuntu-latest - if: ${{ github.event_name == 'pull_request' && !github.event.pull_request.head.repo.fork }} - steps: - - run: 'echo "No build required"' - - test-docker-configurations-prometheus: - name: Unit tests / Test Zebra Docker configurations / Test Prometheus metrics - runs-on: ubuntu-latest - if: ${{ github.event_name == 'pull_request' && !github.event.pull_request.head.repo.fork }} - steps: - - run: 'echo "No build required"' - - test-docker-configurations-rpc: - name: Unit tests / Test Zebra Docker configurations / Test RPC config - runs-on: ubuntu-latest - if: ${{ github.event_name == 'pull_request' && !github.event.pull_request.head.repo.fork }} - steps: - - run: 'echo "No build required"' - - test-docker-configurations-rpc-custom-cookie: - name: Unit tests / Test Zebra Docker configurations / Test RPC with custom cookie dir - runs-on: ubuntu-latest - if: ${{ github.event_name == 'pull_request' && !github.event.pull_request.head.repo.fork }} - steps: - - run: 'echo "No build required"' - - test-docker-configurations-rpc-custom-port: - name: Unit tests / Test Zebra Docker configurations / Test RPC with custom port 
- runs-on: ubuntu-latest - if: ${{ github.event_name == 'pull_request' && !github.event.pull_request.head.repo.fork }} - steps: - - run: 'echo "No build required"' - - test-docker-configurations-testnet: - name: Unit tests / Test Zebra Docker configurations / Test Testnet config - runs-on: ubuntu-latest - if: ${{ github.event_name == 'pull_request' && !github.event.pull_request.head.repo.fork }} - steps: - - run: 'echo "No build required"' - - #### - ## The following jobs are related to sub-ci-integration-tests-gcp.yml - ### - - # We don't patch the testnet job, because testnet isn't required to merge (it's too unstable) - get-available-disks: - name: Integration tests / Check if cached state disks exist for Mainnet / Get Mainnet cached disk - runs-on: ubuntu-latest - if: ${{ github.event_name == 'pull_request' && !github.event.pull_request.head.repo.fork }} - steps: - - run: 'echo "No build required"' - - sync-past-mandatory-checkpoint: - name: Integration tests / Zebra checkpoint update / Run sync-past-checkpoint test - runs-on: ubuntu-latest - if: ${{ github.event_name == 'pull_request' && !github.event.pull_request.head.repo.fork }} - steps: - - run: 'echo "No build required"' - - sync-update-mainnet: - name: Integration tests / Zebra tip update / Run update-to-tip test - runs-on: ubuntu-latest - if: ${{ github.event_name == 'pull_request' && !github.event.pull_request.head.repo.fork }} - steps: - - run: 'echo "No build required"' - - generate-checkpoints-mainnet: - name: Integration tests / Generate checkpoints mainnet / Run checkpoints-mainnet test - runs-on: ubuntu-latest - if: ${{ github.event_name == 'pull_request' && !github.event.pull_request.head.repo.fork }} - steps: - - run: 'echo "No build required"' - - generate-checkpoints-testnet: - name: Integration tests / Generate checkpoints testnet / Run checkpoints-testnet test - runs-on: ubuntu-latest - if: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.repo.fork }} - steps: - - 
run: 'echo "Skipping job on fork"' - - rpc-fully-synced-test: - name: Integration tests / Zebra tip JSON-RPC / Run fully-synced-rpc test - runs-on: ubuntu-latest - if: ${{ github.event_name == 'pull_request' && !github.event.pull_request.head.repo.fork }} - steps: - - run: 'echo "No build required"' - - lwd-rpc-send-tx: - name: Integration tests / lightwalletd tip send / Run lwd-send-transactions test - runs-on: ubuntu-latest - if: ${{ github.event_name == 'pull_request' && !github.event.pull_request.head.repo.fork }} - steps: - - run: 'echo "No build required"' - - rpc-get-template: - name: Integration tests / get block template / Run get-block-template test - runs-on: ubuntu-latest - if: ${{ github.event_name == 'pull_request' && !github.event.pull_request.head.repo.fork }} - steps: - - run: 'echo "No build required"' - - rpc-submit-block: - name: Integration tests / submit block / Run submit-block test - runs-on: ubuntu-latest - if: ${{ github.event_name == 'pull_request' && !github.event.pull_request.head.repo.fork }} - steps: - - run: 'echo "No build required"' - - lwd-sync-full: - name: Integration tests / lightwalletd tip / Run lwd-full-sync test - runs-on: ubuntu-latest - if: ${{ github.event_name == 'pull_request' && !github.event.pull_request.head.repo.fork }} - steps: - - run: 'echo "No build required"' - - lwd-sync-update: - name: Integration tests / lightwalletd tip update / Run lwd-update-sync test - runs-on: ubuntu-latest - if: ${{ github.event_name == 'pull_request' && !github.event.pull_request.head.repo.fork }} - steps: - - run: 'echo "No build required"' - - lwd-grpc-wallet: - name: Integration tests / lightwalletd GRPC tests / Run lwd-grpc-wallet test - runs-on: ubuntu-latest - if: ${{ github.event_name == 'pull_request' && !github.event.pull_request.head.repo.fork }} - steps: - - run: 'echo "No build required"' diff --git a/.github/workflows/ci-tests.yml b/.github/workflows/ci-tests.yml deleted file mode 100644 index 846c5323cba..00000000000 
--- a/.github/workflows/ci-tests.yml +++ /dev/null @@ -1,170 +0,0 @@ -# This workflow builds a Zebra Docker image and runs integration and unit tests -# on the Zebra codebase. It is designed to add the different test workflows -name: Run tests - -# Ensures that only one workflow task will run at a time. Previous builds, if -# already in process, will get cancelled. Only the latest commit will be allowed -# to run, cancelling any workflows in between - -concurrency: - group: ${{ github.workflow }}-${{ github.event_name == 'schedule' && 'schedule' || github.event_name == 'workflow_dispatch' && 'manual' || github.head_ref || github.ref_name }} - cancel-in-progress: ${{ github.event_name == 'pull_request' }} - -on: - merge_group: - types: [checks_requested] - - schedule: - # Run this job every Friday at mid-day UTC - # This is limited to the Zebra and lightwalletd Full Sync jobs - # TODO: we should move this behavior to a separate workflow - - cron: "0 12 * * 5" - - workflow_dispatch: - inputs: - network: - default: "Mainnet" - description: "Network to deploy: Mainnet or Testnet" - required: true - regenerate-disks: - type: boolean - default: false - description: "Just run a Zebra checkpoint sync and update checkpoint disks" - required: true - run-full-sync: - type: boolean - default: false - description: "Just run a Zebra full sync on `network`, and update tip disks" - required: true - run-lwd-sync: - type: boolean - default: false - description: "Just run a lightwalletd full sync and update tip disks" - required: true - force_save_to_disk: - required: false - type: boolean - default: false - description: "Force tests to always create a cached state disk, if they already create disks" - no_cache: - description: "Disable the Docker cache for this build" - required: false - type: boolean - default: false - - pull_request: - # Run only on PRs that modify Rust code or dependencies. 
- paths: - # code and tests - - "**/*.rs" - # hard-coded checkpoints and proptest regressions - - "**/*.txt" - # test data snapshots - - "**/*.snap" - # dependencies - - "**/Cargo.toml" - - "**/Cargo.lock" - # configuration files - - ".cargo/config.toml" - - "**/clippy.toml" - # supply chain security - - "**/deny.toml" - # workflow definitions - - "docker/**" - - ".dockerignore" - - ".github/workflows/ci-tests.yml" - - ".github/workflows/sub-ci-unit-tests-docker.yml" - - ".github/workflows/sub-ci-integration-tests-gcp.yml" - - ".github/workflows/sub-deploy-integration-tests-gcp.yml" - - ".github/workflows/sub-find-cached-disks.yml" - - ".github/workflows/sub-build-docker-image.yml" - - push: - # Run only on main branch updates that modify Rust code or dependencies. - branches: - - main - paths: - # code and tests - - "**/*.rs" - # hard-coded checkpoints and proptest regressions - - "**/*.txt" - # test data snapshots - - "**/*.snap" - # dependencies - - "**/Cargo.toml" - - "**/Cargo.lock" - # configuration files - - ".cargo/config.toml" - - "**/clippy.toml" - # supply chain security - - "**/deny.toml" - # workflow definitions - - "docker/**" - - ".dockerignore" - - ".github/workflows/ci-tests.yml" - - ".github/workflows/sub-ci-unit-tests-docker.yml" - - ".github/workflows/sub-ci-integration-tests-gcp.yml" - - ".github/workflows/sub-deploy-integration-tests-gcp.yml" - - ".github/workflows/sub-find-cached-disks.yml" - - ".github/workflows/sub-build-docker-image.yml" - -env: - RUST_LOG: ${{ vars.RUST_LOG }} - RUST_BACKTRACE: ${{ vars.RUST_BACKTRACE }} - RUST_LIB_BACKTRACE: ${{ vars.RUST_LIB_BACKTRACE }} - COLORBT_SHOW_HIDDEN: ${{ vars.COLORBT_SHOW_HIDDEN }} - CARGO_INCREMENTAL: ${{ vars.CARGO_INCREMENTAL }} - -#! IMPORTANT -#! -#! The job names in `sub-ci-unit-tests-docker.yml`, `sub-ci-integration-tests-gcp.yml`, -#! `ci-tests.patch.yml` and `ci-tests.patch-external.yml` must be kept in sync. -jobs: - # Build the docker image used by the tests. 
- # - # The default network in the Zebra config in the image is mainnet, unless a manually triggered - # workflow or repository variable is configured differently. Testnet jobs change that config to - # testnet when running the image. - build: - name: Build CI Docker - # Skip PRs from external repositories, let them pass, and then GitHub's Merge Queue will check them - if: ${{ !startsWith(github.event_name, 'pull') || !github.event.pull_request.head.repo.fork }} - uses: ./.github/workflows/sub-build-docker-image.yml - with: - dockerfile_path: ./docker/Dockerfile - dockerfile_target: tests - image_name: ${{ vars.CI_IMAGE_NAME }} - no_cache: ${{ inputs.no_cache || false }} - rust_backtrace: full - rust_lib_backtrace: full - rust_log: info - features: ${{ format('{0} {1}', vars.RUST_PROD_FEATURES, vars.RUST_TEST_FEATURES) }} - # This step needs access to Docker Hub secrets to run successfully - secrets: inherit - - # Runs Zebra unit tests - unit-tests: - name: Unit tests - # Skip Unit tests when the event is a scheduled run, as this is just needed for integration tests - if: ${{ github.event_name != 'schedule' }} - needs: build - uses: ./.github/workflows/sub-ci-unit-tests-docker.yml - with: - image_digest: ${{ needs.build.outputs.image_digest }} - network: ${{ inputs.network || vars.ZCASH_NETWORK || 'Mainnet' }} - no_cache: ${{ inputs.no_cache || false }} - secrets: inherit - - # Runs Zebra integration tests - integration-tests: - name: Integration tests - needs: build - uses: ./.github/workflows/sub-ci-integration-tests-gcp.yml - with: - network: ${{ inputs.network || vars.ZCASH_NETWORK || 'Mainnet' }} - regenerate-disks: ${{ inputs.regenerate-disks || false }} - run-full-sync: ${{ inputs.run-full-sync || false }} - run-lwd-sync: ${{ inputs.run-lwd-sync || false }} - force_save_to_disk: ${{ inputs.force_save_to_disk || false }} - no_cache: ${{ inputs.no_cache || false }} - secrets: inherit diff --git a/.github/workflows/ci-unit-tests-os.patch.yml 
b/.github/workflows/ci-unit-tests-os.patch.yml deleted file mode 100644 index 9de541d518d..00000000000 --- a/.github/workflows/ci-unit-tests-os.patch.yml +++ /dev/null @@ -1,66 +0,0 @@ -name: Multi-OS Unit Tests - -on: - pull_request: - paths-ignore: - - '**/*.rs' - - '**/*.txt' - - '**/*.snap' - - '**/Cargo.toml' - - '**/Cargo.lock' - - '**/deny.toml' - - '.cargo/config.toml' - - '**/clippy.toml' - - '.github/workflows/ci-unit-tests-os.yml' - -jobs: - test: - name: Test ${{ matrix.rust }} on ${{ matrix.os }}${{ matrix.features }} - # We're just doing this job for the name, the platform doesn't matter. - # So we use the platform with the most concurrent instances. - runs-on: ubuntu-latest - strategy: - matrix: - os: [ubuntu-latest, macos-latest, windows-latest] - rust: [stable, beta] - features: [""] - exclude: - - os: macos-latest - rust: beta - - steps: - - run: 'echo "No build required"' - - install-from-lockfile-no-cache: - name: Install zebrad from lockfile without cache on ubuntu-latest - runs-on: ubuntu-latest - - steps: - - run: 'echo "No build required"' - - check-cargo-lock: - name: Check Cargo.lock is up to date - runs-on: ubuntu-latest - - steps: - - run: 'echo "No build required"' - - cargo-deny: - name: Check deny.toml ${{ matrix.checks }} ${{ matrix.features }} - runs-on: ubuntu-latest - strategy: - matrix: - checks: - - bans - - sources - features: ['', '--features default-release-binaries', '--all-features'] - - steps: - - run: 'echo "No build required"' - - unused-deps: - name: Check for unused dependencies - runs-on: ubuntu-latest - - steps: - - run: 'echo "No build required"' diff --git a/.github/workflows/ci-unit-tests-os.yml b/.github/workflows/ci-unit-tests-os.yml deleted file mode 100644 index 4c3af021842..00000000000 --- a/.github/workflows/ci-unit-tests-os.yml +++ /dev/null @@ -1,336 +0,0 @@ -# This workflow performs unit tests across different operating systems and Rust versions. 
It includes steps for: -# - Testing on Ubuntu and macOS with stable and beta Rust toolchains. -# - Installing Zebra from the lockfile without cache on Ubuntu. -# - Verifying that Cargo.lock is up-to-date with Cargo.toml changes. -# - Running cargo-deny checks for dependencies. -# - Checking for unused dependencies in the code. -name: Multi-OS Unit Tests - -# Ensures that only one workflow task will run at a time. Previous builds, if -# already in process, will get cancelled. Only the latest commit will be allowed -# to run, cancelling any workflows in between -concurrency: - group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} - cancel-in-progress: true - -on: - merge_group: - types: [checks_requested] - - workflow_dispatch: - - pull_request: - paths: - # code and tests - - "**/*.rs" - # hard-coded checkpoints and proptest regressions - - "**/*.txt" - # test data snapshots - - "**/*.snap" - # dependencies - - "**/Cargo.toml" - - "**/Cargo.lock" - - "**/deny.toml" - # configuration files - - ".cargo/config.toml" - - "**/clippy.toml" - # workflow definitions - - ".github/workflows/ci-unit-tests-os.yml" - - # we build Rust caches on main, - # so they can be shared by all branches: - # https://docs.github.com/en/actions/using-workflows/caching-dependencies-to-speed-up-workflows#restrictions-for-accessing-a-cache - push: - branches: - - main - paths: - # production code and test code - - "**/*.rs" - # hard-coded checkpoints - # TODO: skip proptest regressions? 
- - "**/*.txt" - # test data snapshots - - "**/*.snap" - # dependencies - - "**/Cargo.toml" - - "**/Cargo.lock" - - "**/deny.toml" - # configuration files - - ".cargo/config.toml" - - "**/clippy.toml" - # workflow definitions - - ".github/workflows/ci-unit-tests-os.yml" - -env: - CARGO_INCREMENTAL: ${{ vars.CARGO_INCREMENTAL }} - RUST_LOG: ${{ vars.RUST_LOG }} - RUST_BACKTRACE: ${{ vars.RUST_BACKTRACE }} - RUST_LIB_BACKTRACE: ${{ vars.RUST_LIB_BACKTRACE }} - COLORBT_SHOW_HIDDEN: ${{ vars.COLORBT_SHOW_HIDDEN }} - -jobs: - ######################################## - ### Build and test Zebra on all OSes ### - ######################################## - test: - env: - RUSTUP_TOOLCHAIN: ${{ matrix.rust }} - - name: Test ${{ matrix.rust }} on ${{ matrix.os }} - # The large timeout is to accommodate: - # - macOS and Windows builds (typically 50-90 minutes), and - timeout-minutes: 120 - runs-on: ${{ matrix.os }} - strategy: - fail-fast: false - matrix: - os: [ubuntu-latest, macos-latest, windows-latest] - rust: [stable, beta] - # We only test with default features in this workflow - # Other feature combinations are tested in specific workflows - features: ["default-release-binaries"] - exclude: - # We're excluding macOS beta for the following reasons: - # - the concurrent macOS runner limit is much lower than the Linux limit - # - macOS is slower than Linux, and shouldn't have a build or test difference with Linux - # - macOS is a second-tier Zebra support platform - - os: macos-latest - rust: beta - - steps: - - uses: actions/checkout@v4.2.2 - with: - persist-credentials: false - - uses: r7kamura/rust-problem-matchers@v1.5.0 - - - name: Install last version of Protoc - uses: arduino/setup-protoc@v3.0.0 - with: - # TODO: increase to latest version after https://github.com/arduino/setup-protoc/issues/33 is fixed - version: "23.x" - repo-token: ${{ secrets.GITHUB_TOKEN }} - - # Setup Rust with ${{ matrix.rust }} toolchain and minimal profile - - name: Setup Rust - run: | - 
rustup toolchain install ${{ matrix.rust }} --profile minimal - - - uses: Swatinem/rust-cache@v2.7.8 - # TODO: change Rust cache target directory on Windows, - # or remove this workaround once the build is more efficient (#3005). - #with: - # workspaces: ". -> C:\\zebra-target" - with: - # Split the cache by feature set to avoid linker errors - key: ${{ matrix.features }} - - - name: Change target output directory on Windows - # Windows doesn't have enough space on the D: drive, so we redirect the build output to the - # larger C: drive. - # TODO: Remove this workaround once the build is more efficient (#3005). - if: matrix.os == 'windows-latest' - run: | - mkdir "C:\\zebra-target" - echo "CARGO_TARGET_DIR=C:\\zebra-target" | Out-File -FilePath "$env:GITHUB_ENV" -Encoding utf8 -Append - - - name: cargo fetch - run: | - cargo fetch - - - name: Install LLVM on Windows - if: matrix.os == 'windows-latest' - run: | - choco install llvm -y - echo "C:\Program Files\LLVM\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append - echo "LIBCLANG_PATH=C:\Program Files\LLVM\bin" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append - - - name: Skip network tests on all OSes - # TODO: remove this step once we have reliable network on all OSes. - # Ubuntu runners don't have reliable network or DNS during test steps. - # Windows runners have an unreliable network. - # macOS is affected locally and in the CI, so we skip the tests on all OSes. - shell: bash - run: echo "SKIP_NETWORK_TESTS=1" >> $GITHUB_ENV - - - name: Minimise proptest cases on macOS and Windows - # We set cases to 1, because some tests already run 1 case by default. - # We keep maximum shrink iterations at the default value, because it only happens on failure. - # - # Windows compilation and tests are slower than other platforms. - # macOS runners do extra network tests, so they take longer. 
- shell: bash - if: matrix.os != 'ubuntu-latest' - run: | - echo "PROPTEST_CASES=1" >> $GITHUB_ENV - echo "PROPTEST_MAX_SHRINK_ITERS=1024" >> $GITHUB_ENV - - # Run unit and basic acceptance tests, only showing command output if the test fails. - # - # If some tests hang, add "-- --nocapture" for just that test, or for all the tests. - - name: Run tests ${{ matrix.features }} - run: | - cargo test --features "${{ matrix.features }}" --release --verbose --workspace - - # Explicitly run any tests that are usually #[ignored] - - - name: Run zebrad large sync tests${{ matrix.features }} - # Skip the entire step on Ubuntu and Windows, because the test would be skipped anyway due to SKIP_NETWORK_TESTS - if: matrix.os == 'macos-latest' - run: | - cargo test --features "${{ matrix.features }}" --release --verbose --package zebrad --test acceptance -- --nocapture --include-ignored sync_large_checkpoints_ - - # Install Zebra with lockfile dependencies, with no caching and default features - install-from-lockfile-no-cache: - name: Install zebrad from lockfile without cache on ubuntu-latest - timeout-minutes: 60 - runs-on: ubuntu-latest - - steps: - - uses: actions/checkout@v4.2.2 - with: - persist-credentials: false - - uses: r7kamura/rust-problem-matchers@v1.5.0 - - - name: Install last version of Protoc - uses: arduino/setup-protoc@v3.0.0 - with: - # TODO: increase to latest version after https://github.com/arduino/setup-protoc/issues/33 is fixed - version: "23.x" - repo-token: ${{ secrets.GITHUB_TOKEN }} - - # Setup Rust with stable toolchain and minimal profile - - name: Setup Rust - run: | - rustup toolchain install stable --profile minimal - - - name: Install zebrad - run: | - cargo install --locked --path ./zebrad/ zebrad - - # Check that Cargo.lock includes any Cargo.toml changes. - # This check makes sure the `cargo-deny` crate dependency checks are accurate. 
- check-cargo-lock: - name: Check Cargo.lock is up to date - timeout-minutes: 60 - runs-on: ubuntu-latest - - steps: - - uses: actions/checkout@v4.2.2 - with: - persist-credentials: false - - uses: r7kamura/rust-problem-matchers@v1.5.0 - - - name: Install last version of Protoc - uses: arduino/setup-protoc@v3.0.0 - with: - version: "23.x" - repo-token: ${{ secrets.GITHUB_TOKEN }} - - # Setup Rust with stable toolchain and minimal profile - - name: Setup Rust - run: | - rustup toolchain install stable --profile minimal - - - uses: Swatinem/rust-cache@v2.7.8 - with: - shared-key: "clippy-cargo-lock" - - - name: Check Cargo.lock is up to date - run: | - cargo check --locked --all-features --all-targets - - cargo-deny: - name: Check deny.toml ${{ matrix.checks }} ${{ matrix.features }} - runs-on: ubuntu-latest - strategy: - matrix: - checks: - - bans - - sources - # We don't need to check `--no-default-features` here, because (except in very rare cases): - # - disabling features isn't going to add duplicate dependencies - # - disabling features isn't going to add more crate sources - features: ["", "--features default-release-binaries", "--all-features"] - # Always run the --all-features job, to get accurate "skip tree root was not found" warnings - fail-fast: false - - # Prevent sudden announcement of a new advisory from failing ci: - continue-on-error: ${{ matrix.checks == 'advisories' }} - - steps: - - uses: actions/checkout@v4.2.2 - with: - persist-credentials: false - - uses: r7kamura/rust-problem-matchers@v1.5.0 - - - name: Check ${{ matrix.checks }} with features ${{ matrix.features }} - uses: EmbarkStudios/cargo-deny-action@v2 - with: - # --all-features spuriously activates openssl, but we want to ban that dependency in - # all of zebrad's production features for security reasons. But the --all-features job is - # the only job that gives accurate "skip tree root was not found" warnings. 
- # In other jobs, we expect some of these warnings, due to disabled features. - command: check ${{ matrix.checks }} ${{ matrix.features == '--all-features' && '--allow banned' || '--allow unmatched-skip-root' }} - arguments: --workspace ${{ matrix.features }} - - unused-deps: - name: Check for unused dependencies - runs-on: ubuntu-latest - - steps: - - name: Checkout git repository - uses: actions/checkout@v4.2.2 - with: - persist-credentials: false - - uses: r7kamura/rust-problem-matchers@v1.5.0 - - # Setup Rust with stable toolchain and minimal profile - - name: Setup Rust - run: | - rustup toolchain install stable --profile minimal - - - name: Install cargo-machete - uses: baptiste0928/cargo-install@v3.3.0 - with: - crate: cargo-machete - - - name: Check unused dependencies - # Exclude macro and transitive dependencies by filtering them out of the output, - # then if there are any more unused dependencies, fail the job. - run: | - echo "-- full cargo machete output, including ignored dependencies --" - cargo machete --skip-target-dir || true - echo "-- unused dependencies are below this line, full output is above --" - if cargo machete --skip-target-dir 2>/dev/null | \ - grep --extended-regexp -e '^\\t' | \ - grep -v -e gumdrop -e humantime-serde -e tinyvec -e zebra-utils; then - echo "New unused dependencies were found, please remove them!" - exit 1 - else - echo "No unused dependencies found." - fi - - failure-issue: - name: Open or update issues for OS integration failures - # When a new job is added to this workflow, add it to this list. - needs: - [ - test, - install-from-lockfile-no-cache, - check-cargo-lock, - cargo-deny, - unused-deps, - ] - # Only open tickets for failed or cancelled jobs that are not coming from PRs. - # (PR statuses are already reported in the PR jobs list, and checked by GitHub's Merge Queue.) 
- if: (failure() || cancelled()) && github.repository_owner == 'ZcashFoundation' && github.event.pull_request == null - runs-on: ubuntu-latest - steps: - - uses: jayqi/failed-build-issue-action@v1 - with: - title-template: "{{refname}} branch CI failed: {{eventName}} in {{workflow}}" - # New failures open an issue with this label. - label-name: S-ci-fail-os-integration-auto-issue - # If there is already an open issue with this label, any failures become comments on that issue. - always-create-new-issue: false - github-token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml new file mode 100644 index 00000000000..243ed9b8653 --- /dev/null +++ b/.github/workflows/coverage.yml @@ -0,0 +1,84 @@ +name: Coverage + +on: + push: + branches: [main] + paths: + - "**/*.rs" + - "**/Cargo.toml" + - "**/Cargo.lock" + - .config/nextest.toml + - codecov.yml + - .github/workflows/coverage.yml + +# Ensures that only one workflow task will run at a time. Previous builds, if +# already in progress, will get cancelled. 
Only the latest commit will be allowed +# to run, cancelling any workflows in between +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + +permissions: + contents: read + pull-requests: read + +env: + CARGO_TERM_COLOR: always + RUST_BACKTRACE: 1 + RUST_LIB_BACKTRACE: 1 + RUST_LOG: info + COLORBT_SHOW_HIDDEN: 1 + +jobs: + coverage: + permissions: + id-token: write + statuses: write + runs-on: ubuntu-latest + timeout-minutes: 120 + + steps: + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 #v5.0.0 + with: + persist-credentials: false + - name: Install last version of Protoc + uses: arduino/setup-protoc@c65c819552d16ad3c9b72d9dfd5ba5237b9c906b #v3.0.0 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} + - uses: actions-rust-lang/setup-rust-toolchain@ab6845274e2ff01cd4462007e1a9d9df1ab49f42 #v1.14.0 + with: + toolchain: stable + components: llvm-tools-preview + cache-on-failure: true + - uses: taiki-e/install-action@3ee5d63d29478156148c0b53e9f3447829b47bc2 #v2.58.23 + with: + tool: cargo-llvm-cov,nextest + - name: Run coverage tests + run: | + cargo llvm-cov --no-report nextest + # TODO: Do we need --locked --release --features "default-release-binaries" here? + cargo llvm-cov report --lcov --output-path lcov.info + env: + TEST_LARGE_CHECKPOINTS: 1 + # We set cases to 1, because some tests already run 1 case by default. + # We set maximum shrink iterations to 0, because we don't expect failures in coverage tests. + # Coverage tests are much slower than other tests, particularly in hot loops. 
+ PROPTEST_CASES: 1 + PROPTEST_MAX_SHRINK_ITERS: 0 + + - name: Upload coverage to Codecov + uses: codecov/codecov-action@fdcc8476540edceab3de004e990f80d881c6cc00 #v5.5.0 + with: + files: lcov.info + + coverage-success: + name: coverage success + runs-on: ubuntu-latest + if: always() + needs: + - coverage + steps: + - name: Decide whether the needed jobs succeeded or failed + uses: re-actors/alls-green@05ac9388f0aebcb5727afa17fcccfecd6f8ec5fe #v1.2.2 + with: + jobs: ${{ toJSON(needs) }} diff --git a/.github/workflows/docs-deploy-firebase.patch-external.yml b/.github/workflows/docs-deploy-firebase.patch-external.yml deleted file mode 100644 index 9a725ba21b6..00000000000 --- a/.github/workflows/docs-deploy-firebase.patch-external.yml +++ /dev/null @@ -1,30 +0,0 @@ -# Workflow patches for skipping Google Cloud docs updates on PRs from external repositories. -name: Docs - -# Run on PRs from external repositories, let them pass, and then GitHub's Merge Queue will check them. -# GitHub doesn't support filtering workflows by source branch names, so we have to do it for each -# job. -on: - pull_request: - -#! IMPORTANT -#! -#! The job names in `docs-deploy-firebase.yml`, `docs-deploy-firebase.patch.yml` and -#! `docs-deploy-firebase.patch-external.yml` must be kept in sync. -jobs: - build-docs-book: - name: Build and Deploy Zebra Book Docs - runs-on: ubuntu-latest - if: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.repo.fork }} - steps: - - run: 'echo "Skipping job on fork"' - - build-docs-internal: - name: Build and Deploy Zebra Internal Docs - # This dependency allows all these jobs to depend on a single condition, making it easier to - # change. 
- needs: build-docs-book - runs-on: ubuntu-latest - if: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.repo.fork }} - steps: - - run: 'echo "Skipping job on fork"' diff --git a/.github/workflows/docs-deploy-firebase.patch.yml b/.github/workflows/docs-deploy-firebase.patch.yml deleted file mode 100644 index 30c0793d852..00000000000 --- a/.github/workflows/docs-deploy-firebase.patch.yml +++ /dev/null @@ -1,41 +0,0 @@ -# Workflow patches for skipping Google Cloud docs updates when docs, Rust code, or dependencies -# aren't modified in a PR. -name: Docs - -# Run on PRs with unmodified docs, code, and dependency files. -on: - pull_request: - paths-ignore: - # doc source files - - 'book/**' - - '**/firebase.json' - - '**/.firebaserc' - - 'katex-header.html' - # rustdoc source files - - '**/*.rs' - - '**/Cargo.toml' - - '**/Cargo.lock' - # configuration files - - '.cargo/config.toml' - - '**/clippy.toml' - # workflow definitions - - '.github/workflows/docs-deploy-firebase.yml' - -#! IMPORTANT -#! -#! The job names in `docs-deploy-firebase.yml`, `docs-deploy-firebase.patch.yml` and -#! `docs-deploy-firebase.patch-external.yml` must be kept in sync. 
-jobs: - build-docs-book: - name: Build and Deploy Zebra Book Docs - runs-on: ubuntu-latest - if: ${{ github.event_name == 'pull_request' && !github.event.pull_request.head.repo.fork }} - steps: - - run: 'echo "No build required"' - - build-docs-internal: - name: Build and Deploy Zebra Internal Docs - runs-on: ubuntu-latest - if: ${{ github.event_name == 'pull_request' && !github.event.pull_request.head.repo.fork }} - steps: - - run: 'echo "No build required"' diff --git a/.github/workflows/docs-deploy-firebase.yml b/.github/workflows/docs-deploy-firebase.yml deleted file mode 100644 index c61241226dd..00000000000 --- a/.github/workflows/docs-deploy-firebase.yml +++ /dev/null @@ -1,192 +0,0 @@ -# Google Cloud docs updates that run when docs, Rust code, or dependencies are modified, -# but only on PRs from the ZcashFoundation/zebra repository. (External PRs are deployed by GitHub's Merge Queue.) - -# - Builds and deploys Zebra Book Docs using mdBook, setting up necessary tools and deploying to Firebase. -# - Compiles and deploys external documentation, setting up Rust with the beta toolchain and default profile, building the docs, and deploying them to Firebase. -# - Assembles and deploys internal documentation with similar steps, including private items in the documentation, and deploys to Firebase. -name: Docs - -# Ensures that only one workflow task will run at a time. Previous deployments, if -# already in process, won't get cancelled. Instead, we let the first to complete -# then queue the latest pending workflow, cancelling any workflows in between -concurrency: - group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} - cancel-in-progress: true - -on: - workflow_dispatch: - - push: - # Skip main branch updates where docs, Rust code, and dependencies aren't modified. 
- branches: - - main - paths: - # doc source files - - "book/**" - - "**/firebase.json" - - "**/.firebaserc" - - "katex-header.html" - # rustdoc source files - - "**/*.rs" - - "**/Cargo.toml" - - "**/Cargo.lock" - # configuration files - - ".cargo/config.toml" - - "**/clippy.toml" - # workflow definitions - - ".github/workflows/docs-deploy-firebase.yml" - - pull_request: - # Skip PRs where docs, Rust code, and dependencies aren't modified. - paths: - # doc source files - - "book/**" - - "**/firebase.json" - - "**/.firebaserc" - - "katex-header.html" - # rustdoc source files - - "**/*.rs" - - "**/Cargo.toml" - - "**/Cargo.lock" - # configuration files - - ".cargo/config.toml" - - "**/clippy.toml" - # workflow definitions - - ".github/workflows/docs-deploy-firebase.yml" - -env: - RUST_LOG: ${{ vars.RUST_LOG }} - RUST_BACKTRACE: ${{ vars.RUST_BACKTRACE }} - RUST_LIB_BACKTRACE: ${{ vars.RUST_LIB_BACKTRACE }} - COLORBT_SHOW_HIDDEN: ${{ vars.COLORBT_SHOW_HIDDEN }} - FIREBASE_CHANNEL: ${{ github.event_name == 'pull_request' && 'preview' || 'live' }} - # cargo doc doesn't support '-- -D warnings', so we have to add it here - # https://github.com/rust-lang/cargo/issues/8424#issuecomment-774662296 - # - # The -A and -W settings must be the same as the `rustdocflags` in: - # https://github.com/ZcashFoundation/zebra/blob/main/.cargo/config.toml#L87 - RUSTDOCFLAGS: --html-in-header katex-header.html -D warnings -A rustdoc::private_intra_doc_links - -# IMPORTANT -# -# The job names in `docs-deploy-firebase.yml`, `docs-deploy-firebase.patch.yml` and -# `docs-deploy-firebase.patch-external.yml` must be kept in sync. 
-jobs: - build-docs-book: - name: Build and Deploy Zebra Book Docs - # Skip PRs from external repositories, let them pass, and then GitHub's Merge Queue will check them - if: ${{ !startsWith(github.event_name, 'pull') || !github.event.pull_request.head.repo.fork }} - timeout-minutes: 5 - runs-on: ubuntu-latest - permissions: - checks: write - contents: "read" - id-token: "write" - pull-requests: write - steps: - - name: Checkout the source code - uses: actions/checkout@v4.2.2 - with: - persist-credentials: false - - - uses: r7kamura/rust-problem-matchers@v1.5.0 - - - name: Setup mdBook - uses: jontze/action-mdbook@v4.0.0 - with: - token: ${{ secrets.GITHUB_TOKEN }} - mdbook-version: "~0.4" - use-linkcheck: true - use-mermaid: true - - - name: Build Zebra book - run: | - mdbook build book --dest-dir "$(pwd)"/target/book - - # Setup gcloud CLI - - name: Authenticate to Google Cloud - if: github.repository_owner == 'ZcashFoundation' - id: auth - uses: google-github-actions/auth@v2.1.10 - with: - workload_identity_provider: "${{ vars.GCP_WIF }}" - service_account: "${{ vars.GCP_FIREBASE_SA }}" - - # TODO: remove this step after issue https://github.com/FirebaseExtended/action-hosting-deploy/issues/174 is fixed - - - name: Add $GCP_FIREBASE_SA_PATH to env - if: github.repository_owner == 'ZcashFoundation' - run: | - # shellcheck disable=SC2002 - echo "GCP_FIREBASE_SA_PATH=$(cat ${{ steps.auth.outputs.credentials_file_path }} | tr -d '\n')" >> "$GITHUB_ENV" - - - name: Deploy Zebra book to firebase - if: github.repository_owner == 'ZcashFoundation' - uses: FirebaseExtended/action-hosting-deploy@v0.9.0 - with: - firebaseServiceAccount: ${{ env.GCP_FIREBASE_SA_PATH }} - channelId: ${{ env.FIREBASE_CHANNEL }} - projectId: ${{ vars.GCP_FIREBASE_PROJECT }} - target: docs-book - - build-docs-internal: - env: - RUSTUP_TOOLCHAIN: beta - name: Build and Deploy Zebra Internal Docs - if: ${{ !startsWith(github.event_name, 'pull') || !github.event.pull_request.head.repo.fork }} - 
timeout-minutes: 45 - runs-on: ubuntu-latest - permissions: - checks: write - contents: "read" - id-token: "write" - pull-requests: write - steps: - - name: Checkout the source code - uses: actions/checkout@v4.2.2 - with: - persist-credentials: false - - - uses: r7kamura/rust-problem-matchers@v1.5.0 - - - name: Install last version of Protoc - uses: arduino/setup-protoc@v3.0.0 - with: - version: "23.x" - repo-token: ${{ secrets.GITHUB_TOKEN }} - - # Setup Rust with beta toolchain and default profile (to include rust-docs) - - name: Setup Rust - run: | - rustup toolchain install beta --profile default - - - uses: Swatinem/rust-cache@v2.7.8 - - - name: Build internal docs - run: | - cargo doc --no-deps --workspace --all-features --document-private-items --target-dir "$(pwd)"/target/internal - - # Setup gcloud CLI - - name: Authenticate to Google Cloud - if: github.repository_owner == 'ZcashFoundation' - id: auth - uses: google-github-actions/auth@v2.1.10 - with: - workload_identity_provider: "${{ vars.GCP_WIF }}" - service_account: "${{ vars.GCP_FIREBASE_SA }}" - - # TODO: remove this step after issue https://github.com/FirebaseExtended/action-hosting-deploy/issues/174 is fixed - - name: Add $GCP_FIREBASE_SA_PATH to env - if: github.repository_owner == 'ZcashFoundation' - run: | - # shellcheck disable=SC2002 - echo "GCP_FIREBASE_SA_PATH=$(cat ${{ steps.auth.outputs.credentials_file_path }} | tr -d '\n')" >> "$GITHUB_ENV" - - - name: Deploy internal docs to firebase - if: github.repository_owner == 'ZcashFoundation' - uses: FirebaseExtended/action-hosting-deploy@v0.9.0 - with: - firebaseServiceAccount: ${{ env.GCP_FIREBASE_SA_PATH }} - channelId: ${{ env.FIREBASE_CHANNEL }} - target: docs-internal - projectId: ${{ vars.GCP_FIREBASE_PROJECT }} diff --git a/.github/workflows/docs-dockerhub-description.yml b/.github/workflows/docs-dockerhub-description.yml deleted file mode 100644 index 5d0367b6037..00000000000 --- a/.github/workflows/docs-dockerhub-description.yml +++ 
/dev/null @@ -1,31 +0,0 @@ -name: Update Docker Hub Description - -concurrency: - group: ${{ github.workflow }}-${{ github.ref }} - cancel-in-progress: false - -on: - workflow_dispatch: - push: - branches: - - main - paths: - - README.md - - .github/workflows/dockerhub-description.yml - -jobs: - dockerHubDescription: - if: github.repository_owner == 'ZcashFoundation' - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4.2.2 - with: - persist-credentials: false - - - name: Docker Hub Description - uses: peter-evans/dockerhub-description@v4.0.2 - with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_PASSWORD }} - repository: zfnd/zebra - short-description: ${{ github.event.repository.description }} diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml new file mode 100644 index 00000000000..0e655134f44 --- /dev/null +++ b/.github/workflows/lint.yml @@ -0,0 +1,302 @@ +name: Lint + +on: + pull_request: + branches: [main] + paths: + - "**/*.rs" + - "**/Cargo.toml" + - "**/Cargo.lock" + - clippy.toml + - .cargo/config.toml + - .github/workflows/lint.yml + + push: + branches: [main] + paths: + - "**/*.rs" + - "**/Cargo.toml" + - "**/Cargo.lock" + - clippy.toml + - .cargo/config.toml + - .github/workflows/lint.yml + +# Ensures that only one workflow task will run at a time. Previous builds, if +# already in process, will get cancelled. 
Only the latest commit will be allowed +# to run, cancelling any workflows in between +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + +permissions: + contents: read + pull-requests: read + +env: + CLICOLOR: 1 + +jobs: + clippy: + name: clippy ${{ matrix.rust-version }} / ${{ matrix.type }} + permissions: + id-token: write + statuses: write + runs-on: ubuntu-latest + timeout-minutes: 30 + strategy: + fail-fast: false + matrix: + rust-version: [stable, beta] + type: [release, tests] + include: + - type: release + args: --workspace --all-targets + features: default-release-binaries + - type: tests + args: --workspace --all-targets + features: default-release-binaries proptest-impl lightwalletd-grpc-tests zebra-checkpoints + + steps: + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 #v5.0.0 + with: + persist-credentials: false + - uses: actions-rust-lang/setup-rust-toolchain@ab6845274e2ff01cd4462007e1a9d9df1ab49f42 #v1.14.0 + with: + components: clippy + toolchain: ${{ matrix.rust-version }} + cache-on-failure: true + - uses: arduino/setup-protoc@c65c819552d16ad3c9b72d9dfd5ba5237b9c906b #v3.0.0 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} + - name: Run clippy + run: cargo clippy ${{ matrix.args }} --features "${{ matrix.features }}" + + crate-checks: + permissions: + statuses: write + runs-on: ubuntu-latest + timeout-minutes: 30 + steps: + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 #v5.0.0 + with: + persist-credentials: false + - uses: actions-rust-lang/setup-rust-toolchain@ab6845274e2ff01cd4462007e1a9d9df1ab49f42 #v1.14.0 + with: + cache-on-failure: true + - uses: arduino/setup-protoc@c65c819552d16ad3c9b72d9dfd5ba5237b9c906b #v3.0.0 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} + - uses: taiki-e/install-action@3ee5d63d29478156148c0b53e9f3447829b47bc2 #v2.58.23 + with: + tool: cargo-hack + - run: cargo hack check --workspace + + msrv: + name: MSRV + 
permissions: + id-token: write + statuses: write + runs-on: ubuntu-latest + timeout-minutes: 30 + strategy: + matrix: + include: + - binary: zebrad + steps: + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 #v5.0.0 + with: + persist-credentials: false + - uses: arduino/setup-protoc@c65c819552d16ad3c9b72d9dfd5ba5237b9c906b #v3.0.0 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} + - uses: actions-rust-lang/setup-rust-toolchain@ab6845274e2ff01cd4462007e1a9d9df1ab49f42 #v1.14.0 + with: + toolchain: 1.89 # MSRV + cache-on-failure: true + - run: cargo build --bin "${{ matrix.binary }}" --workspace + + docs: + name: docs + permissions: + statuses: write + runs-on: ubuntu-latest + timeout-minutes: 30 + steps: + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 #v5.0.0 + with: + persist-credentials: false + - name: Install last version of Protoc + uses: arduino/setup-protoc@c65c819552d16ad3c9b72d9dfd5ba5237b9c906b #v3.0.0 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} + - uses: actions-rust-lang/setup-rust-toolchain@ab6845274e2ff01cd4462007e1a9d9df1ab49f42 #v1.14.0 + with: + toolchain: nightly + cache-on-failure: true + - run: cargo doc --no-deps --workspace --all-features --document-private-items --target-dir "$(pwd)"/target/internal + env: + # Keep in sync with ./book.yml:jobs.build + # The -A and -W settings must be the same as the `rustdocflags` in: + # https://github.com/ZcashFoundation/zebra/blob/main/.cargo/config.toml#L87 + RUSTDOCFLAGS: --html-in-header katex-header.html -A rustdoc::private_intra_doc_links -D warnings + + fmt: + name: fmt + permissions: + statuses: write + runs-on: ubuntu-latest + timeout-minutes: 30 + steps: + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 #v5.0.0 + with: + persist-credentials: false + - uses: actions-rust-lang/setup-rust-toolchain@ab6845274e2ff01cd4462007e1a9d9df1ab49f42 #v1.14.0 + with: + toolchain: nightly + components: rustfmt + - name: Run fmt + run: cargo fmt --all -- 
--check + + unused-deps: + name: unused-deps + permissions: + id-token: write + statuses: write + runs-on: ubuntu-latest + timeout-minutes: 30 + steps: + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 #v5.0.0 + with: + persist-credentials: false + - name: Install last version of Protoc + uses: arduino/setup-protoc@c65c819552d16ad3c9b72d9dfd5ba5237b9c906b #v3.0.0 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} + - uses: actions-rust-lang/setup-rust-toolchain@ab6845274e2ff01cd4462007e1a9d9df1ab49f42 #v1.14.0 + with: + toolchain: nightly + cache-on-failure: true + - uses: taiki-e/install-action@3ee5d63d29478156148c0b53e9f3447829b47bc2 #v2.58.23 + with: + tool: cargo-udeps + - run: cargo udeps --workspace --all-targets --all-features --locked + + no-test-deps: + runs-on: ubuntu-latest + timeout-minutes: 30 + steps: + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 #v5.0.0 + with: + persist-credentials: false + + - uses: actions-rust-lang/setup-rust-toolchain@ab6845274e2ff01cd4462007e1a9d9df1ab49f42 #v1.14.0 + with: + toolchain: stable + cache-on-failure: true + - name: Ensure no arbitrary or proptest dependency on default build + run: cargo tree --package zebrad -e=features,no-dev | grep -Eq "arbitrary|proptest" && exit 1 || exit 0 + + # Checks that selected rates can compile with power set of features + features: + name: features + permissions: + id-token: write + statuses: write + runs-on: ubuntu-latest + timeout-minutes: 30 + steps: + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 #v5.0.0 + with: + persist-credentials: false + - name: Install last version of Protoc + uses: arduino/setup-protoc@c65c819552d16ad3c9b72d9dfd5ba5237b9c906b #v3.0.0 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} + - uses: actions-rust-lang/setup-rust-toolchain@ab6845274e2ff01cd4462007e1a9d9df1ab49f42 #v1.14.0 + with: + toolchain: nightly + cache-on-failure: true + - name: cargo install cargo-hack + uses: 
taiki-e/install-action@3ee5d63d29478156148c0b53e9f3447829b47bc2 #v2.58.23 + with: + tool: cargo-hack + - run: cargo hack check --all + env: + RUSTFLAGS: -D warnings + + check-cargo-lock: + name: check-cargo-lock + permissions: + id-token: write + statuses: write + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 #v5.0.0 + with: + persist-credentials: false + - uses: actions-rust-lang/setup-rust-toolchain@ab6845274e2ff01cd4462007e1a9d9df1ab49f42 #v1.14.0 + with: + toolchain: stable + cache-on-failure: true + - uses: arduino/setup-protoc@c65c819552d16ad3c9b72d9dfd5ba5237b9c906b #v3.0.0 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} + - run: cargo check --locked --all-features --all-targets + + deny: + name: Check deny ${{ matrix.checks }} ${{ matrix.features }} + permissions: + statuses: write + runs-on: ubuntu-latest + strategy: + matrix: + checks: + - bans + - sources + # We don't need to check `--no-default-features` here, because (except in very rare cases): + # - disabling features isn't going to add duplicate dependencies + # - disabling features isn't going to add more crate sources + features: ["", --features default-release-binaries, --all-features] + # Always run the --all-features job, to get accurate "skip tree root was not found" warnings + fail-fast: false + + # Prevent sudden announcement of a new advisory from failing ci: + continue-on-error: ${{ matrix.checks == 'advisories' }} + + steps: + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 #v5.0.0 + with: + persist-credentials: false + - uses: actions-rust-lang/setup-rust-toolchain@ab6845274e2ff01cd4462007e1a9d9df1ab49f42 #v1.14.0 + with: + cache-on-failure: true + - name: Check ${{ matrix.checks }} with features ${{ matrix.features }} + uses: EmbarkStudios/cargo-deny-action@f2ba7abc2abebaf185c833c3961145a3c275caad #v2.0.13 + with: + # --all-features spuriously activates openssl, but we want to ban that dependency in + # all of 
zebrad's production features for security reasons. But the --all-features job is + # the only job that gives accurate "skip tree root was not found" warnings. + # In other jobs, we expect some of these warnings, due to disabled features. + command: check ${{ matrix.checks }} ${{ matrix.features == '--all-features' && '--allow banned' || '--allow unmatched-skip-root' }} + arguments: --workspace ${{ matrix.features }} + + lint-success: + name: lint success + runs-on: ubuntu-latest + if: always() + needs: + - clippy + - crate-checks + - docs + - fmt + - unused-deps + - check-cargo-lock + - no-test-deps + - features + - deny + timeout-minutes: 30 + steps: + - name: Decide whether the needed jobs succeeded or failed + uses: re-actors/alls-green@05ac9388f0aebcb5727afa17fcccfecd6f8ec5fe #v1.2.2 + with: + jobs: ${{ toJSON(needs) }} diff --git a/.github/workflows/manual-zcashd-deploy.yml b/.github/workflows/manual-zcashd-deploy.yml deleted file mode 100644 index f96f6e482ca..00000000000 --- a/.github/workflows/manual-zcashd-deploy.yml +++ /dev/null @@ -1,106 +0,0 @@ -# This workflow is designed for manually deploying zcashd nodes to Google Cloud Platform (GCP) based on user inputs. -# - Allows selection of network type (Mainnet or Testnet) and instance group size. -# - Converts network name to lowercase to comply with GCP labeling requirements. -# - Authenticates with Google Cloud using provided credentials. -# - Creates a GCP instance template from a container image of zcashd. -# - Checks if the specified instance group already exists. -# - Depending on the existence check, either creates a new managed instance group or updates the existing one with the new template. 
-name: Zcashd Manual Deploy - -on: - workflow_dispatch: - inputs: - network: - default: 'Mainnet' - description: 'Network to deploy: Mainnet or Testnet' - required: true - size: - default: '10' - description: 'GCP Managed Instance Group size' - required: true - -jobs: - deploy: - name: Deploy zcashd nodes - runs-on: ubuntu-latest - timeout-minutes: 30 - permissions: - contents: 'read' - id-token: 'write' - - steps: - - uses: actions/checkout@v4.2.2 - with: - persist-credentials: false - - - name: Inject slug/short variables - uses: rlespinasse/github-slug-action@v5 - with: - short-length: 7 - - # Makes the Zcash network name lowercase. - # - # Labels in GCP are required to be in lowercase, but the blockchain network - # uses sentence case, so we need to downcase ${{ inputs.network }}. - # - # Passes ${{ inputs.network }} to subsequent steps using $NETWORK env variable. - - name: Downcase network name for labels - run: | - NETWORK_CAPS="${{ inputs.network }}" - echo "NETWORK=${NETWORK_CAPS,,}" >> "$GITHUB_ENV" - - # Setup gcloud CLI - - name: Authenticate to Google Cloud - id: auth - uses: google-github-actions/auth@v2.1.10 - with: - workload_identity_provider: '${{ vars.GCP_WIF }}' - service_account: '${{ vars.GCP_DEPLOYMENTS_SA }}' - - - name: Set up Cloud SDK - uses: google-github-actions/setup-gcloud@v2.1.4 - - # Create instance template from container image - - name: Create instance template - run: | - gcloud compute instance-templates create-with-container zcashd-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }} \ - --boot-disk-size=10GB \ - --boot-disk-type=pd-standard \ - --image-project=cos-cloud \ - --image-family=cos-stable \ - --container-stdin \ - --container-tty \ - --container-image electriccoinco/zcashd \ - --container-env ZCASHD_NETWORK="${{ inputs.network }}" \ - --machine-type ${{ vars.GCP_SMALL_MACHINE }} \ - --subnet=${{ vars.GCP_SUBNETWORK }} \ - --service-account ${{ vars.GCP_DEPLOYMENTS_SA }} \ - --scopes cloud-platform \ - 
--labels=app=zcashd,environment=prod,network=${NETWORK},github_ref=${{ env.GITHUB_REF_SLUG_URL }} \ - --tags zcashd - - # Check if our destination instance group exists already - - name: Check if instance group exists - id: does-group-exist - continue-on-error: true - run: | - gcloud compute instance-groups list | grep "zcashd-${{ env.GITHUB_REF_SLUG_URL }}-${{ inputs.network }}" | grep "${{ vars.GCP_REGION }}" - - # Deploy new managed instance group using the new instance template - - name: Create managed instance group - if: steps.does-group-exist.outcome == 'failure' - run: | - gcloud compute instance-groups managed create \ - "zcashd-${{ env.GITHUB_REF_SLUG_URL }}-${{ inputs.network }}" \ - --template "zcashd-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }}" \ - --region "${{ vars.GCP_REGION }}" \ - --size "${{ github.event.inputs.size }}" - - # Rolls out update to existing group using the new instance template - - name: Update managed instance group - if: steps.does-group-exist.outcome == 'success' - run: | - gcloud compute instance-groups managed rolling-action start-update \ - "zcashd-${{ env.GITHUB_REF_SLUG_URL }}-${{ inputs.network }}" \ - --version template="zcashd-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }}" \ - --region "${{ vars.GCP_REGION }}" diff --git a/.github/workflows/release-binaries.yml b/.github/workflows/release-binaries.yml index f2c915c86f8..e55065fe559 100644 --- a/.github/workflows/release-binaries.yml +++ b/.github/workflows/release-binaries.yml @@ -1,5 +1,5 @@ # This workflow is meant to trigger a build of Docker binaries when a release -# is published, it uses the existing `sub-build-docker-image.yml` workflow +# is published, it uses the existing `zfnd-build-docker-image.yml` workflow # # We use a separate action as we might want to trigger this under # different circumstances than a Continuous Deployment, for example. 
@@ -14,11 +14,18 @@ on: types: - released +permissions: + contents: read + jobs: # The image will be named `zebra:` build: name: Build Release Docker - uses: ./.github/workflows/sub-build-docker-image.yml + permissions: + contents: read + id-token: write + pull-requests: write + uses: ./.github/workflows/zfnd-build-docker-image.yml with: dockerfile_path: ./docker/Dockerfile dockerfile_target: runtime @@ -26,17 +33,54 @@ jobs: features: ${{ vars.RUST_PROD_FEATURES }} rust_log: ${{ vars.RUST_LOG }} # This step needs access to Docker Hub secrets to run successfully - secrets: inherit + secrets: + DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }} + DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_TOKEN }} + + dockerhub-description: + if: github.repository_owner == 'ZcashFoundation' + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 #v5.0.0 + with: + persist-credentials: false + + - name: Docker Hub Description + uses: peter-evans/dockerhub-description@432a30c9e07499fd01da9f8a49f0faf9e0ca5b77 #v4.0.2 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_PASSWORD }} + repository: zfnd/zebra + short-description: ${{ github.event.repository.description }} + + release-binaries-success: + name: Release binaries success + runs-on: ubuntu-latest + # Only run when the Docker Hub update job is allowed to execute + if: >- + ${{ + always() && + github.repository_owner == 'ZcashFoundation' + }} + needs: + - build + - dockerhub-description + timeout-minutes: 1 + steps: + - name: Decide whether the needed jobs succeeded or failed + uses: re-actors/alls-green@05ac9388f0aebcb5727afa17fcccfecd6f8ec5fe #v1.2.2 + with: + jobs: ${{ toJSON(needs) }} failure-issue: name: Open or update issues for release binaries failures # When a new job is added to this workflow, add it to this list. - needs: [ build ] + needs: [ build, dockerhub-description ] # Open tickets for any failed build in this workflow. 
if: failure() || cancelled() runs-on: ubuntu-latest steps: - - uses: jayqi/failed-build-issue-action@v1 + - uses: jayqi/failed-build-issue-action@1a893bbf43ef1c2a8705e2b115cd4f0fe3c5649b #v1.2.0 with: title-template: "{{refname}} branch CI failed: {{eventName}} in {{workflow}}" # New failures open an issue with this label. diff --git a/.github/workflows/release-drafter.yml b/.github/workflows/release-drafter.yml index 3538c4da913..ce8a3de0836 100644 --- a/.github/workflows/release-drafter.yml +++ b/.github/workflows/release-drafter.yml @@ -38,7 +38,7 @@ jobs: runs-on: ubuntu-latest steps: # Drafts your next Release notes - - uses: release-drafter/release-drafter@v6.1.0 + - uses: release-drafter/release-drafter@b1476f6e6eb133afa41ed8589daba6dc69b4d3f5 # v6.1.0 with: config-name: release-drafter.yml commitish: main diff --git a/.github/workflows/ci-build-crates.yml b/.github/workflows/test-crates.yml similarity index 52% rename from .github/workflows/ci-build-crates.yml rename to .github/workflows/test-crates.yml index 851bc8176c6..b769478718d 100644 --- a/.github/workflows/ci-build-crates.yml +++ b/.github/workflows/test-crates.yml @@ -1,81 +1,73 @@ -# This workflow facilitates the individual building of Rust crates present in the repository. -# 1. A matrix is generated dynamically to identify each crate in the repository. -# 2. This matrix is checked for validity. -# 3. Each identified crate undergoes three build processes: -# - With no features. -# - With the default features. -# - With all the features enabled. -# 4. In case of build failures outside of pull requests, an issue is either opened or updated -# in the repository to report the failure. -# Throughout the workflow, various setup steps ensure the correct environment and tools are present. -name: Build crates individually - -# Ensures that only one workflow task will run at a time. Previous builds, if -# already in process, will get cancelled. 
Only the latest commit will be allowed -# to run, cancelling any workflows in between -concurrency: - group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} - cancel-in-progress: true +name: Test Crate Build on: - workflow_dispatch: - push: - branches: - - main + pull_request: + branches: [main] paths: - # production code and test code - "**/*.rs" - # dependencies - "**/Cargo.toml" - "**/Cargo.lock" - # configuration files - - ".cargo/config.toml" + - .cargo/config.toml - "**/clippy.toml" - # workflow definitions - - ".github/workflows/ci-build-crates.yml" - pull_request: + - .github/workflows/test-crates.yml + + push: + branches: [main] paths: - # production code and test code - "**/*.rs" - # dependencies - "**/Cargo.toml" - "**/Cargo.lock" - # configuration files - - ".cargo/config.toml" + - .cargo/config.toml - "**/clippy.toml" - # workflow definitions - - ".github/workflows/ci-build-crates.yml" + - .github/workflows/test-crates.yml + + workflow_dispatch: + +# Ensures that only one workflow task will run at a time. Previous builds, if +# already in process, will get cancelled.
Only the latest commit will be allowed +# to run, cancelling any workflows in between +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + +permissions: + contents: read + pull-requests: read env: - CARGO_INCREMENTAL: ${{ vars.CARGO_INCREMENTAL }} - RUST_LOG: ${{ vars.RUST_LOG }} - RUST_BACKTRACE: ${{ vars.RUST_BACKTRACE }} - RUST_LIB_BACKTRACE: ${{ vars.RUST_LIB_BACKTRACE }} - COLORBT_SHOW_HIDDEN: ${{ vars.COLORBT_SHOW_HIDDEN }} + CARGO_TERM_COLOR: always + RUST_BACKTRACE: 1 + RUST_LIB_BACKTRACE: 1 + RUST_LOG: info + COLORBT_SHOW_HIDDEN: 1 jobs: matrix: name: Generate crates matrix + permissions: + statuses: write runs-on: ubuntu-latest + timeout-minutes: 30 outputs: matrix: ${{ steps.set-matrix.outputs.matrix }} steps: - - uses: actions/checkout@v4.2.2 - - uses: r7kamura/rust-problem-matchers@v1.5.0 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 #v5.0.0 + with: + persist-credentials: false - # Setup Rust with stable toolchain and minimal profile - - name: Setup Rust - run: | - rustup toolchain install stable --profile minimal + - uses: actions-rust-lang/setup-rust-toolchain@ab6845274e2ff01cd4462007e1a9d9df1ab49f42 #v1.14.0 + with: + toolchain: stable + cache-on-failure: true - # This step is meant to dynamically create a JSON containing the values of each crate + # This step dynamically creates a JSON containing the values of each crate # available in this repo in the root directory. We use `cargo tree` to accomplish this task. # # The result from `cargo tree` is then sorted so the longest job (zebrad) runs first, # transformed to JSON values between double quotes, and separated by commas, # then added to a `crates.txt`. 
# - # A JSON object is created and assigned to a $MATRIX variable, which is use to create an + # A JSON object is created and assigned to a $MATRIX variable, which is used to create an # output named `matrix`, which is then used as the input in following steps, # using ` ${{ fromJson(needs.matrix.outputs.matrix) }}` - id: set-matrix @@ -92,28 +84,15 @@ jobs: echo $MATRIX | jq . echo "matrix=$MATRIX" >> "$GITHUB_OUTPUT" - check-matrix: - name: Check crates matrix - runs-on: ubuntu-latest - needs: [matrix] - steps: - - name: Install json2yaml - run: | - sudo npm install -g json2yaml - - - name: Check matrix definition - run: | - matrix='${{ needs.matrix.outputs.matrix }}' - echo $matrix - echo $matrix | jq . - echo $matrix | json2yaml - build: name: Build ${{ matrix.crate }} crate + permissions: + id-token: write + statuses: write timeout-minutes: 90 - needs: [matrix, check-matrix] + needs: [matrix] # Some of these builds take more than 14GB disk space - runs-on: ${{ github.repository_owner == 'ZcashFoundation' && 'ubuntu-latest-m' || 'ubuntu-latest' }} + runs-on: ubuntu-latest strategy: # avoid rate-limit errors by only launching a few of these jobs at a time, # but still finish in a similar time to the longest tests @@ -122,22 +101,19 @@ jobs: matrix: ${{ fromJson(needs.matrix.outputs.matrix) }} steps: - - uses: actions/checkout@v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 #v5.0.0 with: persist-credentials: false - - uses: r7kamura/rust-problem-matchers@v1.5.0 - - name: Install last version of Protoc - uses: arduino/setup-protoc@v3.0.0 + - uses: actions-rust-lang/setup-rust-toolchain@ab6845274e2ff01cd4462007e1a9d9df1ab49f42 #v1.14.0 with: - # TODO: increase to latest version after https://github.com/arduino/setup-protoc/issues/33 is fixed - version: "23.x" - repo-token: ${{ secrets.GITHUB_TOKEN }} + toolchain: stable + cache-key: crate-build-${{ matrix.crate }} + cache-on-failure: true - # Setup Rust with stable toolchain and minimal 
profile - - name: Setup Rust - run: | - rustup toolchain install stable --profile minimal + - uses: arduino/setup-protoc@c65c819552d16ad3c9b72d9dfd5ba5237b9c906b #v3.0.0 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} # We could use `features: ['', '--all-features', '--no-default-features']` as a matrix argument, # but it's faster to run these commands sequentially, so they can re-use the local cargo cache. @@ -164,20 +140,16 @@ jobs: cargo clippy --package ${{ matrix.crate }} --all-features --all-targets -- -D warnings cargo build --package ${{ matrix.crate }} --all-features --all-targets - failure-issue: - name: Open or update issues for building crates individually failures - # When a new job is added to this workflow, add it to this list. - needs: [matrix, build] - # Only open tickets for failed or cancelled jobs that are not coming from PRs. - # (PR statuses are already reported in the PR jobs list, and checked by GitHub's Merge Queue.) - if: (failure() && github.event.pull_request == null) || (cancelled() && github.event.pull_request == null) + test-crate-build-success: + name: test crate build success runs-on: ubuntu-latest + if: always() + needs: + - matrix + - build + timeout-minutes: 30 steps: - - uses: jayqi/failed-build-issue-action@v1 + - name: Decide whether the needed jobs succeeded or failed + uses: re-actors/alls-green@05ac9388f0aebcb5727afa17fcccfecd6f8ec5fe #v1.2.2 with: - title-template: "{{refname}} branch CI failed: {{eventName}} in {{workflow}}" - # New failures open an issue with this label. - label-name: S-ci-fail-build-crates-auto-issue - # If there is already an open issue with this label, any failures become comments on that issue. 
- always-create-new-issue: false - github-token: ${{ secrets.GITHUB_TOKEN }} + jobs: ${{ toJSON(needs) }} diff --git a/.github/workflows/sub-test-zebra-config.yml b/.github/workflows/test-docker.yml similarity index 50% rename from .github/workflows/sub-test-zebra-config.yml rename to .github/workflows/test-docker.yml index 6f1794290d9..1367d214905 100644 --- a/.github/workflows/sub-test-zebra-config.yml +++ b/.github/workflows/test-docker.yml @@ -1,32 +1,90 @@ -# This workflow is designed to test Zebra configuration files using Docker containers. -# It acts as a centralized test suite for Docker configuration scenarios, running multiple -# distinct tests against a provided Docker image using a matrix approach. -# - Runs a specified Docker image with the provided test variables and network settings. -# - Monitors and analyzes container logs for specific patterns to determine test success. -# - Provides flexibility in testing various configurations and networks by dynamically adjusting input parameters. - -name: Test Zebra Configs in Docker +name: Test Docker Config on: - workflow_call: - inputs: - docker_image: - required: true - type: string - description: Docker image to test, including digest (e.g., gcr.io/example/zebrad@sha256:...) - -permissions: read-all + pull_request: + branches: [main] + paths: + - "**/*.rs" + - "**/Cargo.toml" + - "**/Cargo.lock" + - docker/Dockerfile + - docker/entrypoint.sh + - docker/**/*.toml + - zebrad/tests/common/configs/** + - .github/workflows/test-docker.yml + + push: + branches: [main] + paths: + - "**/*.rs" + - "**/Cargo.toml" + - "**/Cargo.lock" + - docker/Dockerfile + - docker/entrypoint.sh + - docker/**/*.toml + - zebrad/tests/common/configs/** + - .github/workflows/test-docker.yml + +# Ensures that only one workflow task will run at a time. Previous builds, if +already in process, will get cancelled. 
Only the latest commit will be allowed +# to run, cancelling any workflows in between +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + +permissions: + contents: read + pull-requests: read + +env: + CARGO_TERM_COLOR: always + RUST_BACKTRACE: 1 + RUST_LIB_BACKTRACE: 1 + RUST_LOG: info + COLORBT_SHOW_HIDDEN: 1 jobs: + build-docker-image: + name: Build Docker Image + permissions: + id-token: write + statuses: write + runs-on: ubuntu-latest + timeout-minutes: 60 + steps: + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 #v5.0.0 + with: + persist-credentials: false + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 #v3.11.1 + - name: Build & push + id: docker_build + uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 #v6.18.0 + with: + target: tests + context: . + file: docker/Dockerfile + tags: zebrad-test:${{ github.sha }} + build-args: | + SHORT_SHA=${{ github.sha }} + CARGO_INCREMENTAL=1 + outputs: type=docker,dest=${{ runner.temp }}/zebrad-test.tar + + - name: Upload artifact + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 #v4.6.2 + with: + name: zebrad-test + path: ${{ runner.temp }}/zebrad-test.tar + test-configurations: + name: Test ${{ matrix.name }} + needs: build-docker-image permissions: contents: read actions: read checks: read - # Use the matrix 'name' for the job name for clarity in UI - name: Test ${{ matrix.name }} - timeout-minutes: 30 runs-on: ubuntu-latest + timeout-minutes: 30 strategy: fail-fast: false matrix: @@ -42,8 +100,8 @@ jobs: env_vars: -e ZEBRA_NETWORK__NETWORK=Testnet grep_patterns: -e "net.*=.*Test.*estimated progress to chain tip.*BeforeOverwinter" - # Only runs when using the CI image, because the CD image doesn't have the custom config file - # available in the tests/common/configs directory + # Only runs when using the image is built with the `tests` 
target, because the `runtime` target + # doesn't have the custom config file available in the tests/common/configs directory - id: custom-conf name: Custom config env_vars: -e CONFIG_FILE_PATH=/home/zebra/zebrad/tests/common/configs/custom-conf.toml @@ -77,31 +135,33 @@ jobs: name: Prometheus metrics env_vars: -e FEATURES=prometheus -e ZEBRA_METRICS__ENDPOINT_ADDR=0.0.0.0:9999 grep_patterns: -e "0.0.0.0:9999" - # Todo: This test is flaky, and we need to fix it. But this is included in the Rust tests as a fallback. - # # Mining configuration - # - id: mining-config - # name: Mining configuration - # env_vars: -e ZEBRA_MINING__MINER_ADDRESS="u1cymdny2u2vllkx7t5jnelp0kde0dgnwu0jzmggzguxvxj6fe7gpuqehywejndlrjwgk9snr6g69azs8jfet78s9zy60uepx6tltk7ee57jlax49dezkhkgvjy2puuue6dvaevt53nah7t2cc2k4p0h0jxmlu9sx58m2xdm5f9sy2n89jdf8llflvtml2ll43e334avu2fwytuna404a" - # grep_patterns: -e "configured miner address.*miner_address=" steps: - - uses: actions/checkout@v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 #v5.0.0 with: persist-credentials: false + - name: Download artifact + uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 #v5.0.0 + with: + name: zebrad-test + path: ${{ runner.temp }} + + - name: Load image + run: | + docker load --input ${{ runner.temp }}/zebrad-test.tar + docker image ls -a + - name: Inject slug/short variables - uses: rlespinasse/github-slug-action@v5 + uses: rlespinasse/github-slug-action@c33ff65466c58d57e4d796f88bb1ae0ff26ee453 #v5.2.0 with: short-length: 7 - - uses: r7kamura/rust-problem-matchers@v1.5.0 - - name: Run ${{ matrix.name }} test - # Only run if this isn't a skipped custom-conf test - if: ${{ matrix.id != 'custom-conf' || contains(inputs.docker_image, vars.CI_IMAGE_NAME) }} + # Only run custom-conf test if the config file exists in the built image + if: ${{ matrix.id != 'custom-conf' || hashFiles('zebrad/tests/common/configs/custom-conf.toml') != '' }} run: | - docker pull ${{ 
inputs.docker_image }} - docker run ${{ matrix.env_vars }} --detach --name ${{ matrix.id }} -t ${{ inputs.docker_image }} zebrad start + docker run ${{ matrix.env_vars }} --detach --name ${{ matrix.id }} -t zebrad-test:${{ github.sha }} zebrad start # Use a subshell to handle the broken pipe error gracefully ( trap "" PIPE; @@ -130,6 +190,19 @@ jobs: else echo "SUCCESS: Found the expected pattern in logs."; # Exit successfully if grep passed, even if docker stop resulted in SIGKILL (137 or 139) - # See ticket #7898 for details. exit 0; fi + + test-docker-config-success: + name: Test Docker Config Success + runs-on: ubuntu-latest + if: always() + needs: + - build-docker-image + - test-configurations + timeout-minutes: 30 + steps: + - name: Decide whether the needed jobs succeeded or failed + uses: re-actors/alls-green@05ac9388f0aebcb5727afa17fcccfecd6f8ec5fe #v1.2.2 + with: + jobs: ${{ toJSON(needs) }} diff --git a/.github/workflows/tests-unit.yml b/.github/workflows/tests-unit.yml new file mode 100644 index 00000000000..6cc2d2faad1 --- /dev/null +++ b/.github/workflows/tests-unit.yml @@ -0,0 +1,139 @@ +name: Unit Tests + +on: + pull_request: + branches: [main] + paths: + - "**/*.rs" + - "**/Cargo.toml" + - "**/Cargo.lock" + - .config/nextest.toml + - .github/workflows/tests-unit.yml + + push: + branches: [main] + paths: + - "**/*.rs" + - "**/Cargo.toml" + - "**/Cargo.lock" + - .config/nextest.toml + - .github/workflows/tests-unit.yml + +# Ensures that only one workflow task will run at a time. Previous builds, if +# already in process, will get cancelled. 
Only the latest commit will be allowed +# to run, cancelling any workflows in between +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + +permissions: + contents: read + pull-requests: read + +env: + CARGO_TERM_COLOR: always + RUST_BACKTRACE: 1 + RUST_LIB_BACKTRACE: 1 + RUST_LOG: info + COLORBT_SHOW_HIDDEN: 1 + +jobs: + unit-tests: + name: ${{ matrix.rust-version }} on ${{ matrix.os }} + permissions: + id-token: write + statuses: write + runs-on: ${{ matrix.os }} + timeout-minutes: 120 + strategy: + fail-fast: false + matrix: + os: [ubuntu-latest, macos-latest, windows-latest] + rust-version: [stable, beta] + features: [default-release-binaries] + exclude: + # Exclude macOS beta due to limited runner capacity and slower performance + # Exclude Windows beta to reduce the amount of minutes cost per workflow run + - os: macos-latest + rust-version: beta + - os: windows-latest + rust-version: beta + + steps: + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 #v5.0.0 + with: + persist-credentials: false + - uses: actions-rust-lang/setup-rust-toolchain@ab6845274e2ff01cd4462007e1a9d9df1ab49f42 #v1.14.0 + with: + toolchain: ${{ matrix.rust-version }} + cache-key: unit-tests-${{ matrix.os }}-${{ matrix.rust-version }}-${{ matrix.features }} + cache-on-failure: true + - uses: taiki-e/install-action@3ee5d63d29478156148c0b53e9f3447829b47bc2 #v2.58.23 + with: + tool: cargo-nextest + - uses: arduino/setup-protoc@c65c819552d16ad3c9b72d9dfd5ba5237b9c906b #v3.0.0 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} + + # Windows-specific setup + - name: Install LLVM on Windows + if: matrix.os == 'windows-latest' + run: | + choco install llvm -y + echo "C:\Program Files\LLVM\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append + echo "LIBCLANG_PATH=C:\Program Files\LLVM\bin" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append + + - name: Minimise proptest cases on macOS and Windows + # 
We set cases to 1, because some tests already run 1 case by default. + # We keep maximum shrink iterations at the default value, because it only happens on failure. + # + # Windows compilation and tests are slower than other platforms. + if: matrix.os == 'windows-latest' + run: | + echo "PROPTEST_CASES=1" >> $GITHUB_ENV + echo "PROPTEST_MAX_SHRINK_ITERS=1024" >> $GITHUB_ENV + + - name: Run unit tests + run: cargo nextest run --profile all-tests --locked --release --features "${{ matrix.features }}" --run-ignored=all + env: + TEST_LARGE_CHECKPOINTS: 1 + + check-no-git-dependencies: + name: Check no git dependencies + permissions: + id-token: write + statuses: write + runs-on: ubuntu-latest + timeout-minutes: 30 + if: contains(github.event.pull_request.labels.*.name, 'A-release') + steps: + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 #v5.0.0 + with: + persist-credentials: false + - uses: actions-rust-lang/setup-rust-toolchain@ab6845274e2ff01cd4462007e1a9d9df1ab49f42 #v1.14.0 + with: + toolchain: stable + cache-on-failure: true + - uses: taiki-e/install-action@3ee5d63d29478156148c0b53e9f3447829b47bc2 #v2.58.23 + with: + tool: cargo-nextest + - uses: arduino/setup-protoc@c65c819552d16ad3c9b72d9dfd5ba5237b9c906b #v3.0.0 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} + - name: Check no git dependencies + run: cargo nextest run --profile check-no-git-dependencies + + test-success: + name: test success + runs-on: ubuntu-latest + if: always() + needs: + - unit-tests + - check-no-git-dependencies + timeout-minutes: 30 + steps: + - name: Decide whether the needed jobs succeeded or failed + uses: re-actors/alls-green@05ac9388f0aebcb5727afa17fcccfecd6f8ec5fe #v1.2.2 + with: + jobs: ${{ toJSON(needs) }} + allowed-skips: check-no-git-dependencies diff --git a/.github/workflows/sub-build-docker-image.yml b/.github/workflows/zfnd-build-docker-image.yml similarity index 87% rename from .github/workflows/sub-build-docker-image.yml rename to 
.github/workflows/zfnd-build-docker-image.yml index 8d64df41604..0e4d0cfaf17 100644 --- a/.github/workflows/sub-build-docker-image.yml +++ b/.github/workflows/zfnd-build-docker-image.yml @@ -36,16 +36,25 @@ on: required: false type: string no_cache: - description: "Disable the Docker cache for this build" + description: Disable the Docker cache for this build required: false type: boolean default: false + secrets: + DOCKERHUB_USERNAME: + required: false + DOCKERHUB_TOKEN: + required: false + outputs: image_digest: - description: "The image digest to be used on a caller workflow" + description: The image digest to be used on a caller workflow value: ${{ jobs.build.outputs.image_digest }} +permissions: + contents: read + env: FEATURES: ${{ inputs.features }} RUST_LOG: ${{ inputs.rust_log || vars.RUST_LOG }} @@ -62,26 +71,25 @@ jobs: image_digest: ${{ steps.docker_build.outputs.digest }} image_name: ${{ fromJSON(steps.docker_build.outputs.metadata)['image.name'] }} permissions: - contents: "read" - id-token: "write" + contents: read + id-token: write pull-requests: write # for `docker-scout` to be able to write the comment env: DOCKER_BUILD_SUMMARY: ${{ vars.DOCKER_BUILD_SUMMARY }} steps: - - uses: actions/checkout@v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 #v5.0.0 with: persist-credentials: false - - uses: r7kamura/rust-problem-matchers@v1.5.0 - name: Inject slug/short variables - uses: rlespinasse/github-slug-action@v5 + uses: rlespinasse/github-slug-action@c33ff65466c58d57e4d796f88bb1ae0ff26ee453 #v5.2.0 with: short-length: 7 # Automatic tag management and OCI Image Format Specification for labels - name: Docker meta id: meta - uses: docker/metadata-action@v5.7.0 + uses: docker/metadata-action@c1e51972afc2121e065aed6d45c65596fe445f3f #v5.8.0 with: # list of Docker images to use as base name for tags # We only publish images to DockerHub if a release is not a pre-release @@ -112,25 +120,25 @@ jobs: - name: Authenticate to Google Cloud 
id: auth - uses: google-github-actions/auth@v2.1.10 + uses: google-github-actions/auth@7c6bc770dae815cd3e89ee6cdf493a5fab2cc093 #v3.0.0 with: workload_identity_provider: "${{ vars.GCP_WIF }}" service_account: "${{ vars.GCP_ARTIFACTS_SA }}" - token_format: "access_token" + token_format: access_token # Some builds might take over an hour, and Google's default lifetime duration for # an access token is 1 hour (3600s). We increase this to 3 hours (10800s) # as some builds take over an hour. access_token_lifetime: 10800s - name: Login to Google Artifact Registry - uses: docker/login-action@v3.4.0 + uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 #v3.5.0 with: registry: us-docker.pkg.dev username: oauth2accesstoken password: ${{ steps.auth.outputs.access_token }} - name: Login to DockerHub - uses: docker/login-action@v3.4.0 + uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 #v3.5.0 with: username: ${{ secrets.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} @@ -138,16 +146,16 @@ jobs: # Setup Docker Buildx to use Docker Build Cloud - name: Set up Docker Buildx id: buildx - uses: docker/setup-buildx-action@v3.10.0 + uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 #v3.11.1 with: - version: "lab:latest" + version: lab:latest driver: ${{ vars.DOCKER_BUILDER || 'docker' }} - endpoint: "zfnd/zebra" + endpoint: zfnd/zebra # Build and push image to Google Artifact Registry, and possibly DockerHub - name: Build & push id: docker_build - uses: docker/build-push-action@v6.18.0 + uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 #v6.18.0 with: target: ${{ inputs.dockerfile_target }} context: . 
@@ -180,7 +188,7 @@ jobs: # - `dev` for a pull request event - name: Docker Scout id: docker-scout - uses: docker/scout-action@v1.18.1 + uses: docker/scout-action@f8c776824083494ab0d56b8105ba2ca85c86e4de #v1.18.2 # We only run Docker Scout on the `runtime` target, as the other targets are not meant to be released # and are commonly used for testing, and thus are ephemeral. # TODO: Remove the `contains` check once we have a better way to determine if just new vulnerabilities are present. diff --git a/.github/workflows/sub-ci-integration-tests-gcp.yml b/.github/workflows/zfnd-ci-integration-tests-gcp.yml similarity index 70% rename from .github/workflows/sub-ci-integration-tests-gcp.yml rename to .github/workflows/zfnd-ci-integration-tests-gcp.yml index 2b6853d4da0..577ae7b1449 100644 --- a/.github/workflows/sub-ci-integration-tests-gcp.yml +++ b/.github/workflows/zfnd-ci-integration-tests-gcp.yml @@ -5,11 +5,87 @@ # Each test has a description of the conditions under which it runs. name: Integration Tests on GCP +# Ensures that only one workflow task will run at a time. Previous builds, if +# already in process, will get cancelled. 
Only the latest commit will be allowed +# to run, cancelling any workflows in between +concurrency: + group: ${{ github.workflow }}-${{ github.event_name == 'schedule' && 'schedule' || github.event_name == 'workflow_dispatch' && 'manual' || github.head_ref || github.ref_name }} + cancel-in-progress: ${{ github.event_name == 'pull_request' }} + on: + + schedule: + # Run this job every Friday at mid-day UTC + # This is limited to the Zebra and lightwalletd Full Sync jobs + - cron: 0 12 * * 5 + + workflow_dispatch: + inputs: + network: + default: Mainnet + description: "Network to deploy: Mainnet or Testnet" + required: true + regenerate-disks: + type: boolean + default: false + description: Just run a Zebra checkpoint sync and update checkpoint disks + required: true + run-full-sync: + type: boolean + default: false + description: Just run a Zebra full sync on `network`, and update tip disks + required: true + run-lwd-sync: + type: boolean + default: false + description: Just run a lightwalletd full sync and update tip disks + required: true + force_save_to_disk: + required: false + type: boolean + default: false + description: Force tests to always create a cached state disk, if they already create disks + no_cache: + description: Disable the Docker cache for this build + required: false + type: boolean + default: false + + pull_request: + branches: [main] + types: [labeled] + + push: + # Run only on main branch updates that modify Rust code or dependencies. 
+ branches: + - main + paths: + # code and tests + - "**/*.rs" + # hard-coded checkpoints and proptest regressions + - "**/*.txt" + # test data snapshots + - "**/*.snap" + # dependencies + - "**/Cargo.toml" + - "**/Cargo.lock" + # configuration files + - .cargo/config.toml + - "**/clippy.toml" + # supply chain security + - "**/deny.toml" + # workflow definitions + - docker/** + - .dockerignore + - .github/workflows/zfnd-ci-integration-tests-gcp.yml + - .github/workflows/zfnd-deploy-integration-tests-gcp.yml + - .github/workflows/zfnd-find-cached-disks.yml + - .github/workflows/zfnd-build-docker-image.yml + workflow_call: inputs: network: - default: "Mainnet" + default: Mainnet type: string regenerate-disks: default: false @@ -27,36 +103,79 @@ on: default: false type: boolean +permissions: + contents: read + +env: + CARGO_TERM_COLOR: always + RUST_BACKTRACE: 1 + RUST_LIB_BACKTRACE: 1 + RUST_LOG: info + COLORBT_SHOW_HIDDEN: 1 + #! IMPORTANT #! -#! The job names in `ci-integration-tests-gcp.yml`, `ci-integration-tests-gcp.patch.yml` and -#! `ci-integration-tests-gcp.patch-external.yml` must be kept in sync. +#! The job names in `zfnd-ci-integration-tests-gcp.yml` must be kept in sync. #! jobs: + # Build the docker image used by the tests. + # + # The default network in the Zebra config in the image is mainnet, unless a manually triggered + # workflow or repository variable is configured differently. Testnet jobs change that config to + # testnet when running the image. 
+ build: + name: Build CI Docker + # Skip PRs from external repositories, let them pass, and then GitHub's Merge Queue will check them + if: ${{ (!startsWith(github.event_name, 'pull') || !github.event.pull_request.head.repo.fork) && (github.event_name != 'pull_request' || github.event.label.name == 'run-stateful-tests') }} + permissions: + contents: read + id-token: write + pull-requests: write + statuses: write + uses: ./.github/workflows/zfnd-build-docker-image.yml + with: + dockerfile_path: ./docker/Dockerfile + dockerfile_target: tests + image_name: ${{ vars.CI_IMAGE_NAME }} + no_cache: ${{ inputs.no_cache || false }} + rust_backtrace: full + rust_lib_backtrace: full + rust_log: info + features: ${{ format('{0} {1}', vars.RUST_PROD_FEATURES, vars.RUST_TEST_FEATURES) }} + # This step needs access to Docker Hub secrets to run successfully + secrets: + DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }} + DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_TOKEN }} # Check if the cached state disks used by the tests are available for the default network. # # The default network is mainnet unless a manually triggered workflow or repository variable # is configured differently. 
# - # The outputs for this job have the same names as the workflow outputs in sub-find-cached-disks.yml + # The outputs for this job have the same names as the workflow outputs in zfnd-find-cached-disks.yml get-available-disks: name: Check if cached state disks exist for ${{ inputs.network || vars.ZCASH_NETWORK }} # Skip PRs from external repositories, let them pass, and then GitHub's Merge Queue will check them - if: ${{ !startsWith(github.event_name, 'pull') || !github.event.pull_request.head.repo.fork }} - uses: ./.github/workflows/sub-find-cached-disks.yml + if: ${{ (!startsWith(github.event_name, 'pull') || !github.event.pull_request.head.repo.fork) && (github.event_name != 'pull_request' || github.event.label.name == 'run-stateful-tests') }} + permissions: + contents: read + id-token: write + uses: ./.github/workflows/zfnd-find-cached-disks.yml with: network: ${{ inputs.network || vars.ZCASH_NETWORK }} # Check if the cached state disks used by the tests are available for testnet. # - # The outputs for this job have the same names as the workflow outputs in sub-find-cached-disks.yml + # The outputs for this job have the same names as the workflow outputs in zfnd-find-cached-disks.yml # Some outputs are ignored, because we don't run those jobs on testnet. 
get-available-disks-testnet: name: Check if cached state disks exist for testnet - if: ${{ !startsWith(github.event_name, 'pull') || !github.event.pull_request.head.repo.fork }} - uses: ./.github/workflows/sub-find-cached-disks.yml + if: ${{ (!startsWith(github.event_name, 'pull') || !github.event.pull_request.head.repo.fork) && (github.event_name != 'pull_request' || github.event.label.name == 'run-stateful-tests') }} + permissions: + contents: read + id-token: write + uses: ./.github/workflows/zfnd-find-cached-disks.yml with: - network: "Testnet" + network: Testnet # zebrad cached checkpoint state tests @@ -69,12 +188,17 @@ jobs: # Note: the output from get-available-disks should match with the caller workflow inputs sync-to-mandatory-checkpoint: name: Zebra checkpoint - needs: [get-available-disks] - uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml + needs: [build, get-available-disks] + permissions: + contents: read + id-token: write + uses: ./.github/workflows/zfnd-deploy-integration-tests-gcp.yml if: ${{ !fromJSON(needs.get-available-disks.outputs.zebra_checkpoint_disk) || github.event.inputs.regenerate-disks == 'true' }} concurrency: group: ${{ github.event_name == 'workflow_dispatch' && format('manual-{0}-sync-to-mandatory-checkpoint', github.run_id) || 'sync-to-mandatory-checkpoint' }} cancel-in-progress: false + secrets: + GCP_SSH_PRIVATE_KEY: ${{ secrets.GCP_SSH_PRIVATE_KEY }} with: app_name: zebrad test_id: sync-to-mandatory-checkpoint @@ -87,8 +211,7 @@ jobs: saves_to_disk: true force_save_to_disk: ${{ inputs.force_save_to_disk || false }} disk_suffix: checkpoint - height_grep_text: 'flushing database to disk .*height.*=.*Height.*\(' - secrets: inherit + height_grep_text: flushing database to disk .*height.*=.*Height.*\( # Test that Zebra syncs and fully validates a few thousand blocks from a cached mandatory checkpoint disk # @@ -97,8 +220,13 @@ jobs: sync-past-mandatory-checkpoint: name: Zebra checkpoint update needs: 
[sync-to-mandatory-checkpoint, get-available-disks] - uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml + permissions: + contents: read + id-token: write + uses: ./.github/workflows/zfnd-deploy-integration-tests-gcp.yml if: ${{ !cancelled() && !failure() && (fromJSON(needs.get-available-disks.outputs.zebra_checkpoint_disk) || needs.sync-to-mandatory-checkpoint.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} + secrets: + GCP_SSH_PRIVATE_KEY: ${{ secrets.GCP_SSH_PRIVATE_KEY }} with: app_name: zebrad test_id: sync-past-mandatory-checkpoint @@ -107,7 +235,6 @@ jobs: needs_zebra_state: true saves_to_disk: false disk_suffix: checkpoint - secrets: inherit # zebrad cached tip state tests @@ -124,12 +251,17 @@ jobs: # Note: the output from get-available-disks should match with the caller workflow inputs sync-full-mainnet: name: Zebra tip - needs: [get-available-disks] - uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml + needs: [build, get-available-disks] + permissions: + contents: read + id-token: write + uses: ./.github/workflows/zfnd-deploy-integration-tests-gcp.yml if: ${{ github.event_name == 'schedule' || !fromJSON(needs.get-available-disks.outputs.zebra_tip_disk) || (github.event.inputs.run-full-sync == 'true' && (inputs.network || vars.ZCASH_NETWORK) == 'Mainnet') }} concurrency: group: ${{ github.event_name == 'workflow_dispatch' && format('manual-{0}-sync-full-mainnet', github.run_id) || 'sync-full-mainnet' }} cancel-in-progress: false + secrets: + GCP_SSH_PRIVATE_KEY: ${{ secrets.GCP_SSH_PRIVATE_KEY }} with: app_name: zebrad test_id: sync-full-mainnet @@ -141,8 +273,7 @@ jobs: needs_zebra_state: false saves_to_disk: true force_save_to_disk: ${{ inputs.force_save_to_disk || false }} - height_grep_text: 'current_height.*=.*Height.*\(' - secrets: inherit + height_grep_text: current_height.*=.*Height.*\( # Test that Zebra 
can sync to the chain tip, using a cached Zebra tip state, # without launching `lightwalletd`. @@ -156,8 +287,13 @@ jobs: sync-update-mainnet: name: Zebra tip update needs: [sync-full-mainnet, get-available-disks] - uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml + permissions: + contents: read + id-token: write + uses: ./.github/workflows/zfnd-deploy-integration-tests-gcp.yml if: ${{ !cancelled() && !failure() && (fromJSON(needs.get-available-disks.outputs.zebra_tip_disk) || needs.sync-full-mainnet.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} + secrets: + GCP_SSH_PRIVATE_KEY: ${{ secrets.GCP_SSH_PRIVATE_KEY }} with: app_name: zebrad test_id: sync-update-mainnet @@ -167,8 +303,7 @@ jobs: # update the disk on every PR, to increase CI speed saves_to_disk: true force_save_to_disk: ${{ inputs.force_save_to_disk || false }} - height_grep_text: 'current_height.*=.*Height.*\(' - secrets: inherit + height_grep_text: current_height.*=.*Height.*\( # zebra mainnet checkpoint generation tests @@ -186,8 +321,13 @@ jobs: generate-checkpoints-mainnet: name: Generate checkpoints mainnet needs: [sync-full-mainnet, get-available-disks] - uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml + permissions: + contents: read + id-token: write + uses: ./.github/workflows/zfnd-deploy-integration-tests-gcp.yml if: ${{ !cancelled() && !failure() && (fromJSON(needs.get-available-disks.outputs.zebra_tip_disk) || needs.sync-full-mainnet.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} + secrets: + GCP_SSH_PRIVATE_KEY: ${{ secrets.GCP_SSH_PRIVATE_KEY }} with: app_name: zebrad test_id: generate-checkpoints-mainnet @@ -197,8 +337,7 @@ jobs: needs_zebra_state: true # sync-update-mainnet updates the disk on every PR, so we don't need to 
do it here saves_to_disk: false - height_grep_text: 'current_height.*=.*Height.*\(' - secrets: inherit + height_grep_text: current_height.*=.*Height.*\( # zebra testnet checkpoint generation tests # @@ -217,12 +356,17 @@ jobs: # Note: the output from get-available-disks-testnet should match with the caller workflow inputs sync-full-testnet: name: Zebra tip on testnet - needs: [get-available-disks-testnet] - uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml + needs: [build, get-available-disks-testnet] + permissions: + contents: read + id-token: write + uses: ./.github/workflows/zfnd-deploy-integration-tests-gcp.yml if: ${{ !fromJSON(needs.get-available-disks-testnet.outputs.zebra_tip_disk) || (github.event.inputs.run-full-sync == 'true' && (inputs.network || vars.ZCASH_NETWORK) == 'Testnet') }} concurrency: group: ${{ github.event_name == 'workflow_dispatch' && format('manual-{0}-sync-full-testnet', github.run_id) || 'sync-full-testnet' }} cancel-in-progress: false + secrets: + GCP_SSH_PRIVATE_KEY: ${{ secrets.GCP_SSH_PRIVATE_KEY }} with: app_name: zebrad test_id: sync-full-testnet @@ -235,8 +379,7 @@ jobs: needs_zebra_state: false saves_to_disk: true force_save_to_disk: ${{ inputs.force_save_to_disk || false }} - height_grep_text: 'current_height.*=.*Height.*\(' - secrets: inherit + height_grep_text: current_height.*=.*Height.*\( # Test that Zebra can generate testnet checkpoints after syncing to the chain tip, # using a cached Zebra tip state. 
@@ -252,8 +395,13 @@ jobs: generate-checkpoints-testnet: name: Generate checkpoints testnet needs: [sync-full-testnet, get-available-disks-testnet] - uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml + permissions: + contents: read + id-token: write + uses: ./.github/workflows/zfnd-deploy-integration-tests-gcp.yml if: ${{ !cancelled() && !failure() && (fromJSON(needs.get-available-disks-testnet.outputs.zebra_tip_disk) || needs.sync-full-testnet.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} + secrets: + GCP_SSH_PRIVATE_KEY: ${{ secrets.GCP_SSH_PRIVATE_KEY }} with: app_name: zebrad test_id: generate-checkpoints-testnet @@ -265,8 +413,7 @@ jobs: # we don't have a sync-update-mainnet-testnet job, so we need to update the disk here saves_to_disk: true force_save_to_disk: ${{ inputs.force_save_to_disk || false }} - height_grep_text: 'zebra_tip_height.*=.*Height.*\(' - secrets: inherit + height_grep_text: zebra_tip_height.*=.*Height.*\( # lightwalletd cached tip state tests @@ -282,12 +429,17 @@ jobs: lwd-sync-full: name: lightwalletd tip needs: [sync-full-mainnet, get-available-disks] - uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml + permissions: + contents: read + id-token: write + uses: ./.github/workflows/zfnd-deploy-integration-tests-gcp.yml # Currently the lightwalletd tests only work on Mainnet if: ${{ !cancelled() && !failure() && (inputs.network || vars.ZCASH_NETWORK) == 'Mainnet' && (fromJSON(needs.get-available-disks.outputs.zebra_tip_disk) || needs.sync-full-mainnet.result == 'success') && (github.event_name == 'schedule' || !fromJSON(needs.get-available-disks.outputs.lwd_tip_disk) || github.event.inputs.run-lwd-sync == 'true' ) }} concurrency: group: ${{ github.event_name == 'workflow_dispatch' && format('manual-{0}-lwd-sync-full', github.run_id) || 'lwd-sync-full' }} cancel-in-progress: false + secrets: + 
GCP_SSH_PRIVATE_KEY: ${{ secrets.GCP_SSH_PRIVATE_KEY }} with: app_name: lightwalletd test_id: lwd-sync-full @@ -301,7 +453,6 @@ jobs: force_save_to_disk: ${{ inputs.force_save_to_disk || false }} disk_prefix: lwd-cache height_grep_text: "Waiting for block: " - secrets: inherit # Test update sync of lightwalletd with a lightwalletd and Zebra tip state # Runs: @@ -313,8 +464,13 @@ jobs: lwd-sync-update: name: lightwalletd tip update needs: [lwd-sync-full, get-available-disks] - uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml + permissions: + contents: read + id-token: write + uses: ./.github/workflows/zfnd-deploy-integration-tests-gcp.yml if: ${{ !cancelled() && !failure() && (inputs.network || vars.ZCASH_NETWORK) == 'Mainnet' && (fromJSON(needs.get-available-disks.outputs.lwd_tip_disk) || needs.lwd-sync-full.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} + secrets: + GCP_SSH_PRIVATE_KEY: ${{ secrets.GCP_SSH_PRIVATE_KEY }} with: app_name: lightwalletd test_id: lwd-sync-update @@ -326,7 +482,6 @@ jobs: force_save_to_disk: ${{ inputs.force_save_to_disk || false }} disk_prefix: lwd-cache height_grep_text: "Waiting for block: " - secrets: inherit # Test that Zebra can answer a synthetic RPC call, using a cached Zebra tip state # @@ -341,8 +496,13 @@ jobs: lwd-rpc-test: name: Zebra tip JSON-RPC needs: [sync-full-mainnet, get-available-disks] - uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml + permissions: + contents: read + id-token: write + uses: ./.github/workflows/zfnd-deploy-integration-tests-gcp.yml if: ${{ !cancelled() && !failure() && (inputs.network || vars.ZCASH_NETWORK) == 'Mainnet' && (fromJSON(needs.get-available-disks.outputs.zebra_tip_disk) || needs.sync-full-mainnet.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && 
github.event.inputs.run-lwd-sync != 'true' }} + secrets: + GCP_SSH_PRIVATE_KEY: ${{ secrets.GCP_SSH_PRIVATE_KEY }} with: app_name: lightwalletd test_id: lwd-rpc-test @@ -350,7 +510,6 @@ jobs: test_variables: "ZEBRA_NETWORK__NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }},NEXTEST_PROFILE=lwd-rpc-test,TEST_LIGHTWALLETD=1" needs_zebra_state: true saves_to_disk: false - secrets: inherit # Test that Zebra can handle a lightwalletd send transaction RPC call, using a cached Zebra tip state # @@ -363,8 +522,13 @@ jobs: lwd-rpc-send-tx: name: Lightwalletd send transactions needs: [lwd-sync-full, get-available-disks] - uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml + permissions: + contents: read + id-token: write + uses: ./.github/workflows/zfnd-deploy-integration-tests-gcp.yml if: ${{ !cancelled() && !failure() && (inputs.network || vars.ZCASH_NETWORK) == 'Mainnet' && (fromJSON(needs.get-available-disks.outputs.lwd_tip_disk) || needs.lwd-sync-full.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} + secrets: + GCP_SSH_PRIVATE_KEY: ${{ secrets.GCP_SSH_PRIVATE_KEY }} with: app_name: lightwalletd test_id: lwd-rpc-send-tx @@ -373,7 +537,6 @@ jobs: needs_zebra_state: true needs_lwd_state: true saves_to_disk: false - secrets: inherit # Test that Zebra can handle gRPC wallet calls, using a cached Zebra tip state # @@ -386,8 +549,13 @@ jobs: lwd-grpc-wallet: name: lightwalletd GRPC tests needs: [lwd-sync-full, get-available-disks] - uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml + permissions: + contents: read + id-token: write + uses: ./.github/workflows/zfnd-deploy-integration-tests-gcp.yml if: ${{ !cancelled() && !failure() && (inputs.network || vars.ZCASH_NETWORK) == 'Mainnet' && (fromJSON(needs.get-available-disks.outputs.lwd_tip_disk) || needs.lwd-sync-full.result == 'success') && github.event.inputs.regenerate-disks != 
'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} + secrets: + GCP_SSH_PRIVATE_KEY: ${{ secrets.GCP_SSH_PRIVATE_KEY }} with: app_name: lightwalletd test_id: lwd-grpc-wallet @@ -396,7 +564,6 @@ jobs: needs_zebra_state: true needs_lwd_state: true saves_to_disk: false - secrets: inherit ## getblocktemplate RPC tests using cached Zebra state on mainnet # @@ -413,8 +580,13 @@ jobs: rpc-get-block-template: name: get block template needs: [sync-full-mainnet, get-available-disks] - uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml + permissions: + contents: read + id-token: write + uses: ./.github/workflows/zfnd-deploy-integration-tests-gcp.yml if: ${{ !cancelled() && !failure() && (fromJSON(needs.get-available-disks.outputs.zebra_tip_disk) || needs.sync-full-mainnet.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} + secrets: + GCP_SSH_PRIVATE_KEY: ${{ secrets.GCP_SSH_PRIVATE_KEY }} with: app_name: zebrad test_id: rpc-get-block-template @@ -423,7 +595,6 @@ jobs: needs_zebra_state: true needs_lwd_state: false saves_to_disk: false - secrets: inherit # Test that Zebra can handle a submit block RPC call, using a cached Zebra tip state # @@ -436,8 +607,13 @@ jobs: rpc-submit-block: name: submit block needs: [sync-full-mainnet, get-available-disks] - uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml + permissions: + contents: read + id-token: write + uses: ./.github/workflows/zfnd-deploy-integration-tests-gcp.yml if: ${{ !cancelled() && !failure() && (fromJSON(needs.get-available-disks.outputs.zebra_tip_disk) || needs.sync-full-mainnet.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} + secrets: + GCP_SSH_PRIVATE_KEY: ${{ secrets.GCP_SSH_PRIVATE_KEY }} with: app_name: 
zebrad test_id: rpc-submit-block @@ -446,7 +622,6 @@ jobs: needs_zebra_state: true needs_lwd_state: false saves_to_disk: false - secrets: inherit failure-issue: name: Open or update issues for main branch failures @@ -474,7 +649,7 @@ jobs: if: (failure() && github.event.pull_request == null) || (cancelled() && github.event.pull_request == null) runs-on: ubuntu-latest steps: - - uses: jayqi/failed-build-issue-action@v1 + - uses: jayqi/failed-build-issue-action@1a893bbf43ef1c2a8705e2b115cd4f0fe3c5649b #v1.2.0 with: title-template: "{{refname}} branch CI failed: {{eventName}} in {{workflow}}" # New failures open an issue with this label. @@ -482,3 +657,35 @@ jobs: # If there is already an open issue with this label, any failures become comments on that issue. always-create-new-issue: false github-token: ${{ secrets.GITHUB_TOKEN }} + + integration-tests-success: + name: integration tests success + runs-on: ubuntu-latest + if: >- + ${{ + always() && + (!startsWith(github.event_name, 'pull') || !github.event.pull_request.head.repo.fork) && + (github.event_name != 'pull_request' || github.event.label.name == 'run-stateful-tests') + }} + needs: + - build + - sync-to-mandatory-checkpoint + - sync-full-mainnet + - lwd-sync-full + - sync-past-mandatory-checkpoint + - sync-update-mainnet + - generate-checkpoints-mainnet + - sync-full-testnet + - generate-checkpoints-testnet + - lwd-sync-update + - lwd-rpc-test + - lwd-rpc-send-tx + - lwd-grpc-wallet + - rpc-get-block-template + - rpc-submit-block + timeout-minutes: 30 + steps: + - name: Decide whether the needed jobs succeeded or failed + uses: re-actors/alls-green@05ac9388f0aebcb5727afa17fcccfecd6f8ec5fe #v1.2.2 + with: + jobs: ${{ toJSON(needs) }} diff --git a/.github/workflows/chore-delete-gcp-resources.yml b/.github/workflows/zfnd-delete-gcp-resources.yml similarity index 79% rename from .github/workflows/chore-delete-gcp-resources.yml rename to .github/workflows/zfnd-delete-gcp-resources.yml index 00e254a9e53..2cf826ca133 
100644 --- a/.github/workflows/chore-delete-gcp-resources.yml +++ b/.github/workflows/zfnd-delete-gcp-resources.yml @@ -14,7 +14,7 @@ on: # Run daily, when most devs aren't working # 0700 UTC is after AEST working hours but before ET working hours schedule: - - cron: "0 7 * * *" + - cron: 0 7 * * * workflow_dispatch: env: @@ -31,33 +31,36 @@ env: # We keep this long enough for PRs that are still on the same commit can re-run with the same image. DELETE_IMAGE_HOURS: 504h # 21 days +permissions: + contents: read + jobs: delete-resources: name: Delete old GCP resources if: github.repository_owner == 'ZcashFoundation' runs-on: ubuntu-latest permissions: - contents: 'read' - id-token: 'write' + contents: read + id-token: write strategy: matrix: environment: [dev, prod] environment: ${{ matrix.environment }} steps: - - uses: actions/checkout@v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 #v4.2.2 with: persist-credentials: false # Setup gcloud CLI - name: Authenticate to Google Cloud id: auth - uses: google-github-actions/auth@v2.1.10 + uses: google-github-actions/auth@7c6bc770dae815cd3e89ee6cdf493a5fab2cc093 #v3.0.0 with: workload_identity_provider: '${{ vars.GCP_WIF }}' service_account: '${{ vars.GCP_DEPLOYMENTS_SA }}' - name: Set up Cloud SDK - uses: google-github-actions/setup-gcloud@v2.1.4 + uses: google-github-actions/setup-gcloud@aa5489c8933f4cc7a4f7d45035b3b1440c9c10db #v3.0.1 # Deletes all mainnet and testnet instances older than $DELETE_INSTANCE_DAYS days. 
# @@ -109,36 +112,35 @@ jobs: if: github.repository_owner == 'ZcashFoundation' runs-on: ubuntu-latest permissions: - contents: 'read' - id-token: 'write' + contents: read + id-token: write strategy: matrix: environment: [dev, prod] environment: ${{ matrix.environment }} steps: - - uses: actions/checkout@v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 #v4.2.2 with: persist-credentials: false # Setup gcloud CLI - name: Authenticate to Google Cloud id: auth - uses: google-github-actions/auth@v2.1.10 + uses: google-github-actions/auth@7c6bc770dae815cd3e89ee6cdf493a5fab2cc093 #v3.0.0 with: workload_identity_provider: '${{ vars.GCP_WIF }}' service_account: '${{ vars.GCP_DEPLOYMENTS_SA }}' - token_format: 'access_token' + token_format: access_token - name: Login to Google Artifact Registry - uses: docker/login-action@v3.4.0 + uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 #v3.5.0 with: registry: us-docker.pkg.dev username: oauth2accesstoken password: ${{ steps.auth.outputs.access_token }} # Deletes all images older than $DELETE_IMAGE_HOURS days. 
- - uses: 'docker://us-docker.pkg.dev/gcr-cleaner/gcr-cleaner/gcr-cleaner-cli' - continue-on-error: true # TODO: remove after fixing https://github.com/ZcashFoundation/zebra/issues/5933 + - uses: docker://us-docker.pkg.dev/gcr-cleaner/gcr-cleaner/gcr-cleaner-cli@sha256:333a4d0617b19a86b55f9aaae10e49294b4469590166f3b4a1c4f7bbd20eb6c2 # Refer to the official documentation to understand available arguments: # https://github.com/GoogleCloudPlatform/gcr-cleaner with: @@ -146,3 +148,22 @@ jobs: -repo=us-docker.pkg.dev/${{ vars.GCP_PROJECT }}/zebra/zebrad-test -grace=${{ env.DELETE_IMAGE_HOURS }} -keep=${{ env.KEEP_LATEST_IMAGE_COUNT }} + + delete-resources-success: + name: Delete GCP resources success + runs-on: ubuntu-latest + # Only run when the owner-specific cleanup jobs were eligible to execute + if: >- + ${{ + always() && + github.repository_owner == 'ZcashFoundation' + }} + needs: + - delete-resources + - clean-registries + timeout-minutes: 1 + steps: + - name: Decide whether the needed jobs succeeded or failed + uses: re-actors/alls-green@05ac9388f0aebcb5727afa17fcccfecd6f8ec5fe #v1.2.2 + with: + jobs: ${{ toJSON(needs) }} diff --git a/.github/workflows/sub-deploy-integration-tests-gcp.yml b/.github/workflows/zfnd-deploy-integration-tests-gcp.yml similarity index 85% rename from .github/workflows/sub-deploy-integration-tests-gcp.yml rename to .github/workflows/zfnd-deploy-integration-tests-gcp.yml index eba79de40d6..16707a9a8ef 100644 --- a/.github/workflows/sub-deploy-integration-tests-gcp.yml +++ b/.github/workflows/zfnd-deploy-integration-tests-gcp.yml @@ -7,77 +7,80 @@ on: test_id: required: true type: string - description: 'Unique identifier for the test' + description: Unique identifier for the test test_description: required: true type: string - description: 'Explains what the test does' + description: Explains what the test does height_grep_text: required: false type: string - description: 'Regular expression to find the tip height in test logs, and add 
it to newly created cached state image metadata' + description: Regular expression to find the tip height in test logs, and add it to newly created cached state image metadata # Test selection and parameters test_variables: required: true type: string - description: 'Environmental variables used to select and configure the test' + description: Environmental variables used to select and configure the test network: required: false type: string default: Mainnet - description: 'Zcash network to test against' + description: Zcash network to test against is_long_test: required: false type: boolean default: false - description: 'Does this test need multiple run jobs? (Does it run longer than 6 hours?)' + description: Does this test need multiple run jobs? (Does it run longer than 6 hours?) # Cached state # zebra_state_dir: required: false type: string - default: '/home/zebra/.cache/zebra' - description: 'Zebra cached state directory and input image prefix to search in GCP' + default: /home/zebra/.cache/zebra + description: Zebra cached state directory and input image prefix to search in GCP lwd_state_dir: required: false type: string - default: '/home/zebra/.cache/lwd' - description: 'Lightwalletd cached state directory and input image prefix to search in GCP' + default: /home/zebra/.cache/lwd + description: Lightwalletd cached state directory and input image prefix to search in GCP disk_prefix: required: false type: string - default: 'zebrad-cache' - description: 'Image name prefix, and `zebra_state_dir` name for newly created cached states' + default: zebrad-cache + description: Image name prefix, and `zebra_state_dir` name for newly created cached states disk_suffix: required: false type: string - default: 'tip' - description: 'Image name suffix' + default: tip + description: Image name suffix needs_zebra_state: required: true type: boolean - description: 'Does the test use Zebra cached state?' + description: Does the test use Zebra cached state? 
needs_lwd_state: required: false type: boolean - description: 'Does the test use Lightwalletd and Zebra cached state?' + description: Does the test use Lightwalletd and Zebra cached state? # main branch states can be outdated and slower, but they can also be more reliable saves_to_disk: required: true type: boolean - description: 'Can this test create new or updated cached state disks?' + description: Can this test create new or updated cached state disks? force_save_to_disk: required: false type: boolean default: false - description: 'Force this test to create a new or updated cached state disk' + description: Force this test to create a new or updated cached state disk app_name: required: false type: string - default: 'zebra' - description: 'Application name, used to work out when a job is an update job' + default: zebra + description: Application name, used to work out when a job is an update job + secrets: + GCP_SSH_PRIVATE_KEY: + required: true env: RUST_LOG: ${{ vars.RUST_LOG }} @@ -95,6 +98,10 @@ env: # How many blocks to wait before creating an updated cached state image. # 1 day is approximately 1152 blocks. 
CACHED_STATE_UPDATE_LIMIT: 576 + +permissions: + contents: read + jobs: # Find a cached state disk for ${{ inputs.test_id }}, matching all of: # - disk cached state prefix -> zebrad-cache or lwd-cache @@ -111,7 +118,10 @@ jobs: # get-disk-name: name: Get disk name - uses: ./.github/workflows/sub-find-cached-disks.yml + permissions: + contents: read + id-token: write + uses: ./.github/workflows/zfnd-find-cached-disks.yml if: ${{ (inputs.needs_zebra_state || inputs.needs_lwd_state) || (inputs.saves_to_disk || inputs.force_save_to_disk) }} with: network: ${{ inputs.network || vars.ZCASH_NETWORK }} @@ -136,17 +146,16 @@ jobs: env: CACHED_DISK_NAME: ${{ (inputs.needs_zebra_state || inputs.needs_lwd_state) && needs.get-disk-name.outputs.cached_disk_name || '' }} permissions: - contents: 'read' - id-token: 'write' + contents: read + id-token: write steps: - - uses: actions/checkout@v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 #v4.2.2 with: persist-credentials: false fetch-depth: '2' - - uses: r7kamura/rust-problem-matchers@v1.5.0 - name: Inject slug/short variables - uses: rlespinasse/github-slug-action@v5 + uses: rlespinasse/github-slug-action@c33ff65466c58d57e4d796f88bb1ae0ff26ee453 #v5.2.0 with: short-length: 7 @@ -157,7 +166,7 @@ jobs: # Install our SSH secret - name: Install private SSH key - uses: shimataro/ssh-key-action@v2.7.0 + uses: shimataro/ssh-key-action@d4fffb50872869abe2d9a9098a6d9c5aa7d16be4 #v2.7.0 with: key: ${{ secrets.GCP_SSH_PRIVATE_KEY }} name: google_compute_engine @@ -171,13 +180,13 @@ jobs: # Setup gcloud CLI - name: Authenticate to Google Cloud id: auth - uses: google-github-actions/auth@v2.1.10 + uses: google-github-actions/auth@7c6bc770dae815cd3e89ee6cdf493a5fab2cc093 #v3.0.0 with: - workload_identity_provider: '${{ vars.GCP_WIF }}' - service_account: '${{ vars.GCP_DEPLOYMENTS_SA }}' + workload_identity_provider: "${{ vars.GCP_WIF }}" + service_account: "${{ vars.GCP_DEPLOYMENTS_SA }}" - name: Set up Cloud SDK - 
uses: google-github-actions/setup-gcloud@v2.1.4 + uses: google-github-actions/setup-gcloud@aa5489c8933f4cc7a4f7d45035b3b1440c9c10db #v3.0.1 # Disk Mounting Logic Explanation: # @@ -220,47 +229,11 @@ jobs: DISK_ATTACH_PARAMS="--create-disk=${DISK_PARAMS}" fi - # Mount the disk(s) to the container - # This partition=1 logic differentiates between disk types: - # - Only Zebra tip disks (from full sync) have partitions and need partition=1 - # - LWD disks never have partitions - # - Checkpoint disks don't have partitions - # TODO: Consider removing this logic once all cached disk images use consistent partitioning. - - # Determine if we should use partition=1 based on specific test requirements - # Default to safe approach: no partitions unless explicitly whitelisted - USE_PARTITION="false" - if [ -n "${{ env.CACHED_DISK_NAME }}" ]; then - # All tests that use Zebra tip disks (which have partitions) - if [[ "${{ inputs.test_id }}" == "sync-update-mainnet" ]] || \ - [[ "${{ inputs.test_id }}" == "sync-full-mainnet" ]] || \ - [[ "${{ inputs.test_id }}" == "generate-checkpoints-mainnet" ]] || \ - [[ "${{ inputs.test_id }}" == "lwd-rpc-test" ]] || \ - [[ "${{ inputs.test_id }}" == "rpc-get-block-template" ]] || \ - [[ "${{ inputs.test_id }}" == "rpc-submit-block" ]]; then - USE_PARTITION="true" - echo "Using Zebra tip disk with partition=1: ${{ env.CACHED_DISK_NAME }}" - # All other tests default to no partition for safety - else - USE_PARTITION="false" - echo "Using cached disk without partition (safe default): ${{ env.CACHED_DISK_NAME }}" - fi - fi - - # Mount zebra state directory - if [[ "$USE_PARTITION" == "true" ]]; then - CONTAINER_MOUNT_DISKS="--container-mount-disk=mount-path=${{ inputs.zebra_state_dir }},name=${NAME},mode=rw,partition=1" - else - CONTAINER_MOUNT_DISKS="--container-mount-disk=mount-path=${{ inputs.zebra_state_dir }},name=${NAME},mode=rw" - fi + CONTAINER_MOUNT_DISKS="--container-mount-disk=mount-path=${{ inputs.zebra_state_dir 
}},name=${NAME},mode=rw" # Mount the same disk to the lwd path if needed if [[ "${{ inputs.needs_lwd_state }}" == "true" || "${{ inputs.test_id }}" == "lwd-sync-full" ]]; then - if [[ "$USE_PARTITION" == "true" ]]; then - CONTAINER_MOUNT_DISKS+=" --container-mount-disk=mount-path=${{ inputs.lwd_state_dir }},name=${NAME},mode=rw,partition=1" - else - CONTAINER_MOUNT_DISKS+=" --container-mount-disk=mount-path=${{ inputs.lwd_state_dir }},name=${NAME},mode=rw" - fi + CONTAINER_MOUNT_DISKS+=" --container-mount-disk=mount-path=${{ inputs.lwd_state_dir }},name=${NAME},mode=rw" fi # Environment variables for the container @@ -387,17 +360,16 @@ jobs: STATE_VERSION: ${{ needs.test-result.outputs.state_version }} CACHED_DISK_NAME: ${{ needs.test-result.outputs.cached_disk_name }} permissions: - contents: 'read' - id-token: 'write' + contents: read + id-token: write steps: - - uses: actions/checkout@v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 #v4.2.2 with: persist-credentials: false fetch-depth: '2' - - uses: r7kamura/rust-problem-matchers@v1.5.0 - name: Inject slug/short variables - uses: rlespinasse/github-slug-action@v5 + uses: rlespinasse/github-slug-action@c33ff65466c58d57e4d796f88bb1ae0ff26ee453 #v5.2.0 with: short-length: 7 @@ -420,7 +392,7 @@ jobs: # Install our SSH secret - name: Install private SSH key - uses: shimataro/ssh-key-action@v2.7.0 + uses: shimataro/ssh-key-action@d4fffb50872869abe2d9a9098a6d9c5aa7d16be4 #v2.7.0 with: key: ${{ secrets.GCP_SSH_PRIVATE_KEY }} name: google_compute_engine @@ -434,13 +406,13 @@ jobs: # Setup gcloud CLI - name: Authenticate to Google Cloud id: auth - uses: google-github-actions/auth@v2.1.10 + uses: google-github-actions/auth@7c6bc770dae815cd3e89ee6cdf493a5fab2cc093 #v3.0.0 with: - workload_identity_provider: '${{ vars.GCP_WIF }}' - service_account: '${{ vars.GCP_DEPLOYMENTS_SA }}' + workload_identity_provider: "${{ vars.GCP_WIF }}" + service_account: "${{ vars.GCP_DEPLOYMENTS_SA }}" - name: Set up 
Cloud SDK - uses: google-github-actions/setup-gcloud@v2.1.4 + uses: google-github-actions/setup-gcloud@aa5489c8933f4cc7a4f7d45035b3b1440c9c10db #v3.0.1 # Sets the $UPDATE_SUFFIX env var to "-u" if updating a previous cached state, # and the empty string otherwise. @@ -718,30 +690,29 @@ jobs: if: always() continue-on-error: true permissions: - contents: 'read' - id-token: 'write' + contents: read + id-token: write steps: - - uses: actions/checkout@v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 #v4.2.2 with: persist-credentials: false fetch-depth: '2' - - uses: r7kamura/rust-problem-matchers@v1.5.0 - name: Inject slug/short variables - uses: rlespinasse/github-slug-action@v5 + uses: rlespinasse/github-slug-action@c33ff65466c58d57e4d796f88bb1ae0ff26ee453 #v5.2.0 with: short-length: 7 # Setup gcloud CLI - name: Authenticate to Google Cloud id: auth - uses: google-github-actions/auth@v2.1.10 + uses: google-github-actions/auth@7c6bc770dae815cd3e89ee6cdf493a5fab2cc093 #v3.0.0 with: - workload_identity_provider: '${{ vars.GCP_WIF }}' - service_account: '${{ vars.GCP_DEPLOYMENTS_SA }}' + workload_identity_provider: "${{ vars.GCP_WIF }}" + service_account: "${{ vars.GCP_DEPLOYMENTS_SA }}" - name: Set up Cloud SDK - uses: google-github-actions/setup-gcloud@v2.1.4 + uses: google-github-actions/setup-gcloud@aa5489c8933f4cc7a4f7d45035b3b1440c9c10db #v3.0.1 # Deletes the instances that has been recently deployed in the actual commit after all # previous jobs have run, no matter the outcome of the job. 
@@ -754,3 +725,21 @@ jobs: else gcloud compute instances delete "${INSTANCE}" --zone "${{ vars.GCP_ZONE }}" --delete-disks all --quiet fi + + deployment-success: + name: Integration test deployment success + runs-on: ubuntu-latest + # Always run this job to check the status of dependent jobs + if: always() + needs: + - get-disk-name + - test-result + - create-state-image + - delete-instance + timeout-minutes: 1 + steps: + - name: Decide whether the needed jobs succeeded or failed + uses: re-actors/alls-green@05ac9388f0aebcb5727afa17fcccfecd6f8ec5fe #v1.2.2 + with: + jobs: ${{ toJSON(needs) }} + allowed-skips: ${{ (inputs.saves_to_disk || inputs.force_save_to_disk) && '' || 'create-state-image' }} diff --git a/.github/workflows/cd-deploy-nodes-gcp.yml b/.github/workflows/zfnd-deploy-nodes-gcp.yml similarity index 88% rename from .github/workflows/cd-deploy-nodes-gcp.yml rename to .github/workflows/zfnd-deploy-nodes-gcp.yml index 74cedf6f87c..8afba9c8c04 100644 --- a/.github/workflows/cd-deploy-nodes-gcp.yml +++ b/.github/workflows/zfnd-deploy-nodes-gcp.yml @@ -25,8 +25,6 @@ concurrency: cancel-in-progress: ${{ github.event_name == 'pull_request' }} on: - merge_group: - types: [checks_requested] workflow_dispatch: inputs: @@ -40,7 +38,7 @@ on: - Testnet cached_disk_type: default: tip - description: "Type of cached disk to use" + description: Type of cached disk to use required: true type: choice options: @@ -48,17 +46,17 @@ on: - checkpoint need_cached_disk: default: true - description: "Use a cached state disk" + description: Use a cached state disk required: false type: boolean no_cache: - description: "Disable the Docker cache for this build" + description: Disable the Docker cache for this build required: false type: boolean default: false log_file: default: "" - description: "Log to a file path rather than standard output" + description: Log to a file path rather than standard output push: # Skip main branch updates where Rust code and dependencies aren't 
modified. @@ -73,13 +71,13 @@ on: - "**/Cargo.toml" - "**/Cargo.lock" # configuration files - - ".cargo/config.toml" + - .cargo/config.toml - "**/clippy.toml" # workflow definitions - - "docker/**" - - ".dockerignore" - - ".github/workflows/cd-deploy-nodes-gcp.yml" - - ".github/workflows/sub-build-docker-image.yml" + - docker/** + - .dockerignore + - .github/workflows/zfnd-deploy-nodes-gcp.yml + - .github/workflows/zfnd-build-docker-image.yml # Only runs the Docker image tests, doesn't deploy any instances pull_request: @@ -93,22 +91,21 @@ on: - "**/Cargo.toml" - "**/Cargo.lock" # configuration files - - ".cargo/config.toml" + - .cargo/config.toml - "**/clippy.toml" # workflow definitions - - "docker/**" - - ".dockerignore" - - ".github/workflows/cd-deploy-nodes-gcp.yml" - - ".github/workflows/sub-build-docker-image.yml" + - docker/** + - .dockerignore + - .github/workflows/zfnd-deploy-nodes-gcp.yml + - .github/workflows/zfnd-build-docker-image.yml release: types: - published -# IMPORTANT -# -# The job names in `cd-deploy-nodes-gcp.yml`, `cd-deploy-nodes-gcp.patch.yml` and -# `cd-deploy-nodes-gcp.patch-external.yml` must be kept in sync. +permissions: + contents: read + jobs: # If a release was made we want to extract the first part of the semver from the # tag_name @@ -128,7 +125,7 @@ jobs: steps: - name: Getting Zebrad Version id: get - uses: actions/github-script@v7.0.1 + uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd #v8.0.0 with: result-encoding: string script: | @@ -146,7 +143,10 @@ jobs: # PRs from forked repositories are skipped. 
get-disk-name: name: Get disk name - uses: ./.github/workflows/sub-find-cached-disks.yml + permissions: + contents: read + id-token: write + uses: ./.github/workflows/zfnd-find-cached-disks.yml if: ${{ !(github.event.pull_request.head.repo.fork) && (github.event_name != 'workflow_dispatch' || inputs.need_cached_disk) }} with: network: ${{ inputs.network || vars.ZCASH_NETWORK }} @@ -159,7 +159,11 @@ jobs: # The image will be commonly named `zebrad:` build: name: Build CD Docker - uses: ./.github/workflows/sub-build-docker-image.yml + permissions: + contents: read + id-token: write + pull-requests: write + uses: ./.github/workflows/zfnd-build-docker-image.yml if: ${{ github.event_name == 'pull_request' && !github.event.pull_request.head.repo.fork }} with: dockerfile_path: ./docker/Dockerfile @@ -169,15 +173,9 @@ jobs: rust_log: info features: ${{ format('{0} {1}', vars.RUST_PROD_FEATURES, vars.RUST_TEST_FEATURES) }} # This step needs access to Docker Hub secrets to run successfully - secrets: inherit - - # Run a matrix of configuration tests against the Docker image - test-docker-configurations: - name: Test Zebra Docker configurations - needs: build - uses: ./.github/workflows/sub-test-zebra-config.yml - with: - docker_image: ${{ vars.GAR_BASE }}/zebrad@${{ needs.build.outputs.image_digest }} + secrets: + DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }} + DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_TOKEN }} # Deploy Managed Instance Groups (MiGs) for Mainnet and Testnet, # with one node in the configured GCP region. 
@@ -221,7 +219,6 @@ jobs: set-matrix, build, versioning, - test-docker-configurations, get-disk-name, ] runs-on: ubuntu-latest @@ -230,17 +227,17 @@ jobs: CACHED_DISK_NAME: ${{ needs.get-disk-name.outputs.cached_disk_name }} environment: ${{ github.event_name == 'release' && 'prod' || 'dev' }} permissions: - contents: "read" - id-token: "write" + contents: read + id-token: write if: ${{ !cancelled() && !failure() && needs.build.result == 'success' && github.repository_owner == 'ZcashFoundation' && ((github.event_name == 'push' && github.ref_name == 'main') || github.event_name == 'release' || github.event_name == 'workflow_dispatch') }} steps: - - uses: actions/checkout@v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 #v4.2.2 with: persist-credentials: false - name: Inject slug/short variables - uses: rlespinasse/github-slug-action@v5 + uses: rlespinasse/github-slug-action@c33ff65466c58d57e4d796f88bb1ae0ff26ee453 #v5.2.0 with: short-length: 7 @@ -258,13 +255,13 @@ jobs: # Setup gcloud CLI - name: Authenticate to Google Cloud id: auth - uses: google-github-actions/auth@v2.1.10 + uses: google-github-actions/auth@7c6bc770dae815cd3e89ee6cdf493a5fab2cc093 #v3.0.0 with: workload_identity_provider: "${{ vars.GCP_WIF }}" service_account: "${{ vars.GCP_DEPLOYMENTS_SA }}" - name: Set up Cloud SDK - uses: google-github-actions/setup-gcloud@v2.1.4 + uses: google-github-actions/setup-gcloud@aa5489c8933f4cc7a4f7d45035b3b1440c9c10db #v3.0.1 # Retrieves a static IP address for long-running nodes. # This step runs only when triggered by a release or a manual workflow_dispatch event. 
@@ -375,6 +372,29 @@ jobs: --version template="zebrad-${{ needs.versioning.outputs.major_version || env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }}-${NETWORK}" \ --region "${{ vars.GCP_REGION }}" + deploy-nodes-success: + name: Deploy nodes success + runs-on: ubuntu-latest + # Only run when the deployment job actually executed + if: >- + ${{ + always() && + needs.deploy-nodes.result != 'skipped' + }} + needs: + - versioning + - get-disk-name + - build + - set-matrix + - deploy-nodes + timeout-minutes: 1 + steps: + - name: Decide whether the needed jobs succeeded or failed + uses: re-actors/alls-green@05ac9388f0aebcb5727afa17fcccfecd6f8ec5fe #v1.2.2 + with: + jobs: ${{ toJSON(needs) }} + allowed-skips: versioning + failure-issue: name: Open or update issues for release failures # When a new job is added to this workflow, add it to this list. @@ -385,7 +405,7 @@ jobs: if: (failure() && github.event.pull_request == null) || (cancelled() && github.event.pull_request == null) runs-on: ubuntu-latest steps: - - uses: jayqi/failed-build-issue-action@v1 + - uses: jayqi/failed-build-issue-action@1a893bbf43ef1c2a8705e2b115cd4f0fe3c5649b #v1.2.0 with: title-template: "{{refname}} branch CI failed: {{eventName}} in {{workflow}}" # New failures open an issue with this label. 
diff --git a/.github/workflows/sub-find-cached-disks.yml b/.github/workflows/zfnd-find-cached-disks.yml similarity index 81% rename from .github/workflows/sub-find-cached-disks.yml rename to .github/workflows/zfnd-find-cached-disks.yml index 7f260ebdb86..321c6c6ef38 100644 --- a/.github/workflows/sub-find-cached-disks.yml +++ b/.github/workflows/zfnd-find-cached-disks.yml @@ -11,7 +11,7 @@ on: workflow_call: inputs: network: - description: 'The Zcash network used to look up the disks' + description: The Zcash network used to look up the disks required: true type: string disk_prefix: @@ -21,26 +21,29 @@ on: required: false type: string test_id: - description: 'The test ID requiring the cached state disks' + description: The test ID requiring the cached state disks required: false type: string outputs: state_version: - description: 'The version of the cached state disks' + description: The version of the cached state disks value: ${{ jobs.get-cached-disks.outputs.state_version }} cached_disk_name: - description: 'The name of the cached state disk' + description: The name of the cached state disk value: ${{ jobs.get-cached-disks.outputs.cached_disk_name }} lwd_tip_disk: - description: 'true if there is a lightwalletd and Zebra cached state disk, synced near the chain tip' + description: true if there is a lightwalletd and Zebra cached state disk, synced near the chain tip value: ${{ jobs.get-cached-disks.outputs.lwd_tip_disk }} zebra_tip_disk: - description: 'true if there is a Zebra cached state disk synced near the chain tip' + description: true if there is a Zebra cached state disk synced near the chain tip value: ${{ jobs.get-cached-disks.outputs.zebra_tip_disk }} zebra_checkpoint_disk: - description: 'true if there is a Zebra cached state disk synced to the mandatory Zebra checkpoint' + description: true if there is a Zebra cached state disk synced to the mandatory Zebra checkpoint value: ${{ jobs.get-cached-disks.outputs.zebra_checkpoint_disk }} +permissions: + 
contents: read + jobs: get-cached-disks: name: Get ${{ inputs.test_id || inputs.network }} cached disk @@ -53,28 +56,28 @@ jobs: zebra_tip_disk: ${{ steps.get-available-disks.outputs.zebra_tip_disk || steps.set-release-defaults.outputs.zebra_tip_disk }} zebra_checkpoint_disk: ${{ steps.get-available-disks.outputs.zebra_checkpoint_disk || steps.set-release-defaults.outputs.zebra_checkpoint_disk }} permissions: - contents: 'read' - id-token: 'write' + contents: read + id-token: write steps: - - uses: actions/checkout@v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 #v4.2.2 with: persist-credentials: false fetch-depth: 0 - name: Inject slug/short variables - uses: rlespinasse/github-slug-action@v5 + uses: rlespinasse/github-slug-action@c33ff65466c58d57e4d796f88bb1ae0ff26ee453 #v5.2.0 with: short-length: 7 # Setup gcloud CLI - name: Authenticate to Google Cloud id: auth - uses: google-github-actions/auth@v2.1.10 + uses: google-github-actions/auth@7c6bc770dae815cd3e89ee6cdf493a5fab2cc093 #v3.0.0 with: - workload_identity_provider: '${{ vars.GCP_WIF }}' - service_account: '${{ vars.GCP_DEPLOYMENTS_SA }}' + workload_identity_provider: "${{ vars.GCP_WIF }}" + service_account: "${{ vars.GCP_DEPLOYMENTS_SA }}" - name: Set up Cloud SDK - uses: google-github-actions/setup-gcloud@v2.1.4 + uses: google-github-actions/setup-gcloud@aa5489c8933f4cc7a4f7d45035b3b1440c9c10db #v3.0.1 # Performs formatting on disk name components. # @@ -83,7 +86,7 @@ jobs: # # Disk image names in GCP are limited to 63 characters, so we need to limit # branch names to 12 characters. - # Check the `create-state-image` in `sub-deploy-integration-tests-gcp.yml` for more details in image names. + # Check the `create-state-image` in `zfnd-deploy-integration-tests-gcp.yml` for more details in image names. # More info: https://cloud.google.com/compute/docs/naming-resources#resource-name-format # # Passes ${{ inputs.network }} to subsequent steps using $NETWORK env variable. 
diff --git a/.github/workflows/zizmor.yml b/.github/workflows/zizmor.yml index 2846f9898e3..cd40a6f656b 100644 --- a/.github/workflows/zizmor.yml +++ b/.github/workflows/zizmor.yml @@ -2,30 +2,25 @@ name: GitHub Actions Security Analysis with zizmor 🌈 on: push: - branches: ["main"] + branches: [main] pull_request: - branches: ["*"] + branches: ["**"] + +permissions: {} jobs: zizmor: - name: zizmor latest via Cargo runs-on: ubuntu-latest permissions: - contents: read security-events: write + contents: read + actions: read steps: - name: Checkout repository - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: persist-credentials: false - name: Install the latest version of uv uses: astral-sh/setup-uv@4db96194c378173c656ce18a155ffc14a9fc4355 # v5.2.2 - name: Run zizmor 🌈 - run: uvx zizmor --format sarif . > results.sarif - env: - GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} - - name: Upload SARIF file - uses: github/codeql-action/upload-sarif@fca7ace96b7d713c7035871441bd52efbe39e27e # v3.28.19 - with: - sarif_file: results.sarif - category: zizmor + uses: zizmorcore/zizmor-action@e673c3917a1aef3c65c972347ed84ccd013ecda4 # v0.2.0 diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 781286f213c..ac2b72a346b 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -23,6 +23,9 @@ PRs are welcome for small and large changes, but please don't make large PRs without coordinating with us via the [issue tracker](https://github.com/ZcashFoundation/zebra/issues) or [Discord](https://discord.gg/yVNhQwQE68). This helps increase development coordination and makes PRs easier to merge. Low-effort PRs, including but not limited to fixing typos and grammatical corrections, will generally be redone by us to dissuade metric farming. +Issues in this repository may not need to be addressed here; Zebra is meant to exclude any new features that are not strictly needed by the validator node. 
It may be desirable to implement features that support wallets, +block explorers, and other clients, particularly features that require database format changes, in [Zaino](https://github.com/zingolabs/zaino), [Zallet](https://github.com/zcash/wallet), or [librustzcash](https://github.com/zcash/librustzcash/). + Check out the [help wanted][hw] or [good first issue][gfi] labels if you're looking for a place to get started! diff --git a/Cargo.lock b/Cargo.lock index 0f8069de03d..f579442d3e9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -129,12 +129,6 @@ version = "0.2.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" -[[package]] -name = "android-tzdata" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0" - [[package]] name = "android_system_properties" version = "0.1.5" @@ -989,15 +983,14 @@ dependencies = [ [[package]] name = "chrono" -version = "0.4.41" +version = "0.4.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c469d952047f47f91b68d1cba3f10d63c11d73e4636f24f08daf0278abf01c4d" +checksum = "145052bdd345b87320e369255277e3fb5152762ad123a901ef5c262dd38fe8d2" dependencies = [ - "android-tzdata", "iana-time-zone", "num-traits 0.2.19", "serde", - "windows-link 0.1.3", + "windows-link 0.2.0", ] [[package]] @@ -1936,6 +1929,12 @@ dependencies = [ "num-traits 0.1.43", ] +[[package]] +name = "env_home" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7f84e12ccf0a7ddc17a6c41c93326024c42920d7ee630d04950e6926645c0fe" + [[package]] name = "env_logger" version = "0.7.1" @@ -1976,12 +1975,12 @@ dependencies = [ [[package]] name = "errno" -version = "0.3.13" +version = "0.3.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"778e2ac28f6c47af28e4907f13ffd1e1ddbd400980a9abd7c8df189bf578a5ad" +checksum = "39cab71617ae0d63f51a36d69f866391735b51691dbda63cf6f96d042b63efeb" dependencies = [ "libc", - "windows-sys 0.60.2", + "windows-sys 0.61.0", ] [[package]] @@ -3579,17 +3578,6 @@ dependencies = [ "serde", ] -[[package]] -name = "jsonrpc" -version = "0.18.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3662a38d341d77efecb73caf01420cfa5aa63c0253fd7bc05289ef9f6616e1bf" -dependencies = [ - "base64 0.13.1", - "serde", - "serde_json", -] - [[package]] name = "jsonrpsee" version = "0.24.9" @@ -4235,9 +4223,9 @@ dependencies = [ [[package]] name = "linux-raw-sys" -version = "0.9.4" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd945864f07fe9f5371a27ad7b52a172b4b499999f1d97574c9fa68373937e12" +checksum = "df1d3c3b53da64cf5760482273a98e575c651a67eec7f77df96b5b642de8f039" [[package]] name = "litemap" @@ -6225,15 +6213,15 @@ dependencies = [ [[package]] name = "rustix" -version = "1.0.8" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11181fbabf243db407ef8df94a6ce0b2f9a733bd8be4ad02b4eda9602296cac8" +checksum = "cd15f8a2c5551a84d56efdc1cd049089e409ac19a3072d5037a17fd70719ff3e" dependencies = [ "bitflags 2.9.4", "errno", "libc", "linux-raw-sys", - "windows-sys 0.60.2", + "windows-sys 0.61.0", ] [[package]] @@ -7559,7 +7547,6 @@ dependencies = [ "pin-project", "rand 0.8.5", "rayon", - "tinyvec", "tokio", "tokio-test", "tokio-util", @@ -7852,9 +7839,9 @@ checksum = "75b844d17643ee918803943289730bec8aac480150456169e647ed0b576ba539" [[package]] name = "unicode-ident" -version = "1.0.18" +version = "1.0.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512" +checksum = "f63a545481291138910575129486daeaf8ac54aee4387fe7906919f7830c7d9d" [[package]] name = "unicode-segmentation" @@ 
-8261,6 +8248,17 @@ dependencies = [ "rustls-pki-types", ] +[[package]] +name = "which" +version = "8.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3fabb953106c3c8eea8306e4393700d7657561cb43122571b172bbfb7c7ba1d" +dependencies = [ + "env_home", + "rustix", + "winsafe", +] + [[package]] name = "widestring" version = "1.2.0" @@ -8694,6 +8692,12 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "winsafe" +version = "0.0.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d135d17ab770252ad95e9a872d365cf3090e3be864a34ab46f48555993efc904" + [[package]] name = "wit-bindgen" version = "0.45.1" @@ -8916,9 +8920,9 @@ dependencies = [ [[package]] name = "zcash_primitives" -version = "0.24.0" +version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a0c26140f2e6b760dcf052d22bd01f1a8773cdefb650ff5829430798a21b85b" +checksum = "76362b79e432bde2f22b3defcb6919d4fb50446985997169da3cc3ae4035a6d9" dependencies = [ "bip32", "blake2b_simd", @@ -9095,7 +9099,6 @@ dependencies = [ "static_assertions", "tempfile", "thiserror 2.0.16", - "tinyvec", "tokio", "tracing", "uint 0.10.0", @@ -9140,7 +9143,6 @@ dependencies = [ "serde", "spandoc", "thiserror 2.0.16", - "tinyvec", "tokio", "tower 0.4.13", "tower-batch-control", @@ -9293,6 +9295,7 @@ dependencies = [ "proptest", "prost 0.14.1", "rand 0.8.5", + "sapling-crypto", "semver", "serde", "serde_json", @@ -9306,6 +9309,7 @@ dependencies = [ "tonic-reflection", "tower 0.4.13", "tracing", + "which", "zcash_address", "zcash_keys", "zcash_primitives", @@ -9342,6 +9346,7 @@ dependencies = [ "chrono", "color-eyre", "crossbeam-channel", + "derive-getters", "dirs", "elasticsearch", "futures", @@ -9366,13 +9371,13 @@ dependencies = [ "regex", "rlimit", "rocksdb", + "sapling-crypto", "semver", "serde", "serde_json", "spandoc", "tempfile", "thiserror 2.0.16", - "tinyvec", "tokio", "tower 0.4.13", "tracing", @@ -9400,7 +9405,6 @@ 
dependencies = [ "spandoc", "tempfile", "thiserror 2.0.16", - "tinyvec", "tokio", "tower 0.4.13", "tracing", @@ -9416,7 +9420,6 @@ dependencies = [ "hex", "indexmap 2.11.0", "itertools 0.14.0", - "jsonrpc", "quote", "rand 0.8.5", "regex", @@ -9427,7 +9430,6 @@ dependencies = [ "structopt", "syn 2.0.106", "thiserror 2.0.16", - "tinyvec", "tokio", "tracing-error", "tracing-subscriber", @@ -9484,7 +9486,6 @@ dependencies = [ "tempfile", "thiserror 2.0.16", "thread-priority", - "tinyvec", "tokio", "tokio-stream", "toml 0.9.5", @@ -9516,18 +9517,18 @@ dependencies = [ [[package]] name = "zerocopy" -version = "0.8.26" +version = "0.8.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1039dd0d3c310cf05de012d8a39ff557cb0d23087fd44cad61df08fc31907a2f" +checksum = "0894878a5fa3edfd6da3f88c4805f4c8558e2b996227a3d864f47fe11e38282c" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.8.26" +version = "0.8.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ecf5b4cc5364572d7f4c329661bcc82724222973f2cab6f050a4e5c22f75181" +checksum = "88d2b8d9c68ad2b9e4340d7832716a4d21a22a1154777ad56ea55c51a9cf3831" dependencies = [ "proc-macro2", "quote", diff --git a/Cargo.toml b/Cargo.toml index 73ba50af335..b28ba68fa02 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -165,6 +165,7 @@ x25519-dalek = "2.0.1" zcash_note_encryption = "0.4.1" zcash_script = "0.3.2" config = { version = "0.15.14", features = ["toml"] } +which = "8.0.0" [workspace.metadata.release] @@ -295,3 +296,11 @@ panic = "abort" # - add "-flto=thin" to all C/C++ code builds # - see https://doc.rust-lang.org/rustc/linker-plugin-lto.html#cc-code-as-a-dependency-in-rust lto = "thin" + + +[workspace.lints.rust] +# The linter should ignore these expected config flags/values +unexpected_cfgs = { level = "warn", check-cfg = [ + 'cfg(tokio_unstable)', # Used by tokio-console + 'cfg(zcash_unstable, values("zfuture", "nu6.1", "nu7"))' # Used in 
Zebra and librustzcash +] } diff --git a/book/src/dev/continuous-delivery.md b/book/src/dev/continuous-delivery.md index c977de01fc3..530d3453659 100644 --- a/book/src/dev/continuous-delivery.md +++ b/book/src/dev/continuous-delivery.md @@ -25,4 +25,4 @@ A single instance can also be deployed, on an on-demand basis, if required, when long-lived instance, with specific changes, is needed to be tested in the Mainnet with the same infrastructure used for CI & CD. -Further validations of the actual process can be done on our continuous delivery [workflow file](https://github.com/ZcashFoundation/zebra/blob/main/.github/workflows/cd-deploy-nodes-gcp.yml). +Further validations of the actual process can be done on our continuous delivery [workflow file](https://github.com/ZcashFoundation/zebra/blob/main/.github/workflows/zfnd-deploy-nodes-gcp.yml). diff --git a/book/src/dev/continuous-integration.md b/book/src/dev/continuous-integration.md index 7e2a452d03c..2382737bd40 100644 --- a/book/src/dev/continuous-integration.md +++ b/book/src/dev/continuous-integration.md @@ -165,7 +165,7 @@ This means that the entire workflow must be re-run when a single test fails. 1. Look for the earliest job that failed, and find the earliest failure. For example, this failure doesn't tell us what actually went wrong: -> Error: The template is not valid. ZcashFoundation/zebra/.github/workflows/sub-build-docker-image.yml@8bbc5b21c97fafc83b70fbe7f3b5e9d0ffa19593 (Line: 52, Col: 19): Error reading JToken from JsonReader. Path '', line 0, position 0. +> Error: The template is not valid. ZcashFoundation/zebra/.github/workflows/zfnd-build-docker-image.yml@8bbc5b21c97fafc83b70fbe7f3b5e9d0ffa19593 (Line: 52, Col: 19): Error reading JToken from JsonReader. Path '', line 0, position 0. 
https://github.com/ZcashFoundation/zebra/runs/8181760421?check_suite_focus=true#step:41:4 @@ -240,7 +240,7 @@ To fix duplicate dependencies, follow these steps until the duplicate dependenci If the Docker cached state disks are full, increase the disk sizes in: - [deploy-gcp-tests.yml](https://github.com/ZcashFoundation/zebra/blob/main/.github/workflows/deploy-gcp-tests.yml) -- [cd-deploy-nodes-gcp.yml](https://github.com/ZcashFoundation/zebra/blob/main/.github/workflows/cd-deploy-nodes-gcp.yml) +- [zfnd-deploy-nodes-gcp.yml](https://github.com/ZcashFoundation/zebra/blob/main/.github/workflows/zfnd-deploy-nodes-gcp.yml) If the GitHub Actions disks are full, follow these steps until the errors are fixed: diff --git a/book/src/dev/zebra-checkpoints.md b/book/src/dev/zebra-checkpoints.md index 84363be4f07..bf22eebd536 100644 --- a/book/src/dev/zebra-checkpoints.md +++ b/book/src/dev/zebra-checkpoints.md @@ -5,4 +5,4 @@ Developers should run this tool every few months to add new checkpoints to Zebra. (By default, Zebra uses these checkpoints to sync to the chain tip.) 
-For more information on how to run this program visit [Zebra checkpoints README](https://github.com/ZcashFoundation/zebra/tree/main/zebra-consensus/src/checkpoint/README.md) +For more information on how to run this program visit [Zebra checkpoints README](https://github.com/ZcashFoundation/zebra/tree/main/zebra-chain/src/parameters/checkpoint/README.md) diff --git a/deny.toml b/deny.toml index 156c0037b51..4710f3eb263 100644 --- a/deny.toml +++ b/deny.toml @@ -46,12 +46,13 @@ skip = [ # ron 0.7.x is required by insta; 0.8.x is required by config { name = "ron", version = "=0.7.1" }, - # Older base64 pulled in via ron/config; newer via networking stack - { name = "base64", version = "=0.21.7" }, - # Keep older toml_datetime required via jsonrpsee-proc-macros -> toml_edit 0.22.x # We can't upgrade jsonrpsee yet to 0.25.x (as it introduces breaking changes) { name = "toml_datetime", version = "=0.6.11" }, + + # Multiple windows-sys versions via different dependencies + # 0.60.2 via clap/anstream; 0.61.0 via dirs-sys + { name = "windows-sys", version = "=0.60.2" } ] @@ -72,6 +73,8 @@ skip-tree = [ # wait for console-subscriber and tower to update hdrhistogram. # also wait for ron to update insta, and wait for tonic update. { name = "base64", version = "=0.13.1" }, + # Multiple base64 versions through different dependencies + { name = "base64", version = "=0.21.7" }, # wait for abscissa_core to update toml { name = "toml", version = "=0.5.11" }, @@ -88,7 +91,6 @@ skip-tree = [ # wait for zebra to update tower { name = "tower", version = "=0.4.13" }, - { name = "hashbrown", version = "=0.14.5" }, # wait for zebra to update vergen { name = "thiserror", version = "=1.0.69" }, diff --git a/docker/entrypoint.sh b/docker/entrypoint.sh index cc57b2f658c..620fa26fe51 100755 --- a/docker/entrypoint.sh +++ b/docker/entrypoint.sh @@ -8,6 +8,11 @@ set -eo pipefail +# Default cache directories for Zebra components. 
+# These use the config-rs ZEBRA_SECTION__KEY format and will be picked up +# by zebrad's configuration system automatically. +: "${ZEBRA_STATE__CACHE_DIR:=${HOME}/.cache/zebra}" +: "${ZEBRA_RPC__COOKIE_DIR:=${HOME}/.cache/zebra}" # Use gosu to drop privileges and execute the given command as the specified UID:GID exec_as_user() { @@ -45,6 +50,11 @@ create_owned_directory() { fi } +# Create and own cache and config directories based on ZEBRA_* environment variables +[[ -n ${ZEBRA_STATE__CACHE_DIR} ]] && create_owned_directory "${ZEBRA_STATE__CACHE_DIR}" +[[ -n ${ZEBRA_RPC__COOKIE_DIR} ]] && create_owned_directory "${ZEBRA_RPC__COOKIE_DIR}" +[[ -n ${ZEBRA_TRACING__LOG_FILE} ]] && create_owned_directory "$(dirname "${ZEBRA_TRACING__LOG_FILE}")" + # --- Optional config file support --- # If provided, pass a config file path through to zebrad via CONFIG_FILE_PATH. diff --git a/docs/decisions/devops/004-improve-docker-conf-tests.md b/docs/decisions/devops/004-improve-docker-conf-tests.md index f1332c15ef5..f21b609d7bb 100644 --- a/docs/decisions/devops/004-improve-docker-conf-tests.md +++ b/docs/decisions/devops/004-improve-docker-conf-tests.md @@ -10,7 +10,7 @@ story: Need a scalable and maintainable way to test various Docker image configu ## Context and Problem Statement -Currently, tests verifying Zebra's Docker image configuration (based on environment variables processed by `docker/entrypoint.sh`) are implemented using a reusable workflow (`sub-test-zebra-config.yml`). However, the _invocation_ of these tests, including the specific scenarios (environment variables, grep patterns), is duplicated and scattered across different workflows, notably the CI workflow (`sub-ci-unit-tests-docker.yml`) and the CD workflow (`cd-deploy-nodes-gcp.yml`). +Currently, tests verifying Zebra's Docker image configuration (based on environment variables processed by `docker/entrypoint.sh`) are implemented using a reusable workflow (`sub-test-zebra-config.yml`). 
However, the _invocation_ of these tests, including the specific scenarios (environment variables, grep patterns), is duplicated and scattered across different workflows, notably the CI workflow (`sub-ci-unit-tests-docker.yml`) and the CD workflow (`zfnd-deploy-nodes-gcp.yml`). This leads to: @@ -32,7 +32,7 @@ We need a centralized, scalable, and maintainable approach to define and run the ## Considered Options -1. **Status Quo:** Continue defining and invoking configuration tests within the respective CI (`sub-ci-unit-tests-docker.yml`) and CD (`cd-deploy-nodes-gcp.yml`) workflows, using `sub-test-zebra-config.yml` for the core run/grep logic. +1. **Status Quo:** Continue defining and invoking configuration tests within the respective CI (`sub-ci-unit-tests-docker.yml`) and CD (`zfnd-deploy-nodes-gcp.yml`) workflows, using `sub-test-zebra-config.yml` for the core run/grep logic. 2. **Modify and Extend `sub-test-zebra-config.yml`:** Convert the existing `sub-test-zebra-config.yml` workflow. Remove its specific test inputs (`test_id`, `grep_patterns`, `test_variables`). Add multiple jobs _inside_ this workflow, each hardcoding a specific test scenario (run container + grep logs). The workflow would only take `docker_image` as input. 3. **Use `docker-compose.test.yml`:** Define test scenarios as services within a dedicated `docker-compose.test.yml` file. The CI/CD workflows would call a script (like `sub-test-zebra-config.yml`) that uses `docker compose` to run specific services and performs log grepping. 4. **Create a _New_ Dedicated Reusable Workflow:** Create a _new_ reusable workflow (e.g., `sub-test-all-configs.yml`) that takes a Docker image digest as input and contains multiple jobs, each defining and executing a specific configuration test scenario (run container + grep logs). 
@@ -75,7 +75,7 @@ The `sub-test-zebra-config.yml` workflow will be modified to remove its specific - Easier addition of new configuration test scenarios by adding jobs to `sub-test-zebra-config.yml`. - Clearer separation between image building and configuration testing logic. - `sub-test-zebra-config.yml` will fundamentally change its structure and inputs. -- CI/CD workflows (`cd-deploy-nodes-gcp.yml`, parent of `sub-ci-unit-tests-docker.yml`) will need modification to remove old test jobs and add calls to the modified reusable workflow, passing the correct image digest. +- CI/CD workflows (`zfnd-deploy-nodes-gcp.yml`, parent of `sub-ci-unit-tests-docker.yml`) will need modification to remove old test jobs and add calls to the modified reusable workflow, passing the correct image digest. - Debugging might involve tracing execution across workflow calls and within the multiple jobs of `sub-test-zebra-config.yml`. ## More Information @@ -83,7 +83,7 @@ The `sub-test-zebra-config.yml` workflow will be modified to remove its specific - GitHub Actions: Reusing Workflows: [https://docs.github.com/en/actions/using-workflows/reusing-workflows](https://docs.github.com/en/actions/using-workflows/reusing-workflows) - Relevant files: - `.github/workflows/sub-test-zebra-config.yml` (To be modified) - - `.github/workflows/cd-deploy-nodes-gcp.yml` (To be modified) + - `.github/workflows/zfnd-deploy-nodes-gcp.yml` (To be modified) - `.github/workflows/sub-ci-unit-tests-docker.yml` (To be modified) - `docker/entrypoint.sh` (Script processing configurations) - `docker/.env` (Example environment variables) diff --git a/dprint.json b/dprint.json new file mode 100644 index 00000000000..dc615dbf49b --- /dev/null +++ b/dprint.json @@ -0,0 +1,13 @@ +{ + "toml": { + "useTabs": false, + "cargo.applyConventions": false, + "indentWidth": 4 + }, + "includes": [ + "**/*.toml" + ], + "plugins": [ + "https://plugins.dprint.dev/toml-0.6.4.wasm" + ] +} diff --git a/tower-batch-control/Cargo.toml 
b/tower-batch-control/Cargo.toml index 0f7856ce4fb..23658420dc7 100644 --- a/tower-batch-control/Cargo.toml +++ b/tower-batch-control/Cargo.toml @@ -34,10 +34,6 @@ tracing-futures = { workspace = true } [dev-dependencies] color-eyre = { workspace = true } -# This is a transitive dependency via color-eyre. -# Enable a feature that makes tinyvec compile much faster. -tinyvec = { workspace = true, features = ["rustc_1_55"] } - ed25519-zebra = { workspace = true } rand = { workspace = true } @@ -48,5 +44,5 @@ tower-test = { workspace = true } zebra-test = { path = "../zebra-test/", version = "1.0.1" } -[lints.rust] -unexpected_cfgs = { level = "warn", check-cfg = ['cfg(tokio_unstable)'] } +[lints] +workspace = true diff --git a/tower-batch-control/src/layer.rs b/tower-batch-control/src/layer.rs index 42454619a20..f91f3c1170e 100644 --- a/tower-batch-control/src/layer.rs +++ b/tower-batch-control/src/layer.rs @@ -67,7 +67,7 @@ where impl fmt::Debug for BatchLayer { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("BufferLayer") + f.debug_struct("BatchLayer") .field("max_items_weight_in_batch", &self.max_items_weight_in_batch) .field("max_batches", &self.max_batches) .field("max_latency", &self.max_latency) diff --git a/tower-batch-control/src/service.rs b/tower-batch-control/src/service.rs index 7516b729964..46c39e94bfe 100644 --- a/tower-batch-control/src/service.rs +++ b/tower-batch-control/src/service.rs @@ -302,7 +302,7 @@ where } fn call(&mut self, request: Request) -> Self::Future { - tracing::trace!("sending request to buffer worker"); + tracing::trace!("sending request to batch worker"); let _permit = self .permit .take() diff --git a/tower-batch-control/src/worker.rs b/tower-batch-control/src/worker.rs index da76fcd02c5..c1fa26b2b61 100644 --- a/tower-batch-control/src/worker.rs +++ b/tower-batch-control/src/worker.rs @@ -292,7 +292,7 @@ where /// Register an inner service failure. 
/// /// The underlying service failed when we called `poll_ready` on it with the given `error`. We - /// need to communicate this to all the `Buffer` handles. To do so, we wrap up the error in + /// need to communicate this to all the `Batch` handles. To do so, we wrap up the error in /// an `Arc`, send that `Arc` to all pending requests, and store it so that subsequent /// requests will also fail with the same error. fn failed(&mut self, error: crate::BoxError) { diff --git a/tower-fallback/Cargo.toml b/tower-fallback/Cargo.toml index 548560cd527..ea552bedc89 100644 --- a/tower-fallback/Cargo.toml +++ b/tower-fallback/Cargo.toml @@ -25,3 +25,6 @@ tracing = { workspace = true } tokio = { workspace = true, features = ["full", "tracing", "test-util"] } zebra-test = { path = "../zebra-test/", version = "1.0.1" } + +[lints] +workspace = true diff --git a/zebra-chain/Cargo.toml b/zebra-chain/Cargo.toml index cbf43f5a711..aa9ff2bc090 100644 --- a/zebra-chain/Cargo.toml +++ b/zebra-chain/Cargo.toml @@ -139,9 +139,6 @@ criterion = { workspace = true, features = ["html_reports"] } # Error Handling & Formatting color-eyre = { workspace = true } -# This is a transitive dependency via color-eyre. -# Enable a feature that makes tinyvec compile much faster. -tinyvec = { workspace = true, features = ["rustc_1_55"] } spandoc = { workspace = true } tracing = { workspace = true } @@ -164,3 +161,6 @@ required-features = ["bench"] [[bench]] name = "redpallas" harness = false + +[lints] +workspace = true diff --git a/zebra-chain/src/block.rs b/zebra-chain/src/block.rs index 388761ad735..b46804ff05d 100644 --- a/zebra-chain/src/block.rs +++ b/zebra-chain/src/block.rs @@ -170,8 +170,11 @@ impl Block { .flat_map(|transaction| transaction.sprout_note_commitments()) } - /// Access the [sapling note commitments](jubjub::Fq) from all transactions in this block. 
- pub fn sapling_note_commitments(&self) -> impl Iterator { + /// Access the [sapling note commitments](`sapling_crypto::note::ExtractedNoteCommitment`) + /// from all transactions in this block. + pub fn sapling_note_commitments( + &self, + ) -> impl Iterator { self.transactions .iter() .flat_map(|transaction| transaction.sapling_note_commitments()) diff --git a/zebra-chain/src/fmt/time.rs b/zebra-chain/src/fmt/time.rs index 7b86998b8af..22da59acd0f 100644 --- a/zebra-chain/src/fmt/time.rs +++ b/zebra-chain/src/fmt/time.rs @@ -5,14 +5,19 @@ use std::time::Duration; /// The minimum amount of time displayed with only seconds (no milliseconds). pub const MIN_SECONDS_ONLY_TIME: Duration = Duration::from_secs(5); +/// The minimum amount of time displayed with only seconds and milliseconds (no microseconds). +pub const MIN_MS_ONLY_TIME: Duration = Duration::from_millis(5); + /// Returns a human-friendly formatted string for the whole number of seconds in `duration`. pub fn duration_short(duration: impl Into) -> String { let duration = duration.into(); if duration >= MIN_SECONDS_ONLY_TIME { humantime_seconds(duration) - } else { + } else if duration >= MIN_MS_ONLY_TIME { humantime_milliseconds(duration) + } else { + humantime_microseconds(duration) } } @@ -42,3 +47,17 @@ pub fn humantime_milliseconds(duration: impl Into) -> String { format!("{duration}") } + +/// Returns a human-friendly formatted string for the whole number of microseconds in `duration`. +pub fn humantime_microseconds(duration: impl Into) -> String { + let duration = duration.into(); + + // Truncate fractional seconds. 
+ let duration_secs = Duration::from_secs(duration.as_secs()); + let duration_millis = Duration::from_millis(duration.subsec_millis().into()); + let duration_micros = Duration::from_micros(duration.subsec_micros().into()); + + let duration = humantime::format_duration(duration_secs + duration_millis + duration_micros); + + format!("{duration}") +} diff --git a/zebra-chain/src/parallel/tree.rs b/zebra-chain/src/parallel/tree.rs index 4f35dd44617..94cbc7d9bc3 100644 --- a/zebra-chain/src/parallel/tree.rs +++ b/zebra-chain/src/parallel/tree.rs @@ -22,7 +22,7 @@ pub struct NoteCommitmentTrees { pub sapling: Arc, /// The sapling note commitment subtree. - pub sapling_subtree: Option>, + pub sapling_subtree: Option>, /// The orchard note commitment tree. pub orchard: Arc, @@ -156,7 +156,7 @@ impl NoteCommitmentTrees { ) -> Result< ( Arc, - Option<(NoteCommitmentSubtreeIndex, sapling::tree::Node)>, + Option<(NoteCommitmentSubtreeIndex, sapling_crypto::Node)>, ), NoteCommitmentTreeError, > { diff --git a/zebra-chain/src/parameters.rs b/zebra-chain/src/parameters.rs index ebfa401d7b7..a5f0022fd94 100644 --- a/zebra-chain/src/parameters.rs +++ b/zebra-chain/src/parameters.rs @@ -12,6 +12,7 @@ //! Typically, consensus parameters are accessed via a function that takes a //! `Network` and `block::Height`. +pub mod checkpoint; pub mod constants; mod genesis; mod network; diff --git a/zebra-chain/src/parameters/checkpoint.rs b/zebra-chain/src/parameters/checkpoint.rs new file mode 100644 index 00000000000..b2cb1bd5649 --- /dev/null +++ b/zebra-chain/src/parameters/checkpoint.rs @@ -0,0 +1,4 @@ +//! Types, logic, and constants related to Zebra's list of checkpointed block heights and hashes. 
+ +pub mod constants; +pub mod list; diff --git a/zebra-consensus/src/checkpoint/README.md b/zebra-chain/src/parameters/checkpoint/README.md similarity index 82% rename from zebra-consensus/src/checkpoint/README.md rename to zebra-chain/src/parameters/checkpoint/README.md index 92767a1c16f..ac875e9665e 100644 --- a/zebra-consensus/src/checkpoint/README.md +++ b/zebra-chain/src/parameters/checkpoint/README.md @@ -2,8 +2,8 @@ Zebra validates [settled network upgrades](https://zips.z.cash/protocol/protocol.pdf#blockchain) using a list of `Mainnet` and `Testnet` block hash checkpoints: -- [Mainnet checkpoints](https://github.com/ZcashFoundation/zebra/blob/main/zebra-consensus/src/checkpoint/main-checkpoints.txt) -- [Testnet checkpoints](https://github.com/ZcashFoundation/zebra/blob/main/zebra-consensus/src/checkpoint/test-checkpoints.txt) +- [Mainnet checkpoints](https://github.com/ZcashFoundation/zebra/blob/main/zebra-chain/src/parameters/checkpoint/main-checkpoints.txt) +- [Testnet checkpoints](https://github.com/ZcashFoundation/zebra/blob/main/zebra-chain/src/parameters/checkpoint/test-checkpoints.txt) Using these checkpoints increases Zebra's security against some attacks. diff --git a/zebra-node-services/src/constants.rs b/zebra-chain/src/parameters/checkpoint/constants.rs similarity index 100% rename from zebra-node-services/src/constants.rs rename to zebra-chain/src/parameters/checkpoint/constants.rs diff --git a/zebra-consensus/src/checkpoint/list.rs b/zebra-chain/src/parameters/checkpoint/list.rs similarity index 89% rename from zebra-consensus/src/checkpoint/list.rs rename to zebra-chain/src/parameters/checkpoint/list.rs index 923f13004e7..9560e8b033e 100644 --- a/zebra-consensus/src/checkpoint/list.rs +++ b/zebra-chain/src/parameters/checkpoint/list.rs @@ -5,19 +5,21 @@ //! Checkpoints can be used to verify their ancestors, by chaining backwards //! to another checkpoint, via each block's parent block hash. 
-#[cfg(test)] -mod tests; - -use crate::BoxError; - use std::{ collections::{BTreeMap, HashSet}, ops::RangeBounds, str::FromStr, + sync::Arc, }; -use zebra_chain::block; -use zebra_chain::parameters::Network; +use crate::{ + block::{self, Height}, + parameters::{Network, NetworkUpgrade}, + BoxError, +}; + +#[cfg(test)] +mod tests; /// The hard-coded checkpoints for mainnet, generated using the /// `zebra-checkpoints` tool. @@ -41,18 +43,11 @@ const MAINNET_CHECKPOINTS: &str = include_str!("main-checkpoints.txt"); /// /// See [`MAINNET_CHECKPOINTS`] for detailed `zebra-checkpoints` usage /// information. -const TESTNET_CHECKPOINTS: &str = include_str!("test-checkpoints.txt"); +pub(crate) const TESTNET_CHECKPOINTS: &str = include_str!("test-checkpoints.txt"); -/// Network methods related to checkpoints -pub trait ParameterCheckpoint { +impl Network { /// Returns the hash for the genesis block in `network`. - fn genesis_hash(&self) -> zebra_chain::block::Hash; - /// Returns the hard-coded checkpoint list for `network`. - fn checkpoint_list(&self) -> CheckpointList; -} - -impl ParameterCheckpoint for Network { - fn genesis_hash(&self) -> zebra_chain::block::Hash { + pub fn genesis_hash(&self) -> block::Hash { match self { // zcash-cli getblockhash 0 Network::Mainnet => "00040fe8ec8471911baa1db1266ea15dd06b4a8a5c453883c000b031973dce08" @@ -62,12 +57,11 @@ impl ParameterCheckpoint for Network { Network::Testnet(params) => params.genesis_hash(), } } - - fn checkpoint_list(&self) -> CheckpointList { - let (checkpoints_for_network, should_fallback_to_genesis_hash_as_checkpoint) = match self { - Network::Mainnet => (MAINNET_CHECKPOINTS, false), - Network::Testnet(params) if params.is_default_testnet() => (TESTNET_CHECKPOINTS, false), - Network::Testnet(_params) => ("", true), + /// Returns the hard-coded checkpoint list for `network`. 
+ pub fn checkpoint_list(&self) -> Arc { + let checkpoints_for_network = match self { + Network::Mainnet => MAINNET_CHECKPOINTS, + Network::Testnet(params) => return params.checkpoints(), }; // Check that the list starts with the correct genesis block and parses checkpoint list. @@ -76,26 +70,30 @@ impl ParameterCheckpoint for Network { .next() .map(checkpoint_height_and_hash); - match first_checkpoint_height { + let checkpoints = match first_checkpoint_height { // parse calls CheckpointList::from_list Some(Ok((block::Height(0), hash))) if hash == self.genesis_hash() => { checkpoints_for_network .parse() .expect("hard-coded checkpoint list parses and validates") } - _ if should_fallback_to_genesis_hash_as_checkpoint => { - CheckpointList::from_list([(block::Height(0), self.genesis_hash())]) - .expect("hard-coded checkpoint list parses and validates") - } Some(Ok((block::Height(0), _))) => { panic!("the genesis checkpoint does not match the {self} genesis hash") } Some(Ok(_)) => panic!("checkpoints must start at the genesis block height 0"), Some(Err(err)) => panic!("{err}"), + + None if NetworkUpgrade::Canopy.activation_height(self) == Some(Height(1)) => { + CheckpointList::from_list([(block::Height(0), self.genesis_hash())]) + .expect("hard-coded checkpoint list parses and validates") + } None => panic!( - "there must be at least one checkpoint on default networks, for the genesis block" + "Zebra requires checkpoints on networks which do not activate \ + the Canopy network upgrade at block height 1" ), - } + }; + + Arc::new(checkpoints) } } @@ -149,7 +147,7 @@ impl CheckpointList { /// Checkpoint heights and checkpoint hashes must be unique. /// There must be a checkpoint for a genesis block at block::Height 0. /// (All other checkpoints are optional.) 
- pub(crate) fn from_list( + pub fn from_list( list: impl IntoIterator, ) -> Result { // BTreeMap silently ignores duplicates, so we count the checkpoints @@ -237,6 +235,11 @@ impl CheckpointList { self.0.iter() } + /// Returns an iterator over all the checkpoints, in increasing height order. + pub fn iter_cloned(&self) -> impl Iterator + '_ { + self.iter().map(|(&height, &hash)| (height, hash)) + } + /// Returns the checkpoint at `height`, as a zero-based index. /// If `height` is not a checkpoint height, returns the checkpoint immediately before that height. pub fn prev_checkpoint_index(&self, height: block::Height) -> usize { diff --git a/zebra-consensus/src/checkpoint/list/tests.rs b/zebra-chain/src/parameters/checkpoint/list/tests.rs similarity index 98% rename from zebra-consensus/src/checkpoint/list/tests.rs rename to zebra-chain/src/parameters/checkpoint/list/tests.rs index 1df05327d64..4e2a12cc3af 100644 --- a/zebra-consensus/src/checkpoint/list/tests.rs +++ b/zebra-chain/src/parameters/checkpoint/list/tests.rs @@ -4,12 +4,14 @@ use std::sync::Arc; use num_integer::div_ceil; -use zebra_chain::{ +use crate::{ block::{Block, HeightDiff, MAX_BLOCK_BYTES}, - parameters::Network::*, + parameters::{ + checkpoint::constants::{MAX_CHECKPOINT_BYTE_COUNT, MAX_CHECKPOINT_HEIGHT_GAP}, + Network::*, + }, serialization::ZcashDeserialize, }; -use zebra_node_services::constants::{MAX_CHECKPOINT_BYTE_COUNT, MAX_CHECKPOINT_HEIGHT_GAP}; use super::*; diff --git a/zebra-consensus/src/checkpoint/main-checkpoints.txt b/zebra-chain/src/parameters/checkpoint/main-checkpoints.txt similarity index 100% rename from zebra-consensus/src/checkpoint/main-checkpoints.txt rename to zebra-chain/src/parameters/checkpoint/main-checkpoints.txt diff --git a/zebra-consensus/src/checkpoint/test-checkpoints.txt b/zebra-chain/src/parameters/checkpoint/test-checkpoints.txt similarity index 100% rename from zebra-consensus/src/checkpoint/test-checkpoints.txt rename to 
zebra-chain/src/parameters/checkpoint/test-checkpoints.txt diff --git a/zebra-chain/src/parameters/network.rs b/zebra-chain/src/parameters/network.rs index 59876d171d9..076afec8c03 100644 --- a/zebra-chain/src/parameters/network.rs +++ b/zebra-chain/src/parameters/network.rs @@ -146,6 +146,8 @@ impl std::fmt::Debug for Network { .debug_struct("Regtest") .field("activation_heights", params.activation_heights()) .field("funding_streams", params.funding_streams()) + .field("lockbox_disbursements", ¶ms.lockbox_disbursements()) + .field("checkpoints", ¶ms.checkpoints()) .finish(), Self::Testnet(params) if params.is_default_testnet() => { write!(f, "{self}") diff --git a/zebra-chain/src/parameters/network/testnet.rs b/zebra-chain/src/parameters/network/testnet.rs index 205c2c7e94c..c4a6d262a01 100644 --- a/zebra-chain/src/parameters/network/testnet.rs +++ b/zebra-chain/src/parameters/network/testnet.rs @@ -5,6 +5,7 @@ use crate::{ amount::{Amount, NonNegative}, block::{self, Height, HeightDiff}, parameters::{ + checkpoint::list::{CheckpointList, TESTNET_CHECKPOINTS}, constants::{magics, SLOW_START_INTERVAL, SLOW_START_SHIFT}, network_upgrade::TESTNET_ACTIVATION_HEIGHTS, subsidy::{ @@ -327,6 +328,10 @@ pub struct ConfiguredActivationHeights { /// Activation height for `NU7` network upgrade. #[serde(rename = "NU7")] pub nu7: Option, + /// Activation height for `ZFuture` network upgrade. 
+ #[serde(rename = "ZFuture")] + #[cfg(zcash_unstable = "zfuture")] + pub zfuture: Option, } impl ConfiguredActivationHeights { @@ -344,6 +349,8 @@ impl ConfiguredActivationHeights { nu6, nu6_1, nu7, + #[cfg(zcash_unstable = "zfuture")] + zfuture, } = self; let overwinter = overwinter.or(before_overwinter).or(Some(1)); @@ -363,10 +370,43 @@ impl ConfiguredActivationHeights { nu6, nu6_1, nu7, + #[cfg(zcash_unstable = "zfuture")] + zfuture, } } } +/// Configurable checkpoints, either a path to a checkpoints file, a "default" keyword to indicate +/// that Zebra should use the default Testnet checkpoints, or a list of block heights and hashes. +#[derive(Clone, Debug, PartialEq, Eq, serde::Serialize, serde::Deserialize)] +#[serde(untagged)] +pub enum ConfiguredCheckpoints { + /// A boolean indicating whether Zebra should use the default Testnet checkpoints. + Default(bool), + /// A path to a checkpoints file to be used as Zebra's checkpoints. + Path(std::path::PathBuf), + /// Directly configured block heights and hashes to be used as Zebra's checkpoints. + HeightsAndHashes(Vec<(block::Height, block::Hash)>), +} + +impl Default for ConfiguredCheckpoints { + fn default() -> Self { + Self::Default(false) + } +} + +impl From> for ConfiguredCheckpoints { + fn from(value: Arc) -> Self { + Self::HeightsAndHashes(value.iter_cloned().collect()) + } +} + +impl From for ConfiguredCheckpoints { + fn from(value: bool) -> Self { + Self::Default(value) + } +} + /// Builder for the [`Parameters`] struct. #[derive(Clone, Debug, Eq, PartialEq)] pub struct ParametersBuilder { @@ -398,6 +438,8 @@ pub struct ParametersBuilder { post_blossom_halving_interval: HeightDiff, /// Expected one-time lockbox disbursement outputs in NU6.1 activation block coinbase for this network lockbox_disbursements: Vec<(String, Amount)>, + /// Checkpointed block hashes and heights for this network. 
+ checkpoints: Arc, } impl Default for ParametersBuilder { @@ -433,6 +475,10 @@ impl Default for ParametersBuilder { .iter() .map(|(addr, amount)| (addr.to_string(), *amount)) .collect(), + checkpoints: TESTNET_CHECKPOINTS + .parse() + .map(Arc::new) + .expect("must be able to parse checkpoints"), } } } @@ -500,6 +546,8 @@ impl ParametersBuilder { nu6, nu6_1, nu7, + #[cfg(zcash_unstable = "zfuture")] + zfuture, }: ConfiguredActivationHeights, ) -> Self { use NetworkUpgrade::*; @@ -512,20 +560,28 @@ impl ParametersBuilder { // // These must be in order so that later network upgrades overwrite prior ones // if multiple network upgrades are configured with the same activation height. - let activation_heights: BTreeMap<_, _> = before_overwinter - .into_iter() - .map(|h| (h, BeforeOverwinter)) - .chain(overwinter.into_iter().map(|h| (h, Overwinter))) - .chain(sapling.into_iter().map(|h| (h, Sapling))) - .chain(blossom.into_iter().map(|h| (h, Blossom))) - .chain(heartwood.into_iter().map(|h| (h, Heartwood))) - .chain(canopy.into_iter().map(|h| (h, Canopy))) - .chain(nu5.into_iter().map(|h| (h, Nu5))) - .chain(nu6.into_iter().map(|h| (h, Nu6))) - .chain(nu6_1.into_iter().map(|h| (h, Nu6_1))) - .chain(nu7.into_iter().map(|h| (h, Nu7))) - .map(|(h, nu)| (h.try_into().expect("activation height must be valid"), nu)) - .collect(); + let activation_heights: BTreeMap<_, _> = { + let activation_heights = before_overwinter + .into_iter() + .map(|h| (h, BeforeOverwinter)) + .chain(overwinter.into_iter().map(|h| (h, Overwinter))) + .chain(sapling.into_iter().map(|h| (h, Sapling))) + .chain(blossom.into_iter().map(|h| (h, Blossom))) + .chain(heartwood.into_iter().map(|h| (h, Heartwood))) + .chain(canopy.into_iter().map(|h| (h, Canopy))) + .chain(nu5.into_iter().map(|h| (h, Nu5))) + .chain(nu6.into_iter().map(|h| (h, Nu6))) + .chain(nu6_1.into_iter().map(|h| (h, Nu6_1))) + .chain(nu7.into_iter().map(|h| (h, Nu7))); + + #[cfg(zcash_unstable = "zfuture")] + let activation_heights = + 
activation_heights.chain(zfuture.into_iter().map(|h| (h, ZFuture))); + + activation_heights + .map(|(h, nu)| (h.try_into().expect("activation height must be valid"), nu)) + .collect() + }; let network_upgrades: Vec<_> = activation_heights.iter().map(|(_h, &nu)| nu).collect(); @@ -663,6 +719,33 @@ impl ParametersBuilder { self } + /// Sets the checkpoints for the network as the provided [`ConfiguredCheckpoints`]. + pub fn with_checkpoints(mut self, checkpoints: impl Into) -> Self { + self.checkpoints = Arc::new(match checkpoints.into() { + ConfiguredCheckpoints::Default(true) => TESTNET_CHECKPOINTS + .parse() + .expect("checkpoints file format must be valid"), + ConfiguredCheckpoints::Default(false) => { + CheckpointList::from_list([(block::Height(0), self.genesis_hash)]) + .expect("must parse checkpoints") + } + ConfiguredCheckpoints::Path(path_buf) => { + let Ok(raw_checkpoints_str) = std::fs::read_to_string(&path_buf) else { + panic!("could not read file at configured checkpoints file path: {path_buf:?}"); + }; + + raw_checkpoints_str.parse().unwrap_or_else(|err| { + panic!("could not parse checkpoints at the provided path: {path_buf:?}, err: {err}") + }) + } + ConfiguredCheckpoints::HeightsAndHashes(items) => { + CheckpointList::from_list(items).expect("configured checkpoints must be valid") + } + }); + + self + } + /// Converts the builder to a [`Parameters`] struct fn finish(self) -> Parameters { let Self { @@ -679,6 +762,7 @@ impl ParametersBuilder { pre_blossom_halving_interval, post_blossom_halving_interval, lockbox_disbursements, + checkpoints, } = self; Parameters { network_name, @@ -694,6 +778,7 @@ impl ParametersBuilder { pre_blossom_halving_interval, post_blossom_halving_interval, lockbox_disbursements, + checkpoints, } } @@ -712,6 +797,17 @@ impl ParametersBuilder { check_funding_stream_address_period(fs, &network); } + // Final check that the configured checkpoints are valid for this network. 
+ assert_eq!( + network.checkpoint_list().hash(Height(0)), + Some(network.genesis_hash()), + "first checkpoint hash must match genesis hash" + ); + assert!( + network.checkpoint_list().max_height() >= network.mandatory_checkpoint_height(), + "checkpoints must be provided for block heights below the mandatory checkpoint height" + ); + network } @@ -731,6 +827,7 @@ impl ParametersBuilder { pre_blossom_halving_interval, post_blossom_halving_interval, lockbox_disbursements, + checkpoints: _, } = Self::default(); self.activation_heights == activation_heights @@ -755,6 +852,10 @@ pub struct RegtestParameters { pub activation_heights: ConfiguredActivationHeights, /// Configured funding streams pub funding_streams: Option>, + /// Expected one-time lockbox disbursement outputs in NU6.1 activation block coinbase for Regtest + pub lockbox_disbursements: Option>, + /// Configured checkpointed block heights and hashes. + pub checkpoints: Option, } impl From for RegtestParameters { @@ -796,6 +897,8 @@ pub struct Parameters { post_blossom_halving_interval: HeightDiff, /// Expected one-time lockbox disbursement outputs in NU6.1 activation block coinbase for this network lockbox_disbursements: Vec<(String, Amount)>, + /// List of checkpointed block heights and hashes + checkpoints: Arc, } impl Default for Parameters { @@ -821,6 +924,8 @@ impl Parameters { RegtestParameters { activation_heights, funding_streams, + lockbox_disbursements, + checkpoints, }: RegtestParameters, ) -> Self { let parameters = Self::build() @@ -835,7 +940,8 @@ impl Parameters { .with_activation_heights(activation_heights.for_regtest()) .with_halving_interval(PRE_BLOSSOM_REGTEST_HALVING_INTERVAL) .with_funding_streams(funding_streams.unwrap_or_default()) - .with_lockbox_disbursements(Vec::new()); + .with_lockbox_disbursements(lockbox_disbursements.unwrap_or_default()) + .with_checkpoints(checkpoints.unwrap_or_default()); Self { network_name: "Regtest".to_string(), @@ -870,7 +976,8 @@ impl Parameters { 
should_allow_unshielded_coinbase_spends, pre_blossom_halving_interval, post_blossom_halving_interval, - lockbox_disbursements, + lockbox_disbursements: _, + checkpoints: _, } = Self::new_regtest(Default::default()); self.network_name == network_name @@ -883,7 +990,6 @@ impl Parameters { == should_allow_unshielded_coinbase_spends && self.pre_blossom_halving_interval == pre_blossom_halving_interval && self.post_blossom_halving_interval == post_blossom_halving_interval - && self.lockbox_disbursements == lockbox_disbursements } /// Returns the network name @@ -968,6 +1074,11 @@ impl Parameters { }) .collect() } + + /// Returns the checkpoints for this network. + pub fn checkpoints(&self) -> Arc { + self.checkpoints.clone() + } } impl Network { diff --git a/zebra-chain/src/parameters/network/tests/vectors.rs b/zebra-chain/src/parameters/network/tests/vectors.rs index 786f294aa6a..c6c2f05359e 100644 --- a/zebra-chain/src/parameters/network/tests/vectors.rs +++ b/zebra-chain/src/parameters/network/tests/vectors.rs @@ -489,6 +489,7 @@ fn check_configured_funding_stream_regtest() { configured_pre_nu6_funding_streams.clone(), configured_post_nu6_funding_streams.clone(), ]), + ..Default::default() }); let expected_pre_nu6_funding_streams = diff --git a/zebra-chain/src/parameters/network_upgrade.rs b/zebra-chain/src/parameters/network_upgrade.rs index 327f7f18150..274844ef7a1 100644 --- a/zebra-chain/src/parameters/network_upgrade.rs +++ b/zebra-chain/src/parameters/network_upgrade.rs @@ -542,7 +542,10 @@ impl From for NetworkUpgrade { zcash_protocol::consensus::NetworkUpgrade::Nu5 => Self::Nu5, zcash_protocol::consensus::NetworkUpgrade::Nu6 => Self::Nu6, zcash_protocol::consensus::NetworkUpgrade::Nu6_1 => Self::Nu6_1, - // zcash_protocol::consensus::NetworkUpgrade::Nu7 => Self::Nu7, + #[cfg(zcash_unstable = "nu7")] + zcash_protocol::consensus::NetworkUpgrade::Nu7 => Self::Nu7, + #[cfg(zcash_unstable = "zfuture")] + zcash_protocol::consensus::NetworkUpgrade::ZFuture => 
Self::ZFuture, } } } diff --git a/zebra-chain/src/primitives/zcash_history.rs b/zebra-chain/src/primitives/zcash_history.rs index bf348b56f82..cfb3073399a 100644 --- a/zebra-chain/src/primitives/zcash_history.rs +++ b/zebra-chain/src/primitives/zcash_history.rs @@ -278,20 +278,24 @@ impl Version for zcash_history::V1 { | NetworkUpgrade::Nu5 | NetworkUpgrade::Nu6 | NetworkUpgrade::Nu6_1 - | NetworkUpgrade::Nu7 => zcash_history::NodeData { - consensus_branch_id: branch_id.into(), - subtree_commitment: block_hash, - start_time: time, - end_time: time, - start_target: target, - end_target: target, - start_sapling_root: sapling_root, - end_sapling_root: sapling_root, - subtree_total_work: work, - start_height: height.0 as u64, - end_height: height.0 as u64, - sapling_tx: sapling_tx_count, - }, + | NetworkUpgrade::Nu7 => {} + #[cfg(zcash_unstable = "zfuture")] + NetworkUpgrade::ZFuture => {} + }; + + zcash_history::NodeData { + consensus_branch_id: branch_id.into(), + subtree_commitment: block_hash, + start_time: time, + end_time: time, + start_target: target, + end_target: target, + start_sapling_root: sapling_root, + end_sapling_root: sapling_root, + subtree_total_work: work, + start_height: height.0 as u64, + end_height: height.0 as u64, + sapling_tx: sapling_tx_count, } } } diff --git a/zebra-chain/src/sapling.rs b/zebra-chain/src/sapling.rs index d9b30ba430c..77550a70686 100644 --- a/zebra-chain/src/sapling.rs +++ b/zebra-chain/src/sapling.rs @@ -24,9 +24,7 @@ pub mod shielded_data; pub mod spend; pub mod tree; -pub use commitment::{ - CommitmentRandomness, NotSmallOrderValueCommitment, NoteCommitment, ValueCommitment, -}; +pub use commitment::{CommitmentRandomness, ValueCommitment}; pub use keys::Diversifier; pub use note::{EncryptedNote, Note, Nullifier, WrappedNoteKey}; pub use output::{Output, OutputInTransactionV4, OutputPrefixInTransactionV5}; diff --git a/zebra-chain/src/sapling/arbitrary.rs b/zebra-chain/src/sapling/arbitrary.rs index 
7d102965799..7323307e8d5 100644 --- a/zebra-chain/src/sapling/arbitrary.rs +++ b/zebra-chain/src/sapling/arbitrary.rs @@ -1,7 +1,7 @@ //! Randomised data generation for sapling types. use group::Group; -use jubjub::{AffinePoint, ExtendedPoint}; +use jubjub::ExtendedPoint; use rand::SeedableRng; use rand_chacha::ChaChaRng; @@ -11,8 +11,8 @@ use crate::primitives::Groth16Proof; use super::{ keys::{self, ValidatingKey}, - note, tree, FieldNotPresent, NoteCommitment, Output, OutputInTransactionV4, PerSpendAnchor, - SharedAnchor, Spend, + note, tree, FieldNotPresent, Output, OutputInTransactionV4, PerSpendAnchor, SharedAnchor, + Spend, }; impl Arbitrary for Spend { @@ -28,7 +28,7 @@ impl Arbitrary for Spend { ) .prop_map(|(per_spend_anchor, nullifier, rk, proof, sig_bytes)| Self { per_spend_anchor, - cv: ExtendedPoint::generator().try_into().unwrap(), + cv: ExtendedPoint::generator().into(), nullifier, rk, zkproof: proof, @@ -56,7 +56,7 @@ impl Arbitrary for Spend { ) .prop_map(|(nullifier, rk, proof, sig_bytes)| Self { per_spend_anchor: FieldNotPresent, - cv: ExtendedPoint::generator().try_into().unwrap(), + cv: ExtendedPoint::generator().into(), nullifier, rk, zkproof: proof, @@ -82,8 +82,9 @@ impl Arbitrary for Output { any::(), ) .prop_map(|(enc_ciphertext, out_ciphertext, zkproof)| Self { - cv: ExtendedPoint::generator().try_into().unwrap(), - cm_u: NoteCommitment(AffinePoint::identity()).extract_u(), + cv: ExtendedPoint::generator().into(), + cm_u: sapling_crypto::note::ExtractedNoteCommitment::from_bytes(&[0u8; 32]) + .unwrap(), ephemeral_key: keys::EphemeralPublicKey(ExtendedPoint::generator().into()), enc_ciphertext, out_ciphertext, @@ -136,12 +137,25 @@ impl Arbitrary for tree::Root { type Strategy = BoxedStrategy; } -impl Arbitrary for tree::Node { +impl Arbitrary for tree::legacy::Node { type Parameters = (); fn arbitrary_with(_args: Self::Parameters) -> Self::Strategy { - jubjub_base_strat().prop_map(tree::Node::from).boxed() + jubjub_base_strat() + 
.prop_map(tree::legacy::Node::from) + .boxed() } type Strategy = BoxedStrategy; } + +impl From for tree::legacy::Node { + fn from(x: jubjub::Fq) -> Self { + let node = sapling_crypto::Node::from_bytes(x.to_bytes()); + if node.is_some().into() { + tree::legacy::Node(node.unwrap()) + } else { + sapling_crypto::Node::from_bytes([0; 32]).unwrap().into() + } + } +} diff --git a/zebra-chain/src/sapling/commitment.rs b/zebra-chain/src/sapling/commitment.rs index a4a85e5dd77..edf1fddc8b9 100644 --- a/zebra-chain/src/sapling/commitment.rs +++ b/zebra-chain/src/sapling/commitment.rs @@ -1,390 +1,49 @@ //! Note and value commitments. -use std::{fmt, io}; +use std::io; -use bitvec::prelude::*; use hex::{FromHex, FromHexError, ToHex}; -use jubjub::ExtendedPoint; -use lazy_static::lazy_static; -use rand_core::{CryptoRng, RngCore}; -use crate::{ - amount::{Amount, NonNegative}, - error::{NoteCommitmentError, RandError}, - serialization::{ - serde_helpers, ReadZcashExt, SerializationError, ZcashDeserialize, ZcashSerialize, - }, -}; - -use super::keys::{find_group_hash, Diversifier, TransmissionKey}; - -pub mod pedersen_hashes; +use crate::serialization::{serde_helpers, SerializationError, ZcashDeserialize, ZcashSerialize}; #[cfg(test)] mod test_vectors; -use pedersen_hashes::*; - -/// Generates a random scalar from the scalar field 𝔽_{r_𝕁}. -/// -/// The prime order subgroup 𝕁^(r) is the order-r_𝕁 subgroup of 𝕁 that consists -/// of the points whose order divides r. This function is useful when generating -/// the uniform distribution on 𝔽_{r_𝕁} needed for Sapling commitment schemes' -/// trapdoor generators. 
-/// -/// -pub fn generate_trapdoor(csprng: &mut T) -> Result -where - T: RngCore + CryptoRng, -{ - let mut bytes = [0u8; 64]; - csprng - .try_fill_bytes(&mut bytes) - .map_err(|_| RandError::FillBytes)?; - // Fr::from_bytes_wide() reduces the input modulo r via Fr::from_u512() - Ok(jubjub::Fr::from_bytes_wide(&bytes)) -} - /// The randomness used in the Pedersen Hash for note commitment. +/// +/// Equivalent to `sapling_crypto::note::CommitmentRandomness`, +/// but we can't use it directly as it is not public. #[derive(Copy, Clone, Debug, PartialEq, Eq)] pub struct CommitmentRandomness(jubjub::Fr); -/// Note commitments for the output notes. -#[derive(Clone, Copy, Deserialize, PartialEq, Eq, Serialize)] -pub struct NoteCommitment(#[serde(with = "serde_helpers::AffinePoint")] pub jubjub::AffinePoint); - -impl fmt::Debug for NoteCommitment { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("NoteCommitment") - .field("u", &hex::encode(self.0.get_u().to_bytes())) - .field("v", &hex::encode(self.0.get_v().to_bytes())) - .finish() - } -} - -impl From for NoteCommitment { - fn from(extended_point: jubjub::ExtendedPoint) -> Self { - Self(jubjub::AffinePoint::from(extended_point)) - } -} - -impl From for [u8; 32] { - fn from(cm: NoteCommitment) -> [u8; 32] { - cm.0.to_bytes() - } -} - -impl TryFrom<[u8; 32]> for NoteCommitment { - type Error = &'static str; - - fn try_from(bytes: [u8; 32]) -> Result { - let possible_point = jubjub::AffinePoint::from_bytes(bytes); - - if possible_point.is_some().into() { - Ok(Self(possible_point.unwrap())) - } else { - Err("Invalid jubjub::AffinePoint value") - } - } -} - -impl NoteCommitment { - /// Generate a new _NoteCommitment_ and the randomness used to create it. - /// - /// We return the randomness because it is needed to construct a _Note_, - /// before it is encrypted as part of an _Output Description_. This is a - /// higher level function that calls `NoteCommit^Sapling_rcm` internally. 
- /// - /// NoteCommit^Sapling_rcm (g*_d , pk*_d , v) := - /// WindowedPedersenCommit_rcm([1; 6] || I2LEBSP_64(v) || g*_d || pk*_d) - /// - /// - #[allow(non_snake_case)] - pub fn new( - csprng: &mut T, - diversifier: Diversifier, - transmission_key: TransmissionKey, - value: Amount, - ) -> Result<(CommitmentRandomness, Self), NoteCommitmentError> - where - T: RngCore + CryptoRng, - { - // s as in the argument name for WindowedPedersenCommit_r(s) - let mut s: BitVec = BitVec::new(); - - // Prefix - s.append(&mut bitvec![1; 6]); - - // Jubjub repr_J canonical byte encoding - // https://zips.z.cash/protocol/protocol.pdf#jubjub - // - // The `TryFrom` impls for the `jubjub::*Point`s handles - // calling `DiversifyHash` implicitly. - - let g_d_bytes = jubjub::AffinePoint::try_from(diversifier) - .map_err(|_| NoteCommitmentError::InvalidDiversifier)? - .to_bytes(); - - let pk_d_bytes = <[u8; 32]>::from(transmission_key); - let v_bytes = value.to_bytes(); - - s.extend(g_d_bytes); - s.extend(pk_d_bytes); - s.extend(v_bytes); - - let rcm = CommitmentRandomness(generate_trapdoor(csprng)?); - - Ok(( - rcm, - NoteCommitment::from(windowed_pedersen_commitment(rcm.0, &s)), - )) - } - - /// Hash Extractor for Jubjub (?) - /// - /// - pub fn extract_u(&self) -> jubjub::Fq { - self.0.get_u() - } -} - -/// A Homomorphic Pedersen commitment to the value of a note. -/// -/// This can be used as an intermediate value in some computations. For the -/// type actually stored in Spend and Output descriptions, see -/// [`NotSmallOrderValueCommitment`]. +/// A wrapper for the `sapling_crypto::value::ValueCommitment` type. /// -/// -#[derive(Clone, Copy, Deserialize, PartialEq, Eq, Serialize)] -#[cfg_attr(any(test, feature = "proptest-impl"), derive(Default))] -pub struct ValueCommitment(#[serde(with = "serde_helpers::AffinePoint")] jubjub::AffinePoint); +/// We need the wrapper to derive Serialize, Deserialize and Equality. 
+#[derive(Clone, Debug, Deserialize, Serialize)] +pub struct ValueCommitment( + #[serde(with = "serde_helpers::ValueCommitment")] pub sapling_crypto::value::ValueCommitment, +); -impl<'a> std::ops::Add<&'a ValueCommitment> for ValueCommitment { - type Output = Self; - - fn add(self, rhs: &'a ValueCommitment) -> Self::Output { - self + *rhs - } -} - -impl std::ops::Add for ValueCommitment { - type Output = Self; - - fn add(self, rhs: ValueCommitment) -> Self::Output { - let value = self.0.to_extended() + rhs.0.to_extended(); - ValueCommitment(value.into()) - } -} - -impl std::ops::AddAssign for ValueCommitment { - fn add_assign(&mut self, rhs: ValueCommitment) { - *self = *self + rhs - } -} - -impl fmt::Debug for ValueCommitment { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("ValueCommitment") - .field("u", &hex::encode(self.0.get_u().to_bytes())) - .field("v", &hex::encode(self.0.get_v().to_bytes())) - .finish() - } -} - -impl From for ValueCommitment { - /// Convert a Jubjub point into a ValueCommitment. 
- fn from(extended_point: jubjub::ExtendedPoint) -> Self { - Self(jubjub::AffinePoint::from(extended_point)) - } -} - -/// LEBS2OSP256(repr_J(cv)) -/// -/// -/// -impl From for [u8; 32] { - fn from(cm: ValueCommitment) -> [u8; 32] { - cm.0.to_bytes() - } -} - -impl<'a> std::ops::Sub<&'a ValueCommitment> for ValueCommitment { - type Output = Self; - - fn sub(self, rhs: &'a ValueCommitment) -> Self::Output { - self - *rhs - } -} - -impl std::ops::Sub for ValueCommitment { - type Output = Self; - - fn sub(self, rhs: ValueCommitment) -> Self::Output { - ValueCommitment((self.0.to_extended() - rhs.0.to_extended()).into()) - } -} - -impl std::ops::SubAssign for ValueCommitment { - fn sub_assign(&mut self, rhs: ValueCommitment) { - *self = *self - rhs; - } -} - -impl std::iter::Sum for ValueCommitment { - fn sum(iter: I) -> Self - where - I: Iterator, - { - iter.fold( - ValueCommitment(jubjub::AffinePoint::identity()), - std::ops::Add::add, - ) - } -} - -/// LEBS2OSP256(repr_J(cv)) -/// -/// -/// -impl TryFrom<[u8; 32]> for ValueCommitment { - type Error = &'static str; - - fn try_from(bytes: [u8; 32]) -> Result { - let possible_point = jubjub::AffinePoint::from_bytes(bytes); - - if possible_point.is_some().into() { - let point = possible_point.unwrap(); - Ok(ExtendedPoint::from(point).into()) - } else { - Err("Invalid jubjub::AffinePoint value") - } +impl PartialEq for ValueCommitment { + fn eq(&self, other: &Self) -> bool { + self.0.as_inner() == other.0.as_inner() } } +impl Eq for ValueCommitment {} impl ValueCommitment { - /// Generate a new _ValueCommitment_. - /// - /// - pub fn randomized(csprng: &mut T, value: Amount) -> Result - where - T: RngCore + CryptoRng, - { - let rcv = generate_trapdoor(csprng)?; - - Ok(Self::new(rcv, value)) - } - - /// Generate a new _ValueCommitment_ from an existing _rcv_ on a _value_. 
- /// - /// - #[allow(non_snake_case)] - pub fn new(rcv: jubjub::Fr, value: Amount) -> Self { - let v = jubjub::Fr::from(value); - Self::from(*V * v + *R * rcv) - } -} - -lazy_static! { - static ref V: ExtendedPoint = find_group_hash(*b"Zcash_cv", b"v"); - static ref R: ExtendedPoint = find_group_hash(*b"Zcash_cv", b"r"); -} - -/// A Homomorphic Pedersen commitment to the value of a note, used in Spend and -/// Output descriptions. -/// -/// Elements that are of small order are not allowed. This is a separate -/// consensus rule and not intrinsic of value commitments; which is why this -/// type exists. -/// -/// This is denoted by `cv` in the specification. -/// -/// -/// -#[derive(Debug, Clone, Copy, Deserialize, PartialEq, Eq, Serialize)] -#[cfg_attr(any(test, feature = "proptest-impl"), derive(Default))] -pub struct NotSmallOrderValueCommitment(ValueCommitment); - -impl NotSmallOrderValueCommitment { /// Return the hash bytes in big-endian byte-order suitable for printing out byte by byte. /// /// Zebra displays commitment value in big-endian byte-order, /// following the convention set by zcashd. pub fn bytes_in_display_order(&self) -> [u8; 32] { - let mut reversed_bytes = self.0 .0.to_bytes(); + let mut reversed_bytes = self.0.to_bytes(); reversed_bytes.reverse(); reversed_bytes } } -impl From<&NotSmallOrderValueCommitment> for [u8; 32] { - fn from(cv: &NotSmallOrderValueCommitment) -> [u8; 32] { - cv.0.into() - } -} - -impl TryFrom for NotSmallOrderValueCommitment { - type Error = &'static str; - - /// Convert a ValueCommitment into a NotSmallOrderValueCommitment. - /// - /// Returns an error if the point is of small order. - /// - /// # Consensus - /// - /// > cv and rk [MUST NOT be of small order][1], i.e. \[h_J\]cv MUST NOT be 𝒪_J - /// > and \[h_J\]rk MUST NOT be 𝒪_J. - /// - /// > cv and epk [MUST NOT be of small order][2], i.e. \[h_J\]cv MUST NOT be 𝒪_J - /// > and \[ℎ_J\]epk MUST NOT be 𝒪_J. 
- /// - /// [1]: https://zips.z.cash/protocol/protocol.pdf#spenddesc - /// [2]: https://zips.z.cash/protocol/protocol.pdf#outputdesc - fn try_from(value_commitment: ValueCommitment) -> Result { - if value_commitment.0.is_small_order().into() { - Err("jubjub::AffinePoint value for Sapling ValueCommitment is of small order") - } else { - Ok(Self(value_commitment)) - } - } -} - -impl TryFrom for NotSmallOrderValueCommitment { - type Error = &'static str; - - /// Convert a Jubjub point into a NotSmallOrderValueCommitment. - fn try_from(extended_point: jubjub::ExtendedPoint) -> Result { - ValueCommitment::from(extended_point).try_into() - } -} - -impl From for ValueCommitment { - fn from(cv: NotSmallOrderValueCommitment) -> Self { - cv.0 - } -} - -impl From for jubjub::AffinePoint { - fn from(cv: NotSmallOrderValueCommitment) -> Self { - cv.0 .0 - } -} - -impl ZcashSerialize for NotSmallOrderValueCommitment { - fn zcash_serialize(&self, mut writer: W) -> Result<(), io::Error> { - writer.write_all(&<[u8; 32]>::from(self.0)[..])?; - Ok(()) - } -} - -impl ZcashDeserialize for NotSmallOrderValueCommitment { - fn zcash_deserialize(mut reader: R) -> Result { - let vc = ValueCommitment::try_from(reader.read_32_bytes()?) 
- .map_err(SerializationError::Parse)?; - vc.try_into().map_err(SerializationError::Parse) - } -} - -impl ToHex for &NotSmallOrderValueCommitment { +impl ToHex for &ValueCommitment { fn encode_hex>(&self) -> T { self.bytes_in_display_order().encode_hex() } @@ -394,7 +53,7 @@ impl ToHex for &NotSmallOrderValueCommitment { } } -impl FromHex for NotSmallOrderValueCommitment { +impl FromHex for ValueCommitment { type Error = FromHexError; fn from_hex>(hex: T) -> Result { @@ -408,194 +67,61 @@ impl FromHex for NotSmallOrderValueCommitment { } } -#[cfg(test)] -mod tests { - - use std::ops::Neg; - - use super::*; - - #[test] - fn pedersen_hash_to_point_test_vectors() { - let _init_guard = zebra_test::init(); - - const D: [u8; 8] = *b"Zcash_PH"; - - for test_vector in test_vectors::TEST_VECTORS.iter() { - let result = jubjub::AffinePoint::from(pedersen_hash_to_point( - D, - &test_vector.input_bits.clone(), - )); - - assert_eq!(result, test_vector.output_point); - } - } - - #[test] - fn add() { - let _init_guard = zebra_test::init(); - - let identity = ValueCommitment(jubjub::AffinePoint::identity()); - - let g = ValueCommitment(jubjub::AffinePoint::from_raw_unchecked( - jubjub::Fq::from_raw([ - 0xe4b3_d35d_f1a7_adfe, - 0xcaf5_5d1b_29bf_81af, - 0x8b0f_03dd_d60a_8187, - 0x62ed_cbb8_bf37_87c8, - ]), - jubjub::Fq::from_raw([ - 0x0000_0000_0000_000b, - 0x0000_0000_0000_0000, - 0x0000_0000_0000_0000, - 0x0000_0000_0000_0000, - ]), - )); - - assert_eq!(identity + g, g); - } - - #[test] - fn add_assign() { - let _init_guard = zebra_test::init(); - - let mut identity = ValueCommitment(jubjub::AffinePoint::identity()); - - let g = ValueCommitment(jubjub::AffinePoint::from_raw_unchecked( - jubjub::Fq::from_raw([ - 0xe4b3_d35d_f1a7_adfe, - 0xcaf5_5d1b_29bf_81af, - 0x8b0f_03dd_d60a_8187, - 0x62ed_cbb8_bf37_87c8, - ]), - jubjub::Fq::from_raw([ - 0x0000_0000_0000_000b, - 0x0000_0000_0000_0000, - 0x0000_0000_0000_0000, - 0x0000_0000_0000_0000, - ]), - )); +#[cfg(any(test, feature = 
"proptest-impl"))] +impl From for ValueCommitment { + /// Convert a Jubjub point into a ValueCommitment. + /// + /// # Panics + /// + /// Panics if the given point does not correspond to a valid ValueCommitment. + fn from(extended_point: jubjub::ExtendedPoint) -> Self { + let bytes = jubjub::AffinePoint::from(extended_point).to_bytes(); - identity += g; - let new_g = identity; + let value_commitment = + sapling_crypto::value::ValueCommitment::from_bytes_not_small_order(&bytes) + .into_option() + .expect("invalid ValueCommitment bytes"); - assert_eq!(new_g, g); + ValueCommitment(value_commitment) } +} - #[test] - fn sub() { - let _init_guard = zebra_test::init(); - - let g_point = jubjub::AffinePoint::from_raw_unchecked( - jubjub::Fq::from_raw([ - 0xe4b3_d35d_f1a7_adfe, - 0xcaf5_5d1b_29bf_81af, - 0x8b0f_03dd_d60a_8187, - 0x62ed_cbb8_bf37_87c8, - ]), - jubjub::Fq::from_raw([ - 0x0000_0000_0000_000b, - 0x0000_0000_0000_0000, - 0x0000_0000_0000_0000, - 0x0000_0000_0000_0000, - ]), - ); - - let identity = ValueCommitment(jubjub::AffinePoint::identity()); +impl ZcashDeserialize for sapling_crypto::value::ValueCommitment { + fn zcash_deserialize(mut reader: R) -> Result { + let mut buf = [0u8; 32]; + reader.read_exact(&mut buf)?; - let g = ValueCommitment(g_point); + let value_commitment: Option = + sapling_crypto::value::ValueCommitment::from_bytes_not_small_order(&buf).into_option(); - assert_eq!(identity - g, ValueCommitment(g_point.neg())); + value_commitment.ok_or(SerializationError::Parse("invalid ValueCommitment bytes")) } +} - #[test] - fn sub_assign() { - let _init_guard = zebra_test::init(); - - let g_point = jubjub::AffinePoint::from_raw_unchecked( - jubjub::Fq::from_raw([ - 0xe4b3_d35d_f1a7_adfe, - 0xcaf5_5d1b_29bf_81af, - 0x8b0f_03dd_d60a_8187, - 0x62ed_cbb8_bf37_87c8, - ]), - jubjub::Fq::from_raw([ - 0x0000_0000_0000_000b, - 0x0000_0000_0000_0000, - 0x0000_0000_0000_0000, - 0x0000_0000_0000_0000, - ]), - ); - - let mut identity = 
ValueCommitment(jubjub::AffinePoint::identity()); - - let g = ValueCommitment(g_point); - - identity -= g; - let new_g = identity; - - assert_eq!(new_g, ValueCommitment(g_point.neg())); +impl ZcashDeserialize for ValueCommitment { + fn zcash_deserialize(reader: R) -> Result { + let value_commitment = sapling_crypto::value::ValueCommitment::zcash_deserialize(reader)?; + Ok(Self(value_commitment)) } +} - #[test] - fn sum() { - let _init_guard = zebra_test::init(); - - let g_point = jubjub::AffinePoint::from_raw_unchecked( - jubjub::Fq::from_raw([ - 0xe4b3_d35d_f1a7_adfe, - 0xcaf5_5d1b_29bf_81af, - 0x8b0f_03dd_d60a_8187, - 0x62ed_cbb8_bf37_87c8, - ]), - jubjub::Fq::from_raw([ - 0x0000_0000_0000_000b, - 0x0000_0000_0000_0000, - 0x0000_0000_0000_0000, - 0x0000_0000_0000_0000, - ]), - ); - - let g = ValueCommitment(g_point); - let other_g = ValueCommitment(g_point); - - let sum: ValueCommitment = vec![g, other_g].into_iter().sum(); - - let doubled_g = ValueCommitment(g_point.to_extended().double().into()); - - assert_eq!(sum, doubled_g); +impl ZcashSerialize for ValueCommitment { + fn zcash_serialize(&self, mut writer: W) -> Result<(), io::Error> { + writer.write_all(&self.0.to_bytes())?; + Ok(()) } +} - #[test] - fn value_commitment_hex_roundtrip() { - use hex::{FromHex, ToHex}; - - let _init_guard = zebra_test::init(); - - let g_point = jubjub::AffinePoint::from_raw_unchecked( - jubjub::Fq::from_raw([ - 0xe4b3_d35d_f1a7_adfe, - 0xcaf5_5d1b_29bf_81af, - 0x8b0f_03dd_d60a_8187, - 0x62ed_cbb8_bf37_87c8, - ]), - jubjub::Fq::from_raw([ - 0x0000_0000_0000_000b, - 0x0000_0000_0000_0000, - 0x0000_0000_0000_0000, - 0x0000_0000_0000_0000, - ]), - ); - - let value_commitment = ValueCommitment(g_point); - let original = NotSmallOrderValueCommitment::try_from(value_commitment) - .expect("constructed point must not be small order"); - - let hex_str = (&original).encode_hex::(); +impl ZcashDeserialize for sapling_crypto::note::ExtractedNoteCommitment { + fn zcash_deserialize(mut 
reader: R) -> Result { + let mut buf = [0u8; 32]; + reader.read_exact(&mut buf)?; - let decoded = NotSmallOrderValueCommitment::from_hex(&hex_str) - .expect("hex string should decode successfully"); + let extracted_note_commitment: Option = + sapling_crypto::note::ExtractedNoteCommitment::from_bytes(&buf).into_option(); - assert_eq!(original, decoded); + extracted_note_commitment.ok_or(SerializationError::Parse( + "invalid ExtractedNoteCommitment bytes", + )) } } diff --git a/zebra-chain/src/sapling/commitment/pedersen_hashes.rs b/zebra-chain/src/sapling/commitment/pedersen_hashes.rs deleted file mode 100644 index 4c033435f9e..00000000000 --- a/zebra-chain/src/sapling/commitment/pedersen_hashes.rs +++ /dev/null @@ -1,112 +0,0 @@ -//! Pedersen hash functions and helpers. - -use bitvec::prelude::*; - -use super::super::keys::find_group_hash; - -/// I_i -/// -/// Expects i to be 1-indexed from the loop it's called in. -/// -/// -#[allow(non_snake_case)] -fn I_i(domain: [u8; 8], i: u32) -> jubjub::ExtendedPoint { - find_group_hash(domain, &(i - 1).to_le_bytes()) -} - -/// The encoding function ⟨Mᵢ⟩ -/// -/// Σ j={0,k-1}: (1 - 2x₂)⋅(1 + x₀ + 2x₁)⋅2^(4⋅j) -/// -/// -#[allow(non_snake_case)] -fn M_i(segment: &BitSlice) -> jubjub::Fr { - let mut m_i = jubjub::Fr::zero(); - - for (j, chunk) in segment.chunks(3).enumerate() { - // Pad each chunk with zeros. - let mut store = 0u8; - let bits = BitSlice::<_, Lsb0>::from_element_mut(&mut store); - chunk - .iter() - .enumerate() - .for_each(|(i, bit)| bits.set(i, *bit)); - - let mut tmp = jubjub::Fr::one(); - - if bits[0] { - tmp += &jubjub::Fr::one(); - } - - if bits[1] { - tmp += &jubjub::Fr::one().double(); - } - - if bits[2] { - tmp -= tmp.double(); - } - - if j > 0 { - // Inclusive range! 
- tmp *= (1..=(4 * j)).fold(jubjub::Fr::one(), |acc, _| acc.double()); - } - - m_i += tmp; - } - - m_i -} - -/// "...an algebraic hash function with collision resistance (for fixed input -/// length) derived from assumed hardness of the Discrete Logarithm Problem on -/// the Jubjub curve." -/// -/// PedersenHash is used in the definitions of Pedersen commitments (§ -/// 5.4.7.2 'Windowed Pedersen commitments'), and of the Pedersen hash for the -/// Sapling incremental Merkle tree (§ 5.4.1.3 'MerkleCRH^Sapling Hash -/// Function'). -/// -/// -#[allow(non_snake_case)] -pub fn pedersen_hash_to_point(domain: [u8; 8], M: &BitVec) -> jubjub::ExtendedPoint { - let mut result = jubjub::ExtendedPoint::identity(); - - // Split M into n segments of 3 * c bits, where c = 63, padding the last - // segment with zeros. - // - // This loop is 1-indexed per the math definitions in the spec. - // - // https://zips.z.cash/protocol/protocol.pdf#concretepedersenhash - for (i, segment) in M - .chunks(189) - .enumerate() - .map(|(i, segment)| (i + 1, segment)) - { - result += I_i(domain, i as u32) * M_i(segment); - } - - result -} - -/// Pedersen Hash Function -/// -/// This is technically returning 255 (l_MerkleSapling) bits, not 256. -/// -/// -#[allow(non_snake_case)] -pub fn pedersen_hash(domain: [u8; 8], M: &BitVec) -> jubjub::Fq { - jubjub::AffinePoint::from(pedersen_hash_to_point(domain, M)).get_u() -} - -/// Construct a 'windowed' Pedersen commitment by reusing a Pederson hash -/// construction, and adding a randomized point on the Jubjub curve. 
-/// -/// WindowedPedersenCommit_r (s) := \ -/// PedersenHashToPoint("Zcash_PH", s) + \[r\]FindGroupHash^J^(r)("Zcash_PH", "r") -/// -/// -pub fn windowed_pedersen_commitment(r: jubjub::Fr, s: &BitVec) -> jubjub::ExtendedPoint { - const D: [u8; 8] = *b"Zcash_PH"; - - pedersen_hash_to_point(D, s) + find_group_hash(D, b"r") * r -} diff --git a/zebra-chain/src/sapling/keys.rs b/zebra-chain/src/sapling/keys.rs index c8f9788aa1f..58d9208def9 100644 --- a/zebra-chain/src/sapling/keys.rs +++ b/zebra-chain/src/sapling/keys.rs @@ -73,33 +73,6 @@ fn jubjub_group_hash(d: [u8; 8], m: &[u8]) -> Option { } } -/// FindGroupHash for JubJub, from [zcash_primitives][0] -/// -/// d is an 8-byte domain separator ("personalization"), m is the hash -/// input. -/// -/// [0]: https://github.com/zcash/librustzcash/blob/master/zcash_primitives/src/jubjub/mod.rs#L409 -/// -// TODO: move common functions like these out of the keys module into -// a more appropriate location -pub(super) fn find_group_hash(d: [u8; 8], m: &[u8]) -> jubjub::ExtendedPoint { - let mut tag = m.to_vec(); - let i = tag.len(); - tag.push(0u8); - - loop { - let gh = jubjub_group_hash(d, &tag[..]); - - // We don't want to overflow and start reusing generators - assert!(tag[i] != u8::MAX); - tag[i] += 1; - - if let Some(gh) = gh { - break gh; - } - } -} - /// Used to derive a diversified base point from a diversifier value. /// /// diff --git a/zebra-chain/src/sapling/output.rs b/zebra-chain/src/sapling/output.rs index fb46e50c8bd..a654386bbeb 100644 --- a/zebra-chain/src/sapling/output.rs +++ b/zebra-chain/src/sapling/output.rs @@ -27,10 +27,10 @@ use super::{commitment, keys, note}; #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize, Getters)] pub struct Output { /// A value commitment to the value of the input note. - pub cv: commitment::NotSmallOrderValueCommitment, + pub cv: commitment::ValueCommitment, /// The u-coordinate of the note commitment for the output note. 
- #[serde(with = "serde_helpers::Fq")] - pub cm_u: jubjub::Fq, + #[serde(with = "serde_helpers::SaplingExtractedNoteCommitment")] + pub cm_u: sapling_crypto::note::ExtractedNoteCommitment, /// An encoding of an ephemeral Jubjub public key. pub ephemeral_key: keys::EphemeralPublicKey, /// A ciphertext component for the encrypted output note. @@ -58,10 +58,10 @@ pub struct OutputInTransactionV4(pub Output); #[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)] pub struct OutputPrefixInTransactionV5 { /// A value commitment to the value of the input note. - pub cv: commitment::NotSmallOrderValueCommitment, + pub cv: commitment::ValueCommitment, /// The u-coordinate of the note commitment for the output note. - #[serde(with = "serde_helpers::Fq")] - pub cm_u: jubjub::Fq, + #[serde(with = "serde_helpers::SaplingExtractedNoteCommitment")] + pub cm_u: sapling_crypto::note::ExtractedNoteCommitment, /// An encoding of an ephemeral Jubjub public key. pub ephemeral_key: keys::EphemeralPublicKey, /// A ciphertext component for the encrypted output note. @@ -124,7 +124,7 @@ impl OutputInTransactionV4 { impl ZcashSerialize for OutputInTransactionV4 { fn zcash_serialize(&self, mut writer: W) -> Result<(), io::Error> { let output = self.0.clone(); - output.cv.zcash_serialize(&mut writer)?; + writer.write_all(&output.cv.0.to_bytes())?; writer.write_all(&output.cm_u.to_bytes())?; output.ephemeral_key.zcash_serialize(&mut writer)?; output.enc_ciphertext.zcash_serialize(&mut writer)?; @@ -150,12 +150,14 @@ impl ZcashDeserialize for OutputInTransactionV4 { Ok(OutputInTransactionV4(Output { // Type is `ValueCommit^{Sapling}.Output`, i.e. J // https://zips.z.cash/protocol/protocol.pdf#abstractcommit - // See [`commitment::NotSmallOrderValueCommitment::zcash_deserialize`]. - cv: commitment::NotSmallOrderValueCommitment::zcash_deserialize(&mut reader)?, + // See [`sapling_crypto::value::ValueCommitment::zcash_deserialize`]. 
+ cv: commitment::ValueCommitment( + sapling_crypto::value::ValueCommitment::zcash_deserialize(&mut reader)?, + ), // Type is `B^{[ℓ_{Sapling}_{Merkle}]}`, i.e. 32 bytes. // However, the consensus rule above restricts it even more. - // See [`jubjub::Fq::zcash_deserialize`]. - cm_u: jubjub::Fq::zcash_deserialize(&mut reader)?, + // See [`sapling_crypto::note::ExtractedNoteCommitment::zcash_deserialize`]. + cm_u: sapling_crypto::note::ExtractedNoteCommitment::zcash_deserialize(&mut reader)?, // Type is `KA^{Sapling}.Public`, i.e. J // https://zips.z.cash/protocol/protocol.pdf#concretesaplingkeyagreement // See [`keys::EphemeralPublicKey::zcash_deserialize`]. @@ -188,7 +190,7 @@ impl ZcashDeserialize for OutputInTransactionV4 { impl ZcashSerialize for OutputPrefixInTransactionV5 { fn zcash_serialize(&self, mut writer: W) -> Result<(), io::Error> { - self.cv.zcash_serialize(&mut writer)?; + writer.write_all(&self.cv.0.to_bytes())?; writer.write_all(&self.cm_u.to_bytes())?; self.ephemeral_key.zcash_serialize(&mut writer)?; self.enc_ciphertext.zcash_serialize(&mut writer)?; @@ -213,12 +215,14 @@ impl ZcashDeserialize for OutputPrefixInTransactionV5 { Ok(OutputPrefixInTransactionV5 { // Type is `ValueCommit^{Sapling}.Output`, i.e. J // https://zips.z.cash/protocol/protocol.pdf#abstractcommit - // See [`commitment::NotSmallOrderValueCommitment::zcash_deserialize`]. - cv: commitment::NotSmallOrderValueCommitment::zcash_deserialize(&mut reader)?, + // See [`sapling_crypto::value::ValueCommitment::zcash_deserialize`]. + cv: commitment::ValueCommitment( + sapling_crypto::value::ValueCommitment::zcash_deserialize(&mut reader)?, + ), // Type is `B^{[ℓ_{Sapling}_{Merkle}]}`, i.e. 32 bytes. // However, the consensus rule above restricts it even more. - // See [`jubjub::Fq::zcash_deserialize`]. - cm_u: jubjub::Fq::zcash_deserialize(&mut reader)?, + // See [`sapling_crypto::note::ExtractedNoteCommitment::zcash_deserialize`]. 
+ cm_u: sapling_crypto::note::ExtractedNoteCommitment::zcash_deserialize(&mut reader)?, // Type is `KA^{Sapling}.Public`, i.e. J // https://zips.z.cash/protocol/protocol.pdf#concretesaplingkeyagreement // See [`keys::EphemeralPublicKey::zcash_deserialize`]. diff --git a/zebra-chain/src/sapling/shielded_data.rs b/zebra-chain/src/sapling/shielded_data.rs index 3534fe8090b..7a08001fe53 100644 --- a/zebra-chain/src/sapling/shielded_data.rs +++ b/zebra-chain/src/sapling/shielded_data.rs @@ -23,7 +23,7 @@ use crate::{ }, sapling::{ output::OutputPrefixInTransactionV5, spend::SpendPrefixInTransactionV5, tree, Nullifier, - Output, Spend, ValueCommitment, + Output, Spend, }, serialization::{AtLeastOne, TrustedPreallocate}, }; @@ -251,7 +251,9 @@ where /// Collect the cm_u's for this transaction, if it contains [`Output`]s, /// in the order they appear in the transaction. - pub fn note_commitments(&self) -> impl Iterator { + pub fn note_commitments( + &self, + ) -> impl Iterator { self.outputs().map(|output| &output.cm_u) } @@ -278,14 +280,14 @@ where /// /// pub fn binding_verification_key(&self) -> redjubjub::VerificationKeyBytes { - let cv_old: ValueCommitment = self.spends().map(|spend| spend.cv.into()).sum(); - let cv_new: ValueCommitment = self.outputs().map(|output| output.cv.into()).sum(); - let cv_balance: ValueCommitment = - ValueCommitment::new(jubjub::Fr::zero(), self.value_balance); - - let key_bytes: [u8; 32] = (cv_old - cv_new - cv_balance).into(); - - key_bytes.into() + let cv_old: sapling_crypto::value::CommitmentSum = + self.spends().map(|spend| spend.cv.0.clone()).sum(); + let cv_new: sapling_crypto::value::CommitmentSum = + self.outputs().map(|output| output.cv.0.clone()).sum(); + + (cv_old - cv_new) + .into_bvk(self.value_balance.zatoshis()) + .into() } } diff --git a/zebra-chain/src/sapling/spend.rs b/zebra-chain/src/sapling/spend.rs index ee6e753a737..068df147076 100644 --- a/zebra-chain/src/sapling/spend.rs +++ b/zebra-chain/src/sapling/spend.rs @@ 
-36,7 +36,7 @@ use super::{ #[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Getters)] pub struct Spend { /// A value commitment to the value of the input note. - pub cv: commitment::NotSmallOrderValueCommitment, + pub cv: commitment::ValueCommitment, /// An anchor for this spend. /// /// The anchor is the root of the Sapling note commitment tree in a previous @@ -68,7 +68,7 @@ pub struct Spend { #[derive(Clone, Debug, Serialize, Deserialize, PartialEq)] pub struct SpendPrefixInTransactionV5 { /// A value commitment to the value of the input note. - pub cv: commitment::NotSmallOrderValueCommitment, + pub cv: commitment::ValueCommitment, /// The nullifier of the input note. pub nullifier: note::Nullifier, /// The randomized public key for `spend_auth_sig`. @@ -159,7 +159,7 @@ impl Spend { impl ZcashSerialize for Spend { fn zcash_serialize<W: io::Write>(&self, mut writer: W) -> Result<(), io::Error> { - self.cv.zcash_serialize(&mut writer)?; + writer.write_all(&self.cv.0.to_bytes())?; self.per_spend_anchor.zcash_serialize(&mut writer)?; writer.write_32_bytes(&self.nullifier.into())?; writer.write_all(&<[u8; 32]>::from(self.rk.clone())[..])?; @@ -202,8 +202,10 @@ impl ZcashDeserialize for Spend { Ok(Spend { // Type is `ValueCommit^{Sapling}.Output`, i.e. J // https://zips.z.cash/protocol/protocol.pdf#abstractcommit - // See [`commitment::NotSmallOrderValueCommitment::zcash_deserialize`]. - cv: commitment::NotSmallOrderValueCommitment::zcash_deserialize(&mut reader)?, + // See [`sapling_crypto::value::ValueCommitment::zcash_deserialize`]. + cv: commitment::ValueCommitment( + sapling_crypto::value::ValueCommitment::zcash_deserialize(&mut reader)?, + ), // Type is `B^{[ℓ_{Sapling}_{Merkle}]}`, i.e. 32 bytes. // But as mentioned above, we validate it further as an integer. 
per_spend_anchor: (&mut reader).zcash_deserialize_into()?, @@ -238,7 +240,7 @@ impl ZcashDeserialize for Spend { impl ZcashSerialize for SpendPrefixInTransactionV5 { fn zcash_serialize(&self, mut writer: W) -> Result<(), io::Error> { - self.cv.zcash_serialize(&mut writer)?; + writer.write_all(&self.cv.0.to_bytes())?; writer.write_32_bytes(&self.nullifier.into())?; writer.write_all(&<[u8; 32]>::from(self.rk.clone())[..])?; Ok(()) @@ -257,8 +259,8 @@ impl ZcashDeserialize for SpendPrefixInTransactionV5 { Ok(SpendPrefixInTransactionV5 { // Type is `ValueCommit^{Sapling}.Output`, i.e. J // https://zips.z.cash/protocol/protocol.pdf#abstractcommit - // See [`commitment::NotSmallOrderValueCommitment::zcash_deserialize`]. - cv: commitment::NotSmallOrderValueCommitment::zcash_deserialize(&mut reader)?, + // See [`sapling_crypto::value::ValueCommitment::zcash_deserialize`]. + cv: commitment::ValueCommitment::zcash_deserialize(&mut reader)?, // Type is `B^Y^{[ℓ_{PRFnfSapling}/8]}`, i.e. 32 bytes nullifier: note::Nullifier::from(reader.read_32_bytes()?), // Type is `SpendAuthSig^{Sapling}.Public`, i.e. 
J diff --git a/zebra-chain/src/sapling/tests.rs b/zebra-chain/src/sapling/tests.rs index 2b8339b2276..a7ac2ac35a7 100644 --- a/zebra-chain/src/sapling/tests.rs +++ b/zebra-chain/src/sapling/tests.rs @@ -1,4 +1,2 @@ mod preallocate; mod prop; -mod test_vectors; -mod tree; diff --git a/zebra-chain/src/sapling/tests/test_vectors.rs b/zebra-chain/src/sapling/tests/test_vectors.rs deleted file mode 100644 index c9188b441ab..00000000000 --- a/zebra-chain/src/sapling/tests/test_vectors.rs +++ /dev/null @@ -1,79 +0,0 @@ -// From https://github.com/zcash/librustzcash/blob/master/zcash_primitives/src/merkle_tree.rs#L585 -pub const HEX_EMPTY_ROOTS: [&str; 33] = [ - "0100000000000000000000000000000000000000000000000000000000000000", - "817de36ab2d57feb077634bca77819c8e0bd298c04f6fed0e6a83cc1356ca155", - "ffe9fc03f18b176c998806439ff0bb8ad193afdb27b2ccbc88856916dd804e34", - "d8283386ef2ef07ebdbb4383c12a739a953a4d6e0d6fb1139a4036d693bfbb6c", - "e110de65c907b9dea4ae0bd83a4b0a51bea175646a64c12b4c9f931b2cb31b49", - "912d82b2c2bca231f71efcf61737fbf0a08befa0416215aeef53e8bb6d23390a", - "8ac9cf9c391e3fd42891d27238a81a8a5c1d3a72b1bcbea8cf44a58ce7389613", - "d6c639ac24b46bd19341c91b13fdcab31581ddaf7f1411336a271f3d0aa52813", - "7b99abdc3730991cc9274727d7d82d28cb794edbc7034b4f0053ff7c4b680444", - "43ff5457f13b926b61df552d4e402ee6dc1463f99a535f9a713439264d5b616b", - "ba49b659fbd0b7334211ea6a9d9df185c757e70aa81da562fb912b84f49bce72", - "4777c8776a3b1e69b73a62fa701fa4f7a6282d9aee2c7a6b82e7937d7081c23c", - "ec677114c27206f5debc1c1ed66f95e2b1885da5b7be3d736b1de98579473048", - "1b77dac4d24fb7258c3c528704c59430b630718bec486421837021cf75dab651", - "bd74b25aacb92378a871bf27d225cfc26baca344a1ea35fdd94510f3d157082c", - "d6acdedf95f608e09fa53fb43dcd0990475726c5131210c9e5caeab97f0e642f", - "1ea6675f9551eeb9dfaaa9247bc9858270d3d3a4c5afa7177a984d5ed1be2451", - "6edb16d01907b759977d7650dad7e3ec049af1a3d875380b697c862c9ec5d51c", - "cd1c8dbf6e3acc7a80439bc4962cf25b9dce7c896f3a5bd70803fc5a0e33cf00", - 
"6aca8448d8263e547d5ff2950e2ed3839e998d31cbc6ac9fd57bc6002b159216", - "8d5fa43e5a10d11605ac7430ba1f5d81fb1b68d29a640405767749e841527673", - "08eeab0c13abd6069e6310197bf80f9c1ea6de78fd19cbae24d4a520e6cf3023", - "0769557bc682b1bf308646fd0b22e648e8b9e98f57e29f5af40f6edb833e2c49", - "4c6937d78f42685f84b43ad3b7b00f81285662f85c6a68ef11d62ad1a3ee0850", - "fee0e52802cb0c46b1eb4d376c62697f4759f6c8917fa352571202fd778fd712", - "16d6252968971a83da8521d65382e61f0176646d771c91528e3276ee45383e4a", - "d2e1642c9a462229289e5b0e3b7f9008e0301cbb93385ee0e21da2545073cb58", - "a5122c08ff9c161d9ca6fc462073396c7d7d38e8ee48cdb3bea7e2230134ed6a", - "28e7b841dcbc47cceb69d7cb8d94245fb7cb2ba3a7a6bc18f13f945f7dbd6e2a", - "e1f34b034d4a3cd28557e2907ebf990c918f64ecb50a94f01d6fda5ca5c7ef72", - "12935f14b676509b81eb49ef25f39269ed72309238b4c145803544b646dca62d", - "b2eed031d4d6a4f02a097f80b54cc1541d4163c6b6f5971f88b6e41d35c53814", - "fbc2f4300c01f0b7820d00e3347c8da4ee614674376cbc45359daa54f9b5493e", -]; - -// From https://github.com/zcash/zcash/blob/master/src/test/data/merkle_commitments_sapling.json -// Byte-reversed from those ones because the original test vectors are loaded using uint256S() -pub const COMMITMENTS: [&str; 16] = [ - "b02310f2e087e55bfd07ef5e242e3b87ee5d00c9ab52f61e6bd42542f93a6f55", - "225747f3b5d5dab4e5a424f81f85c904ff43286e0f3fd07ef0b8c6a627b11458", - "7c3ea01a6e3a3d90cf59cd789e467044b5cd78eb2c84cc6816f960746d0e036c", - "50421d6c2c94571dfaaa135a4ff15bf916681ebd62c0e43e69e3b90684d0a030", - "aaec63863aaa0b2e3b8009429bdddd455e59be6f40ccab887a32eb98723efc12", - "f76748d40d5ee5f9a608512e7954dd515f86e8f6d009141c89163de1cf351a02", - "bc8a5ec71647415c380203b681f7717366f3501661512225b6dc3e121efc0b2e", - "da1adda2ccde9381e11151686c121e7f52d19a990439161c7eb5a9f94be5a511", - "3a27fed5dbbc475d3880360e38638c882fd9b273b618fc433106896083f77446", - "c7ca8f7df8fd997931d33985d935ee2d696856cc09cc516d419ea6365f163008", - "f0fa37e8063b139d342246142fc48e7c0c50d0a62c97768589e06466742c3702", - 
"e6d4d7685894d01b32f7e081ab188930be6c2b9f76d6847b7f382e3dddd7c608", - "8cebb73be883466d18d3b0c06990520e80b936440a2c9fd184d92a1f06c4e826", - "22fab8bcdb88154dbf5877ad1e2d7f1b541bc8a5ec1b52266095381339c27c03", - "f43e3aac61e5a753062d4d0508c26ceaf5e4c0c58ba3c956e104b5d2cf67c41c", - "3a3661bc12b72646c94bc6c92796e81953985ee62d80a9ec3645a9a95740ac15", -]; - -// Calculated by modifying TestCommitmentTree in -// https://github.com/zcash/librustzcash/blob/master/zcash_primitives/src/merkle_tree.rs -// to compute the full Sapling height root (32). -pub const ROOTS: [&str; 16] = [ - "ee880ed73e96ba0739578c87ba8e6a4bc33b5e63bb98875e6e2f04b214e9fb59", - "321aef631f1a9b7914d40d7bab34c29145ac6cf69d24bf0fc566b33ac9029972", - "ddaa1ab86de5c153993414f34ba97e9674c459dfadde112b89eeeafa0e5a204c", - "0b337c75535b09468955d499e37cb7e2466f1f0c861ddea929aa13c699c1a454", - "5a9b9764d76a45848012eec306d6f6bface319ad5d9bf88db96b3b19edded716", - "004075c72e360d7b2ab113555e97dcf4fb50f211d74841eafb05aaff705e3235", - "ebf2139c2ef10d51f21fee18521963b91b64987f2743d908be2b80b4ae29e622", - "70d07f5662eafaf054327899abce515b1c1cbac6600edea86297c2800e806534", - "f72dad9cd0f4d4783444f6dc64d9be2edc74cffddcb60bf244e56eada508c22a", - "7635d357c7755c91ea4d6b53e8fd42756329118577fe8b9ade3d33b316fa4948", - "fca0c26ce07fc7e563b031d9187f829fa41715f193f08bd0ac25e5122ac75c2e", - "0b727c9c6f66c3c749ef9c1df6c5356db8adf80fcc3c1d7fdf56b82cb8d47a3c", - "d77d030ed3c2521567eae9555b95eca89442b0c263b82fea4359f802e0f31668", - "3d84c8b65e5a8036d115161bb6e3ca2a556e42d376abc3d74a16bc22685b7d61", - "84f752458538a24483e9731e32fa95cabf56aebbbc6bff8475f45299bcdcba35", - "bb3cc8f85773c05f3332a25cc8281a68450a90807cef859b49b2f1d9d2d3a64d", -]; diff --git a/zebra-chain/src/sapling/tests/tree.rs b/zebra-chain/src/sapling/tests/tree.rs deleted file mode 100644 index 37773e01e5d..00000000000 --- a/zebra-chain/src/sapling/tests/tree.rs +++ /dev/null @@ -1,129 +0,0 @@ -use std::sync::Arc; - -use color_eyre::eyre; -use eyre::Result; -use 
hex::FromHex; - -use crate::block::Block; -use crate::parameters::NetworkUpgrade; -use crate::sapling::{self, tree::*}; -use crate::serialization::ZcashDeserializeInto; -use crate::{parameters::Network, sapling::tests::test_vectors}; - -#[test] -fn empty_roots() { - let _init_guard = zebra_test::init(); - - for i in 0..EMPTY_ROOTS.len() { - assert_eq!( - hex::encode(EMPTY_ROOTS[i]), - // The test vector is in reversed order. - test_vectors::HEX_EMPTY_ROOTS[usize::from(MERKLE_DEPTH) - i] - ); - } -} - -#[test] -fn incremental_roots() { - let _init_guard = zebra_test::init(); - - let mut leaves = vec![]; - - let mut incremental_tree = NoteCommitmentTree::default(); - - for (i, cm_u) in test_vectors::COMMITMENTS.iter().enumerate() { - let bytes = <[u8; 32]>::from_hex(cm_u).unwrap(); - - let cm_u = jubjub::Fq::from_bytes(&bytes).unwrap(); - - leaves.push(cm_u); - - let _ = incremental_tree.append(cm_u); - - assert_eq!(hex::encode(incremental_tree.hash()), test_vectors::ROOTS[i]); - - assert_eq!( - hex::encode((NoteCommitmentTree::from(leaves.clone())).hash()), - test_vectors::ROOTS[i] - ); - } -} - -#[test] -fn incremental_roots_with_blocks() -> Result<()> { - for network in Network::iter() { - incremental_roots_with_blocks_for_network(network)?; - } - Ok(()) -} - -fn incremental_roots_with_blocks_for_network(network: Network) -> Result<()> { - let (blocks, sapling_roots) = network.block_sapling_roots_map(); - - let height = NetworkUpgrade::Sapling - .activation_height(&network) - .unwrap() - .0; - - // Build empty note commitment tree - let mut tree = sapling::tree::NoteCommitmentTree::default(); - - // Load Sapling activation block - let sapling_activation_block = Arc::new( - blocks - .get(&height) - .expect("test vector exists") - .zcash_deserialize_into::() - .expect("block is structurally valid"), - ); - - // Add note commitments from the Sapling activation block to the tree - for transaction in sapling_activation_block.transactions.iter() { - for 
sapling_note_commitment in transaction.sapling_note_commitments() { - tree.append(*sapling_note_commitment) - .expect("test vector is correct"); - } - } - - // Check if root of the tree of the activation block is correct - let sapling_activation_block_root = - sapling::tree::Root::try_from(**sapling_roots.get(&height).expect("test vector exists"))?; - assert_eq!(sapling_activation_block_root, tree.root()); - - // Load the block immediately after Sapling activation (activation + 1) - let block_after_sapling_activation = Arc::new( - blocks - .get(&(height + 1)) - .expect("test vector exists") - .zcash_deserialize_into::() - .expect("block is structurally valid"), - ); - let block_after_sapling_activation_root = sapling::tree::Root::try_from( - **sapling_roots - .get(&(height + 1)) - .expect("test vector exists"), - )?; - - // Add note commitments from the block after Sapling activatoin to the tree - let mut appended_count = 0; - for transaction in block_after_sapling_activation.transactions.iter() { - for sapling_note_commitment in transaction.sapling_note_commitments() { - tree.append(*sapling_note_commitment) - .expect("test vector is correct"); - appended_count += 1; - } - } - // We also want to make sure that sapling_note_commitments() is returning - // the commitments in the right order. But this will only be actually tested - // if there are more than one note commitment in a block. - // In the test vectors this applies only for the block 1 in mainnet, - // so we make this explicit in this assert. 
- if network == Network::Mainnet { - assert!(appended_count > 1); - } - - // Check if root of the second block is correct - assert_eq!(block_after_sapling_activation_root, tree.root()); - - Ok(()) -} diff --git a/zebra-chain/src/sapling/tree.rs b/zebra-chain/src/sapling/tree.rs index c5269db5cca..bcd4b0c6559 100644 --- a/zebra-chain/src/sapling/tree.rs +++ b/zebra-chain/src/sapling/tree.rs @@ -17,18 +17,10 @@ use std::{ io, }; -use bitvec::prelude::*; use hex::ToHex; -use incrementalmerkletree::{ - frontier::{Frontier, NonEmptyFrontier}, - Hashable, -}; +use incrementalmerkletree::frontier::{Frontier, NonEmptyFrontier}; -use lazy_static::lazy_static; use thiserror::Error; -use zcash_primitives::merkle_tree::HashSer; - -use super::commitment::pedersen_hashes::pedersen_hash; use crate::{ serialization::{ @@ -43,55 +35,10 @@ use legacy::LegacyNoteCommitmentTree; /// The type that is used to update the note commitment tree. /// /// Unfortunately, this is not the same as `sapling::NoteCommitment`. -pub type NoteCommitmentUpdate = jubjub::Fq; +pub type NoteCommitmentUpdate = sapling_crypto::note::ExtractedNoteCommitment; pub(super) const MERKLE_DEPTH: u8 = 32; -/// MerkleCRH^Sapling Hash Function -/// -/// Used to hash incremental Merkle tree hash values for Sapling. -/// -/// MerkleCRH^Sapling(layer, left, right) := PedersenHash("Zcash_PH", l || left || right) -/// where l = I2LEBSP_6(MerkleDepth^Sapling − 1 − layer) and -/// left, right, and the output are all technically 255 bits (l_MerkleSapling), not 256. 
-/// -/// -fn merkle_crh_sapling(layer: u8, left: [u8; 32], right: [u8; 32]) -> [u8; 32] { - let mut s = bitvec![u8, Lsb0;]; - - // Prefix: l = I2LEBSP_6(MerkleDepth^Sapling − 1 − layer) - let l = MERKLE_DEPTH - 1 - layer; - s.extend_from_bitslice(&BitSlice::<_, Lsb0>::from_element(&l)[0..6]); - s.extend_from_bitslice(&BitArray::<_, Lsb0>::from(left)[0..255]); - s.extend_from_bitslice(&BitArray::<_, Lsb0>::from(right)[0..255]); - - pedersen_hash(*b"Zcash_PH", &s).to_bytes() -} - -lazy_static! { - /// List of "empty" Sapling note commitment nodes, one for each layer. - /// - /// The list is indexed by the layer number (0: root; MERKLE_DEPTH: leaf). - /// - /// - pub(super) static ref EMPTY_ROOTS: Vec<[u8; 32]> = { - // The empty leaf node. This is layer 32. - let mut v = vec![NoteCommitmentTree::uncommitted()]; - - // Starting with layer 31 (the first internal layer, after the leaves), - // generate the empty roots up to layer 0, the root. - for layer in (0..MERKLE_DEPTH).rev() { - // The vector is generated from the end, pushing new nodes to its beginning. - // For this reason, the layer below is v[0]. - let next = merkle_crh_sapling(layer, v[0], v[0]); - v.insert(0, next); - } - - v - - }; -} - /// Sapling note commitment tree root node hash. /// /// The root hash in LEBS2OSP256(rt) encoding of the Sapling note @@ -193,141 +140,6 @@ impl ZcashDeserialize for Root { } } -/// A node of the Sapling Incremental Note Commitment Tree. -/// -/// Note that it's handled as a byte buffer and not a point coordinate (jubjub::Fq) -/// because that's how the spec handles the MerkleCRH^Sapling function inputs and outputs. 
-#[derive(Copy, Clone, Eq, PartialEq, Default)] -pub struct Node([u8; 32]); - -impl AsRef<[u8; 32]> for Node { - fn as_ref(&self) -> &[u8; 32] { - &self.0 - } -} - -impl fmt::Display for Node { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.write_str(&self.encode_hex::()) - } -} - -impl fmt::Debug for Node { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_tuple("sapling::Node") - .field(&self.encode_hex::()) - .finish() - } -} - -impl Node { - /// Return the node bytes in little-endian byte order suitable for printing out byte by byte. - /// - /// `zcashd`'s `z_getsubtreesbyindex` does not reverse the byte order of subtree roots. - pub fn bytes_in_display_order(&self) -> [u8; 32] { - self.0 - } -} - -impl ToHex for &Node { - fn encode_hex>(&self) -> T { - self.bytes_in_display_order().encode_hex() - } - - fn encode_hex_upper>(&self) -> T { - self.bytes_in_display_order().encode_hex_upper() - } -} - -impl ToHex for Node { - fn encode_hex>(&self) -> T { - (&self).encode_hex() - } - - fn encode_hex_upper>(&self) -> T { - (&self).encode_hex_upper() - } -} - -/// Required to serialize [`NoteCommitmentTree`]s in a format matching `zcashd`. -/// -/// Zebra stores Sapling note commitment trees as [`Frontier`]s while the -/// [`z_gettreestate`][1] RPC requires [`CommitmentTree`][2]s. Implementing -/// [`incrementalmerkletree::Hashable`] for [`Node`]s allows the conversion. -/// -/// [1]: https://zcash.github.io/rpc/z_gettreestate.html -/// [2]: incrementalmerkletree::frontier::CommitmentTree -impl HashSer for Node { - fn read(mut reader: R) -> io::Result { - let mut node = [0u8; 32]; - reader.read_exact(&mut node)?; - Ok(Self(node)) - } - - fn write(&self, mut writer: W) -> io::Result<()> { - writer.write_all(self.0.as_ref()) - } -} - -impl Hashable for Node { - fn empty_leaf() -> Self { - Self(NoteCommitmentTree::uncommitted()) - } - - /// Combine two nodes to generate a new node in the given level. 
- /// Level 0 is the layer above the leaves (layer 31). - /// Level 31 is the root (layer 0). - fn combine(level: incrementalmerkletree::Level, a: &Self, b: &Self) -> Self { - let layer = MERKLE_DEPTH - 1 - u8::from(level); - Self(merkle_crh_sapling(layer, a.0, b.0)) - } - - /// Return the node for the level below the given level. (A quirk of the API) - fn empty_root(level: incrementalmerkletree::Level) -> Self { - let layer_below = usize::from(MERKLE_DEPTH) - usize::from(level); - Self(EMPTY_ROOTS[layer_below]) - } -} - -impl From for Node { - fn from(x: jubjub::Fq) -> Self { - Node(x.into()) - } -} - -impl TryFrom<&[u8]> for Node { - type Error = &'static str; - - fn try_from(bytes: &[u8]) -> Result { - Option::::from(jubjub::Fq::from_bytes( - bytes.try_into().map_err(|_| "wrong byte slice len")?, - )) - .map(Node::from) - .ok_or("invalid jubjub field element") - } -} - -impl serde::Serialize for Node { - fn serialize(&self, serializer: S) -> Result - where - S: serde::Serializer, - { - self.0.serialize(serializer) - } -} - -impl<'de> serde::Deserialize<'de> for Node { - fn deserialize(deserializer: D) -> Result - where - D: serde::Deserializer<'de>, - { - let bytes = <[u8; 32]>::deserialize(deserializer)?; - Option::::from(jubjub::Fq::from_bytes(&bytes)) - .map(Node::from) - .ok_or_else(|| serde::de::Error::custom("invalid JubJub field element")) - } -} - #[derive(Error, Copy, Clone, Debug, Eq, PartialEq, Hash)] #[allow(missing_docs)] pub enum NoteCommitmentTreeError { @@ -360,7 +172,7 @@ pub struct NoteCommitmentTree { /// /// /// Note: MerkleDepth^Sapling = MERKLE_DEPTH = 32. - inner: Frontier, + inner: Frontier, /// A cached root of the tree. /// @@ -390,7 +202,7 @@ impl NoteCommitmentTree { /// Returns an error if the tree is full. 
#[allow(clippy::unwrap_in_result)] pub fn append(&mut self, cm_u: NoteCommitmentUpdate) -> Result<(), NoteCommitmentTreeError> { - if self.inner.append(cm_u.into()) { + if self.inner.append(sapling_crypto::Node::from_cmu(&cm_u)) { // Invalidate cached root let cached_root = self .cached_root @@ -406,7 +218,7 @@ impl NoteCommitmentTree { } /// Returns frontier of non-empty tree, or None. - fn frontier(&self) -> Option<&NonEmptyFrontier> { + fn frontier(&self) -> Option<&NonEmptyFrontier> { self.inner.value() } @@ -551,7 +363,9 @@ impl NoteCommitmentTree { } /// Returns subtree index and root if the most recently appended leaf completes the subtree - pub fn completed_subtree_index_and_root(&self) -> Option<(NoteCommitmentSubtreeIndex, Node)> { + pub fn completed_subtree_index_and_root( + &self, + ) -> Option<(NoteCommitmentSubtreeIndex, sapling_crypto::Node)> { if !self.is_complete_subtree() { return None; } @@ -599,7 +413,7 @@ impl NoteCommitmentTree { /// Calculates and returns the current root of the tree, ignoring any caching. pub fn recalculate_root(&self) -> Root { - Root::try_from(self.inner.root().0).unwrap() + Root::try_from(self.inner.root().to_bytes()).unwrap() } /// Gets the Jubjub-based Pedersen hash of root node of this merkle tree of @@ -698,9 +512,9 @@ impl PartialEq for NoteCommitmentTree { } } -impl From> for NoteCommitmentTree { +impl From> for NoteCommitmentTree { /// Computes the tree from a whole bunch of note commitments at once. - fn from(values: Vec) -> Self { + fn from(values: Vec) -> Self { let mut tree = Self::default(); if values.is_empty() { diff --git a/zebra-chain/src/sapling/tree/legacy.rs b/zebra-chain/src/sapling/tree/legacy.rs index 6fce18337dd..a32204ce1ea 100644 --- a/zebra-chain/src/sapling/tree/legacy.rs +++ b/zebra-chain/src/sapling/tree/legacy.rs @@ -12,8 +12,25 @@ //! 
[2]: https://github.com/zcash/incrementalmerkletree/blob/incrementalmerkletree-v0.3.1/src/lib.rs use incrementalmerkletree::{frontier::Frontier, Position}; +use serde::Serialize; -use super::{Node, NoteCommitmentTree, Root, MERKLE_DEPTH}; +use super::{serde_helpers, NoteCommitmentTree, Root, MERKLE_DEPTH}; + +/// A serializable version of `sapling_crypto::Node`. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct Node(#[serde(with = "serde_helpers::Node")] pub sapling_crypto::Node); + +impl From for Node { + fn from(n: sapling_crypto::Node) -> Self { + Node(n) + } +} + +impl From for sapling_crypto::Node { + fn from(n: Node) -> Self { + n.0 + } +} /// A legacy version of [`NoteCommitmentTree`]. #[derive(Debug, Serialize, Deserialize)] @@ -49,19 +66,23 @@ pub struct LegacyFrontier { pub frontier: Option>, } -impl From> for Frontier { +impl From> for Frontier { fn from(legacy_frontier: LegacyFrontier) -> Self { if let Some(legacy_frontier_data) = legacy_frontier.frontier { - let mut ommers = legacy_frontier_data.ommers; + let mut ommers = legacy_frontier_data + .ommers + .into_iter() + .map(|o| o.0) + .collect::>(); let position = Position::from( u64::try_from(legacy_frontier_data.position.0) .expect("old `usize` always fits in `u64`"), ); let leaf = match legacy_frontier_data.leaf { - LegacyLeaf::Left(a) => a, + LegacyLeaf::Left(a) => a.0, LegacyLeaf::Right(a, b) => { - ommers.insert(0, a); - b + ommers.insert(0, a.0); + b.0 } }; Frontier::from_parts( @@ -76,12 +97,16 @@ impl From> for Frontier { } } -impl From> for LegacyFrontier { - fn from(frontier: Frontier) -> Self { +impl From> for LegacyFrontier { + fn from(frontier: Frontier) -> Self { if let Some(frontier_data) = frontier.value() { - let leaf_from_frontier = *frontier_data.leaf(); - let mut leaf = LegacyLeaf::Left(leaf_from_frontier); - let mut ommers = frontier_data.ommers().to_vec(); + let leaf_from_frontier = Node(*frontier_data.leaf()); + let mut leaf = 
LegacyLeaf::Left(leaf_from_frontier.clone()); + let mut ommers = frontier_data + .ommers() + .iter() + .map(|o| Node(*o)) + .collect::>(); let position = usize::try_from(u64::from(frontier_data.position())) .expect("new position should fit in a `usize`"); if frontier_data.position().is_right_child() { diff --git a/zebra-chain/src/serialization/constraint.rs b/zebra-chain/src/serialization/constraint.rs index a4ffc565991..5cf6a0eefa6 100644 --- a/zebra-chain/src/serialization/constraint.rs +++ b/zebra-chain/src/serialization/constraint.rs @@ -258,7 +258,7 @@ macro_rules! at_least_one { ($element:expr; $count:expr) => ( { as std::convert::TryInto<$crate::serialization::AtLeastOne<_>>>::try_into( - vec![$element; $expr], + vec![$element; $count], ).expect("at least one element in `AtLeastOne<_>`") } ); @@ -271,3 +271,17 @@ macro_rules! at_least_one { } ); } + +#[cfg(test)] +mod tests { + use super::AtLeastOne; + + #[test] + fn at_least_one_count_form_works() { + let v: AtLeastOne = at_least_one![42; 1]; + assert_eq!(v.as_slice(), [42]); + + let v2: AtLeastOne = at_least_one![5; 2]; + assert_eq!(v2.as_slice(), [5, 5]); + } +} diff --git a/zebra-chain/src/serialization/serde_helpers.rs b/zebra-chain/src/serialization/serde_helpers.rs index cba829839c0..b5a4dd7f718 100644 --- a/zebra-chain/src/serialization/serde_helpers.rs +++ b/zebra-chain/src/serialization/serde_helpers.rs @@ -65,3 +65,54 @@ impl From for pallas::Base { pallas::Base::from_repr(local.bytes).unwrap() } } + +#[derive(Deserialize, Serialize)] +#[serde(remote = "sapling_crypto::value::ValueCommitment")] +pub struct ValueCommitment { + #[serde(getter = "sapling_crypto::value::ValueCommitment::to_bytes")] + bytes: [u8; 32], +} + +impl From for sapling_crypto::value::ValueCommitment { + fn from(local: ValueCommitment) -> Self { + sapling_crypto::value::ValueCommitment::from_bytes_not_small_order(&local.bytes).unwrap() + } +} + +#[derive(Deserialize, Serialize)] +#[serde(remote = 
"sapling_crypto::note::ExtractedNoteCommitment")] +pub struct SaplingExtractedNoteCommitment { + #[serde(getter = "SaplingExtractedNoteCommitment::as_serializable_bytes")] + bytes: [u8; 32], +} + +impl From for sapling_crypto::note::ExtractedNoteCommitment { + fn from(local: SaplingExtractedNoteCommitment) -> Self { + sapling_crypto::note::ExtractedNoteCommitment::from_bytes(&local.bytes).unwrap() + } +} + +impl SaplingExtractedNoteCommitment { + fn as_serializable_bytes(remote: &sapling_crypto::note::ExtractedNoteCommitment) -> [u8; 32] { + remote.to_bytes() + } +} + +#[derive(Deserialize, Serialize)] +#[serde(remote = "sapling_crypto::Node")] +pub struct Node { + #[serde(getter = "Node::as_serializable_bytes")] + bytes: [u8; 32], +} + +impl From for sapling_crypto::Node { + fn from(local: Node) -> Self { + sapling_crypto::Node::from_bytes(local.bytes).unwrap() + } +} + +impl Node { + fn as_serializable_bytes(remote: &sapling_crypto::Node) -> [u8; 32] { + remote.to_bytes() + } +} diff --git a/zebra-chain/src/transaction.rs b/zebra-chain/src/transaction.rs index 1bbc0cdd2f0..1952782257b 100644 --- a/zebra-chain/src/transaction.rs +++ b/zebra-chain/src/transaction.rs @@ -995,7 +995,9 @@ impl Transaction { } /// Returns the Sapling note commitments in this transaction, regardless of version. - pub fn sapling_note_commitments(&self) -> Box + '_> { + pub fn sapling_note_commitments( + &self, + ) -> Box + '_> { // This function returns a boxed iterator because the different // transaction variants end up having different iterator types match self { diff --git a/zebra-consensus/Cargo.toml b/zebra-consensus/Cargo.toml index 010b0dfc730..6f28df866b2 100644 --- a/zebra-consensus/Cargo.toml +++ b/zebra-consensus/Cargo.toml @@ -77,9 +77,6 @@ proptest-derive = { workspace = true, optional = true } [dev-dependencies] color-eyre = { workspace = true } -# This is a transitive dependency via color-eyre. -# Enable a feature that makes tinyvec compile much faster. 
-tinyvec = { workspace = true, features = ["rustc_1_55"] } hex = { workspace = true } num-integer = { workspace = true } @@ -94,3 +91,6 @@ tracing-subscriber = { workspace = true } zebra-state = { path = "../zebra-state", version = "2.0.0", features = ["proptest-impl"] } zebra-chain = { path = "../zebra-chain", version = "2.0.0", features = ["proptest-impl"] } zebra-test = { path = "../zebra-test/", version = "1.0.1" } + +[lints] +workspace = true diff --git a/zebra-consensus/src/block/tests.rs b/zebra-consensus/src/block/tests.rs index 13f7b03d7ec..898721aedd5 100644 --- a/zebra-consensus/src/block/tests.rs +++ b/zebra-consensus/src/block/tests.rs @@ -135,7 +135,7 @@ async fn check_transcripts() -> Result<(), Report> { let _init_guard = zebra_test::init(); let network = Network::Mainnet; - let state_service = zebra_state::init_test(&network); + let state_service = zebra_state::init_test(&network).await; let transaction = transaction::Verifier::new_for_tests(&network, state_service.clone()); let transaction = Buffer::new(BoxService::new(transaction), 1); diff --git a/zebra-consensus/src/checkpoint.rs b/zebra-consensus/src/checkpoint.rs index e22b0e9754b..03a7bd5b355 100644 --- a/zebra-consensus/src/checkpoint.rs +++ b/zebra-consensus/src/checkpoint.rs @@ -31,6 +31,7 @@ use zebra_chain::{ amount::{self, DeferredPoolBalanceChange}, block::{self, Block}, parameters::{ + checkpoint::list::CheckpointList, subsidy::{block_subsidy, funding_stream_values, FundingStreamReceiver, SubsidyError}, Network, GENESIS_PREVIOUS_BLOCK_HASH, }, @@ -45,10 +46,9 @@ use crate::{ TargetHeight::{self, *}, }, error::BlockError, - BoxError, ParameterCheckpoint as _, + BoxError, }; -pub(crate) mod list; mod types; #[cfg(test)] @@ -56,8 +56,6 @@ mod tests; pub use zebra_node_services::constants::{MAX_CHECKPOINT_BYTE_COUNT, MAX_CHECKPOINT_HEIGHT_GAP}; -pub use list::CheckpointList; - /// An unverified block, which is in the queue for checkpoint verification. 
#[derive(Debug)] struct QueuedBlock { @@ -126,7 +124,7 @@ where S::Future: Send + 'static, { /// The checkpoint list for this verifier. - checkpoint_list: CheckpointList, + checkpoint_list: Arc, /// The network rules used by this verifier. network: Network, @@ -240,7 +238,9 @@ where state_service: S, ) -> Result { Ok(Self::from_checkpoint_list( - CheckpointList::from_list(list).map_err(VerifyCheckpointError::CheckpointList)?, + CheckpointList::from_list(list) + .map(Arc::new) + .map_err(VerifyCheckpointError::CheckpointList)?, network, initial_tip, state_service, @@ -255,7 +255,7 @@ where /// Callers should prefer `CheckpointVerifier::new`, which uses the /// hard-coded checkpoint lists. See that function for more details. pub(crate) fn from_checkpoint_list( - checkpoint_list: CheckpointList, + checkpoint_list: Arc, network: &Network, initial_tip: Option<(block::Height, block::Hash)>, state_service: S, diff --git a/zebra-consensus/src/checkpoint/tests.rs b/zebra-consensus/src/checkpoint/tests.rs index e13629c4964..4742a915439 100644 --- a/zebra-consensus/src/checkpoint/tests.rs +++ b/zebra-consensus/src/checkpoint/tests.rs @@ -41,7 +41,7 @@ async fn single_item_checkpoint_list() -> Result<(), Report> { .cloned() .collect(); - let state_service = zebra_state::init_test(&Mainnet); + let state_service = zebra_state::init_test(&Mainnet).await; let mut checkpoint_verifier = CheckpointVerifier::from_list(genesis_checkpoint_list, &Mainnet, None, state_service) .map_err(|e| eyre!(e))?; @@ -120,7 +120,7 @@ async fn multi_item_checkpoint_list() -> Result<(), Report> { .map(|(_block, height, hash)| (*height, *hash)) .collect(); - let state_service = zebra_state::init_test(&Mainnet); + let state_service = zebra_state::init_test(&Mainnet).await; let mut checkpoint_verifier = CheckpointVerifier::from_list(checkpoint_list, &Mainnet, None, state_service) .map_err(|e| eyre!(e))?; @@ -269,7 +269,7 @@ async fn continuous_blockchain( let initial_tip = 
restart_height.map(|block::Height(height)| { (blockchain[height as usize].1, blockchain[height as usize].2) }); - let state_service = zebra_state::init_test(&Mainnet); + let state_service = zebra_state::init_test(&Mainnet).await; let mut checkpoint_verifier = CheckpointVerifier::from_list( checkpoint_list, &network, @@ -438,7 +438,7 @@ async fn block_higher_than_max_checkpoint_fail() -> Result<(), Report> { .cloned() .collect(); - let state_service = zebra_state::init_test(&Mainnet); + let state_service = zebra_state::init_test(&Mainnet).await; let mut checkpoint_verifier = CheckpointVerifier::from_list(genesis_checkpoint_list, &Mainnet, None, state_service) .map_err(|e| eyre!(e))?; @@ -512,7 +512,7 @@ async fn wrong_checkpoint_hash_fail() -> Result<(), Report> { .cloned() .collect(); - let state_service = zebra_state::init_test(&Mainnet); + let state_service = zebra_state::init_test(&Mainnet).await; let mut checkpoint_verifier = CheckpointVerifier::from_list(genesis_checkpoint_list, &Mainnet, None, state_service) .map_err(|e| eyre!(e))?; @@ -685,7 +685,7 @@ async fn checkpoint_drop_cancel() -> Result<(), Report> { .map(|(_block, height, hash)| (*height, *hash)) .collect(); - let state_service = zebra_state::init_test(&Mainnet); + let state_service = zebra_state::init_test(&Mainnet).await; let mut checkpoint_verifier = CheckpointVerifier::from_list(checkpoint_list, &Mainnet, None, state_service) .map_err(|e| eyre!(e))?; @@ -768,7 +768,7 @@ async fn hard_coded_mainnet() -> Result<(), Report> { Arc::::zcash_deserialize(&zebra_test::vectors::BLOCK_MAINNET_GENESIS_BYTES[..])?; let hash0 = block0.hash(); - let state_service = zebra_state::init_test(&Mainnet); + let state_service = zebra_state::init_test(&Mainnet).await; // Use the hard-coded checkpoint list let mut checkpoint_verifier = CheckpointVerifier::new(&Network::Mainnet, None, state_service); diff --git a/zebra-consensus/src/lib.rs b/zebra-consensus/src/lib.rs index 8d265ec3334..9cda93b8470 100644 --- 
a/zebra-consensus/src/lib.rs +++ b/zebra-consensus/src/lib.rs @@ -51,10 +51,7 @@ pub use block::{ subsidy::{funding_streams::funding_stream_address, new_coinbase_script}, Request, VerifyBlockError, MAX_BLOCK_SIGOPS, }; -pub use checkpoint::{ - list::ParameterCheckpoint, CheckpointList, VerifyCheckpointError, MAX_CHECKPOINT_BYTE_COUNT, - MAX_CHECKPOINT_HEIGHT_GAP, -}; +pub use checkpoint::{VerifyCheckpointError, MAX_CHECKPOINT_BYTE_COUNT, MAX_CHECKPOINT_HEIGHT_GAP}; pub use config::Config; pub use error::BlockError; pub use primitives::{ed25519, groth16, halo2, redjubjub, redpallas}; diff --git a/zebra-consensus/src/primitives/groth16.rs b/zebra-consensus/src/primitives/groth16.rs index be5cf3762e6..857bea315da 100644 --- a/zebra-consensus/src/primitives/groth16.rs +++ b/zebra-consensus/src/primitives/groth16.rs @@ -126,7 +126,7 @@ impl Description for Spend { inputs.push(rk_affine.get_u()); inputs.push(rk_affine.get_v()); - let cv_affine = jubjub::AffinePoint::from(self.cv); + let cv_affine = jubjub::AffinePoint::from_bytes(self.cv.0.to_bytes()).unwrap(); inputs.push(cv_affine.get_u()); inputs.push(cv_affine.get_v()); @@ -157,7 +157,7 @@ impl Description for Output { fn primary_inputs(&self) -> Vec { let mut inputs = vec![]; - let cv_affine = jubjub::AffinePoint::from(self.cv); + let cv_affine = jubjub::AffinePoint::from_bytes(self.cv.0.to_bytes()).unwrap(); inputs.push(cv_affine.get_u()); inputs.push(cv_affine.get_v()); @@ -165,7 +165,7 @@ impl Description for Output { inputs.push(epk_affine.get_u()); inputs.push(epk_affine.get_v()); - inputs.push(self.cm_u); + inputs.push(jubjub::Fq::from_bytes(&self.cm_u.to_bytes()).unwrap()); inputs } diff --git a/zebra-consensus/src/router.rs b/zebra-consensus/src/router.rs index 83caa83a712..1b7ce7f60fe 100644 --- a/zebra-consensus/src/router.rs +++ b/zebra-consensus/src/router.rs @@ -16,6 +16,7 @@ use core::fmt; use std::{ future::Future, pin::Pin, + sync::Arc, task::{Context, Poll}, }; @@ -27,7 +28,7 @@ use 
tracing::{instrument, Instrument, Span}; use zebra_chain::{ block::{self, Height}, - parameters::Network, + parameters::{checkpoint::list::CheckpointList, Network}, }; use zebra_node_services::mempool; @@ -35,9 +36,9 @@ use zebra_state as zs; use crate::{ block::{Request, SemanticBlockVerifier, VerifyBlockError}, - checkpoint::{CheckpointList, CheckpointVerifier, VerifyCheckpointError}, + checkpoint::{CheckpointVerifier, VerifyCheckpointError}, error::TransactionError, - transaction, BoxError, Config, ParameterCheckpoint as _, + transaction, BoxError, Config, }; #[cfg(test)] @@ -390,7 +391,7 @@ where /// Parses the checkpoint list for `network` and `config`. /// Returns the checkpoint list and maximum checkpoint height. -pub fn init_checkpoint_list(config: Config, network: &Network) -> (CheckpointList, Height) { +pub fn init_checkpoint_list(config: Config, network: &Network) -> (Arc, Height) { // TODO: Zebra parses the checkpoint list three times at startup. // Instead, cache the checkpoint list for each `network`. 
let list = network.checkpoint_list(); diff --git a/zebra-consensus/src/router/tests.rs b/zebra-consensus/src/router/tests.rs index 063cc7394cf..964e2d92375 100644 --- a/zebra-consensus/src/router/tests.rs +++ b/zebra-consensus/src/router/tests.rs @@ -62,7 +62,7 @@ async fn verifiers_from_network( + Clone + 'static, ) { - let state_service = zs::init_test(&network); + let state_service = zs::init_test(&network).await; let ( block_verifier_router, _transaction_verifier, @@ -169,7 +169,7 @@ async fn verify_checkpoint(config: Config) -> Result<(), Report> { _transaction_verifier, _groth16_download_handle, _max_checkpoint_height, - ) = super::init_test(config.clone(), &network, zs::init_test(&network)).await; + ) = super::init_test(config.clone(), &network, zs::init_test(&network).await).await; // Add a timeout layer let block_verifier_router = diff --git a/zebra-consensus/src/transaction/tests.rs b/zebra-consensus/src/transaction/tests.rs index e66780b4439..5033b778e2d 100644 --- a/zebra-consensus/src/transaction/tests.rs +++ b/zebra-consensus/src/transaction/tests.rs @@ -44,6 +44,18 @@ use super::{check, Request, Verifier}; #[cfg(test)] mod prop; +/// Returns the timeout duration for tests, extended when running under coverage +/// instrumentation to account for the performance overhead. 
+fn test_timeout() -> std::time::Duration { + // Check if we're running under cargo-llvm-cov by looking for its environment variables + if std::env::var("LLVM_COV_FLAGS").is_ok() || std::env::var("CARGO_LLVM_COV").is_ok() { + // Use a 5x longer timeout when running with coverage (150 seconds) + std::time::Duration::from_secs(150) + } else { + std::time::Duration::from_secs(30) + } +} + #[test] fn v5_transactions_basic_check() -> Result<(), Report> { let _init_guard = zebra_test::init(); @@ -2594,7 +2606,7 @@ fn v4_with_sapling_spends() { // Test the transaction verifier let result = timeout( - std::time::Duration::from_secs(30), + test_timeout(), verifier.oneshot(Request::Block { transaction_hash: transaction.hash(), transaction, @@ -2729,7 +2741,7 @@ async fn v5_with_sapling_spends() { assert_eq!( timeout( - std::time::Duration::from_secs(30), + test_timeout(), verifier.oneshot(Request::Block { transaction_hash: tx.hash(), transaction: Arc::new(tx), diff --git a/zebra-consensus/src/transaction/tests/prop.rs b/zebra-consensus/src/transaction/tests/prop.rs index ec530cf4bd4..5a06bda4f78 100644 --- a/zebra-consensus/src/transaction/tests/prop.rs +++ b/zebra-consensus/src/transaction/tests/prop.rs @@ -349,6 +349,9 @@ fn sanitize_transaction_version( Sapling | Blossom | Heartwood | Canopy => 4, // FIXME: Use 6 for Nu7 Nu5 | Nu6 | Nu6_1 | Nu7 => 5, + + #[cfg(zcash_unstable = "zfuture")] + NetworkUpgrade::ZFuture => u8::MAX, } }; diff --git a/zebra-network/Cargo.toml b/zebra-network/Cargo.toml index 167f086214d..a40c7d1b528 100644 --- a/zebra-network/Cargo.toml +++ b/zebra-network/Cargo.toml @@ -95,3 +95,6 @@ toml = { workspace = true } zebra-chain = { path = "../zebra-chain", features = ["proptest-impl"] } zebra-test = { path = "../zebra-test/" } + +[lints] +workspace = true diff --git a/zebra-network/src/address_book.rs b/zebra-network/src/address_book.rs index 6c4bdf5db8a..41a392d1a84 100644 --- a/zebra-network/src/address_book.rs +++ 
b/zebra-network/src/address_book.rs @@ -700,7 +700,7 @@ impl AddressBook { /// /// # Correctness /// - /// Use [`AddressBook::address_metrics_watcher().borrow()`] in production code, + /// Use [`AddressBook::address_metrics_watcher`] in production code, /// to avoid deadlocks. #[cfg(test)] pub fn address_metrics(&self, now: chrono::DateTime) -> AddressMetrics { @@ -711,7 +711,7 @@ impl AddressBook { /// /// # Correctness /// - /// External callers should use [`AddressBook::address_metrics_watcher().borrow()`] + /// External callers should use [`AddressBook::address_metrics_watcher`] /// in production code, to avoid deadlocks. /// (Using the watch channel receiver does not lock the address book mutex.) fn address_metrics_internal(&self, now: chrono::DateTime) -> AddressMetrics { diff --git a/zebra-network/src/config.rs b/zebra-network/src/config.rs index 8ed3277c204..e52589ec4ce 100644 --- a/zebra-network/src/config.rs +++ b/zebra-network/src/config.rs @@ -17,7 +17,7 @@ use zebra_chain::{ common::atomic_write, parameters::{ testnet::{ - self, ConfiguredActivationHeights, ConfiguredFundingStreams, + self, ConfiguredActivationHeights, ConfiguredCheckpoints, ConfiguredFundingStreams, ConfiguredLockboxDisbursement, RegtestParameters, }, Magic, Network, NetworkKind, @@ -603,6 +603,8 @@ struct DTestnetParameters { funding_streams: Option>, pre_blossom_halving_interval: Option, lockbox_disbursements: Option>, + #[serde(default)] + checkpoints: ConfiguredCheckpoints, } #[derive(Serialize, Deserialize)] @@ -665,6 +667,11 @@ impl From> for DTestnetParameters { .map(Into::into) .collect(), ), + checkpoints: if params.checkpoints() == testnet::Parameters::default().checkpoints() { + ConfiguredCheckpoints::Default(true) + } else { + params.checkpoints().into() + }, } } } @@ -749,6 +756,8 @@ impl<'de> Deserialize<'de> for Config { pre_nu6_funding_streams, post_nu6_funding_streams, funding_streams, + lockbox_disbursements, + checkpoints, .. 
}| { let mut funding_streams_vec = funding_streams.unwrap_or_default(); @@ -761,6 +770,8 @@ impl<'de> Deserialize<'de> for Config { RegtestParameters { activation_heights: activation_heights.unwrap_or_default(), funding_streams: Some(funding_streams_vec), + lockbox_disbursements, + checkpoints: Some(checkpoints), } }, ) @@ -783,6 +794,7 @@ impl<'de> Deserialize<'de> for Config { funding_streams, pre_blossom_halving_interval, lockbox_disbursements, + checkpoints, }), ) => { let mut params_builder = testnet::Parameters::build(); @@ -837,13 +849,17 @@ impl<'de> Deserialize<'de> for Config { funding_streams_vec.insert(0, funding_streams); } - params_builder = params_builder.with_funding_streams(funding_streams_vec); + if !funding_streams_vec.is_empty() { + params_builder = params_builder.with_funding_streams(funding_streams_vec); + } if let Some(lockbox_disbursements) = lockbox_disbursements { params_builder = params_builder.with_lockbox_disbursements(lockbox_disbursements); } + params_builder = params_builder.with_checkpoints(checkpoints); + // Return an error if the initial testnet peers includes any of the default initial Mainnet or Testnet // peers and the configured network parameters are incompatible with the default public Testnet. // if !params_builder.is_compatible_with_default_parameters() diff --git a/zebra-network/src/constants.rs b/zebra-network/src/constants.rs index d9c997dfcfe..1c77cdc70cb 100644 --- a/zebra-network/src/constants.rs +++ b/zebra-network/src/constants.rs @@ -24,7 +24,7 @@ use zebra_chain::{ /// A multiplier used to calculate the inbound connection limit for the peer set, /// -/// When it starts up, Zebra opens [`Config.peerset_initial_target_size`] +/// When it starts up, Zebra opens [`crate::Config::peerset_initial_target_size`] /// outbound connections. 
/// /// Then it opens additional outbound connections as needed for network requests, @@ -33,11 +33,11 @@ use zebra_chain::{ /// The inbound and outbound connection limits are calculated from: /// /// The inbound limit is: -/// `Config.peerset_initial_target_size * INBOUND_PEER_LIMIT_MULTIPLIER`. +/// `crate::Config::peerset_initial_target_size * INBOUND_PEER_LIMIT_MULTIPLIER`. /// (This is similar to `zcashd`'s default inbound limit.) /// /// The outbound limit is: -/// `Config.peerset_initial_target_size * OUTBOUND_PEER_LIMIT_MULTIPLIER`. +/// `crate::Config::peerset_initial_target_size * OUTBOUND_PEER_LIMIT_MULTIPLIER`. /// (This is a bit larger than `zcashd`'s default outbound limit.) /// /// # Security diff --git a/zebra-network/src/lib.rs b/zebra-network/src/lib.rs index 9b61224dc14..3be314d4383 100644 --- a/zebra-network/src/lib.rs +++ b/zebra-network/src/lib.rs @@ -40,7 +40,7 @@ //! //! ## `zebra-network` Structure //! -//! [`zebra-network::init`] is the main entry point for `zebra-network`. +//! [`init`] is the main entry point for `zebra-network`. //! It uses the following services, tasks, and endpoints: //! //! ### Low-Level Network Connections @@ -99,7 +99,7 @@ //! * handles each message as a [`Request`] to the local node //! * sends the [`Response`] to the [`peer::Connection`] //! -//! Note: the inbound service is implemented by the [`zebra-network::init`] caller. +//! Note: the inbound service is implemented by the [`init`] caller. //! //! Peer Inventory Service: //! * tracks gossiped `inv` advertisements for each peer diff --git a/zebra-network/src/peer/connection.rs b/zebra-network/src/peer/connection.rs index 8417a37bf39..28772d1d19e 100644 --- a/zebra-network/src/peer/connection.rs +++ b/zebra-network/src/peer/connection.rs @@ -664,7 +664,7 @@ where /// Consume this `Connection` to form a spawnable future containing its event loop. /// /// `peer_rx` is a channel for receiving Zcash [`Message`]s from the connected peer. 
- /// The corresponding peer message receiver is [`Connection.peer_tx`]. + /// The corresponding peer message receiver is [`Connection::peer_tx`]. pub async fn run(mut self, mut peer_rx: Rx) where Rx: Stream> + Unpin, diff --git a/zebra-node-services/Cargo.toml b/zebra-node-services/Cargo.toml index 50b50145412..6496872dc39 100644 --- a/zebra-node-services/Cargo.toml +++ b/zebra-node-services/Cargo.toml @@ -50,3 +50,6 @@ reqwest = { workspace = true, features = ["rustls-tls"] } serde = { workspace = true } serde_json = { workspace = true } jsonrpsee-types = { workspace = true } + +[lints] +workspace = true diff --git a/zebra-node-services/src/lib.rs b/zebra-node-services/src/lib.rs index f521a00301b..02242e6fda1 100644 --- a/zebra-node-services/src/lib.rs +++ b/zebra-node-services/src/lib.rs @@ -1,6 +1,6 @@ //! The interfaces of some Zebra node services. -pub mod constants; +pub use zebra_chain::parameters::checkpoint::constants; pub mod mempool; #[cfg(any(test, feature = "rpc-client"))] diff --git a/zebra-node-services/src/mempool.rs b/zebra-node-services/src/mempool.rs index e25b8a77932..32755d60dd0 100644 --- a/zebra-node-services/src/mempool.rs +++ b/zebra-node-services/src/mempool.rs @@ -102,6 +102,9 @@ pub enum Request { /// when too many slots are reserved but unused: /// CheckForVerifiedTransactions, + + /// Request summary statistics from the mempool for `getmempoolinfo`. + QueueStats, } /// A response to a mempool service request. @@ -159,4 +162,16 @@ pub enum Response { /// Confirms that the mempool has checked for recently verified transactions. CheckedForVerifiedTransactions, + + /// Summary statistics for the mempool: count, total size, memory usage, and regtest info. 
+ QueueStats { + /// Number of transactions currently in the mempool + size: usize, + /// Total size in bytes of all transactions + bytes: usize, + /// Estimated memory usage in bytes + usage: usize, + /// Whether all transactions have been fully notified (regtest only) + fully_notified: Option, + }, } diff --git a/zebra-rpc/CHANGELOG.md b/zebra-rpc/CHANGELOG.md index 329805b24f2..7613bb9798e 100644 --- a/zebra-rpc/CHANGELOG.md +++ b/zebra-rpc/CHANGELOG.md @@ -18,6 +18,12 @@ delete this before the release.) changed `Commitments::new()` to take the `final_root` parameter. - Added new arguments to `Orchard::new()` +## [2.0.1] - 2025-08-22 + +### Changed + +- Removed dependency on `protoc` + ## [2.0.0] - 2025-08-07 ### Breaking Changes diff --git a/zebra-rpc/Cargo.toml b/zebra-rpc/Cargo.toml index edad94113cc..247e7ae454e 100644 --- a/zebra-rpc/Cargo.toml +++ b/zebra-rpc/Cargo.toml @@ -19,6 +19,9 @@ categories = [ "network-programming", ] +# Exclude proto files so crates.io consumers don't need protoc. 
+exclude = ["*.proto"] + [features] # Production features that activate extra dependencies, or extra features in @@ -83,6 +86,8 @@ zcash_primitives = { workspace = true, features = ["transparent-inputs"] } zcash_protocol = { workspace = true } zcash_transparent = { workspace = true } +sapling-crypto = { workspace = true } + # Test-only feature proptest-impl proptest = { workspace = true, optional = true } @@ -100,6 +105,7 @@ zebra-state = { path = "../zebra-state", version = "2.0.0" } [build-dependencies] tonic-prost-build = { workspace = true } +which = { workspace = true } [dev-dependencies] insta = { workspace = true, features = ["redactions", "json", "ron"] } @@ -123,3 +129,6 @@ zebra-state = { path = "../zebra-state", version = "2.0.0", features = [ ] } zebra-test = { path = "../zebra-test", version = "1.0.1" } + +[lints] +workspace = true diff --git a/zebra-rpc/build.rs b/zebra-rpc/build.rs index 37fa799f637..058af0b5689 100644 --- a/zebra-rpc/build.rs +++ b/zebra-rpc/build.rs @@ -1,15 +1,60 @@ //! 
Compile proto files -use std::{env, fs, path::PathBuf, process::Command}; +use std::{ + env, fs, + path::{Path, PathBuf}, + process::Command, +}; -const ZALLET_COMMIT: Option<&str> = None; +const ZALLET_COMMIT: Option<&str> = Some("de70e46e37f903de4e182c5a823551b90a5bf80b"); fn main() -> Result<(), Box> { - let out_dir = env::var("OUT_DIR").map(PathBuf::from); - tonic_prost_build::configure() - .type_attribute(".", "#[derive(serde::Deserialize, serde::Serialize)]") - .file_descriptor_set_path(out_dir.unwrap().join("indexer_descriptor.bin")) - .compile_protos(&["proto/indexer.proto"], &[""])?; + build_or_copy_proto()?; + build_zallet_for_qa_tests(); + Ok(()) +} + +fn build_or_copy_proto() -> Result<(), Box> { + const PROTO_FILE_PATH: &str = "proto/indexer.proto"; + + let out_dir = env::var("OUT_DIR") + .map(PathBuf::from) + .expect("requires OUT_DIR environment variable definition"); + let file_names = ["indexer_descriptor.bin", "zebra.indexer.rpc.rs"]; + + let is_proto_file_available = Path::new(PROTO_FILE_PATH).exists(); + let is_protoc_available = env::var_os("PROTOC") + .map(PathBuf::from) + .or_else(|| which::which("protoc").ok()) + .is_some(); + + if is_proto_file_available && is_protoc_available { + tonic_prost_build::configure() + .type_attribute(".", "#[derive(serde::Deserialize, serde::Serialize)]") + .file_descriptor_set_path(out_dir.join("indexer_descriptor.bin")) + .compile_protos(&[PROTO_FILE_PATH], &[""])?; + + for file_name in file_names { + let out_path = out_dir.join(file_name); + let generated_path = format!("proto/__generated__/{file_name}"); + if fs::read_to_string(&out_path).ok() != fs::read_to_string(&generated_path).ok() { + fs::copy(out_path, generated_path)?; + } + } + } else { + for file_name in file_names { + let out_path = out_dir.join(file_name); + let generated_path = format!("proto/__generated__/{file_name}"); + if fs::read_to_string(&out_path).ok() != fs::read_to_string(&generated_path).ok() { + fs::copy(generated_path, out_path)?; + 
} + } + } + + Ok(()) +} + +fn build_zallet_for_qa_tests() { if env::var_os("ZALLET").is_some() { // The following code will clone the zallet repo and build the binary, // then copy the binary to the project target directory. @@ -65,6 +110,4 @@ fn main() -> Result<(), Box> { ) }); } - - Ok(()) } diff --git a/zebra-rpc/proto/__generated__/indexer_descriptor.bin b/zebra-rpc/proto/__generated__/indexer_descriptor.bin new file mode 100644 index 00000000000..7050145f1c7 Binary files /dev/null and b/zebra-rpc/proto/__generated__/indexer_descriptor.bin differ diff --git a/zebra-rpc/proto/__generated__/zebra.indexer.rpc.rs b/zebra-rpc/proto/__generated__/zebra.indexer.rpc.rs new file mode 100644 index 00000000000..588cae10b4b --- /dev/null +++ b/zebra-rpc/proto/__generated__/zebra.indexer.rpc.rs @@ -0,0 +1,567 @@ +// This file is @generated by prost-build. +/// Used by methods that take no arguments. +#[derive(serde::Deserialize, serde::Serialize)] +#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] +pub struct Empty {} +/// A block hash and height. +#[derive(serde::Deserialize, serde::Serialize)] +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] +pub struct BlockHashAndHeight { + /// The hash of the block in display order. + #[prost(bytes = "vec", tag = "1")] + pub hash: ::prost::alloc::vec::Vec, + /// The height of the block in the chain. + #[prost(uint32, tag = "2")] + pub height: u32, +} +/// An encoded block and its hash. +#[derive(serde::Deserialize, serde::Serialize)] +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] +pub struct BlockAndHash { + /// The hash of the block in display order. + #[prost(bytes = "vec", tag = "1")] + pub hash: ::prost::alloc::vec::Vec, + /// The encoded block data. + #[prost(bytes = "vec", tag = "2")] + pub data: ::prost::alloc::vec::Vec, +} +/// Represents a change in the mempool. 
+#[derive(serde::Deserialize, serde::Serialize)] +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] +pub struct MempoolChangeMessage { + /// The type of change that occurred. + #[prost(enumeration = "mempool_change_message::ChangeType", tag = "1")] + pub change_type: i32, + /// The hash of the transaction that changed. + #[prost(bytes = "vec", tag = "2")] + pub tx_hash: ::prost::alloc::vec::Vec, + /// The transaction auth digest. + #[prost(bytes = "vec", tag = "3")] + pub auth_digest: ::prost::alloc::vec::Vec, +} +/// Nested message and enum types in `MempoolChangeMessage`. +pub mod mempool_change_message { + /// The type of change that occurred. + #[derive(serde::Deserialize, serde::Serialize)] + #[derive( + Clone, + Copy, + Debug, + PartialEq, + Eq, + Hash, + PartialOrd, + Ord, + ::prost::Enumeration + )] + #[repr(i32)] + pub enum ChangeType { + /// Represents a transaction being added to the mempool. + Added = 0, + /// Represents a transaction being invalidated and rejected from the mempool. + Invalidated = 1, + /// Represents a transaction being mined into a block on the best chain and + /// removed from the mempool. + Mined = 2, + } + impl ChangeType { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + Self::Added => "ADDED", + Self::Invalidated => "INVALIDATED", + Self::Mined => "MINED", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "ADDED" => Some(Self::Added), + "INVALIDATED" => Some(Self::Invalidated), + "MINED" => Some(Self::Mined), + _ => None, + } + } + } +} +/// Generated client implementations. 
+pub mod indexer_client { + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] + use tonic::codegen::*; + use tonic::codegen::http::Uri; + #[derive(Debug, Clone)] + pub struct IndexerClient { + inner: tonic::client::Grpc, + } + impl IndexerClient { + /// Attempt to create a new client by connecting to a given endpoint. + pub async fn connect(dst: D) -> Result + where + D: TryInto, + D::Error: Into, + { + let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; + Ok(Self::new(conn)) + } + } + impl IndexerClient + where + T: tonic::client::GrpcService, + T::Error: Into, + T::ResponseBody: Body + std::marker::Send + 'static, + ::Error: Into + std::marker::Send, + { + pub fn new(inner: T) -> Self { + let inner = tonic::client::Grpc::new(inner); + Self { inner } + } + pub fn with_origin(inner: T, origin: Uri) -> Self { + let inner = tonic::client::Grpc::with_origin(inner, origin); + Self { inner } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> IndexerClient> + where + F: tonic::service::Interceptor, + T::ResponseBody: Default, + T: tonic::codegen::Service< + http::Request, + Response = http::Response< + >::ResponseBody, + >, + >, + , + >>::Error: Into + std::marker::Send + std::marker::Sync, + { + IndexerClient::new(InterceptedService::new(inner, interceptor)) + } + /// Compress requests with the given encoding. + /// + /// This requires the server to support it otherwise it might respond with an + /// error. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.send_compressed(encoding); + self + } + /// Enable decompressing responses. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.accept_compressed(encoding); + self + } + /// Limits the maximum size of a decoded message. 
+ /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_decoding_message_size(limit); + self + } + /// Limits the maximum size of an encoded message. + /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_encoding_message_size(limit); + self + } + /// Notifies listeners of chain tip changes + pub async fn chain_tip_change( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response>, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic_prost::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/zebra.indexer.rpc.Indexer/ChainTipChange", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("zebra.indexer.rpc.Indexer", "ChainTipChange")); + self.inner.server_streaming(req, path, codec).await + } + /// Notifies listeners of new blocks in the non-finalized state. 
+ pub async fn non_finalized_state_change( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response>, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic_prost::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/zebra.indexer.rpc.Indexer/NonFinalizedStateChange", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "zebra.indexer.rpc.Indexer", + "NonFinalizedStateChange", + ), + ); + self.inner.server_streaming(req, path, codec).await + } + /// Notifies listeners of mempool changes + pub async fn mempool_change( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response>, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic_prost::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/zebra.indexer.rpc.Indexer/MempoolChange", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("zebra.indexer.rpc.Indexer", "MempoolChange")); + self.inner.server_streaming(req, path, codec).await + } + } +} +/// Generated server implementations. +pub mod indexer_server { + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] + use tonic::codegen::*; + /// Generated trait containing gRPC methods that should be implemented for use with IndexerServer. + #[async_trait] + pub trait Indexer: std::marker::Send + std::marker::Sync + 'static { + /// Server streaming response type for the ChainTipChange method. 
+ type ChainTipChangeStream: tonic::codegen::tokio_stream::Stream< + Item = std::result::Result, + > + + std::marker::Send + + 'static; + /// Notifies listeners of chain tip changes + async fn chain_tip_change( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /// Server streaming response type for the NonFinalizedStateChange method. + type NonFinalizedStateChangeStream: tonic::codegen::tokio_stream::Stream< + Item = std::result::Result, + > + + std::marker::Send + + 'static; + /// Notifies listeners of new blocks in the non-finalized state. + async fn non_finalized_state_change( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /// Server streaming response type for the MempoolChange method. + type MempoolChangeStream: tonic::codegen::tokio_stream::Stream< + Item = std::result::Result, + > + + std::marker::Send + + 'static; + /// Notifies listeners of mempool changes + async fn mempool_change( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + } + #[derive(Debug)] + pub struct IndexerServer { + inner: Arc, + accept_compression_encodings: EnabledCompressionEncodings, + send_compression_encodings: EnabledCompressionEncodings, + max_decoding_message_size: Option, + max_encoding_message_size: Option, + } + impl IndexerServer { + pub fn new(inner: T) -> Self { + Self::from_arc(Arc::new(inner)) + } + pub fn from_arc(inner: Arc) -> Self { + Self { + inner, + accept_compression_encodings: Default::default(), + send_compression_encodings: Default::default(), + max_decoding_message_size: None, + max_encoding_message_size: None, + } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> InterceptedService + where + F: tonic::service::Interceptor, + { + InterceptedService::new(Self::new(inner), interceptor) + } + /// Enable decompressing requests with the given encoding. 
+ #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.accept_compression_encodings.enable(encoding); + self + } + /// Compress responses with the given encoding, if the client supports it. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.send_compression_encodings.enable(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.max_decoding_message_size = Some(limit); + self + } + /// Limits the maximum size of an encoded message. + /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.max_encoding_message_size = Some(limit); + self + } + } + impl tonic::codegen::Service> for IndexerServer + where + T: Indexer, + B: Body + std::marker::Send + 'static, + B::Error: Into + std::marker::Send + 'static, + { + type Response = http::Response; + type Error = std::convert::Infallible; + type Future = BoxFuture; + fn poll_ready( + &mut self, + _cx: &mut Context<'_>, + ) -> Poll> { + Poll::Ready(Ok(())) + } + fn call(&mut self, req: http::Request) -> Self::Future { + match req.uri().path() { + "/zebra.indexer.rpc.Indexer/ChainTipChange" => { + #[allow(non_camel_case_types)] + struct ChainTipChangeSvc(pub Arc); + impl tonic::server::ServerStreamingService + for ChainTipChangeSvc { + type Response = super::BlockHashAndHeight; + type ResponseStream = T::ChainTipChangeStream; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::chain_tip_change(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let 
max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = ChainTipChangeSvc(inner); + let codec = tonic_prost::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.server_streaming(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/zebra.indexer.rpc.Indexer/NonFinalizedStateChange" => { + #[allow(non_camel_case_types)] + struct NonFinalizedStateChangeSvc(pub Arc); + impl tonic::server::ServerStreamingService + for NonFinalizedStateChangeSvc { + type Response = super::BlockAndHash; + type ResponseStream = T::NonFinalizedStateChangeStream; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::non_finalized_state_change(&inner, request) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = NonFinalizedStateChangeSvc(inner); + let codec = tonic_prost::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.server_streaming(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + 
"/zebra.indexer.rpc.Indexer/MempoolChange" => { + #[allow(non_camel_case_types)] + struct MempoolChangeSvc(pub Arc); + impl tonic::server::ServerStreamingService + for MempoolChangeSvc { + type Response = super::MempoolChangeMessage; + type ResponseStream = T::MempoolChangeStream; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::mempool_change(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = MempoolChangeSvc(inner); + let codec = tonic_prost::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.server_streaming(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + _ => { + Box::pin(async move { + let mut response = http::Response::new( + tonic::body::Body::default(), + ); + let headers = response.headers_mut(); + headers + .insert( + tonic::Status::GRPC_STATUS, + (tonic::Code::Unimplemented as i32).into(), + ); + headers + .insert( + http::header::CONTENT_TYPE, + tonic::metadata::GRPC_CONTENT_TYPE, + ); + Ok(response) + }) + } + } + } + } + impl Clone for IndexerServer { + fn clone(&self) -> Self { + let inner = self.inner.clone(); + Self { + inner, + accept_compression_encodings: self.accept_compression_encodings, + send_compression_encodings: self.send_compression_encodings, + max_decoding_message_size: self.max_decoding_message_size, + 
max_encoding_message_size: self.max_encoding_message_size, + } + } + } + /// Generated gRPC service name + pub const SERVICE_NAME: &str = "zebra.indexer.rpc.Indexer"; + impl tonic::server::NamedService for IndexerServer { + const NAME: &'static str = SERVICE_NAME; + } +} diff --git a/zebra-rpc/qa/base_config.toml b/zebra-rpc/qa/base_config.toml index 9784c64cc66..55f7dad5e27 100644 --- a/zebra-rpc/qa/base_config.toml +++ b/zebra-rpc/qa/base_config.toml @@ -6,6 +6,9 @@ listen_addr = "127.0.0.1:0" network = "Regtest" max_connections_per_ip = 10 cache_dir = false +crawl_new_peer_interval = "5 seconds" +initial_mainnet_peers = [] +initial_testnet_peers = [] [rpc] listen_addr = "127.0.0.1:0" @@ -17,4 +20,3 @@ cache_dir = "" [network.testnet_parameters.activation_heights] NU5 = 290 NU6 = 291 - diff --git a/zebra-rpc/qa/pull-tester/rpc-tests.py b/zebra-rpc/qa/pull-tester/rpc-tests.py index 0e16376b382..d2351ceaf98 100755 --- a/zebra-rpc/qa/pull-tester/rpc-tests.py +++ b/zebra-rpc/qa/pull-tester/rpc-tests.py @@ -38,12 +38,13 @@ BASE_SCRIPTS= [ # Scripts that are run by the travis build process # Longest test should go first, to favor running tests in parallel - 'reindex.py', 'getmininginfo.py', 'nuparams.py', 'addnode.py', 'wallet.py', - 'feature_nu6.py'] + 'feature_nu6.py', + 'feature_nu6_1.py', + 'feature_backup_non_finalized_state.py'] ZMQ_SCRIPTS = [ # ZMQ test can only be run if bitcoin was built with zmq-enabled. 
diff --git a/zebra-rpc/qa/rpc-tests/addnode.py b/zebra-rpc/qa/rpc-tests/addnode.py index 93f5370d4ea..c2b74e533e9 100755 --- a/zebra-rpc/qa/rpc-tests/addnode.py +++ b/zebra-rpc/qa/rpc-tests/addnode.py @@ -16,11 +16,7 @@ def __init__(self): self.num_nodes = 3 def setup_network(self, split=False): - args = [ - [False, "tmSRd1r8gs77Ja67Fw1JcdoXytxsyrLTPJm"], - [False, "tmSRd1r8gs77Ja67Fw1JcdoXytxsyrLTPJm"], - [False, "tmSRd1r8gs77Ja67Fw1JcdoXytxsyrLTPJm"] - ] + args = [None, None, None] self.nodes = start_nodes(self.num_nodes , self.options.tmpdir, extra_args=args) # connect all the nodes to each other @@ -28,29 +24,11 @@ def setup_network(self, split=False): connect_nodes_bi(self.nodes,1,2) connect_nodes_bi(self.nodes,0,2) self.is_network_split=split - self.sync_all(False) self.nodes[0].generate(1) self.sync_all(False) def run_test(self): - print("checking connections...") - - # As we connected the nodes to each other, they should have, - # at least 4 peers. Poll for that. - # TODO: Move this check to its own function. - timeout_for_connetions = 180 - wait_time = 1 - while timeout_for_connetions > 0: - if (len(self.nodes[0].getpeerinfo()) < 4 or - len(self.nodes[1].getpeerinfo()) < 4 or - len(self.nodes[2].getpeerinfo()) < 4): - timeout_for_connetions -= wait_time - time.sleep(wait_time) - else: - break - assert timeout_for_connetions > 0, "Timeout waiting for connections" - print("Mining blocks...") # Mine a block from node0 diff --git a/zebra-rpc/qa/rpc-tests/feature_backup_non_finalized_state.py b/zebra-rpc/qa/rpc-tests/feature_backup_non_finalized_state.py new file mode 100755 index 00000000000..7725e14c55a --- /dev/null +++ b/zebra-rpc/qa/rpc-tests/feature_backup_non_finalized_state.py @@ -0,0 +1,68 @@ +#!/usr/bin/env python3 +# Copyright (c) 2025 The Zcash developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or https://www.opensource.org/licenses/mit-license.php . 
+ +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import assert_equal, start_nodes +import time + +# Test that Zebra can backup and restore non finalized state +class BackupNonFinalized(BitcoinTestFramework): + + def __init__(self): + super().__init__() + self.num_nodes = 1 + self.cache_behavior = 'clean' + + def setup_network(self): + # Start a node with default configuration + self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, [None]) + + def run_test(self): + self.nodes[0].generate(10) + # Wait for 5 seconds (`MIN_DURATION_BETWEEN_BACKUP_UPDATES`) plus 1 second for I/O + time.sleep(6) + + # Check that we have 10 blocks + blocks = self.nodes[0].getblockchaininfo()['blocks'] + assert_equal(blocks, 10) + + # Stop the node + self.nodes[0].stop() + time.sleep(1) + + # Restart the node, it should recover the non finalized state + self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, [None]) + + # The node has recovered the non finalized state + blocks = self.nodes[0].getblockchaininfo()['blocks'] + assert_equal(blocks, 10) + + # Generate more blocks and make sure the blockchain is not stall + self.nodes[0].generate(1) + blocks = self.nodes[0].getblockchaininfo()['blocks'] + assert_equal(blocks, 11) + + self.nodes[0].generate(100) + blocks = self.nodes[0].getblockchaininfo()['blocks'] + assert_equal(blocks, 111) + + # Wait for 5 seconds (`MIN_DURATION_BETWEEN_BACKUP_UPDATES`) plus 1 second for I/O + time.sleep(6) + + # Stop the node + self.nodes[0].stop() + time.sleep(1) + + # Restart the node, it should recover the non finalized state again + self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, [None]) + + # The node has recovered the non finalized state again + blocks = self.nodes[0].getblockchaininfo()['blocks'] + assert_equal(blocks, 111) + + +if __name__ == '__main__': + BackupNonFinalized().main() + diff --git a/zebra-rpc/qa/rpc-tests/feature_nu6.py b/zebra-rpc/qa/rpc-tests/feature_nu6.py index 
b1a174422ff..c1314acd8a8 100755 --- a/zebra-rpc/qa/rpc-tests/feature_nu6.py +++ b/zebra-rpc/qa/rpc-tests/feature_nu6.py @@ -5,13 +5,10 @@ from decimal import Decimal -from test_framework.util import ( - assert_equal, - start_nodes, -) +from test_framework.config import ZebraExtraArgs from test_framework.test_framework import BitcoinTestFramework - +from test_framework.util import assert_equal, start_nodes # Check the behaviour of the value pools and funding streams at NU6. # @@ -27,8 +24,8 @@ def __init__(self): self.cache_behavior = 'clean' def setup_network(self): - # Add pre and post NU6 funding streams to the node. - args = [[True, "tmSRd1r8gs77Ja67Fw1JcdoXytxsyrLTPJm"]] + # Add test pre and post NU6 funding streams to the node. + args = ZebraExtraArgs(funding_streams=[pre_nu6_funding_streams(), post_nu6_funding_streams()]), self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, extra_args=args) @@ -162,7 +159,6 @@ def assert_value_pools_equals(pool1, pool2): assert_equal(block_subsidy['founders'], Decimal('0')) assert_equal(block_subsidy['fundingstreamstotal'], Decimal('0.625')) assert_equal(block_subsidy['lockboxtotal'], Decimal('0')) - print(block_subsidy) assert_equal(block_subsidy['totalblocksubsidy'], Decimal('3.125')) print("Activating NU6") @@ -249,6 +245,48 @@ def assert_value_pools_equals(pool1, pool2): assert_equal(block_subsidy['lockboxtotal'], Decimal('0')) assert_equal(block_subsidy['totalblocksubsidy'], Decimal('3.125')) +def pre_nu6_funding_streams() : return { + 'recipients': [ + { + 'receiver': 'ECC', + 'numerator': 7, + 'addresses': ['t26ovBdKAJLtrvBsE2QGF4nqBkEuptuPFZz'] + }, + { + 'receiver': 'ZcashFoundation', + 'numerator': 5, + 'addresses': ['t27eWDgjFYJGVXmzrXeVjnb5J3uXDM9xH9v'] + }, + { + 'receiver': 'MajorGrants', + 'numerator': 8, + 'addresses': ['t2Gvxv2uNM7hbbACjNox4H6DjByoKZ2Fa3P'] + }, + ], + 'height_range': { + 'start': 290, + 'end': 291 + } +} + +def post_nu6_funding_streams() : return { + 'recipients': [ + { + 
'receiver': 'MajorGrants', + 'numerator': 8, + 'addresses': ['t2Gvxv2uNM7hbbACjNox4H6DjByoKZ2Fa3P'] + }, + { + 'receiver': 'Deferred', + 'numerator': 12 + # No addresses field is valid for Deferred + } + ], + 'height_range': { + 'start': 291, + 'end': 293 + } +} if __name__ == '__main__': - PoolsTest().main() \ No newline at end of file + PoolsTest().main() diff --git a/zebra-rpc/qa/rpc-tests/feature_nu6_1.py b/zebra-rpc/qa/rpc-tests/feature_nu6_1.py new file mode 100755 index 00000000000..45d55fd9122 --- /dev/null +++ b/zebra-rpc/qa/rpc-tests/feature_nu6_1.py @@ -0,0 +1,122 @@ +#!/usr/bin/env python3 +# Copyright (c) 2025 The Zcash developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or https://www.opensource.org/licenses/mit-license.php . + +from decimal import Decimal + +from test_framework.test_framework import BitcoinTestFramework +from test_framework.config import ZebraExtraArgs +from test_framework.util import ( + assert_equal, + start_node, +) + +# Verify the NU6.1 activation block contains the expected lockbox disbursement. 
+# This is a reduced version (no wallet functionality, no multiple nodes) of: +# https://github.com/zcash/zcash/blob/v6.3.0/qa/rpc-tests/feature_nu6_1.py +class OnetimeLockboxDisbursementTest(BitcoinTestFramework): + + def __init__(self): + super().__init__() + self.num_nodes = 1 + self.cache_behavior = 'clean' + + def start_node_with(self, index, extra_args=[]): + + args = ZebraExtraArgs( + activation_heights={"NU5": 2, "NU6": 4, "NU6.1": 8}, + funding_streams=[pre_nu_6_1_funding_streams(), post_nu_6_1_funding_streams()], + lockbox_disbursements=[{'address': 't26ovBdKAJLtrvBsE2QGF4nqBkEuptuPFZz', 'amount': Decimal('200_000_000')}] + ) + + return start_node(index, self.options.tmpdir, args) + + def setup_network(self, split=False): + self.nodes = [] + self.nodes.append(self.start_node_with(0)) + + def run_test(self): + + print("Activating NU5") + self.nodes[0].generate(2) + + assert_equal(self.nodes[0].getblockchaininfo()['blocks'], 2) + + fs_lockbox_per_block = Decimal('0.75') + ld_amount = Decimal('2.0') + + print("Reaching block before NU6") + self.nodes[0].generate(1) + assert_equal(self.nodes[0].getblockchaininfo()['blocks'], 3) + + # The lockbox should have zero value. + lastBlock = self.nodes[0].getblock('3') + def check_lockbox(blk, expected): + lockbox = next(elem for elem in blk['valuePools'] if elem['id'] == "lockbox") + assert_equal(Decimal(lockbox['chainValue']), expected) + check_lockbox(lastBlock, Decimal('0.0')) + + print("Activating NU6") + self.nodes[0].generate(1) + assert_equal(self.nodes[0].getblockchaininfo()['blocks'], 4) + + # We should see the lockbox balance increase. + check_lockbox(self.nodes[0].getblock('4'), fs_lockbox_per_block) + + print("Reaching block before NU6.1") + self.nodes[0].generate(3) + assert_equal(self.nodes[0].getblockchaininfo()['blocks'], 7) + + # We should see the lockbox balance increase. 
+ check_lockbox(self.nodes[0].getblock('7'), 4 * fs_lockbox_per_block) + + print("Activating NU6.1") + self.nodes[0].generate(1) + assert_equal(self.nodes[0].getblockchaininfo()['blocks'], 8) + + # We should see the lockbox balance decrease from the disbursement, + # and increase from the FS. + check_lockbox( + self.nodes[0].getblock('8'), + (5 * fs_lockbox_per_block) - ld_amount, + ) + +def pre_nu_6_1_funding_streams() : return { + 'recipients': [ + { + 'receiver': 'MajorGrants', + 'numerator': 8, + 'addresses': ['t2Gvxv2uNM7hbbACjNox4H6DjByoKZ2Fa3P', 't2Gvxv2uNM7hbbACjNox4H6DjByoKZ2Fa3P'] + }, + { + 'receiver': 'Deferred', + 'numerator': 12 + } + ], + 'height_range': { + 'start': 4, + 'end': 8 + } +} + +def post_nu_6_1_funding_streams() : return { + 'recipients': [ + { + 'receiver': 'MajorGrants', + 'numerator': 8, + 'addresses': ['t2Gvxv2uNM7hbbACjNox4H6DjByoKZ2Fa3P', 't2Gvxv2uNM7hbbACjNox4H6DjByoKZ2Fa3P'] + }, + { + 'receiver': 'Deferred', + 'numerator': 12 + } + ], + 'height_range': { + 'start': 8, + 'end': 12 + } +} + +if __name__ == '__main__': + OnetimeLockboxDisbursementTest().main() diff --git a/zebra-rpc/qa/rpc-tests/getmininginfo.py b/zebra-rpc/qa/rpc-tests/getmininginfo.py index a097efd2e64..9ef00b4bb4f 100755 --- a/zebra-rpc/qa/rpc-tests/getmininginfo.py +++ b/zebra-rpc/qa/rpc-tests/getmininginfo.py @@ -18,7 +18,7 @@ def __init__(self): self.cache_behavior = 'clean' def setup_network(self, split=False): - args = [[False, "tmSRd1r8gs77Ja67Fw1JcdoXytxsyrLTPJm"]] + args = [None] self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, args) self.is_network_split = False self.sync_all() diff --git a/zebra-rpc/qa/rpc-tests/nuparams.py b/zebra-rpc/qa/rpc-tests/nuparams.py index fb3f104fdaa..bb6929377b3 100755 --- a/zebra-rpc/qa/rpc-tests/nuparams.py +++ b/zebra-rpc/qa/rpc-tests/nuparams.py @@ -32,7 +32,7 @@ def __init__(self): self.cache_behavior = 'clean' def setup_network(self, split=False): - args = [[False, "tmSRd1r8gs77Ja67Fw1JcdoXytxsyrLTPJm"]] + 
args = [None] self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, args) self.is_network_split = False diff --git a/zebra-rpc/qa/rpc-tests/reindex.py b/zebra-rpc/qa/rpc-tests/reindex.py deleted file mode 100755 index 4897638eb43..00000000000 --- a/zebra-rpc/qa/rpc-tests/reindex.py +++ /dev/null @@ -1,57 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2014-2016 The Bitcoin Core developers -# Copyright (c) 2017-2022 The Zcash developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or https://www.opensource.org/licenses/mit-license.php . - -# -# Test -reindex and -reindex-chainstate with CheckBlockIndex -# - -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import assert_equal, \ - start_node, stop_node, wait_bitcoinds -import time - -class ReindexTest(BitcoinTestFramework): - - def __init__(self): - super().__init__() - self.cache_behavior = 'clean' - self.num_nodes = 1 - - def setup_network(self): - self.nodes = [] - self.is_network_split = False - args = [[False, "tmSRd1r8gs77Ja67Fw1JcdoXytxsyrLTPJm"]] - - self.nodes.append(start_node(0, self.options.tmpdir, args)) - - def reindex(self, justchainstate=False): - # When zebra reindexes, it will only do it up to the finalized chain height. - # This happens after the first 100 blocks, so we need to generate 100 blocks - # for the reindex to be able to catch block 1. 
- # https://github.com/ZcashFoundation/zebra/issues/9708 - finalized_height = 100 - - self.nodes[0].generate(finalized_height) - blockcount = self.nodes[0].getblockcount() - (finalized_height - 1) - - stop_node(self.nodes[0], 0) - wait_bitcoinds() - - self.nodes[0]=start_node(0, self.options.tmpdir) - - while self.nodes[0].getblockcount() < blockcount: - time.sleep(0.1) - assert_equal(self.nodes[0].getblockcount(), blockcount) - print("Success") - - def run_test(self): - self.reindex(False) - self.reindex(True) - self.reindex(False) - self.reindex(True) - -if __name__ == '__main__': - ReindexTest().main() diff --git a/zebra-rpc/qa/rpc-tests/test_framework/config.py b/zebra-rpc/qa/rpc-tests/test_framework/config.py new file mode 100644 index 00000000000..7f895e13cd0 --- /dev/null +++ b/zebra-rpc/qa/rpc-tests/test_framework/config.py @@ -0,0 +1,42 @@ +#!/usr/bin/env python3 +# Copyright (c) 2025 The Zcash developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or https://www.opensource.org/licenses/mit-license.php . 
+ +class ZebraExtraArgs: + defaults = { + "miner_address": "tmSRd1r8gs77Ja67Fw1JcdoXytxsyrLTPJm", + "funding_streams": [], + "activation_heights": {"NU5": 290, "NU6": 291}, + "lockbox_disbursements": [] + } + + def __init__(self, **kwargs): + for key, default in self.defaults.items(): + setattr(self, key, kwargs.get(key, default)) + +class ZebraConfig: + defaults = { + "network_listen_address": "127.0.0.1:0", + "rpc_listen_address": "127.0.0.1:0", + "data_dir": None, + "extra_args": ZebraExtraArgs, + } + + def __init__(self, **kwargs): + for key, default in self.defaults.items(): + setattr(self, key, kwargs.get(key, default)) + + def update(self, config_file): + # Base config updates + config_file['rpc']['listen_addr'] = self.rpc_listen_address + config_file['network']['listen_addr'] = self.network_listen_address + config_file['state']['cache_dir'] = self.data_dir + + # Extra args updates + config_file['mining']['miner_address'] = self.extra_args.miner_address + config_file['network']['testnet_parameters']['funding_streams'] = self.extra_args.funding_streams + config_file['network']['testnet_parameters']['activation_heights'] = self.extra_args.activation_heights + config_file['network']['testnet_parameters']['lockbox_disbursements'] = self.extra_args.lockbox_disbursements + + return config_file diff --git a/zebra-rpc/qa/rpc-tests/test_framework/util.py b/zebra-rpc/qa/rpc-tests/test_framework/util.py index 77729347b2c..6db7ea7e9bb 100644 --- a/zebra-rpc/qa/rpc-tests/test_framework/util.py +++ b/zebra-rpc/qa/rpc-tests/test_framework/util.py @@ -23,12 +23,15 @@ import tarfile import tempfile import time +import toml import re import errno from . 
import coverage from .proxy import ServiceProxy, JSONRPCException +from test_framework.config import ZebraConfig, ZebraExtraArgs + LEGACY_DEFAULT_FEE = Decimal('0.00001') COVERAGE_DIR = None @@ -198,6 +201,7 @@ def initialize_datadir(dirname, n, clock_offset=0): config_rpc_port = rpc_port(n) config_p2p_port = p2p_port(n) + """ TODO: Can create zebrad base_config here, or remove. with open(os.path.join(datadir, "zcash.conf"), 'w', encoding='utf8') as f: f.write("regtest=1\n") f.write("showmetrics=0\n") @@ -208,72 +212,26 @@ def initialize_datadir(dirname, n, clock_offset=0): f.write("listenonion=0\n") if clock_offset != 0: f.write('clockoffset='+str(clock_offset)+'\n') + """ - update_zebrad_conf(datadir, config_rpc_port, config_p2p_port) + update_zebrad_conf(datadir, config_rpc_port, config_p2p_port, None) return datadir -def update_zebrad_conf(datadir, rpc_port, p2p_port, funding_streams=False, miner_address = "tmSRd1r8gs77Ja67Fw1JcdoXytxsyrLTPJm"): - import toml - +def update_zebrad_conf(datadir, rpc_port, p2p_port, extra_args=None): config_path = zebrad_config(datadir) with open(config_path, 'r') as f: config_file = toml.load(f) - config_file['rpc']['listen_addr'] = '127.0.0.1:'+str(rpc_port) - config_file['network']['listen_addr'] = '127.0.0.1:'+str(p2p_port) - config_file['state']['cache_dir'] = datadir - - config_file['mining']['miner_address'] = miner_address - - # TODO: Add More config options. zcashd uses extra arguments to pass options - # to the binary, but zebrad uses a config file. - # We want to make the config accept different options. - # For now, we hardcode the funding streams to be enabled. 
- if funding_streams == True: - config_file['network']['testnet_parameters']['pre_nu6_funding_streams'] = { - 'recipients': [ - { - 'receiver': 'ECC', - 'numerator': 7, - 'addresses': ['t26ovBdKAJLtrvBsE2QGF4nqBkEuptuPFZz'] - }, - { - 'receiver': 'ZcashFoundation', - 'numerator': 5, - 'addresses': ['t27eWDgjFYJGVXmzrXeVjnb5J3uXDM9xH9v'] - }, - { - 'receiver': 'MajorGrants', - 'numerator': 8, - 'addresses': ['t2Gvxv2uNM7hbbACjNox4H6DjByoKZ2Fa3P'] - }, - ], - 'height_range': { - 'start': 290, - 'end': 291 - } - } - - config_file['network']['testnet_parameters']['post_nu6_funding_streams'] = { - 'recipients': [ - { - 'receiver': 'MajorGrants', - 'numerator': 8, - 'addresses': ['t2Gvxv2uNM7hbbACjNox4H6DjByoKZ2Fa3P'] - }, - { - 'receiver': 'Deferred', - 'numerator': 12 - # No addresses field is valid for Deferred - } - ], - 'height_range': { - 'start': 291, - 'end': 293 - } - } + zebra_config = ZebraConfig( + network_listen_address='127.0.0.1:'+str(p2p_port), + rpc_listen_address='127.0.0.1:'+str(rpc_port), + data_dir=datadir) + + zebra_config.extra_args = extra_args or ZebraExtraArgs() + + config_file = zebra_config.update(config_file) with open(config_path, 'w') as f: toml.dump(config_file, f) @@ -580,7 +538,7 @@ def start_node(i, dirname, extra_args=None, rpchost=None, timewait=None, binary= binary = zcashd_binary() if extra_args is not None: - config = update_zebrad_conf(datadir, rpc_port(i), p2p_port(i), funding_streams = extra_args[i][0], miner_address = extra_args[i][1]) + config = update_zebrad_conf(datadir, rpc_port(i), p2p_port(i), extra_args) else: config = update_zebrad_conf(datadir, rpc_port(i), p2p_port(i)) args = [ binary, "-c="+config, "start" ] @@ -628,7 +586,7 @@ def start_nodes(num_nodes, dirname, extra_args=None, rpchost=None, binary=None): rpcs = [] try: for i in range(num_nodes): - rpcs.append(start_node(i, dirname, extra_args, rpchost, binary=binary[i])) + rpcs.append(start_node(i, dirname, extra_args[i], rpchost, binary=binary[i])) except: # If 
one node failed to start, stop the others stop_nodes(rpcs) raise @@ -935,8 +893,6 @@ def start_wallet(i, dirname, extra_args=None, rpchost=None, timewait=None, binar return proxy def update_zallet_conf(datadir, validator_port, zallet_port): - import toml - config_path = zallet_config(datadir) with open(config_path, 'r') as f: @@ -957,7 +913,6 @@ def update_zallet_conf(datadir, validator_port, zallet_port): def stop_wallets(wallets): for wallet in wallets: try: - # TODO: Implement `stop` in zallet: https://github.com/zcash/wallet/issues/153 wallet.stop() except http.client.CannotSendRequest as e: print("WARN: Unable to stop wallet: " + repr(e)) diff --git a/zebra-rpc/qa/rpc-tests/wallet.py b/zebra-rpc/qa/rpc-tests/wallet.py index fa850d1fbaf..6813590b1c2 100755 --- a/zebra-rpc/qa/rpc-tests/wallet.py +++ b/zebra-rpc/qa/rpc-tests/wallet.py @@ -9,6 +9,7 @@ from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_equal, start_nodes, start_wallets +from test_framework.config import ZebraExtraArgs # Test that we can create a wallet and use an address from it to mine blocks. class WalletTest (BitcoinTestFramework): @@ -19,7 +20,7 @@ def __init__(self): self.num_nodes = 1 def setup_network(self, split=False): - args = [[False, "tmSRd1r8gs77Ja67Fw1JcdoXytxsyrLTPJm"]] + args = [None] self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, args) # Zallet needs a block to start @@ -47,17 +48,12 @@ def run_test(self): print("Ignoring stopping wallet error: ", e) time.sleep(1) - # Hack for https://github.com/ZcashFoundation/zebra/issues/9708 - # We Stop the wallet which has 1 block, generate 100 blocks in zebra, - # so when restarting Zebra it will have 1 block, just as the wallet. 
- self.nodes[0].generate(100) - # Stop the node self.nodes[0].stop() time.sleep(1) # Restart the node with the generated address as the miner address - args = [[False, transparent_address]] + args = [ZebraExtraArgs(miner_address=transparent_address)] self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, args) # Restart the wallet diff --git a/zebra-rpc/src/client.rs b/zebra-rpc/src/client.rs index 112dbfaaa77..c30d6a70c39 100644 --- a/zebra-rpc/src/client.rs +++ b/zebra-rpc/src/client.rs @@ -9,6 +9,7 @@ pub use zebra_chain; +#[allow(deprecated)] pub use crate::methods::{ hex_data::HexData, trees::{ @@ -35,8 +36,9 @@ pub use crate::methods::{ validate_address::ValidateAddressResponse, z_validate_address::{ZValidateAddressResponse, ZValidateAddressType}, }, - BlockHeaderObject, BlockObject, GetAddressBalanceRequest, GetAddressBalanceResponse, - GetAddressTxIdsRequest, GetAddressUtxosResponse, GetBlockHashResponse, GetBlockHeaderResponse, + AddressStrings, BlockHeaderObject, BlockObject, GetAddressBalanceRequest, + GetAddressBalanceResponse, GetAddressTxIdsRequest, GetAddressUtxosResponse, + GetAddressUtxosResponseObject, GetBlockHashResponse, GetBlockHeaderResponse, GetBlockHeightAndHashResponse, GetBlockResponse, GetBlockTransaction, GetBlockTrees, GetBlockchainInfoResponse, GetInfoResponse, GetRawTransactionResponse, Hash, SendRawTransactionResponse, Utxo, diff --git a/zebra-rpc/src/methods.rs b/zebra-rpc/src/methods.rs index a06c802d560..0b72497a47c 100644 --- a/zebra-rpc/src/methods.rs +++ b/zebra-rpc/src/methods.rs @@ -83,7 +83,7 @@ use zebra_chain::{ equihash::Solution, }, }; -use zebra_consensus::{funding_stream_address, ParameterCheckpoint, RouterError}; +use zebra_consensus::{funding_stream_address, RouterError}; use zebra_network::{address_book_peers::AddressBookPeers, PeerSocketAddr}; use zebra_node_services::mempool; use zebra_state::crosslink::{ @@ -119,6 +119,7 @@ use types::{ GetBlockTemplateParameters, GetBlockTemplateResponse, }, 
get_blockchain_info::GetBlockchainInfoBalance, + get_mempool_info::GetMempoolInfoResponse, get_mining_info::GetMiningInfoResponse, get_raw_mempool::{self, GetRawMempoolResponse}, long_poll::LongPollInput, @@ -533,6 +534,12 @@ pub trait Rpc { #[method(name = "getbestblockheightandhash")] fn get_best_block_height_and_hash(&self) -> Result; + /// Returns details on the active state of the TX memory pool. + /// + /// zcash reference: [`getmempoolinfo`](https://zcash.github.io/rpc/getmempoolinfo.html) + #[method(name = "getmempoolinfo")] + async fn get_mempool_info(&self) -> Result; + /// Returns all transaction ids in the memory pool, as a JSON array. /// /// # Parameters @@ -617,7 +624,7 @@ pub trait Rpc { /// /// # Parameters /// - /// - `params`: (required) Either: + /// - `request`: (required) Either: /// - A single address string (e.g., `"tmYXBYJj1K7vhejSec5osXK2QsGa5MTisUQ"`), or /// - An object with the following named fields: /// - `addresses`: (array of strings, required) The addresses to get transactions from. @@ -639,7 +646,7 @@ pub trait Rpc { /// /// - It is recommended that users call the method with start/end heights such that the response can't be too large. #[method(name = "getaddresstxids")] - async fn get_address_tx_ids(&self, params: GetAddressTxIdsParams) -> Result>; + async fn get_address_tx_ids(&self, request: GetAddressTxIdsRequest) -> Result>; /// Returns all unspent outputs for a list of addresses. /// @@ -649,7 +656,11 @@ pub trait Rpc { /// /// # Parameters /// - /// - `addresses`: (array, required, example={\"addresses\": [\"tmYXBYJj1K7vhejSec5osXK2QsGa5MTisUQ\"]}) The addresses to get outputs from. + /// - `request`: (required) Either: + /// - A single address string (e.g., `"tmYXBYJj1K7vhejSec5osXK2QsGa5MTisUQ"`), or + /// - An object with the following named fields: + /// - `addresses`: (array, required, example=[\"tmYXBYJj1K7vhejSec5osXK2QsGa5MTisUQ\"]) The addresses to get outputs from. 
+ /// - `chaininfo`: (boolean, optional, default=false) Include chain info with results /// /// # Notes /// @@ -658,7 +669,7 @@ pub trait Rpc { #[method(name = "getaddressutxos")] async fn get_address_utxos( &self, - address_strings: AddressStrings, + request: GetAddressUtxosRequest, ) -> Result; /// Stop the running zebrad process. @@ -1409,6 +1420,12 @@ where // TODO: Add a `genesis_block_time()` method on `Network` to use here. .unwrap_or((Height::MIN, 0.0)); + let verification_progress = if network.is_regtest() { + 1.0 + } else { + verification_progress + }; + // `upgrades` object // // Get the network upgrades in height order, like `zcashd`. @@ -2299,6 +2316,33 @@ where .ok_or_misc_error("No blocks in state") } + async fn get_mempool_info(&self) -> Result { + let mut mempool = self.mempool.clone(); + + let response = mempool + .ready() + .and_then(|service| service.call(mempool::Request::QueueStats)) + .await + .map_misc_error()?; + + if let mempool::Response::QueueStats { + size, + bytes, + usage, + fully_notified, + } = response + { + Ok(GetMempoolInfoResponse { + size, + bytes, + usage, + fully_notified, + }) + } else { + unreachable!("unexpected response to QueueStats request") + } + } + async fn get_raw_mempool(&self, verbose: Option) -> Result { #[allow(unused)] let verbose = verbose.unwrap_or(false); @@ -2632,7 +2676,7 @@ where let subtrees = subtrees .values() .map(|subtree| SubtreeRpcData { - root: subtree.root.encode_hex(), + root: subtree.root.to_bytes().encode_hex(), end_height: subtree.end_height, }) .collect(); @@ -2677,9 +2721,7 @@ where } } - async fn get_address_tx_ids(&self, params: GetAddressTxIdsParams) -> Result> { - let request = params.into_request(); - + async fn get_address_tx_ids(&self, request: GetAddressTxIdsRequest) -> Result> { let mut read_state = self.read_state.clone(); let latest_chain_tip = self.latest_chain_tip.clone(); @@ -2689,10 +2731,7 @@ where best_chain_tip_height(&latest_chain_tip)?, )?; - let valid_addresses = 
AddressStrings { - addresses: request.addresses, - } - .valid_addresses()?; + let valid_addresses = request.valid_addresses()?; let request = zebra_state::ReadRequest::TransactionIdsByAddresses { addresses: valid_addresses, @@ -2733,12 +2772,12 @@ where async fn get_address_utxos( &self, - address_strings: AddressStrings, + utxos_request: GetAddressUtxosRequest, ) -> Result { let mut read_state = self.read_state.clone(); let mut response_utxos = vec![]; - let valid_addresses = address_strings.valid_addresses()?; + let valid_addresses = utxos_request.valid_addresses()?; // get utxos data for addresses let request = zebra_state::ReadRequest::UtxosByAddresses(valid_addresses); @@ -2784,7 +2823,21 @@ where last_output_location = output_location; } - Ok(response_utxos) + if !utxos_request.chain_info { + Ok(GetAddressUtxosResponse::Utxos(response_utxos)) + } else { + let (height, hash) = utxos + .last_height_and_hash() + .ok_or_misc_error("No blocks in state")?; + + Ok(GetAddressUtxosResponse::UtxosAndChainInfo( + GetAddressUtxosResponseObject { + utxos: response_utxos, + hash, + height, + }, + )) + } } fn stop(&self) -> Result { @@ -3917,22 +3970,21 @@ impl GetBlockchainInfoResponse { } } -/// A wrapper type with a list of transparent address strings. -/// -/// This is used for the input parameter of [`RpcServer::get_address_balance`], -/// [`RpcServer::get_address_tx_ids`] and [`RpcServer::get_address_utxos`]. +/// A request for [`RpcServer::get_address_balance`]. #[derive(Clone, Debug, Eq, PartialEq, Hash, serde::Deserialize, serde::Serialize)] -#[serde(from = "DAddressStrings")] -pub struct AddressStrings { +#[serde(from = "DGetAddressBalanceRequest")] +pub struct GetAddressBalanceRequest { /// A list of transparent address strings. 
addresses: Vec, } -impl From for AddressStrings { - fn from(address_strings: DAddressStrings) -> Self { +impl From for GetAddressBalanceRequest { + fn from(address_strings: DGetAddressBalanceRequest) -> Self { match address_strings { - DAddressStrings::Addresses { addresses } => AddressStrings { addresses }, - DAddressStrings::Address(address) => AddressStrings { + DGetAddressBalanceRequest::Addresses { addresses } => { + GetAddressBalanceRequest { addresses } + } + DGetAddressBalanceRequest::Address(address) => GetAddressBalanceRequest { addresses: vec![address], }, } @@ -3942,7 +3994,7 @@ impl From for AddressStrings { /// An intermediate type used to deserialize [`AddressStrings`]. #[derive(Clone, Debug, Eq, PartialEq, Hash, serde::Deserialize)] #[serde(untagged)] -enum DAddressStrings { +enum DGetAddressBalanceRequest { /// A list of address strings. Addresses { addresses: Vec }, /// A single address string. @@ -3950,33 +4002,19 @@ enum DAddressStrings { } /// A request to get the transparent balance of a set of addresses. -pub type GetAddressBalanceRequest = AddressStrings; - -impl AddressStrings { - /// Creates a new `AddressStrings` given a vector. - pub fn new(addresses: Vec) -> AddressStrings { - AddressStrings { addresses } - } - - /// Creates a new [`AddressStrings`] from a given vector, returns an error if any addresses are incorrect. - #[deprecated( - note = "Use `AddressStrings::new` instead. Validity will be checked by the server." - )] - pub fn new_valid(addresses: Vec) -> Result { - let address_strings = Self { addresses }; - address_strings.clone().valid_addresses()?; - Ok(address_strings) - } +#[deprecated(note = "Use `GetAddressBalanceRequest` instead.")] +pub type AddressStrings = GetAddressBalanceRequest; +trait ValidateAddresses { /// Given a list of addresses as strings: /// - check if provided list have all valid transparent addresses. /// - return valid addresses as a set of `Address`. 
- pub fn valid_addresses(self) -> Result> { + fn valid_addresses(&self) -> Result> { // Reference for the legacy error code: // let valid_addresses: HashSet
= self - .addresses - .into_iter() + .addresses() + .iter() .map(|address| { address .parse() @@ -3987,12 +4025,30 @@ impl AddressStrings { Ok(valid_addresses) } - /// Given a list of addresses as strings: - /// - check if provided list have all valid transparent addresses. - /// - return valid addresses as a vec of strings. - pub fn valid_address_strings(self) -> Result> { - self.clone().valid_addresses()?; - Ok(self.addresses) + /// Returns string-encoded Zcash addresses in the type implementing this trait. + fn addresses(&self) -> &[String]; +} + +impl ValidateAddresses for GetAddressBalanceRequest { + fn addresses(&self) -> &[String] { + &self.addresses + } +} + +impl GetAddressBalanceRequest { + /// Creates a new `AddressStrings` given a vector. + pub fn new(addresses: Vec) -> GetAddressBalanceRequest { + GetAddressBalanceRequest { addresses } + } + + /// Creates a new [`AddressStrings`] from a given vector, returns an error if any addresses are incorrect. + #[deprecated( + note = "Use `AddressStrings::new` instead. Validity will be checked by the server." + )] + pub fn new_valid(addresses: Vec) -> Result { + let req = Self { addresses }; + req.valid_addresses()?; + Ok(req) } } @@ -4020,6 +4076,59 @@ pub struct GetAddressBalanceResponse { #[deprecated(note = "Use `GetAddressBalanceResponse` instead.")] pub use self::GetAddressBalanceResponse as AddressBalance; +/// Parameters of [`RpcServer::get_address_utxos`] RPC method. +#[derive(Clone, Debug, Eq, PartialEq, serde::Deserialize, serde::Serialize, Getters, new)] +#[serde(from = "DGetAddressUtxosRequest")] +pub struct GetAddressUtxosRequest { + /// A list of addresses to get transactions from. + addresses: Vec, + /// The height to start looking for transactions. 
+ #[serde(default)] + #[serde(rename = "chainInfo")] + chain_info: bool, +} + +impl From for GetAddressUtxosRequest { + fn from(request: DGetAddressUtxosRequest) -> Self { + match request { + DGetAddressUtxosRequest::Single(addr) => GetAddressUtxosRequest { + addresses: vec![addr], + chain_info: false, + }, + DGetAddressUtxosRequest::Object { + addresses, + chain_info, + } => GetAddressUtxosRequest { + addresses, + chain_info, + }, + } + } +} + +/// An intermediate type used to deserialize [`GetAddressUtxosRequest`]. +#[derive(Debug, serde::Deserialize)] +#[serde(untagged)] +enum DGetAddressUtxosRequest { + /// A single address string. + Single(String), + /// A full request object with address list and chainInfo flag. + Object { + /// A list of addresses to get transactions from. + addresses: Vec, + /// The height to start looking for transactions. + #[serde(default)] + #[serde(rename = "chainInfo")] + chain_info: bool, + }, +} + +impl ValidateAddresses for GetAddressUtxosRequest { + fn addresses(&self) -> &[String] { + &self.addresses + } +} + /// A hex-encoded [`ConsensusBranchId`] string. #[derive(Copy, Clone, Debug, Eq, PartialEq, Hash, serde::Serialize, serde::Deserialize)] pub struct ConsensusBranchIdHex(#[serde(with = "hex")] ConsensusBranchId); @@ -4539,7 +4648,25 @@ impl Default for GetRawTransactionResponse { } /// Response to a `getaddressutxos` RPC request. -pub type GetAddressUtxosResponse = Vec; +#[derive(Clone, Debug, Eq, PartialEq, serde::Serialize, serde::Deserialize)] +#[serde(untagged)] +pub enum GetAddressUtxosResponse { + /// Response when `chainInfo` is false or not provided. + Utxos(Vec), + /// Response when `chainInfo` is true. + UtxosAndChainInfo(GetAddressUtxosResponseObject), +} + +/// Response to a `getaddressutxos` RPC request, when `chainInfo` is true. 
+#[derive(Clone, Debug, Eq, PartialEq, serde::Serialize, serde::Deserialize, Getters, new)] +pub struct GetAddressUtxosResponseObject { + utxos: Vec, + #[serde(with = "hex")] + #[getter(copy)] + hash: block::Hash, + #[getter(copy)] + height: block::Height, +} /// A UTXO returned by the `getaddressutxos` RPC request. /// @@ -4635,12 +4762,14 @@ impl Utxo { } } -/// A struct to use as parameter of the `getaddresstxids`. +/// Parameters of [`RpcServer::get_address_tx_ids`] RPC method. /// -/// See the notes for the [`Rpc::get_address_tx_ids` method]. +/// See [`RpcServer::get_address_tx_ids`] for more details. #[derive(Clone, Debug, Eq, PartialEq, serde::Deserialize, serde::Serialize, Getters, new)] +#[serde(from = "DGetAddressTxIdsRequest")] pub struct GetAddressTxIdsRequest { - // A list of addresses to get transactions from. + /// A list of addresses. The RPC method will get transactions IDs that sent or received + /// funds to or from these addresses. addresses: Vec, // The height to start looking for transactions. start: Option, @@ -4669,27 +4798,47 @@ impl GetAddressTxIdsRequest { } } -/// Parameters for the `getaddresstxids` RPC method. +impl From for GetAddressTxIdsRequest { + fn from(request: DGetAddressTxIdsRequest) -> Self { + match request { + DGetAddressTxIdsRequest::Single(addr) => GetAddressTxIdsRequest { + addresses: vec![addr], + start: None, + end: None, + }, + DGetAddressTxIdsRequest::Object { + addresses, + start, + end, + } => GetAddressTxIdsRequest { + addresses, + start, + end, + }, + } + } +} + +/// An intermediate type used to deserialize [`GetAddressTxIdsRequest`]. #[derive(Debug, serde::Deserialize)] #[serde(untagged)] -pub enum GetAddressTxIdsParams { +enum DGetAddressTxIdsRequest { /// A single address string. Single(String), /// A full request object with address list and optional height range. - Object(GetAddressTxIdsRequest), + Object { + /// A list of addresses to get transactions from. 
+ addresses: Vec, + /// The height to start looking for transactions. + start: Option, + /// The height to end looking for transactions. + end: Option, + }, } -impl GetAddressTxIdsParams { - /// Converts the enum into a `GetAddressTxIdsRequest`, normalizing the input format. - pub fn into_request(self) -> GetAddressTxIdsRequest { - match self { - GetAddressTxIdsParams::Single(addr) => GetAddressTxIdsRequest { - addresses: vec![addr], - start: None, - end: None, - }, - GetAddressTxIdsParams::Object(req) => req, - } +impl ValidateAddresses for GetAddressTxIdsRequest { + fn addresses(&self) -> &[String] { + &self.addresses } } diff --git a/zebra-rpc/src/methods/tests/prop.rs b/zebra-rpc/src/methods/tests/prop.rs index bf06aea07d3..d5d1c450258 100644 --- a/zebra-rpc/src/methods/tests/prop.rs +++ b/zebra-rpc/src/methods/tests/prop.rs @@ -26,7 +26,6 @@ use zebra_chain::{ transparent, value_balance::ValueBalance, }; -use zebra_consensus::ParameterCheckpoint; use zebra_network::address_book_peers::MockAddressBookPeers; use zebra_node_services::mempool; use zebra_state::{BoxError, GetBlockTemplateChainInfo}; @@ -41,7 +40,7 @@ use crate::methods::{ }; use super::super::{ - AddressStrings, GetAddressBalanceResponse, NetworkUpgradeStatus, RpcImpl, RpcServer, + GetAddressBalanceRequest, GetAddressBalanceResponse, NetworkUpgradeStatus, RpcImpl, RpcServer, SendRawTransactionResponse, }; @@ -630,7 +629,7 @@ proptest! { tokio::time::pause(); // Prepare the list of addresses. - let address_strings = AddressStrings { + let address_strings = GetAddressBalanceRequest { addresses: addresses .iter() .map(|address| address.to_string()) @@ -692,7 +691,7 @@ proptest! 
{ runtime.block_on(async move { - let address_strings = AddressStrings { + let address_strings = GetAddressBalanceRequest { addresses: at_least_one_invalid_address, }; diff --git a/zebra-rpc/src/methods/tests/snapshot.rs b/zebra-rpc/src/methods/tests/snapshot.rs index d8b30dbdcc9..741bd590e9e 100644 --- a/zebra-rpc/src/methods/tests/snapshot.rs +++ b/zebra-rpc/src/methods/tests/snapshot.rs @@ -31,7 +31,6 @@ use zebra_chain::{ Network::{self, Mainnet}, NetworkKind, NetworkUpgrade, }, - sapling, serialization::{DateTime32, ZcashDeserializeInto}, subtree::NoteCommitmentSubtreeData, transaction::Transaction, @@ -295,7 +294,7 @@ async fn test_rpc_response_data_for_network(network: &Network) { // `getaddressbalance` let get_address_balance = rpc - .get_address_balance(AddressStrings { + .get_address_balance(GetAddressBalanceRequest { addresses: addresses.clone(), }) .await @@ -428,6 +427,29 @@ async fn test_rpc_response_data_for_network(network: &Network) { .expect("We should have a GetBlockHash struct"); snapshot_rpc_getbestblockhash(get_best_block_hash, &settings); + // `getmempoolinfo` + // + // - this RPC method returns mempool stats like size and bytes + // - we simulate a call to the mempool with the `QueueStats` request, + // and respond with mock stats to verify RPC output formatting. + let mempool_req = mempool + .expect_request_that(|request| matches!(request, mempool::Request::QueueStats)) + .map(|responder| { + responder.respond(mempool::Response::QueueStats { + size: 67, + bytes: 32_500, + usage: 41_000, + fully_notified: None, + }); + }); + + let (rsp, _) = futures::join!(rpc.get_mempool_info(), mempool_req); + if let Ok(inner) = rsp { + insta::assert_json_snapshot!("get_mempool_info", inner); + } else { + panic!("getmempoolinfo RPC must return a valid response"); + } + // `getrawmempool` // // - a request to get all mempool transactions will be made by `getrawmempool` behind the scenes. 
@@ -525,60 +547,63 @@ async fn test_rpc_response_data_for_network(network: &Network) { // `getaddresstxids` let get_address_tx_ids = rpc - .get_address_tx_ids(GetAddressTxIdsParams::Object(GetAddressTxIdsRequest { + .get_address_tx_ids(GetAddressTxIdsRequest { addresses: addresses.clone(), start: Some(1), end: Some(10), - })) + }) .await .expect("We should have a vector of strings"); snapshot_rpc_getaddresstxids_valid("multi_block", get_address_tx_ids, &settings); let get_address_tx_ids = rpc - .get_address_tx_ids(GetAddressTxIdsParams::Object(GetAddressTxIdsRequest { + .get_address_tx_ids(GetAddressTxIdsRequest { addresses: addresses.clone(), start: Some(2), end: Some(2), - })) + }) .await .expect("We should have a vector of strings"); snapshot_rpc_getaddresstxids_valid("single_block", get_address_tx_ids, &settings); let get_address_tx_ids = rpc - .get_address_tx_ids(GetAddressTxIdsParams::Object(GetAddressTxIdsRequest { + .get_address_tx_ids(GetAddressTxIdsRequest { addresses: addresses.clone(), start: Some(3), end: Some(EXCESSIVE_BLOCK_HEIGHT), - })) + }) .await .expect("We should have a vector of strings"); snapshot_rpc_getaddresstxids_valid("excessive_end", get_address_tx_ids, &settings); let get_address_tx_ids = rpc - .get_address_tx_ids(GetAddressTxIdsParams::Object(GetAddressTxIdsRequest { + .get_address_tx_ids(GetAddressTxIdsRequest { addresses: addresses.clone(), start: Some(EXCESSIVE_BLOCK_HEIGHT), end: Some(EXCESSIVE_BLOCK_HEIGHT + 1), - })) + }) .await .expect("We should have a vector of strings"); snapshot_rpc_getaddresstxids_valid("excessive_start", get_address_tx_ids, &settings); let get_address_tx_ids = rpc - .get_address_tx_ids(GetAddressTxIdsParams::Object(GetAddressTxIdsRequest { + .get_address_tx_ids(GetAddressTxIdsRequest { addresses: addresses.clone(), start: Some(2), end: Some(1), - })) + }) .await; snapshot_rpc_getaddresstxids_invalid("end_greater_start", get_address_tx_ids, &settings); // `getaddressutxos` let get_address_utxos = rpc - 
.get_address_utxos(AddressStrings { addresses }) + .get_address_utxos(GetAddressUtxosRequest::new(addresses, false)) .await .expect("We should have a vector of strings"); - snapshot_rpc_getaddressutxos(get_address_utxos, &settings); + let GetAddressUtxosResponse::Utxos(addresses) = get_address_utxos else { + panic!("We should have a GetAddressUtxosResponse::ChainInfoFalse struct"); + }; + snapshot_rpc_getaddressutxos(addresses, &settings); } async fn test_mocked_rpc_response_data_for_network(network: &Network) { @@ -616,7 +641,7 @@ async fn test_mocked_rpc_response_data_for_network(network: &Network) { // Mock the data for the response. let mut subtrees = BTreeMap::new(); - let subtree_root = sapling::tree::Node::default(); + let subtree_root = sapling_crypto::Node::from_bytes([0; 32]).unwrap(); for i in 0..2u16 { let subtree = NoteCommitmentSubtreeData::new(Height(i.into()), subtree_root); diff --git a/zebra-rpc/src/methods/tests/snapshots/get_mempool_info.snap b/zebra-rpc/src/methods/tests/snapshots/get_mempool_info.snap new file mode 100644 index 00000000000..4d358404385 --- /dev/null +++ b/zebra-rpc/src/methods/tests/snapshots/get_mempool_info.snap @@ -0,0 +1,9 @@ +--- +source: zebra-rpc/src/methods/tests/snapshot.rs +expression: inner +--- +{ + "size": 67, + "bytes": 32500, + "usage": 41000 +} diff --git a/zebra-rpc/src/methods/tests/vectors.rs b/zebra-rpc/src/methods/tests/vectors.rs index e0ed677fbfd..a8823e7fdd2 100644 --- a/zebra-rpc/src/methods/tests/vectors.rs +++ b/zebra-rpc/src/methods/tests/vectors.rs @@ -1246,11 +1246,11 @@ async fn rpc_getaddresstxids_invalid_arguments() { // call the method with an invalid address string let rpc_rsp = rpc - .get_address_tx_ids(GetAddressTxIdsParams::Object(GetAddressTxIdsRequest { + .get_address_tx_ids(GetAddressTxIdsRequest { addresses: vec!["t1invalidaddress".to_owned()], start: Some(1), end: Some(2), - })) + }) .await .unwrap_err(); @@ -1266,11 +1266,11 @@ async fn rpc_getaddresstxids_invalid_arguments() { let 
start: Option = Some(2); let end: Option = Some(1); let error = rpc - .get_address_tx_ids(GetAddressTxIdsParams::Object(GetAddressTxIdsRequest { + .get_address_tx_ids(GetAddressTxIdsRequest { addresses: addresses.clone(), start, end, - })) + }) .await .unwrap_err(); assert_eq!( @@ -1439,11 +1439,11 @@ async fn rpc_getaddresstxids_response_with( // call the method with valid arguments let addresses = vec![address.to_string()]; let response = rpc - .get_address_tx_ids(GetAddressTxIdsParams::Object(GetAddressTxIdsRequest { + .get_address_tx_ids(GetAddressTxIdsRequest { addresses, start, end, - })) + }) .await .expect("arguments are valid so no error can happen here"); @@ -1516,16 +1516,18 @@ async fn getaddresstxids_single_equals_object_full_range() { let addr_str = address.to_string(); let object_response = rpc - .get_address_tx_ids(GetAddressTxIdsParams::Object(GetAddressTxIdsRequest { + .get_address_tx_ids(GetAddressTxIdsRequest { addresses: vec![addr_str.clone()], start: None, end: None, - })) + }) .await .expect("Object variant should succeed"); + let request = DGetAddressTxIdsRequest::Single(addr_str.clone()); + let single_response = rpc - .get_address_tx_ids(GetAddressTxIdsParams::Single(addr_str)) + .get_address_tx_ids(request.into()) .await .expect("Single variant should succeed"); @@ -1568,7 +1570,10 @@ async fn rpc_getaddressutxos_invalid_arguments() { // call the method with an invalid address string let error = rpc - .get_address_utxos(AddressStrings::new(vec!["t1invalidaddress".to_owned()])) + .get_address_utxos(GetAddressUtxosRequest::new( + vec!["t1invalidaddress".to_owned()], + false, + )) .await .unwrap_err(); @@ -1587,6 +1592,12 @@ async fn rpc_getaddressutxos_response() { .map(|block_bytes| block_bytes.zcash_deserialize_into().unwrap()) .collect(); + // Get the hash of the block at the tip using hardcoded block tip bytes. 
+ // We want to test the RPC response is equal to this hash + let tip_block = blocks.last().unwrap(); + let tip_block_hash = tip_block.hash(); + let tip_block_height = tip_block.coinbase_height().unwrap(); + // get the first transaction of the first block let first_block_first_transaction = &blocks[1].transactions[0]; // get the address, this is always `t3Vz22vK5z2LcKEdg16Yv4FFneEL1zg9ojd` @@ -1621,14 +1632,48 @@ async fn rpc_getaddressutxos_response() { // call the method with a valid address let addresses = vec![address.to_string()]; let response = rpc - .get_address_utxos(AddressStrings::new(addresses)) + .get_address_utxos(GetAddressUtxosRequest::new(addresses, false)) .await .expect("address is valid so no error can happen here"); // there are 10 outputs for provided address + let GetAddressUtxosResponse::Utxos(response) = response else { + panic!("expected GetAddressUtxosResponse::ChainInfoFalse variant"); + }; assert_eq!(response.len(), 10); mempool.expect_no_requests().await; + + // call the method with a valid address, single argument + let response = rpc + .get_address_utxos(DGetAddressUtxosRequest::Single(address.to_string()).into()) + .await + .expect("address is valid so no error can happen here"); + + // there are 10 outputs for provided address + let GetAddressUtxosResponse::Utxos(response) = response else { + panic!("expected GetAddressUtxosResponse::ChainInfoFalse variant"); + }; + assert_eq!(response.len(), 10); + + mempool.expect_no_requests().await; + + // call the method with a valid address, and chainInfo = true + let addresses = vec![address.to_string()]; + let response = rpc + .get_address_utxos(GetAddressUtxosRequest::new(addresses, true)) + .await + .expect("address is valid so no error can happen here"); + + // there are 10 outputs for provided address + let GetAddressUtxosResponse::UtxosAndChainInfo(response) = response else { + panic!("expected GetAddressUtxosResponse::ChainInfoTrue variant"); + }; + assert_eq!(response.utxos().len(), 
10); + assert_eq!(response.hash(), tip_block_hash); + assert_eq!(response.height(), tip_block_height); + + mempool.expect_no_requests().await; } #[tokio::test(flavor = "multi_thread")] @@ -1696,7 +1741,7 @@ async fn rpc_getblockcount_empty_state() { // Get a mempool handle let mut mempool: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); // Create an empty state - let (state, read_state, tip, _) = zebra_state::init_test_services(&Mainnet); + let (state, read_state, tip, _) = zebra_state::init_test_services(&Mainnet).await; let (block_verifier_router, _, _, _) = zebra_consensus::router::init_test( zebra_consensus::Config::default(), @@ -1749,7 +1794,7 @@ async fn rpc_getpeerinfo() { let mut mempool: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); let tfl_service: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); - let (state, read_state, tip, _) = zebra_state::init_test_services(&Mainnet); + let (state, read_state, tip, _) = zebra_state::init_test_services(&Mainnet).await; let (block_verifier_router, _, _, _) = zebra_consensus::router::init_test( zebra_consensus::Config::default(), @@ -2886,7 +2931,7 @@ async fn rpc_addnode() { ))); let mut mempool: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); - let (state, read_state, tip, _) = zebra_state::init_test_services(&Mainnet); + let (state, read_state, tip, _) = zebra_state::init_test_services(&Mainnet).await; let (block_verifier_router, _, _, _) = zebra_consensus::router::init_test( zebra_consensus::Config::default(), diff --git a/zebra-rpc/src/methods/types.rs b/zebra-rpc/src/methods/types.rs index b596950b0d7..75a5003eaf7 100644 --- a/zebra-rpc/src/methods/types.rs +++ b/zebra-rpc/src/methods/types.rs @@ -3,6 +3,7 @@ pub mod default_roots; pub mod get_block_template; pub mod get_blockchain_info; +pub mod get_mempool_info; pub mod get_mining_info; pub mod get_raw_mempool; pub mod long_poll; diff --git 
a/zebra-rpc/src/methods/types/default_roots.rs b/zebra-rpc/src/methods/types/default_roots.rs index 95898062cb6..441e51aac8c 100644 --- a/zebra-rpc/src/methods/types/default_roots.rs +++ b/zebra-rpc/src/methods/types/default_roots.rs @@ -7,7 +7,7 @@ use zebra_chain::block::{ ChainHistoryBlockTxAuthCommitmentHash, ChainHistoryMmrRootHash, }; -/// The block header roots for [`GetBlockTemplate.transactions`]. +/// The block header roots for the transactions in a block template. /// /// If the transactions in the block template are modified, these roots must be recalculated /// [according to the specification](https://zcash.github.io/rpc/getblocktemplate.html). diff --git a/zebra-rpc/src/methods/types/get_block_template.rs b/zebra-rpc/src/methods/types/get_block_template.rs index edc199088e5..3893372da85 100644 --- a/zebra-rpc/src/methods/types/get_block_template.rs +++ b/zebra-rpc/src/methods/types/get_block_template.rs @@ -96,7 +96,7 @@ pub struct BlockTemplateResponse { /// The block commitment for the new block's header. /// - /// Same as [`DefaultRoots.block_commitments_hash`], see that field for details. + /// Same as [`DefaultRoots::block_commitments_hash`], see that field for details. #[serde(rename = "blockcommitmentshash")] #[serde(with = "hex")] #[getter(copy)] @@ -104,7 +104,7 @@ pub struct BlockTemplateResponse { /// Legacy backwards-compatibility header root field. /// - /// Same as [`DefaultRoots.block_commitments_hash`], see that field for details. + /// Same as [`DefaultRoots::block_commitments_hash`], see that field for details. #[serde(rename = "lightclientroothash")] #[serde(with = "hex")] #[getter(copy)] @@ -112,13 +112,13 @@ pub struct BlockTemplateResponse { /// Legacy backwards-compatibility header root field. /// - /// Same as [`DefaultRoots.block_commitments_hash`], see that field for details. + /// Same as [`DefaultRoots::block_commitments_hash`], see that field for details. 
#[serde(rename = "finalsaplingroothash")] #[serde(with = "hex")] #[getter(copy)] pub(crate) final_sapling_root_hash: ChainHistoryBlockTxAuthCommitmentHash, - /// The block header roots for [`GetBlockTemplate.transactions`]. + /// The block header roots for the transactions in the block template. /// /// If the transactions in the block template are modified, these roots must be recalculated /// [according to the specification](https://zcash.github.io/rpc/getblocktemplate.html). diff --git a/zebra-rpc/src/methods/types/get_block_template/parameters.rs b/zebra-rpc/src/methods/types/get_block_template/parameters.rs index 6b2ab97bf70..a3d72c05837 100644 --- a/zebra-rpc/src/methods/types/get_block_template/parameters.rs +++ b/zebra-rpc/src/methods/types/get_block_template/parameters.rs @@ -7,22 +7,17 @@ use crate::methods::{hex_data::HexData, types::long_poll::LongPollId}; /// Defines whether the RPC method should generate a block template or attempt to validate a block /// proposal. -#[derive(Clone, Debug, serde::Deserialize, serde::Serialize, PartialEq, Eq)] +#[derive(Clone, Debug, Default, serde::Deserialize, serde::Serialize, PartialEq, Eq)] #[serde(rename_all = "lowercase")] pub enum GetBlockTemplateRequestMode { /// Indicates a request for a block template. + #[default] Template, /// Indicates a request to validate block data. Proposal, } -impl Default for GetBlockTemplateRequestMode { - fn default() -> Self { - Self::Template - } -} - /// Valid `capabilities` values that indicate client-side support. #[derive(Clone, Debug, serde::Deserialize, serde::Serialize, PartialEq, Eq)] #[serde(rename_all = "lowercase")] diff --git a/zebra-rpc/src/methods/types/get_mempool_info.rs b/zebra-rpc/src/methods/types/get_mempool_info.rs new file mode 100644 index 00000000000..11fa86942be --- /dev/null +++ b/zebra-rpc/src/methods/types/get_mempool_info.rs @@ -0,0 +1,18 @@ +//! Types used in `getmempoolinfo` RPC method. + +/// Response to a `getmempoolinfo` RPC request. 
+/// +/// See the notes for the [`Rpc::get_mempool_info` method]. +#[derive(Clone, Debug, Eq, PartialEq, serde::Serialize)] +pub struct GetMempoolInfoResponse { + /// Current tx count + pub size: usize, + /// Sum of all tx sizes + pub bytes: usize, + /// Total memory usage for the mempool + pub usage: usize, + /// Whether the node has finished notifying all listeners/tests about every transaction currently in the mempool. + /// This key is returned only when the node is running in regtest. + #[serde(skip_serializing_if = "Option::is_none")] + pub fully_notified: Option, +} diff --git a/zebra-rpc/src/methods/types/transaction.rs b/zebra-rpc/src/methods/types/transaction.rs index 62cc9e7b009..1c75c03c6f5 100644 --- a/zebra-rpc/src/methods/types/transaction.rs +++ b/zebra-rpc/src/methods/types/transaction.rs @@ -15,7 +15,7 @@ use zebra_chain::{ orchard, parameters::Network, primitives::ed25519, - sapling::NotSmallOrderValueCommitment, + sapling::ValueCommitment, serialization::ZcashSerialize, transaction::{self, SerializedTransaction, Transaction, UnminedTx, VerifiedUnminedTx}, transparent::Script, @@ -436,8 +436,8 @@ pub struct JoinSplit { pub struct ShieldedSpend { /// Value commitment to the input note. #[serde(with = "hex")] - #[getter(copy)] - cv: NotSmallOrderValueCommitment, + #[getter(skip)] + cv: ValueCommitment, /// Merkle root of the Sapling note commitment tree. #[serde(with = "hex")] #[getter(copy)] @@ -460,13 +460,21 @@ pub struct ShieldedSpend { spend_auth_sig: [u8; 64], } +// We can't use `#[getter(copy)]` as upstream `sapling_crypto::note::ValueCommitment` is not `Copy`. +impl ShieldedSpend { + /// The value commitment to the input note. + pub fn cv(&self) -> ValueCommitment { + self.cv.clone() + } +} + /// A Sapling output of a transaction. #[derive(Clone, Debug, Eq, PartialEq, serde::Serialize, serde::Deserialize, Getters, new)] pub struct ShieldedOutput { /// Value commitment to the input note. 
#[serde(with = "hex")] - #[getter(copy)] - cv: NotSmallOrderValueCommitment, + #[getter(skip)] + cv: ValueCommitment, /// The u-coordinate of the note commitment for the output note. #[serde(rename = "cmu", with = "hex")] cm_u: [u8; 32], @@ -484,6 +492,14 @@ pub struct ShieldedOutput { proof: [u8; 192], } +// We can't use `#[getter(copy)]` as upstream `sapling_crypto::note::ValueCommitment` is not `Copy`. +impl ShieldedOutput { + /// The value commitment to the output note. + pub fn cv(&self) -> ValueCommitment { + self.cv.clone() + } +} + /// Object with Orchard-specific information. #[serde_with::serde_as] #[derive(Clone, Debug, PartialEq, serde::Serialize, serde::Deserialize, Getters, new)] @@ -682,7 +698,7 @@ impl TransactionObject { let spend_auth_sig: [u8; 64] = spend.spend_auth_sig.into(); ShieldedSpend { - cv: spend.cv, + cv: spend.cv.clone(), anchor, nullifier, rk, @@ -702,7 +718,7 @@ impl TransactionObject { let out_ciphertext: [u8; 80] = output.out_ciphertext.into(); ShieldedOutput { - cv: output.cv, + cv: output.cv.clone(), cm_u, ephemeral_key, enc_ciphertext, diff --git a/zebra-rpc/tests/serialization_tests.rs b/zebra-rpc/tests/serialization_tests.rs index 2a2e0a78ef2..4547a506d40 100644 --- a/zebra-rpc/tests/serialization_tests.rs +++ b/zebra-rpc/tests/serialization_tests.rs @@ -13,26 +13,24 @@ use vectors::{ GET_BLOCK_TEMPLATE_RESPONSE_TEMPLATE, GET_RAW_TRANSACTION_RESPONSE_TRUE, }; -use zebra_rpc::client::{ - zebra_chain::{ - sapling::NotSmallOrderValueCommitment, - serialization::{ZcashDeserialize, ZcashSerialize}, - subtree::NoteCommitmentSubtreeIndex, - transparent::{OutputIndex, Script}, - work::difficulty::{CompactDifficulty, ExpandedDifficulty}, - }, - GetBlockchainInfoBalance, JoinSplit, OrchardFlags, +use zebra_rpc::client::zebra_chain::{ + sapling::ValueCommitment, + serialization::{ZcashDeserialize, ZcashSerialize}, + subtree::NoteCommitmentSubtreeIndex, + transparent::{OutputIndex, Script}, + work::difficulty::{CompactDifficulty, 
ExpandedDifficulty}, }; use zebra_rpc::client::{ BlockHeaderObject, BlockObject, BlockTemplateResponse, Commitments, DefaultRoots, FundingStream, GetAddressBalanceRequest, GetAddressBalanceResponse, GetAddressTxIdsRequest, - GetAddressUtxosResponse, GetBlockHashResponse, GetBlockHeaderResponse, - GetBlockHeightAndHashResponse, GetBlockResponse, GetBlockSubsidyResponse, - GetBlockTemplateParameters, GetBlockTemplateRequestMode, GetBlockTemplateResponse, - GetBlockTransaction, GetBlockTrees, GetBlockchainInfoResponse, GetInfoResponse, - GetMiningInfoResponse, GetPeerInfoResponse, GetRawMempoolResponse, GetRawTransactionResponse, - GetSubtreesByIndexResponse, GetTreestateResponse, Hash, Input, MempoolObject, Orchard, - OrchardAction, Output, PeerInfo, ScriptPubKey, ScriptSig, SendRawTransactionResponse, + GetAddressUtxosResponse, GetAddressUtxosResponseObject, GetBlockHashResponse, + GetBlockHeaderResponse, GetBlockHeightAndHashResponse, GetBlockResponse, + GetBlockSubsidyResponse, GetBlockTemplateParameters, GetBlockTemplateRequestMode, + GetBlockTemplateResponse, GetBlockTransaction, GetBlockTrees, GetBlockchainInfoBalance, + GetBlockchainInfoResponse, GetInfoResponse, GetMiningInfoResponse, GetPeerInfoResponse, + GetRawMempoolResponse, GetRawTransactionResponse, GetSubtreesByIndexResponse, + GetTreestateResponse, Hash, Input, JoinSplit, MempoolObject, Orchard, OrchardAction, + OrchardFlags, Output, PeerInfo, ScriptPubKey, ScriptSig, SendRawTransactionResponse, ShieldedOutput, ShieldedSpend, SubmitBlockErrorResponse, SubmitBlockResponse, SubtreeRpcData, TransactionObject, TransactionTemplate, Treestate, Utxo, ValidateAddressResponse, ZListUnifiedReceiversResponse, ZValidateAddressResponse, @@ -749,8 +747,7 @@ fn test_get_raw_transaction_true() -> Result<(), Box> { let proof = spend.proof(); let spend_auth_sig = spend.spend_auth_sig(); ShieldedSpend::new( - NotSmallOrderValueCommitment::zcash_deserialize(Cursor::new(cv)) - .expect("was just serialized"), + 
ValueCommitment::zcash_deserialize(Cursor::new(cv)).expect("was just serialized"), anchor, nullifier, rk, @@ -770,8 +767,7 @@ fn test_get_raw_transaction_true() -> Result<(), Box> { let out_ciphertext = output.out_ciphertext(); let proof = output.proof(); ShieldedOutput::new( - NotSmallOrderValueCommitment::zcash_deserialize(Cursor::new(cv)) - .expect("was just serialized"), + ValueCommitment::zcash_deserialize(Cursor::new(cv)).expect("was just serialized"), cm_u, ephemeral_key, enc_ciphertext, @@ -890,7 +886,7 @@ fn test_get_address_tx_ids() -> Result<(), Box> { } #[test] -fn test_get_address_utxos() -> Result<(), Box> { +fn test_get_address_utxos_chain_info_false() -> Result<(), Box> { let json = r#" [ { @@ -905,6 +901,10 @@ fn test_get_address_utxos() -> Result<(), Box> { "#; let obj: GetAddressUtxosResponse = serde_json::from_str(json)?; + let GetAddressUtxosResponse::Utxos(obj) = &obj else { + panic!("Expected ChainInfoFalse variant"); + }; + let new_obj = obj .iter() .map(|utxo| { @@ -930,7 +930,68 @@ fn test_get_address_utxos() -> Result<(), Box> { }) .collect::>(); - assert_eq!(obj, new_obj); + assert_eq!(obj.clone(), new_obj); + + Ok(()) +} + +#[test] +fn test_get_address_utxos_chain_info_true() -> Result<(), Box> { + let json = r#" +{ + "utxos": [ + { + "address": "t1at7nVNsv6taLRrNRvnQdtfLNRDfsGc3Ak", + "txid": "6ee3e8a86dfeca629aeaf794aacb714db1cf1868bc9fe487de443e6197d8764a", + "outputIndex": 0, + "script": "76a914ba92ff06081d5ff6542af8d3b2d209d29ba6337c88ac", + "satoshis": 125000000, + "height": 2931856 + } + ], + "hash": "000000000079a1a696c9d2073ec4cd8729d2a59bbb26999263cbaab992e09280", + "height": 3053274 +} +"#; + let obj: GetAddressUtxosResponse = serde_json::from_str(json)?; + + let GetAddressUtxosResponse::UtxosAndChainInfo(obj) = &obj else { + panic!("Expected ChainInfoTrue variant"); + }; + + let hash = obj.hash(); + let height = obj.height(); + + let new_obj = GetAddressUtxosResponseObject::new( + obj.utxos() + .iter() + .map(|utxo| { + // 
Address extractability was checked manually + let address = utxo.address().clone(); + // Hash extractability was checked in other test + let txid = utxo.txid(); + let output_index = utxo.output_index().index(); + // Script extractability was checked in other test + let script = utxo.script().clone(); + let satoshis = utxo.satoshis(); + // Height extractability was checked in other test + let height = utxo.height(); + + Utxo::new( + address, + txid, + OutputIndex::from_index(output_index), + script, + satoshis, + height, + ) + }) + .collect::>(), + hash, + height, + ); + + assert_eq!(obj.clone(), new_obj); Ok(()) } diff --git a/zebra-script/Cargo.toml b/zebra-script/Cargo.toml index d208ac28acf..1a71c9f35d7 100644 --- a/zebra-script/Cargo.toml +++ b/zebra-script/Cargo.toml @@ -32,3 +32,6 @@ thiserror = { workspace = true } hex = { workspace = true } lazy_static = { workspace = true } zebra-test = { path = "../zebra-test", version = "1.0.1" } + +[lints] +workspace = true diff --git a/zebra-script/src/lib.rs b/zebra-script/src/lib.rs index bf3a7ac009a..2db88ec40af 100644 --- a/zebra-script/src/lib.rs +++ b/zebra-script/src/lib.rs @@ -96,7 +96,7 @@ pub struct CachedFfiTransaction { } impl CachedFfiTransaction { - /// Construct a `PrecomputedTransaction` from a `Transaction` and the outputs + /// Construct a `CachedFfiTransaction` from a `Transaction` and the outputs /// from previous transactions that match each input in the transaction /// being verified. 
pub fn new( diff --git a/zebra-state/Cargo.toml b/zebra-state/Cargo.toml index 0c4ce605d0b..6f40e263e24 100644 --- a/zebra-state/Cargo.toml +++ b/zebra-state/Cargo.toml @@ -66,10 +66,12 @@ tempfile = { workspace = true } thiserror = { workspace = true } rayon = { workspace = true } -tokio = { workspace = true, features = ["rt-multi-thread", "sync", "tracing"] } +tokio = { workspace = true, features = ["rt-multi-thread", "sync", "tracing", "macros", "time"] } tower = { workspace = true, features = ["buffer", "util"] } tracing = { workspace = true } +sapling-crypto = { workspace = true } + # elasticsearch specific dependencies. # Security: avoid default dependency on openssl elasticsearch = { workspace = true, features = ["rustls-tls"], optional = true } @@ -84,12 +86,10 @@ howudoin = { workspace = true, optional = true } zebra-test = { path = "../zebra-test/", version = "1.0.1", optional = true } proptest = { workspace = true, optional = true } proptest-derive = { workspace = true, optional = true } +derive-getters.workspace = true [dev-dependencies] color-eyre = { workspace = true } -# This is a transitive dependency via color-eyre. -# Enable a feature that makes tinyvec compile much faster. 
-tinyvec = { workspace = true, features = ["rustc_1_55"] } once_cell = { workspace = true } spandoc = { workspace = true } @@ -108,3 +108,6 @@ tokio = { workspace = true, features = ["full", "tracing", "test-util"] } zebra-chain = { path = "../zebra-chain", version = "2.0.0", features = ["proptest-impl"] } zebra-test = { path = "../zebra-test/", version = "1.0.1" } + +[lints] +workspace = true diff --git a/zebra-state/src/arbitrary.rs b/zebra-state/src/arbitrary.rs index 5cf9e588c82..1dc9b7ce33d 100644 --- a/zebra-state/src/arbitrary.rs +++ b/zebra-state/src/arbitrary.rs @@ -24,21 +24,7 @@ pub trait Prepare { impl Prepare for Arc { fn prepare(self) -> SemanticallyVerifiedBlock { - let block = self; - let hash = block.hash(); - let height = block.coinbase_height().unwrap(); - let transaction_hashes: Arc<[_]> = block.transactions.iter().map(|tx| tx.hash()).collect(); - let new_outputs = - transparent::new_ordered_outputs_with_height(&block, height, &transaction_hashes); - - SemanticallyVerifiedBlock { - block, - hash, - height, - new_outputs, - transaction_hashes, - deferred_pool_balance_change: None, - } + self.into() } } diff --git a/zebra-state/src/config.rs b/zebra-state/src/config.rs index dd4da087ddb..6ac48432e86 100644 --- a/zebra-state/src/config.rs +++ b/zebra-state/src/config.rs @@ -82,6 +82,17 @@ pub struct Config { /// [`cache_dir`]: struct.Config.html#structfield.cache_dir pub ephemeral: bool, + /// Whether to cache non-finalized blocks on disk to be restored when Zebra restarts. + /// + /// Set to `true` by default. If this is set to `false`, Zebra will irrecoverably drop + /// non-finalized blocks when the process exits and will have to re-download them from + /// the network when it restarts, if those blocks are still available in the network. + /// + /// Note: The non-finalized state will be written to a backup cache once per 5 seconds at most. 
+ /// If blocks are added to the non-finalized state more frequently, the backup may not reflect + /// Zebra's last non-finalized state before it shut down. + pub should_backup_non_finalized_state: bool, + /// Whether to delete the old database directories when present. /// /// Set to `true` by default. If this is set to `false`, @@ -148,6 +159,20 @@ impl Config { } } + /// Returns the path for the non-finalized state backup directory, based on the network. + /// Non-finalized state backup files are encoded in the network protocol format and remain + /// valid across db format upgrades. + pub fn non_finalized_state_backup_dir(&self, network: &Network) -> Option { + if self.ephemeral || !self.should_backup_non_finalized_state { + // Ephemeral databases are intended to be irrecoverable across restarts and don't + // require a backup for the non-finalized state. + return None; + } + + let net_dir = network.lowercase_name(); + Some(self.cache_dir.join("non_finalized_state").join(net_dir)) + } + /// Returns the path for the database format minor/patch version file, /// based on the kind, major version and network. pub fn version_file_path( @@ -177,6 +202,7 @@ impl Default for Config { Self { cache_dir: default_cache_dir(), ephemeral: false, + should_backup_non_finalized_state: true, delete_old_database: true, debug_stop_at_height: None, debug_validity_check_interval: None, diff --git a/zebra-state/src/error.rs b/zebra-state/src/error.rs index 7c1f6f0ccbe..247c161e745 100644 --- a/zebra-state/src/error.rs +++ b/zebra-state/src/error.rs @@ -44,7 +44,7 @@ pub type BoxError = Box; /// An error describing the reason a semantically verified block could not be committed to the state. 
#[derive(Debug, Clone, Error, PartialEq, Eq)] #[error("block is not contextually valid: {}", .0)] -pub struct CommitSemanticallyVerifiedError(#[from] ValidateContextError); +pub struct CommitSemanticallyVerifiedError(#[from] Box); /// An error describing the reason a block or its descendants could not be reconsidered after /// potentially being invalidated from the chain_set. @@ -60,7 +60,7 @@ pub enum ReconsiderError { InvalidatedBlocksEmpty, #[error("{0}")] - ValidationError(#[from] ValidateContextError), + ValidationError(#[from] Box), } /// An error describing why a block failed contextual validation. @@ -236,8 +236,8 @@ pub enum ValidateContextError { #[non_exhaustive] AddValuePool { value_balance_error: ValueBalanceError, - chain_value_pools: ValueBalance, - block_value_pool_change: ValueBalance, + chain_value_pools: Box>, + block_value_pool_change: Box>, height: Option, }, @@ -286,7 +286,7 @@ pub enum ValidateContextError { transaction_hash: transaction::Hash, }, - #[error("block hash {block_hash} has already been sent to be commited to the state")] + #[error("block hash {block_hash} has already been sent to be committed to the state")] #[non_exhaustive] DuplicateCommitRequest { block_hash: block::Hash }, diff --git a/zebra-state/src/lib.rs b/zebra-state/src/lib.rs index ab940e573e4..1c811ba65fe 100644 --- a/zebra-state/src/lib.rs +++ b/zebra-state/src/lib.rs @@ -59,7 +59,7 @@ pub use service::{ finalized_state::FinalizedState, init, init_read_only, non_finalized_state::NonFinalizedState, - spawn_init, spawn_init_read_only, + spawn_init_read_only, watch_receiver::WatchReceiver, OutputLocation, TransactionIndex, TransactionLocation, }; diff --git a/zebra-state/src/request.rs b/zebra-state/src/request.rs index 468beb32f50..3989dd7dbd8 100644 --- a/zebra-state/src/request.rs +++ b/zebra-state/src/request.rs @@ -331,7 +331,7 @@ impl Treestate { sprout: Arc, sapling: Arc, orchard: Arc, - sapling_subtree: Option>, + sapling_subtree: Option>, orchard_subtree: 
Option>, history_tree: Arc, ) -> Self { diff --git a/zebra-state/src/response.rs b/zebra-state/src/response.rs index 192787f87e8..48cd9bbe1c6 100644 --- a/zebra-state/src/response.rs +++ b/zebra-state/src/response.rs @@ -231,7 +231,7 @@ impl NonFinalizedBlocksListener { for new_block_with_hash in new_blocks { if sender.send(new_block_with_hash).await.is_err() { - tracing::debug!("non-finalized state change receiver closed, ending task"); + tracing::debug!("non-finalized blocks receiver closed, ending task"); return; } } @@ -240,7 +240,10 @@ impl NonFinalizedBlocksListener { // Wait for the next update to the non-finalized state if let Err(error) = non_finalized_state_receiver.changed().await { - warn!(?error, "non-finalized state receiver closed, ending task"); + warn!( + ?error, + "non-finalized state receiver closed, is Zebra shutting down?" + ); break; } } @@ -360,7 +363,7 @@ pub enum ReadResponse { /// Response to [`ReadRequest::SaplingSubtrees`] with the specified Sapling note commitment /// subtrees. 
SaplingSubtrees( - BTreeMap>, + BTreeMap>, ), /// Response to [`ReadRequest::OrchardSubtrees`] with the specified Orchard note commitment diff --git a/zebra-state/src/service.rs b/zebra-state/src/service.rs index 9e8e207fa79..e040aa4047b 100644 --- a/zebra-state/src/service.rs +++ b/zebra-state/src/service.rs @@ -25,7 +25,7 @@ use std::{ }; use futures::future::FutureExt; -use tokio::sync::{oneshot, watch}; +use tokio::sync::oneshot; use tower::{util::BoxService, Service, ServiceExt}; use tracing::{instrument, Instrument, Span}; @@ -234,9 +234,9 @@ impl Drop for StateService { self.clear_finalized_block_queue( "dropping the state: dropped unused finalized state queue block", ); - self.clear_non_finalized_block_queue(CommitSemanticallyVerifiedError::from( + self.clear_non_finalized_block_queue(CommitSemanticallyVerifiedError::from(Box::new( ValidateContextError::DroppedUnusedBlock, - )); + ))); // Log database metrics before shutting down info!("dropping the state: logging database metrics"); @@ -296,43 +296,81 @@ impl StateService { /// /// Returns the read-write and read-only state services, /// and read-only watch channels for its best chain tip. 
- pub fn new( + pub async fn new( config: Config, network: &Network, max_checkpoint_height: block::Height, checkpoint_verify_concurrency_limit: usize, ) -> (Self, ReadStateService, LatestChainTip, ChainTipChange) { - let timer = CodeTimer::start(); - let finalized_state = FinalizedState::new( - &config, - network, - #[cfg(feature = "elasticsearch")] - true, - ); - timer.finish(module_path!(), line!(), "opening finalized state database"); + let (finalized_state, finalized_tip, timer) = { + let config = config.clone(); + let network = network.clone(); + tokio::task::spawn_blocking(move || { + let timer = CodeTimer::start(); + let finalized_state = FinalizedState::new( + &config, + &network, + #[cfg(feature = "elasticsearch")] + true, + ); + timer.finish(module_path!(), line!(), "opening finalized state database"); - let timer = CodeTimer::start(); - let initial_tip = finalized_state - .db - .tip_block() + let timer = CodeTimer::start(); + let finalized_tip = finalized_state.db.tip_block(); + + (finalized_state, finalized_tip, timer) + }) + .await + .expect("failed to join blocking task") + }; + + // # Correctness + // + // The state service must set the finalized block write sender to `None` + // if there are blocks in the restored non-finalized state that are above + // the max checkpoint height so that non-finalized blocks can be written, otherwise, + // Zebra will be unable to commit semantically verified blocks, and its chain sync will stall. + // + // The state service must not set the finalized block write sender to `None` if there + // aren't blocks in the restored non-finalized state that are above the max checkpoint height, + // otherwise, unless checkpoint sync is disabled in the zebra-consensus configuration, + // Zebra will be unable to commit checkpoint verified blocks, and its chain sync will stall. 
+ let is_finalized_tip_past_max_checkpoint = if let Some(tip) = &finalized_tip { + tip.coinbase_height().expect("valid block must have height") >= max_checkpoint_height + } else { + false + }; + let (non_finalized_state, non_finalized_state_sender, non_finalized_state_receiver) = + NonFinalizedState::new(network) + .with_backup( + config.non_finalized_state_backup_dir(network), + &finalized_state.db, + is_finalized_tip_past_max_checkpoint, + ) + .await; + + let non_finalized_block_write_sent_hashes = SentHashes::new(&non_finalized_state); + let initial_tip = non_finalized_state + .best_tip_block() + .map(|cv_block| cv_block.block.clone()) + .or(finalized_tip) .map(CheckpointVerifiedBlock::from) .map(ChainTipBlock::from); + tracing::info!(chain_tip = ?initial_tip.as_ref().map(|tip| (tip.hash, tip.height)), "loaded Zebra state cache"); + let (chain_tip_sender, latest_chain_tip, chain_tip_change) = ChainTipSender::new(initial_tip, network); - let non_finalized_state = NonFinalizedState::new(network); - - let (non_finalized_state_sender, non_finalized_state_receiver) = - watch::channel(NonFinalizedState::new(&finalized_state.network())); - let finalized_state_for_writing = finalized_state.clone(); + let should_use_finalized_block_write_sender = non_finalized_state.is_chain_set_empty(); let (block_write_sender, invalid_block_write_reset_receiver, block_write_task) = write::BlockWriteSender::spawn( finalized_state_for_writing, non_finalized_state, chain_tip_sender, non_finalized_state_sender, + should_use_finalized_block_write_sender, ); let read_service = ReadStateService::new( @@ -349,7 +387,10 @@ impl StateService { let non_finalized_state_queued_blocks = QueuedBlocks::default(); let pending_utxos = PendingUtxos::default(); - let finalized_block_write_last_sent_hash = finalized_state.db.finalized_tip_hash(); + let finalized_block_write_last_sent_hash = + tokio::task::spawn_blocking(move || finalized_state.db.finalized_tip_hash()) + .await + .expect("failed to join 
blocking task"); let state = Self { network: network.clone(), @@ -358,7 +399,7 @@ impl StateService { finalized_state_queued_blocks: HashMap::new(), block_write_sender, finalized_block_write_last_sent_hash, - non_finalized_block_write_sent_hashes: SentHashes::default(), + non_finalized_block_write_sent_hashes, invalid_block_write_reset_receiver, pending_utxos, last_prune: Instant::now(), @@ -371,7 +412,12 @@ impl StateService { let timer = CodeTimer::start(); if let (Some(tip), Some(nu5_activation_height)) = ( - state.best_tip(), + { + let read_state = state.read_service.clone(); + tokio::task::spawn_blocking(move || read_state.best_tip()) + .await + .expect("task should not panic") + }, NetworkUpgrade::Nu5.activation_height(network), ) { if let Err(error) = check::legacy_chain( @@ -601,11 +647,11 @@ impl StateService { .contains(&semantically_verrified.hash) { let (rsp_tx, rsp_rx) = oneshot::channel(); - let _ = rsp_tx.send(Err(CommitSemanticallyVerifiedError::from( + let _ = rsp_tx.send(Err(CommitSemanticallyVerifiedError::from(Box::new( ValidateContextError::DuplicateCommitRequest { block_hash: semantically_verrified.hash, }, - ))); + )))); return rsp_rx; } @@ -615,11 +661,11 @@ impl StateService { .contains_height(semantically_verrified.height) { let (rsp_tx, rsp_rx) = oneshot::channel(); - let _ = rsp_tx.send(Err(CommitSemanticallyVerifiedError::from( + let _ = rsp_tx.send(Err(CommitSemanticallyVerifiedError::from(Box::new( ValidateContextError::AlreadyFinalized { block_height: semantically_verrified.height, }, - ))); + )))); return rsp_rx; } @@ -633,11 +679,11 @@ impl StateService { tracing::debug!("replacing older queued request with new request"); let (mut rsp_tx, rsp_rx) = oneshot::channel(); std::mem::swap(old_rsp_tx, &mut rsp_tx); - let _ = rsp_tx.send(Err(CommitSemanticallyVerifiedError::from( + let _ = rsp_tx.send(Err(CommitSemanticallyVerifiedError::from(Box::new( ValidateContextError::ReplacedByNewerRequest { block_hash: semantically_verrified.hash, 
}, - ))); + )))); rsp_rx } else { let (rsp_tx, rsp_rx) = oneshot::channel(); @@ -764,15 +810,15 @@ impl StateService { // If Zebra is shutting down, drop blocks and return an error. Self::send_semantically_verified_block_error( queued, - CommitSemanticallyVerifiedError::from( + CommitSemanticallyVerifiedError::from(Box::new( ValidateContextError::CommitTaskExited, - ), + )), ); self.clear_non_finalized_block_queue( - CommitSemanticallyVerifiedError::from( + CommitSemanticallyVerifiedError::from(Box::new( ValidateContextError::CommitTaskExited, - ), + )), ); return; @@ -868,12 +914,12 @@ impl ReadStateService { pub(crate) fn new( finalized_state: &FinalizedState, block_write_task: Option>>, - non_finalized_state_receiver: watch::Receiver, + non_finalized_state_receiver: WatchReceiver, ) -> Self { let read_service = Self { network: finalized_state.network(), db: finalized_state.db.clone(), - non_finalized_state_receiver: WatchReceiver::new(non_finalized_state_receiver), + non_finalized_state_receiver, block_write_task, }; @@ -997,9 +1043,9 @@ impl Service for StateService { rsp_rx .await .map_err(|_recv_error| { - BoxError::from(CommitSemanticallyVerifiedError::from( + BoxError::from(CommitSemanticallyVerifiedError::from(Box::new( ValidateContextError::NotReadyToBeCommitted, - )) + ))) }) // TODO: replace with Result::flatten once it stabilises // https://github.com/rust-lang/rust/issues/70142 @@ -2238,7 +2284,7 @@ impl Service for ReadStateService { /// It's possible to construct multiple state services in the same application (as /// long as they, e.g., use different storage locations), but doing so is /// probably not what you want. 
-pub fn init( +pub async fn init( config: Config, network: &Network, max_checkpoint_height: block::Height, @@ -2255,7 +2301,8 @@ pub fn init( network, max_checkpoint_height, checkpoint_verify_concurrency_limit, - ); + ) + .await; ( BoxService::new(state_service), @@ -2291,7 +2338,11 @@ pub fn init_read_only( tokio::sync::watch::channel(NonFinalizedState::new(network)); ( - ReadStateService::new(&finalized_state, None, non_finalized_state_receiver), + ReadStateService::new( + &finalized_state, + None, + WatchReceiver::new(non_finalized_state_receiver), + ), finalized_state.db.clone(), non_finalized_state_sender, ) @@ -2311,40 +2362,17 @@ pub fn spawn_init_read_only( tokio::task::spawn_blocking(move || init_read_only(config, &network)) } -/// Calls [`init`] with the provided [`Config`] and [`Network`] from a blocking task. -/// Returns a [`tokio::task::JoinHandle`] with a boxed state service, -/// a read state service, and receivers for state chain tip updates. -pub fn spawn_init( - config: Config, - network: &Network, - max_checkpoint_height: block::Height, - checkpoint_verify_concurrency_limit: usize, -) -> tokio::task::JoinHandle<( - BoxService, - ReadStateService, - LatestChainTip, - ChainTipChange, -)> { - let network = network.clone(); - tokio::task::spawn_blocking(move || { - init( - config, - &network, - max_checkpoint_height, - checkpoint_verify_concurrency_limit, - ) - }) -} - /// Returns a [`StateService`] with an ephemeral [`Config`] and a buffer with a single slot. /// /// This can be used to create a state service for testing. See also [`init`]. 
#[cfg(any(test, feature = "proptest-impl"))] -pub fn init_test(network: &Network) -> Buffer, Request> { +pub async fn init_test( + network: &Network, +) -> Buffer, Request> { // TODO: pass max_checkpoint_height and checkpoint_verify_concurrency limit // if we ever need to test final checkpoint sent UTXO queries let (state_service, _, _, _) = - StateService::new(Config::ephemeral(), network, block::Height::MAX, 0); + StateService::new(Config::ephemeral(), network, block::Height::MAX, 0).await; Buffer::new(BoxService::new(state_service), 1) } @@ -2354,7 +2382,7 @@ pub fn init_test(network: &Network) -> Buffer ( Buffer, Request>, @@ -2365,7 +2393,7 @@ pub fn init_test_services( // TODO: pass max_checkpoint_height and checkpoint_verify_concurrency limit // if we ever need to test final checkpoint sent UTXO queries let (state_service, read_state_service, latest_chain_tip, chain_tip_change) = - StateService::new(Config::ephemeral(), network, block::Height::MAX, 0); + StateService::new(Config::ephemeral(), network, block::Height::MAX, 0).await; let state_service = Buffer::new(BoxService::new(state_service), 1); diff --git a/zebra-state/src/service/arbitrary.rs b/zebra-state/src/service/arbitrary.rs index fa3aec65998..594151f6687 100644 --- a/zebra-state/src/service/arbitrary.rs +++ b/zebra-state/src/service/arbitrary.rs @@ -218,7 +218,7 @@ pub async fn populated_state( // TODO: write a test that checks the finalized to non-finalized transition with UTXOs, // and set max_checkpoint_height and checkpoint_verify_concurrency_limit correctly. 
let (state, read_state, latest_chain_tip, mut chain_tip_change) = - StateService::new(Config::ephemeral(), network, Height::MAX, 0); + StateService::new(Config::ephemeral(), network, Height::MAX, 0).await; let mut state = Buffer::new(BoxService::new(state), 1); let mut responses = FuturesUnordered::new(); diff --git a/zebra-state/src/service/check/tests/nullifier.rs b/zebra-state/src/service/check/tests/nullifier.rs index 0392f1c8e79..1f4000b4db1 100644 --- a/zebra-state/src/service/check/tests/nullifier.rs +++ b/zebra-state/src/service/check/tests/nullifier.rs @@ -165,10 +165,10 @@ proptest! { // we might need to just check `is_err()` here prop_assert_eq!( commit_result, - Err(DuplicateSproutNullifier { + Err(Box::new(DuplicateSproutNullifier { nullifier: duplicate_nullifier, in_finalized_state: false, - } + }) .into()) ); // block was rejected @@ -224,10 +224,10 @@ proptest! { prop_assert_eq!( commit_result, - Err(DuplicateSproutNullifier { + Err(Box::new(DuplicateSproutNullifier { nullifier: duplicate_nullifier, in_finalized_state: false, - } + }) .into()) ); prop_assert_eq!(Some((Height(0), genesis.hash)), read::best_tip(&non_finalized_state, &finalized_state.db)); @@ -285,10 +285,10 @@ proptest! { prop_assert_eq!( commit_result, - Err(DuplicateSproutNullifier { + Err(Box::new(DuplicateSproutNullifier { nullifier: duplicate_nullifier, in_finalized_state: false, - } + }) .into()) ); prop_assert_eq!(Some((Height(0), genesis.hash)), read::best_tip(&non_finalized_state, &finalized_state.db)); @@ -392,10 +392,10 @@ proptest! { prop_assert_eq!( commit_result, - Err(DuplicateSproutNullifier { + Err(Box::new(DuplicateSproutNullifier { nullifier: duplicate_nullifier, in_finalized_state: duplicate_in_finalized_state, - } + }) .into()) ); @@ -517,10 +517,10 @@ proptest! 
{ prop_assert_eq!( commit_result, - Err(DuplicateSaplingNullifier { + Err(Box::new(DuplicateSaplingNullifier { nullifier: duplicate_nullifier, in_finalized_state: false, - } + }) .into()) ); prop_assert_eq!(Some((Height(0), genesis.hash)), read::best_tip(&non_finalized_state, &finalized_state.db)); @@ -573,10 +573,10 @@ proptest! { prop_assert_eq!( commit_result, - Err(DuplicateSaplingNullifier { + Err(Box::new(DuplicateSaplingNullifier { nullifier: duplicate_nullifier, in_finalized_state: false, - } + }) .into()) ); prop_assert_eq!(Some((Height(0), genesis.hash)), read::best_tip(&non_finalized_state, &finalized_state.db)); @@ -670,10 +670,10 @@ proptest! { prop_assert_eq!( commit_result, - Err(DuplicateSaplingNullifier { + Err(Box::new(DuplicateSaplingNullifier { nullifier: duplicate_nullifier, in_finalized_state: duplicate_in_finalized_state, - } + }) .into()) ); @@ -798,10 +798,10 @@ proptest! { prop_assert_eq!( commit_result, - Err(DuplicateOrchardNullifier { + Err(Box::new(DuplicateOrchardNullifier { nullifier: duplicate_nullifier, in_finalized_state: false, - } + }) .into()) ); prop_assert_eq!(Some((Height(0), genesis.hash)), read::best_tip(&non_finalized_state, &finalized_state.db)); @@ -858,10 +858,10 @@ proptest! { prop_assert_eq!( commit_result, - Err(DuplicateOrchardNullifier { + Err(Box::new(DuplicateOrchardNullifier { nullifier: duplicate_nullifier, in_finalized_state: false, - } + }) .into()) ); prop_assert_eq!(Some((Height(0), genesis.hash)), read::best_tip(&non_finalized_state, &finalized_state.db)); @@ -958,10 +958,10 @@ proptest! 
{ prop_assert_eq!( commit_result, - Err(DuplicateOrchardNullifier { + Err(Box::new(DuplicateOrchardNullifier { nullifier: duplicate_nullifier, in_finalized_state: duplicate_in_finalized_state, - } + }) .into()) ); diff --git a/zebra-state/src/service/check/tests/utxo.rs b/zebra-state/src/service/check/tests/utxo.rs index 3e37fdc8173..d878f6290b4 100644 --- a/zebra-state/src/service/check/tests/utxo.rs +++ b/zebra-state/src/service/check/tests/utxo.rs @@ -374,10 +374,10 @@ proptest! { // the block was rejected prop_assert_eq!( commit_result, - Err(DuplicateTransparentSpend { + Err(Box::new(DuplicateTransparentSpend { outpoint: expected_outpoint, location: "the same block", - } + }) .into()) ); prop_assert_eq!(Some((Height(0), genesis.hash)), read::best_tip(&non_finalized_state, &finalized_state.db)); @@ -438,10 +438,10 @@ proptest! { // the block was rejected prop_assert_eq!( commit_result, - Err(DuplicateTransparentSpend { + Err(Box::new(DuplicateTransparentSpend { outpoint: expected_outpoint, location: "the same block", - } + }) .into()) ); prop_assert_eq!(Some((Height(1), block1.hash())), read::best_tip(&non_finalized_state, &finalized_state.db)); @@ -523,10 +523,10 @@ proptest! { // the block was rejected prop_assert_eq!( commit_result, - Err(DuplicateTransparentSpend { + Err(Box::new(DuplicateTransparentSpend { outpoint: expected_outpoint, location: "the same block", - } + }) .into()) ); prop_assert_eq!(Some((Height(1), block1.hash())), read::best_tip(&non_finalized_state, &finalized_state.db)); @@ -674,19 +674,19 @@ proptest! 
{ if use_finalized_state_spend { prop_assert_eq!( commit_result, - Err(MissingTransparentOutput { + Err(Box::new(MissingTransparentOutput { outpoint: expected_outpoint, location: "the non-finalized and finalized chain", - } + }) .into()) ); } else { prop_assert_eq!( commit_result, - Err(DuplicateTransparentSpend { + Err(Box::new(DuplicateTransparentSpend { outpoint: expected_outpoint, location: "the non-finalized chain", - } + }) .into()) ); } @@ -749,10 +749,10 @@ proptest! { // the block was rejected prop_assert_eq!( commit_result, - Err(MissingTransparentOutput { + Err(Box::new(MissingTransparentOutput { outpoint: expected_outpoint, location: "the non-finalized and finalized chain", - } + }) .into()) ); prop_assert_eq!(Some((Height(0), genesis.hash)), read::best_tip(&non_finalized_state, &finalized_state.db)); @@ -816,9 +816,9 @@ proptest! { // the block was rejected prop_assert_eq!( commit_result, - Err(EarlyTransparentSpend { + Err(Box::new(EarlyTransparentSpend { outpoint: expected_outpoint, - } + }) .into()) ); prop_assert_eq!(Some((Height(0), genesis.hash)), read::best_tip(&non_finalized_state, &finalized_state.db)); diff --git a/zebra-state/src/service/finalized_state.rs b/zebra-state/src/service/finalized_state.rs index 198bc335534..b0e44cf673f 100644 --- a/zebra-state/src/service/finalized_state.rs +++ b/zebra-state/src/service/finalized_state.rs @@ -258,8 +258,6 @@ impl FinalizedState { } } - tracing::info!(tip = ?new_state.db.tip(), "loaded Zebra state cache"); - new_state } diff --git a/zebra-state/src/service/finalized_state/disk_format/shielded.rs b/zebra-state/src/service/finalized_state/disk_format/shielded.rs index bcd24d5c604..a845cda2c30 100644 --- a/zebra-state/src/service/finalized_state/disk_format/shielded.rs +++ b/zebra-state/src/service/finalized_state/disk_format/shielded.rs @@ -162,11 +162,11 @@ impl FromDisk for orchard::tree::NoteCommitmentTree { } } -impl IntoDisk for sapling::tree::Node { +impl IntoDisk for sapling_crypto::Node { 
type Bytes = Vec; fn as_bytes(&self) -> Self::Bytes { - self.as_ref().to_vec() + self.to_bytes().to_vec() } } @@ -186,9 +186,15 @@ impl>> IntoDisk for NoteCommitmentSubtreeData) -> Self { - Self::try_from(bytes.as_ref()).expect("trusted data should deserialize successfully") + Self::from_bytes( + bytes + .as_ref() + .try_into() + .expect("trusted data should be 32 bytes"), + ) + .expect("trusted data should deserialize successfully") } } diff --git a/zebra-state/src/service/finalized_state/disk_format/tests/prop.rs b/zebra-state/src/service/finalized_state/disk_format/tests/prop.rs index 3ef15c6257f..91ddaadc359 100644 --- a/zebra-state/src/service/finalized_state/disk_format/tests/prop.rs +++ b/zebra-state/src/service/finalized_state/disk_format/tests/prop.rs @@ -378,9 +378,9 @@ fn roundtrip_sapling_tree_root() { fn roundtrip_sapling_subtree_data() { let _init_guard = zebra_test::init(); - proptest!(|(mut val in any::>())| { + proptest!(|(mut val in any::>())| { val.end_height.0 %= MAX_ON_DISK_HEIGHT.0 + 1; - assert_value_properties(val) + assert_value_properties(val.root.0) }); } diff --git a/zebra-state/src/service/finalized_state/disk_format/upgrade/add_subtrees.rs b/zebra-state/src/service/finalized_state/disk_format/upgrade/add_subtrees.rs index 55ce1218139..1f11d21c369 100644 --- a/zebra-state/src/service/finalized_state/disk_format/upgrade/add_subtrees.rs +++ b/zebra-state/src/service/finalized_state/disk_format/upgrade/add_subtrees.rs @@ -220,17 +220,17 @@ pub fn subtree_format_calculation_pre_checks(db: &ZebraDb) -> Result<(), String> } /// A quick test vector that allows us to fail an incorrect upgrade within a few seconds. 
-fn first_sapling_mainnet_subtree() -> NoteCommitmentSubtree { +fn first_sapling_mainnet_subtree() -> NoteCommitmentSubtree { // This test vector was generated using the command: // ```sh // zcash-cli z_getsubtreesbyindex sapling 0 1 // ``` NoteCommitmentSubtree { index: 0.into(), - root: hex!("754bb593ea42d231a7ddf367640f09bbf59dc00f2c1d2003cc340e0c016b5b13") - .as_slice() - .try_into() - .expect("test vector is valid"), + root: sapling_crypto::Node::from_bytes(hex!( + "754bb593ea42d231a7ddf367640f09bbf59dc00f2c1d2003cc340e0c016b5b13" + )) + .expect("test vector is valid"), end_height: Height(558822), } } @@ -638,7 +638,7 @@ fn calculate_sapling_subtree( prev_tree: Arc, end_height: Height, tree: Arc, -) -> NoteCommitmentSubtree { +) -> NoteCommitmentSubtree { // If a subtree is completed by a note commitment in the block at `end_height`, // then that subtree can be completed in two different ways: if let Some((index, node)) = tree.completed_subtree_index_and_root() { @@ -872,7 +872,7 @@ fn calculate_orchard_subtree( /// Writes a Sapling note commitment subtree to `upgrade_db`. fn write_sapling_subtree( upgrade_db: &ZebraDb, - subtree: NoteCommitmentSubtree, + subtree: NoteCommitmentSubtree, ) { let mut batch = DiskWriteBatch::new(); @@ -882,7 +882,7 @@ fn write_sapling_subtree( .write_batch(batch) .expect("writing sapling note commitment subtrees should always succeed."); - if subtree.index.0 % 100 == 0 { + if subtree.index.0.is_multiple_of(100) { info!(end_height = ?subtree.end_height, index = ?subtree.index.0, "calculated and added sapling subtree"); } // This log happens about once per second on recent machines with SSD disks. 
@@ -902,7 +902,7 @@ fn write_orchard_subtree( .write_batch(batch) .expect("writing orchard note commitment subtrees should always succeed."); - if subtree.index.0 % 100 == 0 { + if subtree.index.0.is_multiple_of(100) { info!(end_height = ?subtree.end_height, index = ?subtree.index.0, "calculated and added orchard subtree"); } // This log happens about once per second on recent machines with SSD disks. diff --git a/zebra-state/src/service/finalized_state/disk_format/upgrade/block_info_and_address_received.rs b/zebra-state/src/service/finalized_state/disk_format/upgrade/block_info_and_address_received.rs index ad059d7d844..96a006e3ec6 100644 --- a/zebra-state/src/service/finalized_state/disk_format/upgrade/block_info_and_address_received.rs +++ b/zebra-state/src/service/finalized_state/disk_format/upgrade/block_info_and_address_received.rs @@ -157,7 +157,7 @@ impl DiskFormatUpgrade for Upgrade { for result in seq_iter { let (h, load_result) = result?; let height = Height(h); - if height.0 % 1000 == 0 { + if height.0.is_multiple_of(1000) { tracing::info!(height = ?height, "adding block info for height"); } // Get the data loaded from the parallel iterator diff --git a/zebra-state/src/service/finalized_state/tests/vectors.rs b/zebra-state/src/service/finalized_state/tests/vectors.rs index c2b035d5b1f..9e4f1abb171 100644 --- a/zebra-state/src/service/finalized_state/tests/vectors.rs +++ b/zebra-state/src/service/finalized_state/tests/vectors.rs @@ -16,7 +16,7 @@ use zebra_chain::{ tree::NoteCommitmentTree as OrchardNoteCommitmentTree, }, sapling::{ - self, tree::legacy::LegacyNoteCommitmentTree as LegacySaplingNoteCommitmentTree, + tree::legacy::LegacyNoteCommitmentTree as LegacySaplingNoteCommitmentTree, tree::NoteCommitmentTree as SaplingNoteCommitmentTree, }, sprout::{ @@ -158,7 +158,7 @@ fn sapling_note_commitment_tree_serialization() { for (idx, cm_u_hex) in hex_commitments.iter().enumerate() { let bytes = <[u8; 32]>::from_hex(cm_u_hex).unwrap(); - let cm_u = 
jubjub::Fq::from_bytes(&bytes).unwrap(); + let cm_u = sapling_crypto::note::ExtractedNoteCommitment::from_bytes(&bytes).unwrap(); incremental_tree.append(cm_u).unwrap(); if random() { info!(?idx, "randomly caching root for note commitment tree index"); @@ -197,7 +197,7 @@ fn sapling_note_commitment_tree_serialization_one() { for (idx, cm_u_hex) in hex_commitments.iter().enumerate() { let bytes = <[u8; 32]>::from_hex(cm_u_hex).unwrap(); - let cm_u = jubjub::Fq::from_bytes(&bytes).unwrap(); + let cm_u = sapling_crypto::note::ExtractedNoteCommitment::from_bytes(&bytes).unwrap(); incremental_tree.append(cm_u).unwrap(); if random() { info!(?idx, "randomly caching root for note commitment tree index"); @@ -249,7 +249,7 @@ fn sapling_note_commitment_tree_serialization_pow2() { for (idx, cm_u_hex) in hex_commitments.iter().enumerate() { let bytes = <[u8; 32]>::from_hex(cm_u_hex).unwrap(); - let cm_u = jubjub::Fq::from_bytes(&bytes).unwrap(); + let cm_u = sapling_crypto::note::ExtractedNoteCommitment::from_bytes(&bytes).unwrap(); incremental_tree.append(cm_u).unwrap(); if random() { info!(?idx, "randomly caching root for note commitment tree index"); @@ -528,7 +528,7 @@ fn sapling_checks( let subtree = NoteCommitmentSubtreeData::new( Height(100000), - sapling::tree::Node::from_bytes(incremental_tree.hash()), + sapling_crypto::Node::from_bytes(incremental_tree.hash()).unwrap(), ); let serialized_subtree = subtree.as_bytes(); @@ -539,7 +539,7 @@ fn sapling_checks( ); let deserialized_subtree = - NoteCommitmentSubtreeData::::from_bytes(&serialized_subtree); + NoteCommitmentSubtreeData::::from_bytes(&serialized_subtree); assert_eq!( subtree, deserialized_subtree, diff --git a/zebra-state/src/service/finalized_state/zebra_db/shielded.rs b/zebra-state/src/service/finalized_state/zebra_db/shielded.rs index b4036a31e5f..49974bca15f 100644 --- a/zebra-state/src/service/finalized_state/zebra_db/shielded.rs +++ b/zebra-state/src/service/finalized_state/zebra_db/shielded.rs @@ -271,13 
+271,13 @@ impl ZebraDb { pub(in super::super) fn sapling_subtree_by_index( &self, index: impl Into + Copy, - ) -> Option> { + ) -> Option> { let sapling_subtrees = self .db .cf_handle("sapling_note_commitment_subtree") .unwrap(); - let subtree_data: NoteCommitmentSubtreeData = + let subtree_data: NoteCommitmentSubtreeData = self.db.zs_get(&sapling_subtrees, &index.into())?; Some(subtree_data.with_index(index)) @@ -288,7 +288,7 @@ impl ZebraDb { pub fn sapling_subtree_list_by_index_range( &self, range: impl std::ops::RangeBounds, - ) -> BTreeMap> { + ) -> BTreeMap> { let sapling_subtrees = self .db .cf_handle("sapling_note_commitment_subtree") @@ -301,7 +301,7 @@ impl ZebraDb { /// Get the sapling note commitment subtress for the finalized tip. #[allow(clippy::unwrap_in_result)] - fn sapling_subtree_for_tip(&self) -> Option> { + fn sapling_subtree_for_tip(&self) -> Option> { let sapling_subtrees = self .db .cf_handle("sapling_note_commitment_subtree") @@ -309,7 +309,7 @@ impl ZebraDb { let (index, subtree_data): ( NoteCommitmentSubtreeIndex, - NoteCommitmentSubtreeData, + NoteCommitmentSubtreeData, ) = self.db.zs_last_key_value(&sapling_subtrees)?; let tip_height = self.finalized_tip_height()?; @@ -664,7 +664,7 @@ impl DiskWriteBatch { pub fn insert_sapling_subtree( &mut self, zebra_db: &ZebraDb, - subtree: &NoteCommitmentSubtree, + subtree: &NoteCommitmentSubtree, ) { let sapling_subtree_cf = zebra_db .db diff --git a/zebra-state/src/service/finalized_state/zebra_db/transparent.rs b/zebra-state/src/service/finalized_state/zebra_db/transparent.rs index 26c405dd089..7486a0bfbf3 100644 --- a/zebra-state/src/service/finalized_state/zebra_db/transparent.rs +++ b/zebra-state/src/service/finalized_state/zebra_db/transparent.rs @@ -199,7 +199,7 @@ impl ZebraDb { // Ignore any outputs spent by blocks committed during this query output_locations .iter() - .flat_map(|&addr_out_loc| { + .filter_map(|&addr_out_loc| { Some(( addr_out_loc.unspent_output_location(), 
self.utxo_by_location(addr_out_loc.unspent_output_location())? diff --git a/zebra-state/src/service/non_finalized_state.rs b/zebra-state/src/service/non_finalized_state.rs index 5bd035792c1..bfe716c352b 100644 --- a/zebra-state/src/service/non_finalized_state.rs +++ b/zebra-state/src/service/non_finalized_state.rs @@ -5,10 +5,12 @@ use std::{ collections::{BTreeSet, HashMap}, mem, + path::PathBuf, sync::Arc, }; use indexmap::IndexMap; +use tokio::sync::watch; use zebra_chain::{ block::{self, Block, Hash, Height}, parameters::Network, @@ -21,11 +23,15 @@ use crate::{ error::ReconsiderError, request::{ContextuallyVerifiedBlock, FinalizableBlock}, service::{check, finalized_state::ZebraDb}, - BoxError, SemanticallyVerifiedBlock, ValidateContextError, + BoxError, SemanticallyVerifiedBlock, ValidateContextError, WatchReceiver, }; +mod backup; mod chain; +#[cfg(test)] +pub(crate) use backup::MIN_DURATION_BETWEEN_BACKUP_UPDATES; + #[cfg(test)] mod tests; @@ -123,6 +129,76 @@ impl NonFinalizedState { } } + /// Accepts an optional path to the non-finalized state backup directory and a handle to the database. + /// + /// If a backup directory path is provided: + /// - Creates a new backup directory at the provided path if none exists, + /// - Restores non-finalized blocks from the backup directory, if any, and + /// - Spawns a task that updates the non-finalized backup cache with + /// the latest non-finalized state sent to the returned watch channel. + /// + /// Returns the non-finalized state with a watch channel sender and receiver. 
+ pub async fn with_backup( + self, + backup_dir_path: Option, + finalized_state: &ZebraDb, + should_restore_backup: bool, + ) -> ( + Self, + watch::Sender, + WatchReceiver, + ) { + let with_watch_channel = |non_finalized_state: NonFinalizedState| { + let (sender, receiver) = watch::channel(non_finalized_state.clone()); + (non_finalized_state, sender, WatchReceiver::new(receiver)) + }; + + let Some(backup_dir_path) = backup_dir_path else { + return with_watch_channel(self); + }; + + tracing::info!( + ?backup_dir_path, + "restoring non-finalized blocks from backup and spawning backup task" + ); + + let non_finalized_state = { + let backup_dir_path = backup_dir_path.clone(); + let finalized_state = finalized_state.clone(); + tokio::task::spawn_blocking(move || { + // Create a new backup directory if none exists + std::fs::create_dir_all(&backup_dir_path) + .expect("failed to create non-finalized state backup directory"); + + if should_restore_backup { + backup::restore_backup(self, &backup_dir_path, &finalized_state) + } else { + self + } + }) + .await + .expect("failed to join blocking task") + }; + + let (non_finalized_state, sender, receiver) = with_watch_channel(non_finalized_state); + + tokio::spawn(backup::run_backup_task(receiver.clone(), backup_dir_path)); + + if !non_finalized_state.is_chain_set_empty() { + let num_blocks_restored = non_finalized_state + .best_chain() + .expect("must have best chain if chain set is not empty") + .len(); + + tracing::info!( + ?num_blocks_restored, + "restored blocks from non-finalized backup cache" + ); + } + + (non_finalized_state, sender, receiver) + } + /// Is the internal state of `self` the same as `other`? 
/// /// [`Chain`] has a custom [`Eq`] implementation based on proof of work, @@ -429,7 +505,7 @@ impl NonFinalizedState { let mut modified_chain = Arc::unwrap_or_clone(chain_result); for block in invalidated_blocks { - modified_chain = modified_chain.push(block)?; + modified_chain = modified_chain.push(block).map_err(Box::new)?; } let (height, hash) = modified_chain.non_finalized_tip(); @@ -784,6 +860,11 @@ impl NonFinalizedState { self.chain_set.len() } + /// Returns true if this [`NonFinalizedState`] contains no chains. + pub fn is_chain_set_empty(&self) -> bool { + self.chain_count() == 0 + } + /// Return the invalidated blocks. pub fn invalidated_blocks(&self) -> IndexMap>> { self.invalidated_blocks.clone() diff --git a/zebra-state/src/service/non_finalized_state/backup.rs b/zebra-state/src/service/non_finalized_state/backup.rs new file mode 100644 index 00000000000..7833b137437 --- /dev/null +++ b/zebra-state/src/service/non_finalized_state/backup.rs @@ -0,0 +1,326 @@ +use std::{ + collections::{BTreeMap, HashMap}, + fs::DirEntry, + io::{self, ErrorKind}, + path::{Path, PathBuf}, + sync::Arc, + time::Duration, +}; + +use hex::ToHex; +use zebra_chain::{ + amount::{Amount, DeferredPoolBalanceChange}, + block::{self, Block, Height}, + serialization::{ZcashDeserializeInto, ZcashSerialize}, +}; + +use crate::{ + ContextuallyVerifiedBlock, IntoDisk, NonFinalizedState, SemanticallyVerifiedBlock, + WatchReceiver, ZebraDb, +}; + +#[cfg(not(test))] +use crate::service::write::validate_and_commit_non_finalized; + +/// The minimum duration that Zebra will wait between updates to the non-finalized state backup cache. +pub(crate) const MIN_DURATION_BETWEEN_BACKUP_UPDATES: Duration = Duration::from_secs(5); + +/// Accepts an optional path to the non-finalized state backup directory and a handle to the database. 
+///
+/// Looks for blocks above the finalized tip height in the backup directory and
+/// attempts to commit them to the non-finalized state.
+///
+/// Returns the resulting non-finalized state.
+pub(super) fn restore_backup(
+    mut non_finalized_state: NonFinalizedState,
+    backup_dir_path: &PathBuf,
+    finalized_state: &ZebraDb,
+) -> NonFinalizedState {
+    let mut store: BTreeMap<Height, Vec<SemanticallyVerifiedBlock>> = BTreeMap::new();
+
+    for block in read_non_finalized_blocks_from_backup(backup_dir_path, finalized_state) {
+        store.entry(block.height).or_default().push(block);
+    }
+
+    for (height, blocks) in store {
+        for block in blocks {
+            #[cfg(test)]
+            let commit_result = if non_finalized_state
+                .any_chain_contains(&block.block.header.previous_block_hash)
+            {
+                non_finalized_state.commit_block(block, finalized_state)
+            } else {
+                non_finalized_state.commit_new_chain(block, finalized_state)
+            };
+
+            #[cfg(not(test))]
+            let commit_result =
+                validate_and_commit_non_finalized(finalized_state, &mut non_finalized_state, block);
+
+            // Commit failures are logged and skipped; the backup task will prune any
+            // backup files for blocks that never make it into the non-finalized state.
+            if let Err(commit_error) = commit_result {
+                tracing::warn!(
+                    ?commit_error,
+                    ?height,
+                    "failed to commit non-finalized block from backup directory"
+                );
+            }
+        }
+    }
+
+    non_finalized_state
+}
+
+/// Updates the non-finalized state backup cache whenever the non-finalized state changes,
+/// deleting any outdated backup files and writing any blocks that are in the non-finalized
+/// state but missing in the backup cache.
+pub(super) async fn run_backup_task( + mut non_finalized_state_receiver: WatchReceiver, + backup_dir_path: PathBuf, +) { + let err = loop { + let rate_limit = tokio::time::sleep(MIN_DURATION_BETWEEN_BACKUP_UPDATES); + let mut backup_blocks: HashMap = { + let backup_dir_path = backup_dir_path.clone(); + tokio::task::spawn_blocking(move || list_backup_dir_entries(&backup_dir_path)) + .await + .expect("failed to join blocking task when reading in backup task") + .collect() + }; + + if let (Err(err), _) = tokio::join!(non_finalized_state_receiver.changed(), rate_limit) { + break err; + }; + + let latest_non_finalized_state = non_finalized_state_receiver.cloned_watch_data(); + + let backup_dir_path = backup_dir_path.clone(); + tokio::task::spawn_blocking(move || { + for block in latest_non_finalized_state + .chain_iter() + .flat_map(|chain| chain.blocks.values()) + // Remove blocks from `backup_blocks` that are present in the non-finalized state + .filter(|block| backup_blocks.remove(&block.hash).is_none()) + { + // This loop will typically iterate only once, but may write multiple blocks if it misses + // some non-finalized state changes while waiting for I/O ops. + write_backup_block(&backup_dir_path, block); + } + + // Remove any backup blocks that are not present in the non-finalized state + for (_, outdated_backup_block_path) in backup_blocks { + if let Err(delete_error) = std::fs::remove_file(outdated_backup_block_path) { + tracing::warn!(?delete_error, "failed to delete backup block file"); + } + } + }) + .await + .expect("failed to join blocking task when writing in backup task"); + }; + + tracing::warn!( + ?err, + "got recv error waiting on non-finalized state change, is Zebra shutting down?" 
+ ) +} + +#[derive(Clone, Debug, PartialEq, Eq)] +struct NonFinalizedBlockBackup { + block: Arc, + deferred_pool_balance_change: Amount, +} + +impl From<&ContextuallyVerifiedBlock> for NonFinalizedBlockBackup { + fn from(cv_block: &ContextuallyVerifiedBlock) -> Self { + Self { + block: cv_block.block.clone(), + deferred_pool_balance_change: cv_block.chain_value_pool_change.deferred_amount(), + } + } +} + +impl NonFinalizedBlockBackup { + /// Encodes a [`NonFinalizedBlockBackup`] as a vector of bytes. + fn as_bytes(&self) -> Vec { + let block_bytes = self + .block + .zcash_serialize_to_vec() + .expect("verified block header version should be valid"); + + let deferred_pool_balance_change_bytes = + self.deferred_pool_balance_change.as_bytes().to_vec(); + + [deferred_pool_balance_change_bytes, block_bytes].concat() + } + + /// Constructs a new [`NonFinalizedBlockBackup`] from a vector of bytes. + #[allow(clippy::unwrap_in_result)] + fn from_bytes(bytes: Vec) -> Result { + let (deferred_pool_balance_change_bytes, block_bytes) = bytes + .split_at_checked(size_of::()) + .ok_or(io::Error::new( + ErrorKind::InvalidInput, + "input is too short", + ))?; + + Ok(Self { + block: Arc::new( + block_bytes + .zcash_deserialize_into() + .map_err(|err| io::Error::new(ErrorKind::InvalidData, err))?, + ), + deferred_pool_balance_change: Amount::from_bytes( + deferred_pool_balance_change_bytes + .try_into() + .expect("slice from `split_at_checked()` should fit in [u8; 8]"), + ) + .map_err(|err| io::Error::new(ErrorKind::InvalidData, err))?, + }) + } +} + +/// Writes a block to a file in the provided non-finalized state backup cache directory path. 
+fn write_backup_block(backup_dir_path: &Path, block: &ContextuallyVerifiedBlock) { + let backup_block_file_name: String = block.hash.encode_hex(); + let backup_block_file_path = backup_dir_path.join(backup_block_file_name); + let non_finalized_block_backup: NonFinalizedBlockBackup = block.into(); + + if let Err(err) = std::fs::write( + backup_block_file_path, + non_finalized_block_backup.as_bytes(), + ) { + tracing::warn!(?err, "failed to write non-finalized state backup block"); + } +} + +/// Reads blocks from the provided non-finalized state backup directory path. +/// +/// Returns any blocks that are valid and not present in the finalized state. +fn read_non_finalized_blocks_from_backup<'a>( + backup_dir_path: &PathBuf, + finalized_state: &'a ZebraDb, +) -> impl Iterator + 'a { + list_backup_dir_entries(backup_dir_path) + // It's okay to leave the file here, the backup task will delete it as long as + // the block is not added to the non-finalized state. + .filter(|&(block_hash, _)| !finalized_state.contains_hash(block_hash)) + .filter_map(|(block_hash, file_path)| match std::fs::read(file_path) { + Ok(block_bytes) => Some((block_hash, block_bytes)), + Err(err) => { + tracing::warn!(?err, "failed to open non-finalized state backup block file"); + None + } + }) + .filter_map(|(expected_block_hash, backup_block_file_contents)| { + match NonFinalizedBlockBackup::from_bytes(backup_block_file_contents) { + Ok(NonFinalizedBlockBackup { + block, + deferred_pool_balance_change, + }) if block.coinbase_height().is_some() => { + let block = SemanticallyVerifiedBlock::from(block) + .with_deferred_pool_balance_change(Some(DeferredPoolBalanceChange::new( + deferred_pool_balance_change, + ))); + if block.hash != expected_block_hash { + tracing::warn!( + block_hash = ?block.hash, + ?expected_block_hash, + "wrong block hash in file name" + ); + } + Some(block) + } + Ok(block) => { + tracing::warn!( + ?block, + "invalid non-finalized backup block, missing coinbase height" + ); + 
+                None
+            }
+            Err(err) => {
+                tracing::warn!(
+                    ?err,
+                    "failed to deserialize non-finalized backup data into block"
+                );
+                None
+            }
+        }
+    })
+}
+
+/// Accepts a backup directory path, opens the directory, converts its entries'
+/// filenames to block hashes, and deletes any entries with invalid file names.
+///
+/// # Panics
+///
+/// If the provided path cannot be opened as a directory.
+/// See [`read_backup_dir`] for more details.
+fn list_backup_dir_entries(
+    backup_dir_path: &PathBuf,
+) -> impl Iterator<Item = (block::Hash, PathBuf)> {
+    read_backup_dir(backup_dir_path).filter_map(process_backup_dir_entry)
+}
+
+/// Accepts a backup directory path and opens the directory.
+///
+/// Returns an iterator over all [`DirEntry`]s in the directory that are successfully read.
+///
+/// # Panics
+///
+/// If the provided path cannot be opened as a directory.
+fn read_backup_dir(backup_dir_path: &PathBuf) -> impl Iterator<Item = DirEntry> {
+    std::fs::read_dir(backup_dir_path)
+        .expect("failed to read non-finalized state backup directory")
+        .filter_map(|entry| match entry {
+            Ok(entry) => Some(entry),
+            Err(io_err) => {
+                tracing::warn!(
+                    ?io_err,
+                    "failed to read DirEntry in non-finalized state backup dir"
+                );
+
+                None
+            }
+        })
+}
+
+/// Accepts a [`DirEntry`] from the non-finalized state backup directory and
+/// parses the filename into a block hash.
+///
+/// Returns the block hash and the file path if successful, or
+/// returns None and deletes the file at the entry path otherwise.
+fn process_backup_dir_entry(entry: DirEntry) -> Option<(block::Hash, PathBuf)> { + let delete_file = || { + if let Err(delete_error) = std::fs::remove_file(entry.path()) { + tracing::warn!(?delete_error, "failed to delete backup block file"); + } + }; + + let block_file_name = match entry.file_name().into_string() { + Ok(block_hash) => block_hash, + Err(err) => { + tracing::warn!( + ?err, + "failed to convert OsString to String, attempting to delete file" + ); + + delete_file(); + return None; + } + }; + + let block_hash: block::Hash = match block_file_name.parse() { + Ok(block_hash) => block_hash, + Err(err) => { + tracing::warn!( + ?err, + "failed to parse hex-encoded block hash from file name, attempting to delete file" + ); + + delete_file(); + return None; + } + }; + + Some((block_hash, entry.path())) +} diff --git a/zebra-state/src/service/non_finalized_state/chain.rs b/zebra-state/src/service/non_finalized_state/chain.rs index fe033749dbc..6c40ea25d06 100644 --- a/zebra-state/src/service/non_finalized_state/chain.rs +++ b/zebra-state/src/service/non_finalized_state/chain.rs @@ -177,7 +177,7 @@ pub struct ChainInner { pub(crate) sapling_anchors_by_height: BTreeMap, /// A list of Sapling subtrees completed in the non-finalized state pub(crate) sapling_subtrees: - BTreeMap>, + BTreeMap>, /// The Orchard anchors created by `blocks`. /// @@ -750,7 +750,7 @@ impl Chain { pub fn sapling_subtree( &self, hash_or_height: HashOrHeight, - ) -> Option> { + ) -> Option> { let height = hash_or_height.height_or_else(|hash| self.height_by_hash.get(&hash).cloned())?; @@ -771,7 +771,7 @@ impl Chain { pub fn sapling_subtrees_in_range( &self, range: impl std::ops::RangeBounds, - ) -> BTreeMap> { + ) -> BTreeMap> { self.sapling_subtrees .range(range) .map(|(index, subtree)| (*index, *subtree)) @@ -779,7 +779,7 @@ impl Chain { } /// Returns the Sapling [`NoteCommitmentSubtree`] if it was completed at the tip height. 
- pub fn sapling_subtree_for_tip(&self) -> Option> { + pub fn sapling_subtree_for_tip(&self) -> Option> { if !self.is_empty() { let tip = self.non_finalized_tip_height(); self.sapling_subtree(tip.into()) @@ -2227,8 +2227,8 @@ impl UpdateWith<(ValueBalance, Height, usize)> for Chain { } Err(value_balance_error) => Err(ValidateContextError::AddValuePool { value_balance_error, - chain_value_pools: self.chain_value_pools, - block_value_pool_change: *block_value_pool_change, + chain_value_pools: Box::new(self.chain_value_pools), + block_value_pool_change: Box::new(*block_value_pool_change), height: Some(*height), })?, }; @@ -2375,7 +2375,7 @@ impl Chain { /// Inserts the supplied Sapling note commitment subtree into the chain. pub(crate) fn insert_sapling_subtree( &mut self, - subtree: NoteCommitmentSubtree, + subtree: NoteCommitmentSubtree, ) { self.inner .sapling_subtrees diff --git a/zebra-state/src/service/non_finalized_state/tests/vectors.rs b/zebra-state/src/service/non_finalized_state/tests/vectors.rs index ff2c7502308..5f984fa29a1 100644 --- a/zebra-state/src/service/non_finalized_state/tests/vectors.rs +++ b/zebra-state/src/service/non_finalized_state/tests/vectors.rs @@ -1,6 +1,6 @@ //! Fixed test vectors for the non-finalized state. 
-use std::sync::Arc; +use std::{sync::Arc, time::Duration}; use zebra_chain::{ amount::NonNegative, @@ -16,7 +16,7 @@ use crate::{ arbitrary::Prepare, service::{ finalized_state::FinalizedState, - non_finalized_state::{Chain, NonFinalizedState}, + non_finalized_state::{Chain, NonFinalizedState, MIN_DURATION_BETWEEN_BACKUP_UPDATES}, }, tests::FakeChainHelper, Config, @@ -762,3 +762,61 @@ fn commitment_is_validated_for_network_upgrade(network: Network, network_upgrade .commit_block(next_block.prepare(), &finalized_state) .unwrap(); } + +#[tokio::test] +async fn non_finalized_state_writes_blocks_to_and_restores_blocks_from_backup_cache() { + let network = Network::Mainnet; + + let finalized_state = FinalizedState::new( + &Config::ephemeral(), + &network, + #[cfg(feature = "elasticsearch")] + false, + ); + + let backup_dir_path = tempfile::Builder::new() + .prefix("zebra-non-finalized-state-backup-cache") + .tempdir() + .expect("temporary directory is created successfully") + .keep(); + + let (mut non_finalized_state, non_finalized_state_sender, _receiver) = + NonFinalizedState::new(&network) + .with_backup(Some(backup_dir_path.clone()), &finalized_state.db, false) + .await; + + let blocks = network.block_map(); + let height = NetworkUpgrade::Heartwood + .activation_height(&network) + .unwrap() + .0; + let block = Arc::new( + blocks + .get(&(height - 1)) + .expect("test vector exists") + .zcash_deserialize_into::() + .expect("block is structurally valid"), + ); + + non_finalized_state + .commit_new_chain(block.into(), &finalized_state.db) + .expect("committing test block should succeed"); + + non_finalized_state_sender + .send(non_finalized_state.clone()) + .expect("backup task should have a receiver, channel should be open"); + + // Wait for the minimum update time + tokio::time::sleep(Duration::from_secs(1) + MIN_DURATION_BETWEEN_BACKUP_UPDATES).await; + + let (non_finalized_state, _sender, _receiver) = NonFinalizedState::new(&network) + 
.with_backup(Some(backup_dir_path), &finalized_state.db, true) + .await; + + assert_eq!( + non_finalized_state.best_chain_len(), + Some(1), + "non-finalized state should have restored the block committed \ + to the previous non-finalized state" + ); +} diff --git a/zebra-state/src/service/queued_blocks.rs b/zebra-state/src/service/queued_blocks.rs index cbfc04eeadd..1e99716b879 100644 --- a/zebra-state/src/service/queued_blocks.rs +++ b/zebra-state/src/service/queued_blocks.rs @@ -11,8 +11,8 @@ use tracing::instrument; use zebra_chain::{block, transparent}; use crate::{ - BoxError, CheckpointVerifiedBlock, CommitSemanticallyVerifiedError, SemanticallyVerifiedBlock, - ValidateContextError, + BoxError, CheckpointVerifiedBlock, CommitSemanticallyVerifiedError, NonFinalizedState, + SemanticallyVerifiedBlock, ValidateContextError, }; #[cfg(test)] @@ -145,11 +145,11 @@ impl QueuedBlocks { let parent_hash = &expired_block.block.header.previous_block_hash; // we don't care if the receiver was dropped - let _ = expired_sender.send(Err(CommitSemanticallyVerifiedError::from( + let _ = expired_sender.send(Err(CommitSemanticallyVerifiedError::from(Box::new( ValidateContextError::PrunedBelowFinalizedTip { block_height: expired_block.height, }, - ))); + )))); // TODO: only remove UTXOs if there are no queued blocks with that UTXO // (known_utxos is best-effort, so this is ok for now) @@ -238,7 +238,7 @@ pub(crate) struct SentHashes { /// Stores a set of hashes that have been sent to the block write task but /// may not be in the finalized state yet. - sent: HashMap>, + pub sent: HashMap>, /// Known UTXOs. known_utxos: HashMap, @@ -249,6 +249,23 @@ pub(crate) struct SentHashes { } impl SentHashes { + /// Creates a new [`SentHashes`] with the block hashes and UTXOs in the provided non-finalized state. 
+ pub fn new(non_finalized_state: &NonFinalizedState) -> Self { + let mut sent_hashes = Self::default(); + for (_, block) in non_finalized_state + .chain_iter() + .flat_map(|c| c.blocks.clone()) + { + sent_hashes.add(&block.into()); + } + + if !sent_hashes.sent.is_empty() { + sent_hashes.can_fork_chain_at_hashes = true; + } + + sent_hashes + } + /// Stores the `block`'s hash, height, and UTXOs, so they can be used to check if a block or UTXO /// is available in the state. /// diff --git a/zebra-state/src/service/read/address/utxo.rs b/zebra-state/src/service/read/address/utxo.rs index 76a42ac838d..645a126ec39 100644 --- a/zebra-state/src/service/read/address/utxo.rs +++ b/zebra-state/src/service/read/address/utxo.rs @@ -16,7 +16,12 @@ use std::{ ops::RangeInclusive, }; -use zebra_chain::{block::Height, parameters::Network, transaction, transparent}; +use derive_getters::Getters; +use zebra_chain::{ + block::{self, Height}, + parameters::Network, + transaction, transparent, +}; use crate::{ service::{ @@ -33,16 +38,23 @@ pub const ADDRESS_HEIGHTS_FULL_RANGE: RangeInclusive = Height(1)..=Heigh /// A convenience wrapper that efficiently stores unspent transparent outputs, /// and the corresponding transaction IDs. -#[derive(Clone, Debug, Default, Eq, PartialEq)] +#[derive(Clone, Debug, Default, Eq, PartialEq, Getters)] pub struct AddressUtxos { /// A set of unspent transparent outputs. + #[getter(skip)] utxos: BTreeMap, /// The transaction IDs for each [`OutputLocation`] in `utxos`. + #[getter(skip)] tx_ids: BTreeMap, /// The configured network for this state. + #[getter(skip)] network: Network, + + /// The last height and hash that was queried to produce these UTXOs, if any. + /// It will be None if the state is empty. 
+ last_height_and_hash: Option<(block::Height, block::Hash)>, } impl AddressUtxos { @@ -51,11 +63,13 @@ impl AddressUtxos { network: &Network, utxos: BTreeMap, tx_ids: BTreeMap, + last_height_and_hash: Option<(block::Height, block::Hash)>, ) -> Self { Self { utxos, tx_ids, network: network.clone(), + last_height_and_hash, } } @@ -129,7 +143,7 @@ where // If the UTXOs are valid, return them, otherwise, retry or return an error. match chain_utxo_changes { - Ok((created_chain_utxos, spent_chain_utxos)) => { + Ok((created_chain_utxos, spent_chain_utxos, last_height)) => { debug!( chain_utxo_count = ?created_chain_utxos.len(), chain_utxo_spent = ?spent_chain_utxos.len(), @@ -140,7 +154,7 @@ where let utxos = apply_utxo_changes(finalized_utxos, created_chain_utxos, spent_chain_utxos); - let tx_ids = lookup_tx_ids_for_utxos(chain, db, &addresses, &utxos); + let tx_ids = lookup_tx_ids_for_utxos(chain.as_ref(), db, &addresses, &utxos); debug!( full_utxo_count = ?utxos.len(), @@ -150,7 +164,21 @@ where "full address UTXO response", ); - return Ok(AddressUtxos::new(network, utxos, tx_ids)); + // Get the matching hash for the given height, if any + let last_height_and_hash = last_height.and_then(|height| { + chain + .as_ref() + .and_then(|c| c.as_ref().hash_by_height(height)) + .or_else(|| db.hash(height)) + .map(|hash| (height, hash)) + }); + + return Ok(AddressUtxos::new( + network, + utxos, + tx_ids, + last_height_and_hash, + )); } Err(chain_utxo_error) => { @@ -205,10 +233,13 @@ fn finalized_address_utxos( (finalized_utxos, finalized_tip_range) } -/// Returns the UTXO changes for `addresses` in the non-finalized chain, -/// matching or overlapping the UTXOs for the `finalized_tip_range`. +/// Returns the UTXO changes (created and spent) for `addresses` in the +/// non-finalized chain, matching or overlapping the UTXOs for the +/// `finalized_tip_range`. Also returns the height of the last block in which +/// the changes were located, or None if the state is empty. 
/// -/// If the addresses do not exist in the non-finalized `chain`, returns an empty list. +/// If the addresses do not exist in the non-finalized `chain`, returns an empty +/// list. // // TODO: turn the return type into a struct? fn chain_transparent_utxo_changes( @@ -219,6 +250,7 @@ fn chain_transparent_utxo_changes( ( BTreeMap, BTreeSet, + Option, ), BoxError, > @@ -265,7 +297,7 @@ where }; if chain.is_none() { - if finalized_tip_status.is_ok() { + if let Ok(finalized_tip_height) = finalized_tip_status { debug!( ?finalized_tip_status, ?required_min_non_finalized_root, @@ -275,7 +307,11 @@ where finalized chain is consistent, and non-finalized chain is empty", ); - return Ok(Default::default()); + return Ok(( + Default::default(), + Default::default(), + Some(finalized_tip_height), + )); } else { // We can't compensate for inconsistent database queries, // because the non-finalized chain is empty. @@ -320,7 +356,11 @@ where non-finalized blocks have all been finalized, no new UTXO changes", ); - return Ok(Default::default()); + return Ok(( + Default::default(), + Default::default(), + Some(finalized_tip_height), + )); } } @@ -355,8 +395,8 @@ where ); } } - - Ok(chain.partial_transparent_utxo_changes(addresses)) + let (created, spent) = chain.partial_transparent_utxo_changes(addresses); + Ok((created, spent, Some(non_finalized_tip))) } /// Combines the supplied finalized and non-finalized UTXOs, diff --git a/zebra-state/src/service/read/tests/vectors.rs b/zebra-state/src/service/read/tests/vectors.rs index 972af7eb603..457359b55c3 100644 --- a/zebra-state/src/service/read/tests/vectors.rs +++ b/zebra-state/src/service/read/tests/vectors.rs @@ -6,7 +6,6 @@ use zebra_chain::{ block::{Block, Height}, orchard, parameters::Network::*, - sapling, serialization::ZcashDeserializeInto, subtree::{NoteCommitmentSubtree, NoteCommitmentSubtreeData, NoteCommitmentSubtreeIndex}, transaction, @@ -37,7 +36,8 @@ async fn empty_read_state_still_responds_to_requests() -> Result<()> 
{ let transcript = Transcript::from(empty_state_test_cases()); let network = Mainnet; - let (_state, read_state, _latest_chain_tip, _chain_tip_change) = init_test_services(&network); + let (_state, read_state, _latest_chain_tip, _chain_tip_change) = + init_test_services(&network).await; transcript.check(read_state).await?; @@ -112,7 +112,7 @@ async fn test_read_subtrees() -> Result<()> { NoteCommitmentSubtree::new( u16::try_from(index).expect("should fit in u16"), Height(height), - sapling::tree::Node::default(), + sapling_crypto::Node::from_bytes([0; 32]).unwrap(), ) }; @@ -205,7 +205,7 @@ async fn test_read_subtrees() -> Result<()> { /// non-finalized states correctly. #[tokio::test] async fn test_sapling_subtrees() -> Result<()> { - let dummy_subtree_root = sapling::tree::Node::default(); + let dummy_subtree_root = sapling_crypto::Node::from_bytes([0; 32]).unwrap(); // Prepare the finalized state. let db_subtree = NoteCommitmentSubtree::new(0, Height(1), dummy_subtree_root); diff --git a/zebra-state/src/service/read/tree.rs b/zebra-state/src/service/read/tree.rs index 5a1239c53e7..e19248dae17 100644 --- a/zebra-state/src/service/read/tree.rs +++ b/zebra-state/src/service/read/tree.rs @@ -58,7 +58,7 @@ pub fn sapling_subtrees( chain: Option, db: &ZebraDb, range: impl std::ops::RangeBounds + Clone, -) -> BTreeMap> +) -> BTreeMap> where C: AsRef, { diff --git a/zebra-state/src/service/tests.rs b/zebra-state/src/service/tests.rs index ea759b1ab0a..20008d1169c 100644 --- a/zebra-state/src/service/tests.rs +++ b/zebra-state/src/service/tests.rs @@ -4,6 +4,7 @@ use std::{env, sync::Arc, time::Duration}; +use tokio::runtime::Runtime; use tower::{buffer::Buffer, util::BoxService}; use zebra_chain::{ @@ -260,7 +261,7 @@ async fn empty_state_still_responds_to_requests() -> Result<()> { let transcript = Transcript::from(iter); let network = Network::Mainnet; - let state = init_test(&network); + let state = init_test(&network).await; transcript.check(state).await?; @@ -406,9 
+407,10 @@ proptest! { in continuous_empty_blocks_from_test_vectors(), ) { let _init_guard = zebra_test::init(); - - // We're waiting to verify each block here, so we don't need the maximum checkpoint height. - let (mut state_service, _, _, _) = StateService::new(Config::ephemeral(), &network, Height::MAX, 0); + let (mut state_service, _, _, _) = Runtime::new().unwrap().block_on(async { + // We're waiting to verify each block here, so we don't need the maximum checkpoint height. + StateService::new(Config::ephemeral(), &network, Height::MAX, 0).await + }); prop_assert_eq!(state_service.read_service.db.finalized_value_pool(), ValueBalance::zero()); prop_assert_eq!( @@ -499,8 +501,10 @@ proptest! { ) { let _init_guard = zebra_test::init(); - // We're waiting to verify each block here, so we don't need the maximum checkpoint height. - let (mut state_service, _read_only_state_service, latest_chain_tip, mut chain_tip_change) = StateService::new(Config::ephemeral(), &network, Height::MAX, 0); + let (mut state_service, _read_only_state_service, latest_chain_tip, mut chain_tip_change) = Runtime::new().unwrap().block_on(async { + // We're waiting to verify each block here, so we don't need the maximum checkpoint height. 
+ StateService::new(Config::ephemeral(), &network, Height::MAX, 0).await + }); prop_assert_eq!(latest_chain_tip.best_tip_height(), None); prop_assert_eq!(chain_tip_change.last_tip_change(), None); diff --git a/zebra-state/src/service/write.rs b/zebra-state/src/service/write.rs index 654b9957e1b..9fa8da5ec0a 100644 --- a/zebra-state/src/service/write.rs +++ b/zebra-state/src/service/write.rs @@ -54,13 +54,18 @@ pub(crate) fn validate_and_commit_non_finalized( non_finalized_state: &mut NonFinalizedState, prepared: SemanticallyVerifiedBlock, ) -> Result<(), CommitSemanticallyVerifiedError> { - check::initial_contextual_validity(finalized_state, non_finalized_state, &prepared)?; + check::initial_contextual_validity(finalized_state, non_finalized_state, &prepared) + .map_err(Box::new)?; let parent_hash = prepared.block.header.previous_block_hash; if finalized_state.finalized_tip_hash() == parent_hash { - non_finalized_state.commit_new_chain(prepared, finalized_state)?; + non_finalized_state + .commit_new_chain(prepared, finalized_state) + .map_err(Box::new)?; } else { - non_finalized_state.commit_block(prepared, finalized_state)?; + non_finalized_state + .commit_block(prepared, finalized_state) + .map_err(Box::new)?; } Ok(()) @@ -185,6 +190,7 @@ impl BlockWriteSender { non_finalized_state: NonFinalizedState, chain_tip_sender: ChainTipSender, non_finalized_state_sender: watch::Sender, + should_use_finalized_block_write_sender: bool, ) -> ( Self, tokio::sync::mpsc::UnboundedReceiver, @@ -218,7 +224,8 @@ impl BlockWriteSender { ( Self { non_finalized: Some(non_finalized_block_write_sender), - finalized: Some(finalized_block_write_sender), + finalized: Some(finalized_block_write_sender) + .filter(|_| should_use_finalized_block_write_sender), }, invalid_block_write_reset_receiver, Some(Arc::new(task)), diff --git a/zebra-state/tests/basic.rs b/zebra-state/tests/basic.rs index 01b90a11e45..0988321edfc 100644 --- a/zebra-state/tests/basic.rs +++ b/zebra-state/tests/basic.rs @@ 
-81,7 +81,8 @@ async fn check_transcripts(network: Network) -> Result<(), Report> { for transcript_data in net_data { // We're not verifying UTXOs here. - let (service, _, _, _) = zebra_state::init(Config::ephemeral(), &network, Height::MAX, 0); + let (service, _, _, _) = + zebra_state::init(Config::ephemeral(), &network, Height::MAX, 0).await; let transcript = Transcript::from(transcript_data.iter().cloned()); /// SPANDOC: check the on disk service against the transcript transcript.check(service).await?; diff --git a/zebra-test/Cargo.toml b/zebra-test/Cargo.toml index 32cd9a6b536..497fd2d884b 100644 --- a/zebra-test/Cargo.toml +++ b/zebra-test/Cargo.toml @@ -30,9 +30,6 @@ tower = { workspace = true, features = ["util"] } futures = { workspace = true } color-eyre = { workspace = true } -# This is a transitive dependency via color-eyre. -# Enable a feature that makes tinyvec compile much faster. -tinyvec = { workspace = true, features = ["rustc_1_55"] } humantime = { workspace = true } owo-colors = { workspace = true } @@ -45,3 +42,6 @@ tracing = { workspace = true } [dev-dependencies] tempfile = { workspace = true } + +[lints] +workspace = true diff --git a/zebra-utils/Cargo.toml b/zebra-utils/Cargo.toml index e45221d3d7a..c746bf0b6c5 100644 --- a/zebra-utils/Cargo.toml +++ b/zebra-utils/Cargo.toml @@ -67,9 +67,6 @@ openapi-generator = [ [dependencies] color-eyre = { workspace = true } -# This is a transitive dependency via color-eyre. -# Enable a feature that makes tinyvec compile much faster. 
-tinyvec = { workspace = true, features = ["rustc_1_55"] } structopt = { workspace = true } hex = { workspace = true } @@ -95,8 +92,6 @@ reqwest = { workspace = true, features = ["rustls-tls"], optional = true } # These crates are needed for the zebra-checkpoints and search-issue-refs binaries tokio = { workspace = true, features = ["full"], optional = true } -jsonrpc = { workspace = true, optional = true } - zcash_primitives = { workspace = true, optional = true } zcash_protocol.workspace = true @@ -108,3 +103,5 @@ serde_yml = { workspace = true, optional = true } serde = { workspace = true, features = ["serde_derive"], optional = true } indexmap = { workspace = true } +[lints] +workspace = true diff --git a/zebra-utils/README.md b/zebra-utils/README.md index 723cba91d93..545288ac555 100644 --- a/zebra-utils/README.md +++ b/zebra-utils/README.md @@ -27,7 +27,7 @@ To find the latest checkpoints on the `main` branch: 2. From the list on the left, go to the `Integration tests` and find the `Run checkpoints-mainnet test`, then click in the `Result of checkpoints-mainnet` step. 3. Scroll down until you see the list of checkpoints. -4. Add those checkpoints to the end of `zebra-consensus/src/checkpoint/main-checkpoints.txt` +4. Add those checkpoints to the end of `zebra-chain/src/parameters/checkpoint/main-checkpoints.txt` 5. Repeat steps 2 to 4 for `Generate checkpoints testnet` 6. 
Open a pull request at https://github.com/ZcashFoundation/zebra/pulls @@ -54,8 +54,8 @@ cargo install --locked --features zebra-checkpoints --git https://github.com/Zca You can update the checkpoints using these commands: ```sh -zebra-checkpoints --last-checkpoint $(tail -1 zebra-consensus/src/checkpoint/main-checkpoints.txt | cut -d" " -f1) | tee --append zebra-consensus/src/checkpoint/main-checkpoints.txt & -zebra-checkpoints --last-checkpoint $(tail -1 zebra-consensus/src/checkpoint/test-checkpoints.txt | cut -d" " -f1) -- -testnet | tee --append zebra-consensus/src/checkpoint/test-checkpoints.txt & +zebra-checkpoints --last-checkpoint $(tail -1 zebra-chain/src/parameters/checkpoint/main-checkpoints.txt | cut -d" " -f1) | tee --append zebra-chain/src/parameters/checkpoint/main-checkpoints.txt & +zebra-checkpoints --last-checkpoint $(tail -1 zebra-chain/src/parameters/checkpoint/test-checkpoints.txt | cut -d" " -f1) -- -testnet | tee --append zebra-chain/src/parameters/checkpoint/test-checkpoints.txt & wait ``` @@ -74,7 +74,7 @@ You can see all the `zebra-checkpoints` options using: target/release/zebra-checkpoints --help ``` -For more details about checkpoint lists, see the [`zebra-checkpoints` README.](https://github.com/ZcashFoundation/zebra/tree/main/zebra-consensus/src/checkpoint/README.md) +For more details about checkpoint lists, see the [`zebra-checkpoints` README.](https://github.com/ZcashFoundation/zebra/tree/main/zebra-chain/src/parameters/checkpoint/README.md) #### Checkpoint Generation for Testnet diff --git a/zebra-utils/zcash-rpc-diff b/zebra-utils/zcash-rpc-diff index a2f2fe5df73..ff428bbb0a0 100755 --- a/zebra-utils/zcash-rpc-diff +++ b/zebra-utils/zcash-rpc-diff @@ -112,11 +112,11 @@ echo "$@" echo echo "Querying $ZEBRAD $ZEBRAD_NET chain at height >=$ZEBRAD_HEIGHT..." 
-time $ZCASH_CLI $ZEBRAD_EXTRA_ARGS -rpcport="$ZEBRAD_RPC_PORT" "$@" | jq -S > "$ZEBRAD_RESPONSE" +time $ZCASH_CLI $ZEBRAD_EXTRA_ARGS -rpcport="$ZEBRAD_RPC_PORT" "$@" | $JQ -S > "$ZEBRAD_RESPONSE" echo echo "Querying $ZCASHD $ZCASHD_NET chain at height >=$ZCASHD_HEIGHT..." -time $ZCASH_CLI $ZCASHD_EXTRA_ARGS "$@" | jq -S > "$ZCASHD_RESPONSE" +time $ZCASH_CLI $ZCASHD_EXTRA_ARGS "$@" | $JQ -S > "$ZCASHD_RESPONSE" echo echo @@ -214,8 +214,10 @@ if [ "$1" == "getaddressbalance" ]; then ZEBRAD_SUM_RESPONSE="$ZCASH_RPC_TMP_DIR/$ZEBRAD_NAME-$ZEBRAD_NET-$ZEBRAD_HEIGHT-getaddressutxos-sum.txt" ZCASHD_SUM_RESPONSE="$ZCASH_RPC_TMP_DIR/$ZCASHD_NAME-$ZCASHD_NET-$ZCASHD_HEIGHT-getaddressutxos-sum.txt" - cat "$ZEBRAD_RESPONSE" | $JQ 'map(.satoshis) | add // 0' > "$ZEBRAD_SUM_RESPONSE" - cat "$ZCASHD_RESPONSE" | $JQ 'map(.satoshis) | add // 0' > "$ZCASHD_SUM_RESPONSE" + # getaddressutxos returns either an array of UTXOs, or an object with an "utxos" field containing that array, + # depending on the `chainInfo` parameter. + cat "$ZEBRAD_RESPONSE" | $JQ 'if type == "array" then . else .utxos end | map(.satoshis) | add // 0' > "$ZEBRAD_SUM_RESPONSE" + cat "$ZCASHD_RESPONSE" | $JQ 'if type == "array" then . else .utxos end | map(.satoshis) | add // 0' > "$ZCASHD_SUM_RESPONSE" echo diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index 5bed7c99f74..56671a5881e 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -185,9 +185,6 @@ tower = { workspace = true, features = ["hedge", "limit"] } pin-project = { workspace = true } color-eyre = { workspace = true, features = ["issue-url"] } -# This is a transitive dependency via color-eyre. -# Enable a feature that makes tinyvec compile much faster. 
-tinyvec = { workspace = true, features = ["rustc_1_55"] } thiserror = { workspace = true } @@ -306,5 +303,15 @@ zebra-test = { path = "../zebra-test", version = "1.0.1" } # zebra-utils { path = "../zebra-utils", artifact = "bin:zebra-checkpoints" } zebra-utils = { path = "../zebra-utils", version = "2.0.0" } -[lints.rust] -unexpected_cfgs = { level = "warn", check-cfg = ['cfg(tokio_unstable)'] } +[package.metadata.cargo-udeps.ignore] +# These dependencies are false positives - they are actually used +normal = [ + "console-subscriber", # Used behind the tokio-console feature flag + "zebra-utils", # Used for the zebra-checkpoints feature +] +development = [ + "zebra-utils", # Required workaround to build zebra-checkpoints binary for tests (see zebra-utils/tests/build_utils_for_zebrad_tests.rs) +] + +[lints] +workspace = true diff --git a/zebrad/src/commands/copy_state.rs b/zebrad/src/commands/copy_state.rs index f320313257a..799d113a1b5 100644 --- a/zebrad/src/commands/copy_state.rs +++ b/zebrad/src/commands/copy_state.rs @@ -145,7 +145,7 @@ impl CopyStateCmd { _target_read_only_state_service, _target_latest_chain_tip, _target_chain_tip_change, - ) = new_zs::spawn_init(target_config.clone(), network, Height::MAX, 0).await?; + ) = new_zs::init(target_config.clone(), network, Height::MAX, 0).await; let elapsed = target_start_time.elapsed(); info!(?elapsed, "finished initializing target state service"); diff --git a/zebrad/src/commands/start.rs b/zebrad/src/commands/start.rs index f2c6b7d1384..8f3f4015b93 100644 --- a/zebrad/src/commands/start.rs +++ b/zebrad/src/commands/start.rs @@ -83,7 +83,7 @@ use tower::{builder::ServiceBuilder, util::BoxService, ServiceExt}; use tracing_futures::Instrument; use zebra_chain::block::genesis::regtest_genesis_block; -use zebra_consensus::{router::BackgroundTaskHandles, ParameterCheckpoint}; +use zebra_consensus::router::BackgroundTaskHandles; use zebra_rpc::{methods::RpcImpl, server::RpcServer, SubmitBlockChannel}; use crate::{ @@ 
-164,14 +164,14 @@ impl StartCmd { info!("opening database, this may take a few minutes"); let (state_service, read_only_state_service, latest_chain_tip, chain_tip_change) = - zebra_state::spawn_init( + zebra_state::init( config.state.clone(), &config.network.network, max_checkpoint_height, config.sync.checkpoint_verify_concurrency_limit * (VERIFICATION_PIPELINE_SCALING_MULTIPLIER + 1), ) - .await?; + .await; info!("logging database metrics on startup"); read_only_state_service.log_db_metrics(); diff --git a/zebrad/src/components/inbound/downloads.rs b/zebrad/src/components/inbound/downloads.rs index 42214048c4c..a57bd3b0067 100644 --- a/zebrad/src/components/inbound/downloads.rs +++ b/zebrad/src/components/inbound/downloads.rs @@ -64,7 +64,7 @@ pub enum DownloadAction { /// The queue is at capacity, so this request was ignored. /// /// The sync service should discover this block later, when we are closer - /// to the tip. The queue's capacity is [`Downloads.full_verify_concurrency_limit`]. + /// to the tip. The queue's capacity is [`Downloads::full_verify_concurrency_limit`]. FullQueue, } diff --git a/zebrad/src/components/inbound/tests/fake_peer_set.rs b/zebrad/src/components/inbound/tests/fake_peer_set.rs index ffa2957aa5e..01cd9139b1f 100644 --- a/zebrad/src/components/inbound/tests/fake_peer_set.rs +++ b/zebrad/src/components/inbound/tests/fake_peer_set.rs @@ -779,7 +779,7 @@ async fn caches_getaddr_response() { // UTXO verification doesn't matter for these tests. let (state, _read_only_state_service, latest_chain_tip, _chain_tip_change) = - zebra_state::init(state_config.clone(), &network, Height::MAX, 0); + zebra_state::init(state_config.clone(), &network, Height::MAX, 0).await; let state_service = ServiceBuilder::new().buffer(1).service(state); @@ -893,7 +893,7 @@ async fn setup( // UTXO verification doesn't matter for these tests. 
let (state, _read_only_state_service, latest_chain_tip, mut chain_tip_change) = - zebra_state::init(state_config.clone(), &network, Height::MAX, 0); + zebra_state::init(state_config.clone(), &network, Height::MAX, 0).await; let mut state_service = ServiceBuilder::new().buffer(1).service(state); diff --git a/zebrad/src/components/inbound/tests/real_peer_set.rs b/zebrad/src/components/inbound/tests/real_peer_set.rs index a4677df5218..89320ba4845 100644 --- a/zebrad/src/components/inbound/tests/real_peer_set.rs +++ b/zebrad/src/components/inbound/tests/real_peer_set.rs @@ -644,7 +644,7 @@ async fn setup( // UTXO verification doesn't matter for these tests. let state_config = StateConfig::ephemeral(); let (state_service, _read_only_state_service, latest_chain_tip, chain_tip_change) = - zebra_state::init(state_config, &network, Height::MAX, 0); + zebra_state::init(state_config, &network, Height::MAX, 0).await; let state_service = ServiceBuilder::new().buffer(10).service(state_service); // Network @@ -838,7 +838,7 @@ mod submitblock_test { // State let state_config = StateConfig::ephemeral(); let (_state_service, _read_only_state_service, latest_chain_tip, chain_tip_change) = - zebra_state::init(state_config, &Network::Mainnet, Height::MAX, 0); + zebra_state::init(state_config, &Network::Mainnet, Height::MAX, 0).await; let config_listen_addr = "127.0.0.1:0".parse().unwrap(); diff --git a/zebrad/src/components/mempool.rs b/zebrad/src/components/mempool.rs index 3f0562cf6b4..3879b85b2bd 100644 --- a/zebrad/src/components/mempool.rs +++ b/zebrad/src/components/mempool.rs @@ -891,6 +891,34 @@ impl Service for Mempool { // all the work for this request is done in poll_ready async move { Ok(Response::CheckedForVerifiedTransactions) }.boxed() } + + // Summary statistics for the mempool: count, total size, and memory usage. 
+ // + // Used by the `getmempoolinfo` RPC method + Request::QueueStats => { + trace!(?req, "got mempool request"); + + let size = storage.transaction_count(); + + let bytes = storage.total_serialized_size(); + + let usage = bytes; // TODO: Placeholder, should be fixed later + + // TODO: Set to Some(true) on regtest once network info is available. + let fully_notified = None; + + trace!(size, bytes, usage, "answered mempool request"); + + async move { + Ok(Response::QueueStats { + size, + bytes, + usage, + fully_notified, + }) + } + .boxed() + } }, ActiveState::Disabled => { // TODO: add the name of the request, but not the content, @@ -941,6 +969,14 @@ impl Service for Mempool { // all the work for this request is done in poll_ready Response::CheckedForVerifiedTransactions } + + // Return empty mempool stats + Request::QueueStats => Response::QueueStats { + size: 0, + bytes: 0, + usage: 0, + fully_notified: None, + }, }; async move { Ok(resp) }.boxed() diff --git a/zebrad/src/components/mempool/tests/vector.rs b/zebrad/src/components/mempool/tests/vector.rs index eba39d28423..e3940e3262d 100644 --- a/zebrad/src/components/mempool/tests/vector.rs +++ b/zebrad/src/components/mempool/tests/vector.rs @@ -177,6 +177,41 @@ async fn mempool_service_basic_single() -> Result<(), Report> { assert!(queued_responses[0].is_ok()); assert_eq!(service.tx_downloads().in_flight(), 1); + // Test `Request::QueueStats` + let response = service + .ready() + .await + .unwrap() + .call(Request::QueueStats) + .await + .unwrap(); + + let (actual_size, actual_bytes, actual_usage) = match response { + Response::QueueStats { + size, + bytes, + usage, + fully_notified: None, + } => (size, bytes, usage), + _ => unreachable!("expected QueueStats response"), + }; + + // Expected values based on storage contents + let expected_size = service.storage().transaction_count(); + let expected_bytes: usize = service + .storage() + .transactions() + .values() + .map(|tx| tx.transaction.size) + .sum(); + + 
// TODO: Derive memory usage when available + let expected_usage = expected_bytes; + + assert_eq!(actual_size, expected_size, "QueueStats size mismatch"); + assert_eq!(actual_bytes, expected_bytes, "QueueStats bytes mismatch"); + assert_eq!(actual_usage, expected_usage, "QueueStats usage mismatch"); + + Ok(()) } @@ -399,6 +434,33 @@ async fn mempool_service_disabled() -> Result<(), Report> { MempoolError::Disabled ); + + // Test if mempool responds to QueueStats request correctly when disabled + let response = service + .ready() + .await + .unwrap() + .call(Request::QueueStats) + .await + .unwrap(); + + let (size, bytes, usage, fully_notified) = match response { + Response::QueueStats { + size, + bytes, + usage, + fully_notified, + } => (size, bytes, usage, fully_notified), + _ => unreachable!("expected QueueStats response"), + }; + + assert_eq!(size, 0, "size should be zero when mempool is disabled"); + assert_eq!(bytes, 0, "bytes should be zero when mempool is disabled"); + assert_eq!(usage, 0, "usage should be zero when mempool is disabled"); + assert_eq!( + fully_notified, None, + "fully_notified should be None when mempool is disabled" + ); + Ok(()) } @@ -1132,7 +1194,7 @@ async fn setup( // UTXO verification doesn't matter here. 
let state_config = StateConfig::ephemeral(); let (state, _read_only_state_service, latest_chain_tip, mut chain_tip_change) = - zebra_state::init(state_config, network, Height::MAX, 0); + zebra_state::init(state_config, network, Height::MAX, 0).await; let mut state_service = ServiceBuilder::new().buffer(10).service(state); let tx_verifier = MockService::build().for_unit_tests(); diff --git a/zebrad/src/components/sync.rs b/zebrad/src/components/sync.rs index 3911056e311..400a65c8749 100644 --- a/zebrad/src/components/sync.rs +++ b/zebrad/src/components/sync.rs @@ -22,7 +22,6 @@ use zebra_chain::{ block::{self, Height, HeightDiff}, chain_tip::ChainTip, }; -use zebra_consensus::ParameterCheckpoint as _; use zebra_network::{self as zn, PeerSocketAddr}; use zebra_state as zs; diff --git a/zebrad/src/components/sync/progress.rs b/zebrad/src/components/sync/progress.rs index d861441b6ea..0131c798d0e 100644 --- a/zebrad/src/components/sync/progress.rs +++ b/zebrad/src/components/sync/progress.rs @@ -16,7 +16,6 @@ use zebra_chain::{ fmt::humantime_seconds, parameters::{Network, NetworkUpgrade, POST_BLOSSOM_POW_TARGET_SPACING}, }; -use zebra_consensus::ParameterCheckpoint as _; use zebra_state::MAX_BLOCK_REORG_HEIGHT; use crate::components::sync::SyncStatus; diff --git a/zebrad/tests/acceptance.rs b/zebrad/tests/acceptance.rs index 9d3b77db90a..3e1b5c27686 100644 --- a/zebrad/tests/acceptance.rs +++ b/zebrad/tests/acceptance.rs @@ -139,6 +139,7 @@ use std::{ collections::HashSet, env, fs, panic, path::PathBuf, + sync::Arc, time::{Duration, Instant}, }; @@ -146,6 +147,7 @@ use color_eyre::{ eyre::{eyre, WrapErr}, Help, }; +use futures::{stream::FuturesUnordered, StreamExt}; use semver::Version; use serde_json::Value; use tower::ServiceExt; @@ -158,12 +160,11 @@ use zebra_chain::{ FatPointerToBftBlock, Height, }, parameters::{ - testnet::ConfiguredActivationHeights, + testnet::{ConfiguredActivationHeights, ConfiguredCheckpoints, RegtestParameters}, Network::{self, *}, 
NetworkUpgrade, }, }; -use zebra_consensus::ParameterCheckpoint; use zebra_node_services::rpc_client::RpcRequestClient; use zebra_rpc::{ client::{ @@ -212,6 +213,8 @@ use common::{ test_type::TestType::{self, *}, }; +use crate::common::regtest::MiningRpcMethods; + /// The maximum amount of time that we allow the creation of a future to block the `tokio` executor. /// /// This should be larger than the amount of time between thread time slices on a busy test VM. @@ -388,12 +391,18 @@ async fn db_init_outside_future_executor() -> Result<()> { let start = Instant::now(); // This test doesn't need UTXOs to be verified efficiently, because it uses an empty state. - let db_init_handle = zebra_state::spawn_init( - config.state.clone(), - &config.network.network, - Height::MAX, - 0, - ); + let db_init_handle = { + let config = config.clone(); + tokio::spawn(async move { + zebra_state::init( + config.state.clone(), + &config.network.network, + Height::MAX, + 0, + ) + .await + }) + }; // it's faster to panic if it takes longer than expected, since the executor // will wait indefinitely for blocking operation to finish once started @@ -725,6 +734,7 @@ fn valid_generated_config(command: &str, expect_stdout_line_contains: &str) -> R } /// Check if the config produced by current zebrad is stored. +#[cfg(not(target_os = "windows"))] #[tracing::instrument] #[allow(clippy::print_stdout)] fn last_config_is_stored() -> Result<()> { @@ -1795,6 +1805,7 @@ fn lwd_integration() -> Result<()> { /// /// This test might work on Windows. #[test] +#[ignore] fn sync_update_mainnet() -> Result<()> { lwd_integration_test(UpdateZebraCachedStateNoRpc) } @@ -2450,6 +2461,7 @@ async fn lwd_rpc_test() -> Result<()> { } #[test] +#[cfg(not(target_os = "windows"))] fn delete_old_databases() -> Result<()> { use std::fs::{canonicalize, create_dir}; @@ -2559,6 +2571,7 @@ async fn get_peer_info() -> Result<()> { /// /// See [`common::get_block_template_rpcs::get_block_template`] for more information. 
#[tokio::test] +#[ignore] async fn rpc_get_block_template() -> Result<()> { common::get_block_template_rpcs::get_block_template::run().await } @@ -2567,6 +2580,7 @@ async fn rpc_get_block_template() -> Result<()> { /// /// See [`common::get_block_template_rpcs::submit_block`] for more information. #[tokio::test] +#[ignore] async fn rpc_submit_block() -> Result<()> { common::get_block_template_rpcs::submit_block::run().await } @@ -2924,7 +2938,7 @@ async fn validate_regtest_genesis_block() { let _init_guard = zebra_test::init(); let network = Network::new_regtest(Default::default()); - let state = zebra_state::init_test(&network); + let state = zebra_state::init_test(&network).await; let ( block_verifier_router, _transaction_verifier, @@ -3301,6 +3315,7 @@ async fn nu6_funding_streams_and_coinbase_balance() -> Result<()> { let base_network_params = testnet::Parameters::build() // Regtest genesis hash .with_genesis_hash("029f11d80ef9765602235e1bc9727e3eb6ba20839319f761fee920d63401e327") + .with_checkpoints(false) .with_target_difficulty_limit(U256::from_big_endian(&[0x0f; 32])) .with_disable_pow(true) .with_slow_start_interval(Height::MIN) @@ -3333,7 +3348,7 @@ async fn nu6_funding_streams_and_coinbase_balance() -> Result<()> { .expect("configured mining address should be valid"); let (state, read_state, latest_chain_tip, _chain_tip_change) = - zebra_state::init_test_services(&network); + zebra_state::init_test_services(&network).await; let ( block_verifier_router, @@ -3874,6 +3889,164 @@ fn check_no_git_dependencies() { } } +#[tokio::test] +async fn restores_non_finalized_state_and_commits_new_blocks() -> Result<()> { + let network = Network::new_regtest(Default::default()); + + let mut config = os_assigned_rpc_port_config(false, &network)?; + config.state.ephemeral = false; + let test_dir = testdir()?.with_config(&mut config)?; + + // Start Zebra and generate some blocks. 
+ + tracing::info!("starting Zebra and generating some blocks"); + let mut child = test_dir.spawn_child(args!["start"])?; + // Avoid dropping the test directory and cleanup of the state cache needed by the next zebrad instance. + let test_dir = child.dir.take().expect("should have test directory"); + let rpc_address = read_listen_addr_from_logs(&mut child, OPENED_RPC_ENDPOINT_MSG)?; + // Wait for Zebra to load its state cache + tokio::time::sleep(Duration::from_secs(5)).await; + let rpc_client = RpcRequestClient::new(rpc_address); + let generated_block_hashes = rpc_client.generate(50).await?; + // Wait for non-finalized backup task to make a second write to the backup cache + tokio::time::sleep(Duration::from_secs(6)).await; + + child.kill(true)?; + // Wait for Zebra to shut down. + tokio::time::sleep(Duration::from_secs(3)).await; + // Prepare checkpoint heights/hashes + let last_hash = *generated_block_hashes + .last() + .expect("should have at least one block hash"); + let configured_checkpoints = ConfiguredCheckpoints::HeightsAndHashes(vec![ + (Height(0), network.genesis_hash()), + (Height(50), last_hash), + ]); + + // Check that Zebra will restore its non-finalized state from backup when the finalized tip is past the + // max checkpoint height and that it can still commit more blocks to its state. 
+ + tracing::info!("restarting Zebra to check that non-finalized state is restored"); + let mut child = test_dir.spawn_child(args!["start"])?; + let test_dir = child.dir.take().expect("should have test directory"); + let rpc_address = read_listen_addr_from_logs(&mut child, OPENED_RPC_ENDPOINT_MSG)?; + let rpc_client = RpcRequestClient::new(rpc_address); + + // Wait for Zebra to load its state cache + tokio::time::sleep(Duration::from_secs(5)).await; + let blockchain_info = rpc_client.blockchain_info().await?; + tracing::info!( + ?blockchain_info, + "got blockchain info after restarting Zebra" + ); + + assert_eq!( + blockchain_info.best_block_hash(), + last_hash, + "tip block hash should match tip hash of previous zebrad instance" + ); + + tracing::info!("checking that Zebra can commit blocks after restoring non-finalized state"); + rpc_client + .generate(10) + .await + .expect("should successfully commit more blocks to the state"); + + tracing::info!("retrieving blocks to be used with configured checkpoints"); + let checkpointed_blocks = { + let mut blocks = Vec::new(); + for height in 1..=50 { + blocks.push( + rpc_client + .get_block(height) + .await + .map_err(|err| eyre!(err))? + .expect("should have block at height"), + ) + } + blocks + }; + + tracing::info!( + "restarting Zebra to check that non-finalized state is _not_ restored when \ + the finalized tip is below the max checkpoint height" + ); + child.kill(true)?; + // Wait for Zebra to shut down. 
+ tokio::time::sleep(Duration::from_secs(3)).await; + + // Check that the non-finalized state is not restored from backup when the finalized tip height is below the + // max checkpoint height and that it can still commit more blocks to its state + + tracing::info!("restarting Zebra with configured checkpoints to check that non-finalized state is not restored"); + let network = Network::new_regtest(RegtestParameters { + checkpoints: Some(configured_checkpoints), + ..Default::default() + }); + let mut config = os_assigned_rpc_port_config(false, &network)?; + config.state.ephemeral = false; + let mut child = test_dir + .with_config(&mut config)? + .spawn_child(args!["start"])?; + let test_dir = child.dir.take().expect("should have test directory"); + let rpc_address = read_listen_addr_from_logs(&mut child, OPENED_RPC_ENDPOINT_MSG)?; + // Wait for Zebra to load its state cache + tokio::time::sleep(Duration::from_secs(5)).await; + let rpc_client = RpcRequestClient::new(rpc_address); + + assert_eq!( + rpc_client.blockchain_info().await?.best_block_hash(), + network.genesis_hash(), + "Zebra should not restore blocks from non-finalized backup if \ + its finalized tip is below the max checkpoint height" + ); + + let mut submit_block_futs: FuturesUnordered<_> = checkpointed_blocks + .into_iter() + .map(Arc::unwrap_or_clone) + .map(|block| rpc_client.submit_block(block)) + .collect(); + + while let Some(result) = submit_block_futs.next().await { + result? + } + + // Commit some blocks to check that Zebra's state will still commit blocks, and generate enough blocks + // for Zebra's finalized tip to pass the max checkpoint height. + + rpc_client + .generate(200) + .await + .expect("should successfully commit more blocks to the state"); + + child.kill(true)?; + + // Check that Zebra can commit blocks to its state when its finalized tip is past the max checkpoint height + // and the non-finalized backup cache is disabled or empty. 
+ + tracing::info!( + "restarting Zebra to check that blocks are committed when the non-finalized state \ + is initially empty and the finalized tip is past the max checkpoint height" + ); + config.state.should_backup_non_finalized_state = false; + let mut child = test_dir + .with_config(&mut config)? + .spawn_child(args!["start"])?; + let rpc_address = read_listen_addr_from_logs(&mut child, OPENED_RPC_ENDPOINT_MSG)?; + let rpc_client = RpcRequestClient::new(rpc_address); + + // Wait for Zebra to load its state cache + tokio::time::sleep(Duration::from_secs(5)).await; + + tracing::info!("checking that Zebra commits blocks with empty non-finalized state"); + rpc_client + .generate(10) + .await + .expect("should successfully commit more blocks to the state"); + + child.kill(true) +} + // /// Check that Zebra will disconnect from misbehaving peers. // #[tokio::test] // #[cfg(not(target_os = "windows"))] diff --git a/zebrad/tests/common/cached_state.rs b/zebrad/tests/common/cached_state.rs index 7aab8ea0c43..c5c9f71bdba 100644 --- a/zebrad/tests/common/cached_state.rs +++ b/zebrad/tests/common/cached_state.rs @@ -26,7 +26,7 @@ use zebra_state::{ChainTipChange, LatestChainTip, MAX_BLOCK_REORG_HEIGHT}; use zebra_test::command::TestChild; use crate::common::{ - launch::spawn_zebrad_for_rpc, + launch::spawn_zebrad_for_rpc_with_opts, sync::{check_sync_logs_until, MempoolBehavior, SYNC_FINISHED_REGEX}, test_type::TestType, }; @@ -73,6 +73,7 @@ pub fn wait_for_state_version_message(zebrad: &mut TestChild) -> Result( required_version: Version, extra_required_log_regexes: impl IntoIterator + std::fmt::Debug, ) -> Result<()> { - if state_version_message.contains("launching upgrade task") { - tracing::info!( - zebrad = ?zebrad.cmd, - %state_version_message, - %required_version, - ?extra_required_log_regexes, - "waiting for zebrad state upgrade..." 
- ); + tracing::info!( + zebrad = ?zebrad.cmd, + %state_version_message, + %required_version, + ?extra_required_log_regexes, + "waiting for zebrad state upgrade..." + ); + if state_version_message.contains("launching upgrade task") { let upgrade_pattern = format!( "marked database format as upgraded.*format_upgrade_version.*=.*{required_version}" ); @@ -109,6 +110,17 @@ pub fn wait_for_state_version_upgrade( ?upgrade_messages, "zebrad state has been upgraded" ); + } else { + let required_logs: Vec = extra_required_log_regexes.into_iter().collect(); + let upgrade_messages = zebrad.expect_stdout_line_matches_all_unordered(&required_logs)?; + tracing::info!( + zebrad = ?zebrad.cmd, + %state_version_message, + %required_version, + ?required_logs, + ?upgrade_messages, + "no zebrad upgrade needed" + ); } Ok(()) @@ -135,7 +147,7 @@ pub async fn start_state_service_with_cache_dir( }; // These tests don't need UTXOs to be verified efficiently, because they use cached states. - Ok(zebra_state::init(config, network, Height::MAX, 0)) + Ok(zebra_state::init(config, network, Height::MAX, 0).await) } /// Loads the chain tip height from the state stored in a specified directory. @@ -213,7 +225,7 @@ pub async fn raw_future_blocks( let should_sync = true; let (zebrad, zebra_rpc_address) = - spawn_zebrad_for_rpc(network.clone(), test_name, test_type, should_sync)? + spawn_zebrad_for_rpc_with_opts(network.clone(), test_name, test_type, should_sync, false)? 
.ok_or_else(|| eyre!("raw_future_blocks requires a cached state"))?; let rpc_address = zebra_rpc_address.expect("test type must have RPC port"); diff --git a/zebrad/tests/common/configs/v1.8.0.toml b/zebrad/tests/common/configs/v1.8.0.toml index 3efdbda4107..b178e9e783e 100644 --- a/zebrad/tests/common/configs/v1.8.0.toml +++ b/zebrad/tests/common/configs/v1.8.0.toml @@ -65,11 +65,11 @@ genesis_hash = "00040fe8ec8471911baa1db1266ea15dd06b4a8a5c453883c000b031973dce08 [network.testnet_parameters.activation_heights] BeforeOverwinter = 1 -Overwinter = 207_500 -Sapling = 280_000 -Blossom = 584_000 -Heartwood = 903_800 -Canopy = 1_028_500 +Overwinter = 1 +Sapling = 1 +Blossom = 1 +Heartwood = 1 +Canopy = 1 NU5 = 1_842_420 [rpc] diff --git a/zebrad/tests/common/configs/v1.9.0.toml b/zebrad/tests/common/configs/v1.9.0.toml index 5825e10c53b..5a512795632 100644 --- a/zebrad/tests/common/configs/v1.9.0.toml +++ b/zebrad/tests/common/configs/v1.9.0.toml @@ -65,11 +65,11 @@ genesis_hash = "00040fe8ec8471911baa1db1266ea15dd06b4a8a5c453883c000b031973dce08 [network.testnet_parameters.activation_heights] BeforeOverwinter = 1 -Overwinter = 207_500 -Sapling = 280_000 -Blossom = 584_000 -Heartwood = 903_800 -Canopy = 1_028_500 +Overwinter = 1 +Sapling = 1 +Blossom = 1 +Heartwood = 1 +Canopy = 1 NU5 = 1_842_420 NU6 = 2_000_000 NU7 = 2_000_001 diff --git a/zebrad/tests/common/configs/v2.3.0.toml b/zebrad/tests/common/configs/v2.3.0.toml index 99bada628ee..193659e5897 100644 --- a/zebrad/tests/common/configs/v2.3.0.toml +++ b/zebrad/tests/common/configs/v2.3.0.toml @@ -65,11 +65,11 @@ genesis_hash = "00040fe8ec8471911baa1db1266ea15dd06b4a8a5c453883c000b031973dce08 [network.testnet_parameters.activation_heights] BeforeOverwinter = 1 -Overwinter = 207_500 -Sapling = 280_000 -Blossom = 584_000 -Heartwood = 903_800 -Canopy = 1_028_500 +Overwinter = 1 +Sapling = 1 +Blossom = 1 +Heartwood = 1 +Canopy = 1 NU5 = 1_842_420 NU6 = 2_000_000 "NU6.1" = 2_000_001 diff --git 
a/zebrad/tests/common/configs/v2.5.0-funding-streams.toml b/zebrad/tests/common/configs/v2.5.0-funding-streams.toml index 5f865d6e592..87e5d7f177f 100644 --- a/zebrad/tests/common/configs/v2.5.0-funding-streams.toml +++ b/zebrad/tests/common/configs/v2.5.0-funding-streams.toml @@ -65,11 +65,11 @@ genesis_hash = "00040fe8ec8471911baa1db1266ea15dd06b4a8a5c453883c000b031973dce08 [network.testnet_parameters.activation_heights] BeforeOverwinter = 1 -Overwinter = 207_500 -Sapling = 280_000 -Blossom = 584_000 -Heartwood = 903_800 -Canopy = 1_028_500 +Overwinter = 1 +Sapling = 1 +Blossom = 1 +Heartwood = 1 +Canopy = 1 NU5 = 1_842_420 NU6 = 2_000_000 "NU6.1" = 2_000_001 diff --git a/zebrad/tests/common/configs/v2.6.0-checkpoints.toml b/zebrad/tests/common/configs/v2.6.0-checkpoints.toml new file mode 100644 index 00000000000..fa4bfc6196e --- /dev/null +++ b/zebrad/tests/common/configs/v2.6.0-checkpoints.toml @@ -0,0 +1,99 @@ +# Default configuration for zebrad. +# +# This file can be used as a skeleton for custom configs. +# +# Unspecified fields use default values. Optional fields are Some(field) if the +# field is present and None if it is absent. +# +# This file is generated as an example using zebrad's current defaults. +# You should set only the config options you want to keep, and delete the rest. +# Only a subset of fields are present in the skeleton, since optional values +# whose default is None are omitted. +# +# The config format (including a complete list of sections and fields) is +# documented here: +# https://docs.rs/zebrad/latest/zebrad/config/struct.ZebradConfig.html +# +# CONFIGURATION SOURCES (in order of precedence, highest to lowest): +# +# 1. 
Environment variables with ZEBRA_ prefix (highest precedence) +# - Format: ZEBRA_SECTION__KEY (double underscore for nested keys) +# - Examples: +# - ZEBRA_NETWORK__NETWORK=Testnet +# - ZEBRA_RPC__LISTEN_ADDR=127.0.0.1:8232 +# - ZEBRA_STATE__CACHE_DIR=/path/to/cache +# - ZEBRA_TRACING__FILTER=debug +# - ZEBRA_METRICS__ENDPOINT_ADDR=0.0.0.0:9999 +# +# 2. Configuration file (TOML format) +# - At the path specified via -c flag, e.g. `zebrad -c myconfig.toml start`, or +# - At the default path in the user's preference directory (platform-dependent, see below) +# +# 3. Hard-coded defaults (lowest precedence) +# +# The user's preference directory and the default path to the `zebrad` config are platform dependent, +# based on `dirs::preference_dir`, see https://docs.rs/dirs/latest/dirs/fn.preference_dir.html : +# +# | Platform | Value | Example | +# | -------- | ------------------------------------- | ---------------------------------------------- | +# | Linux | `$XDG_CONFIG_HOME` or `$HOME/.config` | `/home/alice/.config/zebrad.toml` | +# | macOS | `$HOME/Library/Preferences` | `/Users/Alice/Library/Preferences/zebrad.toml` | +# | Windows | `{FOLDERID_RoamingAppData}` | `C:\Users\Alice\AppData\Local\zebrad.toml` | + +[consensus] +checkpoint_sync = true + +[mempool] +eviction_memory_time = "1h" +tx_cost_limit = 80000000 + +[metrics] + +[mining] +internal_miner = false + +[network] +cache_dir = true +crawl_new_peer_interval = "1m 1s" +initial_mainnet_peers = [ + "dnsseed.z.cash:8233", + "dnsseed.str4d.xyz:8233", + "mainnet.seeder.zfnd.org:8233", + "mainnet.is.yolo.money:8233", +] +initial_testnet_peers = [ + "dnsseed.testnet.z.cash:18233", + "testnet.seeder.zfnd.org:18233", + "testnet.is.yolo.money:18233", +] +listen_addr = "[::]:18233" +max_connections_per_ip = 1 +network = "Testnet" +peerset_initial_target_size = 25 + +[network.testnet_parameters] +checkpoints = true + +[rpc] +cookie_dir = "cache_dir" +debug_force_finished_sync = false +enable_cookie_auth = true 
+parallel_cpu_threads = 0 + +[state] +cache_dir = "cache_dir" +delete_old_database = true +ephemeral = false +should_backup_non_finalized_state = true + +[sync] +checkpoint_verify_concurrency_limit = 1000 +download_concurrency_limit = 50 +full_verify_concurrency_limit = 20 +parallel_cpu_threads = 0 + +[tracing] +buffer_limit = 128000 +force_use_color = false +use_color = true +use_journald = false diff --git a/zebrad/tests/common/configs/v2.6.0.toml b/zebrad/tests/common/configs/v2.6.0.toml new file mode 100644 index 00000000000..a2eb7a7e25b --- /dev/null +++ b/zebrad/tests/common/configs/v2.6.0.toml @@ -0,0 +1,96 @@ +# Default configuration for zebrad. +# +# This file can be used as a skeleton for custom configs. +# +# Unspecified fields use default values. Optional fields are Some(field) if the +# field is present and None if it is absent. +# +# This file is generated as an example using zebrad's current defaults. +# You should set only the config options you want to keep, and delete the rest. +# Only a subset of fields are present in the skeleton, since optional values +# whose default is None are omitted. +# +# The config format (including a complete list of sections and fields) is +# documented here: +# https://docs.rs/zebrad/latest/zebrad/config/struct.ZebradConfig.html +# +# CONFIGURATION SOURCES (in order of precedence, highest to lowest): +# +# 1. Environment variables with ZEBRA_ prefix (highest precedence) +# - Format: ZEBRA_SECTION__KEY (double underscore for nested keys) +# - Examples: +# - ZEBRA_NETWORK__NETWORK=Testnet +# - ZEBRA_RPC__LISTEN_ADDR=127.0.0.1:8232 +# - ZEBRA_STATE__CACHE_DIR=/path/to/cache +# - ZEBRA_TRACING__FILTER=debug +# - ZEBRA_METRICS__ENDPOINT_ADDR=0.0.0.0:9999 +# +# 2. Configuration file (TOML format) +# - At the path specified via -c flag, e.g. `zebrad -c myconfig.toml start`, or +# - At the default path in the user's preference directory (platform-dependent, see below) +# +# 3. 
Hard-coded defaults (lowest precedence) +# +# The user's preference directory and the default path to the `zebrad` config are platform dependent, +# based on `dirs::preference_dir`, see https://docs.rs/dirs/latest/dirs/fn.preference_dir.html : +# +# | Platform | Value | Example | +# | -------- | ------------------------------------- | ---------------------------------------------- | +# | Linux | `$XDG_CONFIG_HOME` or `$HOME/.config` | `/home/alice/.config/zebrad.toml` | +# | macOS | `$HOME/Library/Preferences` | `/Users/Alice/Library/Preferences/zebrad.toml` | +# | Windows | `{FOLDERID_RoamingAppData}` | `C:\Users\Alice\AppData\Local\zebrad.toml` | + +[consensus] +checkpoint_sync = true + +[mempool] +eviction_memory_time = "1h" +tx_cost_limit = 80000000 + +[metrics] + +[mining] +internal_miner = false + +[network] +cache_dir = true +crawl_new_peer_interval = "1m 1s" +initial_mainnet_peers = [ + "dnsseed.z.cash:8233", + "dnsseed.str4d.xyz:8233", + "mainnet.seeder.zfnd.org:8233", + "mainnet.is.yolo.money:8233", +] +initial_testnet_peers = [ + "dnsseed.testnet.z.cash:18233", + "testnet.seeder.zfnd.org:18233", + "testnet.is.yolo.money:18233", +] +listen_addr = "[::]:8233" +max_connections_per_ip = 1 +network = "Mainnet" +peerset_initial_target_size = 25 + +[rpc] +cookie_dir = "cache_dir" +debug_force_finished_sync = false +enable_cookie_auth = true +parallel_cpu_threads = 0 + +[state] +cache_dir = "cache_dir" +delete_old_database = true +ephemeral = false +should_backup_non_finalized_state = true + +[sync] +checkpoint_verify_concurrency_limit = 1000 +download_concurrency_limit = 50 +full_verify_concurrency_limit = 20 +parallel_cpu_threads = 0 + +[tracing] +buffer_limit = 128000 +force_use_color = false +use_color = true +use_journald = false diff --git a/zebrad/tests/common/launch.rs b/zebrad/tests/common/launch.rs index 85f5dbc82c5..68bdccff045 100644 --- a/zebrad/tests/common/launch.rs +++ b/zebrad/tests/common/launch.rs @@ -207,12 +207,28 @@ where } } +/// Spawns a 
zebrad instance on `network` to test lightwalletd with `test_type`. +/// +/// See [`spawn_zebrad_for_rpc_with_opts`] for more details. +#[tracing::instrument] +pub fn spawn_zebrad_for_rpc + Debug>( + network: Network, + test_name: S, + test_type: TestType, + use_internet_connection: bool, +) -> Result, Option)>> { + spawn_zebrad_for_rpc_with_opts(network, test_name, test_type, use_internet_connection, true) +} + /// Spawns a zebrad instance on `network` to test lightwalletd with `test_type`. /// /// If `use_internet_connection` is `false` then spawn, but without any peers. /// This prevents it from downloading blocks. Instead, set `ZEBRA_STATE__CACHE_DIR` /// to provide an initial state to the zebrad instance. /// +/// If `use_non_finalized_backup` is `false` then configure the spawned zebrad instance +/// not to cache a backup of its non-finalized state on disk. +/// /// Returns: /// - `Ok(Some(zebrad, zebra_rpc_address))` on success, /// - `Ok(None)` if the test doesn't have the required network or cached state, and @@ -220,11 +236,12 @@ where /// /// `zebra_rpc_address` is `None` if the test type doesn't need an RPC port. 
#[tracing::instrument] -pub fn spawn_zebrad_for_rpc + Debug>( +pub fn spawn_zebrad_for_rpc_with_opts + Debug>( network: Network, test_name: S, test_type: TestType, use_internet_connection: bool, + use_non_finalized_backup: bool, ) -> Result, Option)>> { let test_name = test_name.as_ref(); @@ -234,9 +251,10 @@ pub fn spawn_zebrad_for_rpc + Debug>( } // Get the zebrad config - let config = test_type + let mut config = test_type .zebrad_config(test_name, use_internet_connection, None, &network) .expect("already checked config")?; + config.state.should_backup_non_finalized_state = use_non_finalized_backup; let (zebrad_failure_messages, zebrad_ignore_messages) = test_type.zebrad_failure_messages(); diff --git a/zebrad/tests/common/regtest.rs b/zebrad/tests/common/regtest.rs index 758ba6df412..82da1429745 100644 --- a/zebrad/tests/common/regtest.rs +++ b/zebrad/tests/common/regtest.rs @@ -9,14 +9,18 @@ use color_eyre::eyre::{eyre, Context, Result}; use tower::BoxError; use zebra_chain::{ - block::{Block, Height}, + block::{self, Block, Height}, parameters::{testnet::ConfiguredActivationHeights, Network}, primitives::byte_array::increment_big_endian, serialization::{ZcashDeserializeInto, ZcashSerialize}, }; use zebra_node_services::rpc_client::RpcRequestClient; use zebra_rpc::{ - client::{BlockTemplateResponse, BlockTemplateTimeSource, HexData, SubmitBlockResponse}, + client::{ + BlockTemplateResponse, BlockTemplateTimeSource, GetBlockchainInfoResponse, HexData, + SubmitBlockResponse, + }, + methods::GetBlockHash, proposal_block_from_template, server::{self, OPENED_RPC_ENDPOINT_MSG}, }; @@ -81,6 +85,8 @@ pub trait MiningRpcMethods { async fn block_from_template(&self, net: &Network) -> Result<(Block, Height)>; async fn submit_block(&self, block: Block) -> Result<()>; async fn get_block(&self, height: i32) -> Result>, BoxError>; + async fn generate(&self, num_blocks: u32) -> Result>; + async fn blockchain_info(&self) -> Result; } impl MiningRpcMethods for RpcRequestClient 
{ @@ -134,4 +140,17 @@ impl MiningRpcMethods for RpcRequestClient { Err(err) => Err(err), } } + + async fn generate(&self, num_blocks: u32) -> Result> { + self.json_result_from_call("generate", format!("[{num_blocks}]")) + .await + .map(|response: Vec| response.into_iter().map(|rsp| rsp.hash()).collect()) + .map_err(|err| eyre!(err)) + } + + async fn blockchain_info(&self) -> Result { + self.json_result_from_call("getblockchaininfo", "[]") + .await + .map_err(|err| eyre!(err)) + } } diff --git a/zebrad/tests/end_of_support.rs b/zebrad/tests/end_of_support.rs index 9b28d63ee91..dbc234921fe 100644 --- a/zebrad/tests/end_of_support.rs +++ b/zebrad/tests/end_of_support.rs @@ -5,7 +5,6 @@ use std::time::Duration; use color_eyre::eyre::Result; use zebra_chain::{block::Height, chain_tip::mock::MockChainTip, parameters::Network}; -use zebra_consensus::ParameterCheckpoint as _; use zebrad::components::sync::end_of_support::{self, EOS_PANIC_AFTER, ESTIMATED_RELEASE_HEIGHT}; // Estimated blocks per day with the current 75 seconds block spacing.